#include "../be_t.h"
#include "../beabi.h"
+#include "../benode_t.h"
+#include "../besched_t.h"
#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#undef is_NoMem
#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
-static int be_is_NoReg(be_abi_irg_t *babi, const ir_node *irn) {
- if (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_XXX]) == irn ||
- be_abi_get_callee_save_irn(babi, &ia32_fp_regs[REG_XXXX]) == irn)
- {
- return 1;
- }
+typedef int is_op_func_t(const ir_node *n);
- return 0;
+/**
+ * Checks if a node represents the NOREG value, i.e. whether it is the
+ * callee-save irn of the GP NOREG register or of the FP NOREG register
+ * (XMM_NOREG when SSE2 is used, VFP_NOREG otherwise).
+ */
+static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
+	be_abi_irg_t *babi = cg->birg->abi;
+	const arch_register_t *fp_noreg = USE_SSE2(cg) ?
+		&ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG];
+
+	return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) ||
+		(be_abi_get_callee_save_irn(babi, fp_noreg) == irn);
}
+
+
+/*************************************************
+ * _____ _ _
+ * / ____| | | | |
+ * | | ___ _ __ ___| |_ __ _ _ __ | |_ ___
+ * | | / _ \| '_ \/ __| __/ _` | '_ \| __/ __|
+ * | |___| (_) | | | \__ \ || (_| | | | | |_\__ \
+ * \_____\___/|_| |_|___/\__\__,_|_| |_|\__|___/
+ *
+ *************************************************/
+
/**
* creates a unique ident by adding a number to a tag
*
ir_graph *irg = env->irg;
ir_node *block = env->block;
- cnst = new_rd_ia32_Const(dbg, irg, block, mode);
+ if (mode_is_float(mode)) {
+ if (USE_SSE2(env->cg))
+ cnst = new_rd_ia32_fConst(dbg, irg, block, mode);
+ else
+ cnst = new_rd_ia32_vfConst(dbg, irg, block, mode);
+ }
+ else {
+ cnst = new_rd_ia32_Const(dbg, irg, block, mode);
+ }
set_ia32_Const_attr(cnst, env->irn);
return cnst;
}
ir_mode *mode = env->mode;
if (mode_is_float(mode)) {
+ if (! USE_SSE2(env->cg)) {
+ cnst_classify_t clss = classify_Const(node);
+
+ if (clss == CNST_NULL)
+ return new_rd_ia32_vfldz(dbg, irg, block, mode);
+ else if (clss == CNST_ONE)
+ return new_rd_ia32_vfld1(dbg, irg, block, mode);
+ }
sym.entity_p = get_entity_for_tv(env->cg, node);
cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
/**
* Transforms (all) Const's into ia32_Const and places them in the
- * block where they are used (or in the cfg-pred Block in case of Phi's)
+ * block where they are used (or in the cfg-pred Block in case of Phi's).
+ * Additionally all reference nodes are changed into mode_Is nodes.
*/
-void ia32_place_consts(ir_node *irn, void *env) {
+void ia32_place_consts_set_modes(ir_node *irn, void *env) {
ia32_code_gen_t *cg = env;
ia32_transform_env_t tenv;
ir_mode *mode;
mode = get_irn_mode(irn);
+ /* transform all reference nodes into mode_Is nodes */
+ if (mode_is_reference(mode)) {
+ mode = mode_Is;
+ set_irn_mode(irn, mode);
+ }
+
tenv.block = get_nodes_block(irn);
tenv.cg = cg;
tenv.irg = cg->irg;
- tenv.mod = cg->mod;
+ DEBUG_ONLY(tenv.mod = cg->mod;)
/* Loop over all predecessors and check for Sym/Const nodes */
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
}
+
+/********************************************************************************************************
+ * _____ _ _ ____ _ _ _ _ _
+ * | __ \ | | | | / __ \ | | (_) (_) | | (_)
+ * | |__) |__ ___ _ __ | |__ ___ | | ___ | | | |_ __ | |_ _ _ __ ___ _ ______ _| |_ _ ___ _ __
+ * | ___/ _ \/ _ \ '_ \| '_ \ / _ \| |/ _ \ | | | | '_ \| __| | '_ ` _ \| |_ / _` | __| |/ _ \| '_ \
+ * | | | __/ __/ |_) | | | | (_) | | __/ | |__| | |_) | |_| | | | | | | |/ / (_| | |_| | (_) | | | |
+ * |_| \___|\___| .__/|_| |_|\___/|_|\___| \____/| .__/ \__|_|_| |_| |_|_/___\__,_|\__|_|\___/|_| |_|
+ * | | | |
+ * |_| |_|
+ ********************************************************************************************************/
+
+/**
+ * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
+ */
+
+/**
+ * Returns non-zero if both nodes carry the same ia32 immediate constant.
+ * NOTE(review): compares the idents by identity -- assumes idents are unique.
+ */
+static int ia32_cnst_compare(ir_node *n1, ir_node *n2) {
+	return get_ia32_id_cnst(n1) == get_ia32_id_cnst(n2);
+}
+
+/**
+ * Checks for potential CJmp/CJmpAM optimization candidates, i.e. looks
+ * for a schedule predecessor of irn that satisfies is_op_func.
+ *
+ * @param irn         the node whose schedule predecessor is inspected
+ * @param is_op_func  predicate the candidate must satisfy
+ * @return the candidate node or NULL if none was found
+ */
+static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
+	ir_node *cand = NULL;
+	ir_node *prev = sched_prev(irn);
+
+	/* irn is the first node in its block: follow the control flow */
+	/* backwards, but only if the block has a single predecessor   */
+	if (is_Block(prev)) {
+		if (get_Block_n_cfgpreds(prev) == 1)
+			prev = get_Block_cfgpred(prev, 0);
+		else
+			prev = NULL;
+	}
+
+	/* The predecessor must be a ProjX. */
+	if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
+		prev = get_Proj_pred(prev);
+
+		if (is_op_func(prev))
+			cand = prev;
+	}
+
+	return cand;
+}
+
+/** Returns non-zero if irn can replace a TestJmp (a TestJmp or an And). */
+static int is_TestJmp_cand(const ir_node *irn) {
+	return is_ia32_TestJmp(irn) || is_ia32_And(irn);
+}
+
+/**
+ * Checks if two consecutive arguments of cand matches
+ * the two arguments of irn (TestJmp).
+ *
+ * @return non-zero if a consecutive argument pair of cand matches
+ *         (in1, in2) of irn AND both carry the same immediate
+ */
+static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
+	ir_node *in1 = get_irn_n(irn, 0);
+	ir_node *in2 = get_irn_n(irn, 1);
+	int i, n = get_irn_arity(cand);
+	int same_args = 0;
+
+	/* look for the pair (in1, in2) at consecutive operand positions of cand */
+	for (i = 0; i < n - 1; i++) {
+		if (get_irn_n(cand, i) == in1 &&
+			get_irn_n(cand, i + 1) == in2)
+		{
+			same_args = 1;
+			break;
+		}
+	}
+
+	/* arguments match: the immediates must match as well */
+	if (same_args)
+		return ia32_cnst_compare(cand, irn);
+
+	return 0;
+}
+
+/**
+ * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And)
+ *
+ * @param irn  the TestJmp node
+ * @param cg   the ia32 code generator (for debug output)
+ */
+static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
+	ir_node *cand = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
+	int replace = 0;
+
+	/* we found a possible candidate */
+	replace = cand ? is_TestJmp_replacement(cand, irn) : 0;
+
+	if (replace) {
+		DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
+
+		/* And candidates become CJmpAM, TestJmp candidates become CJmp */
+		if (is_ia32_And(cand))
+			set_irn_op(irn, op_ia32_CJmpAM);
+		else
+			set_irn_op(irn, op_ia32_CJmp);
+
+		DB((cg->mod, LEVEL_1, "%+F\n", irn));
+	}
+}
+
+/** Returns non-zero if irn can replace a CondJmp (a CondJmp or a Sub). */
+static int is_CondJmp_cand(const ir_node *irn) {
+	return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
+}
+
+/**
+ * Checks if the arguments of cand are the same of irn.
+ *
+ * @return non-zero if ALL operands of cand match those of irn
+ *         AND both carry the same immediate
+ */
+static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
+	int i, n = get_irn_arity(cand);
+	int same_args = 1;
+
+	for (i = 0; i < n; i++) {
+		/* BUGFIX: bail out on the first operand that DIFFERS (was ==, */
+		/* which rejected the candidate exactly when operands matched) */
+		if (get_irn_n(cand, i) != get_irn_n(irn, i)) {
+			same_args = 0;
+			break;
+		}
+	}
+
+	/* arguments match: the immediates must match as well */
+	if (same_args)
+		return ia32_cnst_compare(cand, irn);
+
+	return 0;
+}
+
+/**
+ * Tries to replace a CondJmp by a CJmpAM
+ *
+ * @param irn  the CondJmp node
+ * @param cg   the ia32 code generator (for debug output)
+ */
+static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
+	ir_node *cand = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
+	int replace = 0;
+
+	/* we found a possible candidate */
+	replace = cand ? is_CondJmp_replacement(cand, irn) : 0;
+
+	if (replace) {
+		DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
+
+		set_irn_op(irn, op_ia32_CJmp);
+
+		DB((cg->mod, LEVEL_1, "%+F\n", irn));
+	}
+}
+
+/**
+ * Tries to optimize two following IncSP.
+ *
+ * If the previous IncSP is used only by irn, both stack adjustments are
+ * merged: the previous offset is zeroed and irn receives the combined
+ * offset and direction.
+ *
+ * @param irn  the (second) IncSP node
+ * @param cg   the ia32 code generator (currently unused here)
+ */
+static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
+	ir_node *prev = be_get_IncSP_pred(irn);
+	int real_uses = get_irn_n_edges(prev);
+
+	if (real_uses != 1) {
+		/*
+		This is a hack that should be removed if be_abi_fix_stack_nodes()
+		is fixed. Currently it leaves some IncSP's outside the chain ...
+		The previous IncSp is NOT our prev, but directly scheduled before ...
+		Impossible in a bug-free implementation :-)
+		*/
+		prev = sched_prev(irn);
+		real_uses = 1;
+	}
+
+	if (be_is_IncSP(prev) && real_uses == 1) {
+		/* first IncSP has only one IncSP user, kill the first one */
+		unsigned prev_offs = be_get_IncSP_offset(prev);
+		be_stack_dir_t prev_dir = be_get_IncSP_direction(prev);
+		unsigned curr_offs = be_get_IncSP_offset(irn);
+		be_stack_dir_t curr_dir = be_get_IncSP_direction(irn);
+
+		/* combine: expand counts negative, shrink positive */
+		/* NOTE(review): offs are unsigned, so *(-1) relies on wrap-around */
+		/* plus the int conversion below -- confirm on non-two's-complement */
+		int new_ofs = prev_offs * (prev_dir == be_stack_dir_expand ? -1 : +1) +
+			            curr_offs * (curr_dir == be_stack_dir_expand ? -1 : +1);
+
+		/* a negative combined offset means a net expand */
+		if (new_ofs < 0) {
+			new_ofs  = -new_ofs;
+			curr_dir = be_stack_dir_expand;
+		}
+		else
+			curr_dir = be_stack_dir_shrink;
+		/* neutralize the previous IncSP and fold everything into irn */
+		be_set_IncSP_offset(prev, 0);
+		be_set_IncSP_offset(irn, (unsigned)new_ofs);
+		be_set_IncSP_direction(irn, curr_dir);
+	}
+}
+
+/**
+ * Performs Peephole Optimizations.
+ *
+ * Walker callback: dispatches on the node type to the individual
+ * optimization routines above (TestJmp, CondJmp, IncSP).
+ *
+ * @param irn  the node to inspect
+ * @param env  the ia32 code generator environment (ia32_code_gen_t *)
+ */
+void ia32_peephole_optimization(ir_node *irn, void *env) {
+	ia32_code_gen_t *cg = env;
+
+	if (is_ia32_TestJmp(irn))
+		ia32_optimize_TestJmp(irn, cg);
+	else if (is_ia32_CondJmp(irn))
+		ia32_optimize_CondJmp(irn, cg);
+	else if (be_is_IncSP(irn))
+		ia32_optimize_IncSP(irn, cg);
+}
+
+
+
/******************************************************************
* _ _ __ __ _
* /\ | | | | | \/ | | |
*
******************************************************************/
+/**
+ * Returns non-zero if irn is an ia32 node with the commutative
+ * flag set, 0 for all non-ia32 nodes.
+ */
-static int node_is_comm(const ir_node *irn) {
+static int node_is_ia32_comm(const ir_node *irn) {
 	return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
 }
}
/**
- * Returns the Proj with number 0 connected to irn.
+ * Returns the first Proj with mode != mode_M connected to irn.
*/
static ir_node *get_res_proj(const ir_node *irn) {
const ir_edge_t *edge;
assert(is_Proj(src) && "Proj expected");
- if (get_Proj_proj(src) == 0)
+ if (get_irn_mode(src) != mode_M)
return src;
}
* @param is_op_func The check-function
* @return 1 if conditions are fulfilled, 0 otherwise
*/
-static int pred_is_specific_node(const ir_node *pred, int (*is_op_func)(const ir_node *n)) {
+static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
+	/* pred qualifies iff it is a Proj of a node accepted by is_op_func */
 	if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
 		return 1;
 	}
 	return 0;
 }
+
+
/**
* Checks if irn is a candidate for address calculation or address mode.
*
* return 1 if irn is a candidate for AC or AM, 0 otherwise
*/
static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) {
- ir_node *load_proj;
+ ir_node *in;
int n, is_cand = check_addr;
- if (pred_is_specific_nodeblock(block, get_irn_n(irn, 2), is_ia32_Load)) {
- load_proj = get_irn_n(irn, 2);
- n = ia32_get_irn_n_edges(load_proj);
+ in = get_irn_n(irn, 2);
+
+ if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
+ n = ia32_get_irn_n_edges(in);
is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
}
- if (pred_is_specific_nodeblock(block, get_irn_n(irn, 3), is_ia32_Load)) {
- load_proj = get_irn_n(irn, 3);
- n = ia32_get_irn_n_edges(load_proj);
+ in = get_irn_n(irn, 3);
+
+ if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
+ n = ia32_get_irn_n_edges(in);
is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
}
- is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : (check_addr ? 0 : 1);
+ is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand;
return is_cand;
}
int is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
entity *lent = get_ia32_frame_ent(load);
entity *sent = get_ia32_frame_ent(store);
+ ident *lid = get_ia32_am_sc(load);
+ ident *sid = get_ia32_am_sc(store);
+ char *loffs = get_ia32_am_offs(load);
+ char *soffs = get_ia32_am_offs(store);
/* are both entities set and equal? */
- is_equal = (lent && sent && (lent == sent)) ? 1 : is_equal;
+ if (is_equal && (lent || sent))
+ is_equal = lent && sent && (lent == sent);
+
+ /* are address mode idents set and equal? */
+ if (is_equal && (lid || sid))
+ is_equal = lid && sid && (lid == sid);
+
+ /* are offsets set and equal */
+ if (is_equal && (loffs || soffs))
+ is_equal = loffs && soffs && strcmp(loffs, soffs) == 0;
+
+ /* are the load and the store of the same mode? */
+ is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0;
return is_equal;
}
+
+
/**
* Folds Add or Sub to LEA if possible
*/
-static ir_node *fold_addr(be_abi_irg_t *babi, ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
- ir_graph *irg = get_irn_irg(irn);
- ir_mode *mode = get_irn_mode(irn);
- dbg_info *dbg = get_irn_dbg_info(irn);
- ir_node *block = get_nodes_block(irn);
- ir_node *res = irn;
- char *offs = NULL;
- char *offs_cnst = NULL;
- char *offs_lea = NULL;
- int scale = 0;
- int isadd = 0;
- int dolea = 0;
- ir_node *left, *right, *temp;
- ir_node *base, *index;
+static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) {
+ ir_graph *irg = get_irn_irg(irn);
+ dbg_info *dbg = get_irn_dbg_info(irn);
+ ir_node *block = get_nodes_block(irn);
+ ir_node *res = irn;
+ char *offs = NULL;
+ const char *offs_cnst = NULL;
+ char *offs_lea = NULL;
+ int scale = 0;
+ int isadd = 0;
+ int dolea = 0;
+ int have_am_sc = 0;
+ int am_sc_sign = 0;
+ ident *am_sc = NULL;
+ ir_node *left, *right, *temp;
+ ir_node *base, *index;
ia32_am_flavour_t am_flav;
+ DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
if (is_ia32_Add(irn))
isadd = 1;
left = get_irn_n(irn, 2);
right = get_irn_n(irn, 3);
- base = left;
- index = noreg;
- offs = NULL;
- scale = 0;
- am_flav = 0;
-
/* "normalize" arguments in case of add with two operands */
- if (isadd && ! be_is_NoReg(babi, right)) {
+ if (isadd && ! be_is_NoReg(cg, right)) {
/* put LEA == ia32_am_O as right operand */
if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
set_irn_n(irn, 2, right);
}
}
- /* check if operand is either const */
- if (get_ia32_cnst(irn)) {
- DBG((mod, LEVEL_1, "\tfound op with imm"));
+ base = left;
+ index = noreg;
+ offs = NULL;
+ scale = 0;
+ am_flav = 0;
+
+ /* check for operation with immediate */
+ if (is_ia32_ImmConst(irn)) {
+ DBG((mod, LEVEL_1, "\tfound op with imm const"));
offs_cnst = get_ia32_cnst(irn);
dolea = 1;
}
+ else if (is_ia32_ImmSymConst(irn)) {
+ DBG((mod, LEVEL_1, "\tfound op with imm symconst"));
+
+ have_am_sc = 1;
+ dolea = 1;
+ am_sc = get_ia32_id_cnst(irn);
+ am_sc_sign = is_ia32_am_sc_sign(irn);
+ }
/* determine the operand which needs to be checked */
- if (be_is_NoReg(babi, right)) {
+ if (be_is_NoReg(cg, right)) {
temp = left;
}
else {
temp = right;
}
- /* check if right operand is AMConst (LEA with ia32_am_O) */
- if (is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
+ /* check if right operand is AMConst (LEA with ia32_am_O) */
+ /* but we can only eat it up if there is no other symconst */
+ /* because the linker won't accept two symconsts */
+ if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));
- offs_lea = get_ia32_am_offs(temp);
- dolea = 1;
+ offs_lea = get_ia32_am_offs(temp);
+ am_sc = get_ia32_am_sc(temp);
+ am_sc_sign = is_ia32_am_sc_sign(temp);
+ have_am_sc = 1;
+ dolea = 1;
}
if (isadd) {
}
/* fix base */
- if (! be_is_NoReg(babi, index)) {
+ if (! be_is_NoReg(cg, index)) {
/* if we have index, but left == right -> no base */
if (left == right) {
base = noreg;
/* a new LEA. */
/* If the LEA contains already a frame_entity then we also */
/* create a new one otherwise we would loose it. */
- if (isadd && ((!be_is_NoReg(babi, index) && (am_flav & ia32_am_I)) || get_ia32_frame_ent(left))) {
+ if ((isadd && !be_is_NoReg(cg, index) && (am_flav & ia32_am_I)) || /* no new LEA if index already set */
+ get_ia32_frame_ent(left) || /* no new LEA if stack access */
+ (have_am_sc && get_ia32_am_sc(left))) /* no new LEA if AM symconst already present */
+ {
DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
}
else {
DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
- offs = get_ia32_am_offs(left);
- base = get_irn_n(left, 0);
- index = get_irn_n(left, 1);
- scale = get_ia32_am_scale(left);
+ offs = get_ia32_am_offs(left);
+ am_sc = have_am_sc ? am_sc : get_ia32_am_sc(left);
+ have_am_sc = am_sc ? 1 : 0;
+ am_sc_sign = is_ia32_am_sc_sign(left);
+ base = get_irn_n(left, 0);
+ index = get_irn_n(left, 1);
+ scale = get_ia32_am_scale(left);
}
}
}
}
+ /* set the address mode symconst */
+ if (have_am_sc) {
+ set_ia32_am_sc(res, am_sc);
+ if (am_sc_sign)
+ set_ia32_am_sc_sign(res);
+ }
+
/* copy the frame entity (could be set in case of Add */
/* which was a FrameAddr) */
set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
if (offs || offs_cnst || offs_lea) {
am_flav |= ia32_O;
}
- if (! be_is_NoReg(babi, base)) {
+ if (! be_is_NoReg(cg, base)) {
am_flav |= ia32_B;
}
- if (! be_is_NoReg(babi, index)) {
+ if (! be_is_NoReg(cg, index)) {
am_flav |= ia32_I;
}
if (scale > 0) {
*/
void ia32_optimize_am(ir_node *irn, void *env) {
ia32_code_gen_t *cg = env;
- ir_graph *irg = cg->irg;
- firm_dbg_module_t *mod = cg->mod;
ir_node *res = irn;
- be_abi_irg_t *babi = cg->birg->abi;
dbg_info *dbg;
ir_mode *mode;
ir_node *block, *noreg_gp, *noreg_fp;
ir_node *store, *load, *mem_proj;
ir_node *succ, *addr_b, *addr_i;
int check_am_src = 0;
+ DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
if (! is_ia32_irn(irn))
return;
/* Do not try to create a LEA if one of the operands is a Load. */
/* check is irn is a candidate for address calculation */
if (is_candidate(block, irn, 1)) {
-		res = fold_addr(babi, irn, mod, noreg_gp);
+		DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
+		res = fold_addr(cg, irn, noreg_gp);
+
+		/* fold_addr initialises res to irn and returns a different node */
+		/* when it folded something, so res != irn means "transformed"   */
+		/* BUGFIX: condition was inverted (res == irn printed success)   */
+		if (res != irn)
+			DB((mod, LEVEL_1, "transformed into %+F\n", res));
+		else
+			DB((mod, LEVEL_1, "not transformed\n"));
}
}
if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
/* 1st: check for Load/Store -> LEA */
- if (is_ia32_Ld(irn) || is_ia32_St(irn)) {
+ if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
left = get_irn_n(irn, 0);
if (is_ia32_Lea(left)) {
+ DBG((mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
+
/* get the AM attributes from the LEA */
add_ia32_am_offs(irn, get_ia32_am_offs(left));
set_ia32_am_scale(irn, get_ia32_am_scale(left));
set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
- set_ia32_op_type(irn, is_ia32_St(irn) ? ia32_AddrModeD : ia32_AddrModeS);
+ set_ia32_am_sc(irn, get_ia32_am_sc(left));
+ if (is_ia32_am_sc_sign(left))
+ set_ia32_am_sc_sign(irn);
+
+ set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);
/* set base and index */
set_irn_n(irn, 0, get_irn_n(left, 0));
set_irn_n(irn, 1, get_irn_n(left, 1));
+
+ /* clear remat flag */
+ set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
}
}
/* check if the node is an address mode candidate */
else if (is_candidate(block, irn, 0)) {
+ DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));
+
left = get_irn_n(irn, 2);
if (get_irn_arity(irn) == 4) {
/* it's an "unary" operation */
}
/* normalize commutative ops */
- if (node_is_comm(irn)) {
+ if (node_is_ia32_comm(irn)) {
/* Assure that right operand is always a Load if there is one */
/* because non-commutative ops can only use Dest AM if the right */
/* operand is a load, so we only need to check right operand. */
/* Extra check for commutative ops with two Loads */
/* -> put the interesting Load right */
- if (node_is_comm(irn) &&
+ if (node_is_ia32_comm(irn) &&
pred_is_specific_nodeblock(block, left, is_ia32_Ld))
{
if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
+ set_ia32_am_sc(irn, get_ia32_am_sc(load));
+ if (is_ia32_am_sc_sign(load))
+ set_ia32_am_sc_sign(irn);
+
if (is_ia32_use_frame(load))
set_ia32_use_frame(irn);
mem_proj = get_mem_proj(store);
set_Proj_pred(mem_proj, irn);
set_Proj_proj(mem_proj, 1);
+
+ /* clear remat flag */
+ set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
+
+ DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
}
} /* if (store) */
else if (get_ia32_am_support(irn) & ia32_am_Source) {
}
/* normalize commutative ops */
- if (node_is_comm(irn)) {
+ if (node_is_ia32_comm(irn)) {
/* Assure that left operand is always a Load if there is one */
/* because non-commutative ops can only use Source AM if the */
/* left operand is a Load, so we only need to check the left */
set_ia32_frame_ent(irn, get_ia32_frame_ent(left));
set_ia32_ls_mode(irn, get_ia32_ls_mode(left));
+ set_ia32_am_sc(irn, get_ia32_am_sc(left));
+ if (is_ia32_am_sc_sign(left))
+ set_ia32_am_sc_sign(irn);
+
+ /* clear remat flag */
+ set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
+
if (is_ia32_use_frame(left))
set_ia32_use_frame(irn);
set_Proj_pred(mem_proj, irn);
set_Proj_proj(mem_proj, 1);
}
+
+ DB((mod, LEVEL_1, "merged with %+F into source AM\n", left));
}
}
}