/* Predicate type: returns non-zero if the given node is a specific kind of operation. */
typedef int is_op_func_t(const ir_node *n);
-static int be_is_NoReg(be_abi_irg_t *babi, const ir_node *irn) {
- if (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_XXX]) == irn ||
- be_abi_get_callee_save_irn(babi, &ia32_fp_regs[REG_XXXX]) == irn)
- {
- return 1;
- }
+/**
+ * checks if a node represents the NOREG value
+ */
+static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
+ be_abi_irg_t *babi = cg->birg->abi;
+ const arch_register_t *fp_noreg = USE_SSE2(cg) ?
+ &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG];
- return 0;
+ return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) ||
+ (be_abi_get_callee_save_irn(babi, fp_noreg) == irn);
}
ir_node *block = env->block;
if (mode_is_float(mode)) {
- cnst = new_rd_ia32_fConst(dbg, irg, block, mode);
+ if (USE_SSE2(env->cg))
+ cnst = new_rd_ia32_fConst(dbg, irg, block, mode);
+ else
+ cnst = new_rd_ia32_vfConst(dbg, irg, block, mode);
}
else {
cnst = new_rd_ia32_Const(dbg, irg, block, mode);
ir_mode *mode = env->mode;
if (mode_is_float(mode)) {
+ if (! USE_SSE2(env->cg)) {
+ cnst_classify_t clss = classify_Const(node);
+
+ if (clss == CNST_NULL)
+ return new_rd_ia32_vfldz(dbg, irg, block, mode);
+ else if (clss == CNST_ONE)
+ return new_rd_ia32_vfld1(dbg, irg, block, mode);
+ }
sym.entity_p = get_entity_for_tv(env->cg, node);
cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
*/
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
int i, n = get_irn_arity(cand);
- int same_args = 0;
+ int same_args = 1;
for (i = 0; i < n; i++) {
if (get_irn_n(cand, i) == get_irn_n(irn, i)) {
- same_args = 1;
+ same_args = 0;
break;
}
}
*
******************************************************************/
-static int node_is_comm(const ir_node *irn) {
+static int node_is_ia32_comm(const ir_node *irn) {
return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}
int is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
entity *lent = get_ia32_frame_ent(load);
entity *sent = get_ia32_frame_ent(store);
+ ident *lid = get_ia32_am_sc(load);
+ ident *sid = get_ia32_am_sc(store);
+ char *loffs = get_ia32_am_offs(load);
+ char *soffs = get_ia32_am_offs(store);
/* are both entities set and equal? */
- is_equal = lent && sent && (lent == sent);
+ if (is_equal && (lent || sent))
+ is_equal = lent && sent && (lent == sent);
+
+ /* are address mode idents set and equal? */
+ if (is_equal && (lid || sid))
+ is_equal = lid && sid && (lid == sid);
+
+ /* are offsets set and equal */
+ if (is_equal && (loffs || soffs))
+ is_equal = loffs && soffs && strcmp(loffs, soffs) == 0;
/* are the load and the store of the same mode? */
- is_equal = get_ia32_ls_mode(load) == get_ia32_ls_mode(store);
+ is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0;
return is_equal;
}
/**
* Folds Add or Sub to LEA if possible
*/
-static ir_node *fold_addr(be_abi_irg_t *babi, ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
- ir_graph *irg = get_irn_irg(irn);
- dbg_info *dbg = get_irn_dbg_info(irn);
- ir_node *block = get_nodes_block(irn);
- ir_node *res = irn;
- char *offs = NULL;
- const char *offs_cnst = NULL;
- char *offs_lea = NULL;
- int scale = 0;
- int isadd = 0;
- int dolea = 0;
- ir_node *left, *right, *temp;
- ir_node *base, *index;
+static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
+ ir_graph *irg = get_irn_irg(irn);
+ dbg_info *dbg = get_irn_dbg_info(irn);
+ ir_node *block = get_nodes_block(irn);
+ ir_node *res = irn;
+ char *offs = NULL;
+ const char *offs_cnst = NULL;
+ char *offs_lea = NULL;
+ int scale = 0;
+ int isadd = 0;
+ int dolea = 0;
+ int have_am_sc = 0;
+ int am_sc_sign = 0;
+ ident *am_sc = NULL;
+ ir_node *left, *right, *temp;
+ ir_node *base, *index;
ia32_am_flavour_t am_flav;
if (is_ia32_Add(irn))
right = get_irn_n(irn, 3);
/* "normalize" arguments in case of add with two operands */
- if (isadd && ! be_is_NoReg(babi, right)) {
+ if (isadd && ! be_is_NoReg(cg, right)) {
/* put LEA == ia32_am_O as right operand */
if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
set_irn_n(irn, 2, right);
scale = 0;
am_flav = 0;
- /* check if operand is either const */
- if (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) {
- DBG((mod, LEVEL_1, "\tfound op with imm"));
+ /* check for operation with immediate */
+ if (is_ia32_ImmConst(irn)) {
+ DBG((mod, LEVEL_1, "\tfound op with imm const"));
offs_cnst = get_ia32_cnst(irn);
dolea = 1;
}
+ else if (is_ia32_ImmSymConst(irn)) {
+ DBG((mod, LEVEL_1, "\tfound op with imm symconst"));
+
+ have_am_sc = 1;
+ dolea = 1;
+ am_sc = get_ia32_id_cnst(irn);
+ am_sc_sign = is_ia32_am_sc_sign(irn);
+ }
/* determine the operand which needs to be checked */
- if (be_is_NoReg(babi, right)) {
+ if (be_is_NoReg(cg, right)) {
temp = left;
}
else {
temp = right;
}
- /* check if right operand is AMConst (LEA with ia32_am_O) */
- if (is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
+ /* check if right operand is AMConst (LEA with ia32_am_O) */
+ /* but we can only eat it up if there is no other symconst */
+ /* because the linker won't accept two symconsts */
+ if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));
- offs_lea = get_ia32_am_offs(temp);
- dolea = 1;
+ offs_lea = get_ia32_am_offs(temp);
+ am_sc = get_ia32_am_sc(temp);
+ am_sc_sign = is_ia32_am_sc_sign(temp);
+ have_am_sc = 1;
+ dolea = 1;
}
if (isadd) {
}
/* fix base */
- if (! be_is_NoReg(babi, index)) {
+ if (! be_is_NoReg(cg, index)) {
/* if we have index, but left == right -> no base */
if (left == right) {
base = noreg;
/* a new LEA. */
/* If the LEA contains already a frame_entity then we also */
/* create a new one otherwise we would loose it. */
- if ((isadd && !be_is_NoReg(babi, index) && (am_flav & ia32_am_I)) ||
- get_ia32_frame_ent(left))
+ if ((isadd && !be_is_NoReg(cg, index) && (am_flav & ia32_am_I)) || /* no new LEA if index already set */
+ get_ia32_frame_ent(left) || /* no new LEA if stack access */
+ (have_am_sc && get_ia32_am_sc(left))) /* no new LEA if AM symconst already present */
{
DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
}
else {
DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
- offs = get_ia32_am_offs(left);
- base = get_irn_n(left, 0);
- index = get_irn_n(left, 1);
- scale = get_ia32_am_scale(left);
+ offs = get_ia32_am_offs(left);
+ am_sc = have_am_sc ? am_sc : get_ia32_am_sc(left);
+ have_am_sc = am_sc ? 1 : 0;
+ am_sc_sign = is_ia32_am_sc_sign(left);
+ base = get_irn_n(left, 0);
+ index = get_irn_n(left, 1);
+ scale = get_ia32_am_scale(left);
}
}
}
}
+ /* set the address mode symconst */
+ if (have_am_sc) {
+ set_ia32_am_sc(res, am_sc);
+ if (am_sc_sign)
+ set_ia32_am_sc_sign(res);
+ }
+
/* copy the frame entity (could be set in case of Add */
/* which was a FrameAddr) */
set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
if (offs || offs_cnst || offs_lea) {
am_flav |= ia32_O;
}
- if (! be_is_NoReg(babi, base)) {
+ if (! be_is_NoReg(cg, base)) {
am_flav |= ia32_B;
}
- if (! be_is_NoReg(babi, index)) {
+ if (! be_is_NoReg(cg, index)) {
am_flav |= ia32_I;
}
if (scale > 0) {
ia32_code_gen_t *cg = env;
firm_dbg_module_t *mod = cg->mod;
ir_node *res = irn;
- be_abi_irg_t *babi = cg->birg->abi;
dbg_info *dbg;
ir_mode *mode;
ir_node *block, *noreg_gp, *noreg_fp;
/* check is irn is a candidate for address calculation */
if (is_candidate(block, irn, 1)) {
DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
- res = fold_addr(babi, irn, mod, noreg_gp);
+ res = fold_addr(cg, irn, mod, noreg_gp);
if (res == irn)
DB((mod, LEVEL_1, "transformed into %+F\n", res));
set_ia32_am_scale(irn, get_ia32_am_scale(left));
set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
+ set_ia32_am_sc(irn, get_ia32_am_sc(left));
+ if (is_ia32_am_sc_sign(left))
+ set_ia32_am_sc_sign(irn);
+
set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);
/* set base and index */
}
/* normalize commutative ops */
- if (node_is_comm(irn)) {
+ if (node_is_ia32_comm(irn)) {
/* Assure that right operand is always a Load if there is one */
/* because non-commutative ops can only use Dest AM if the right */
/* operand is a load, so we only need to check right operand. */
/* Extra check for commutative ops with two Loads */
/* -> put the interesting Load right */
- if (node_is_comm(irn) &&
+ if (node_is_ia32_comm(irn) &&
pred_is_specific_nodeblock(block, left, is_ia32_Ld))
{
if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
+ set_ia32_am_sc(irn, get_ia32_am_sc(load));
+ if (is_ia32_am_sc_sign(load))
+ set_ia32_am_sc_sign(irn);
+
if (is_ia32_use_frame(load))
set_ia32_use_frame(irn);
}
/* normalize commutative ops */
- if (node_is_comm(irn)) {
+ if (node_is_ia32_comm(irn)) {
/* Assure that left operand is always a Load if there is one */
/* because non-commutative ops can only use Source AM if the */
/* left operand is a Load, so we only need to check the left */
set_ia32_frame_ent(irn, get_ia32_frame_ent(left));
set_ia32_ls_mode(irn, get_ia32_ls_mode(left));
+ set_ia32_am_sc(irn, get_ia32_am_sc(left));
+ if (is_ia32_am_sc_sign(left))
+ set_ia32_am_sc_sign(irn);
+
/* clear remat flag */
set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);