/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#include "ia32_finish.h"
#include "ia32_new_nodes.h"
#include "ia32_map_regs.h"
+#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_optimize.h"
ir_graph *irg;
ir_node *in1, *in2, *noreg, *nomem, *res;
ir_node *noreg_fp, *block;
- ir_mode *mode = get_irn_mode(irn);
- dbg_info *dbg = get_irn_dbg_info(irn);
- const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
- int i, arity;
+ dbg_info *dbg;
+ const arch_register_t *in1_reg, *in2_reg, *out_reg;
- /* Return if AM node or not a Sub or xSub */
- if (!(is_ia32_Sub(irn) || is_ia32_xSub(irn)) || get_ia32_op_type(irn) != ia32_Normal)
+ /* fix_am will solve this for AddressMode variants */
+ if (get_ia32_op_type(irn) != ia32_Normal)
return;
- noreg = ia32_new_NoReg_gp(cg);
- noreg_fp = ia32_new_NoReg_fp(cg);
- nomem = new_rd_NoMem(cg->irg);
- in1 = get_irn_n(irn, 2);
- in2 = get_irn_n(irn, 3);
- in1_reg = arch_get_irn_register(cg->arch_env, in1);
- in2_reg = arch_get_irn_register(cg->arch_env, in2);
- out_reg = get_ia32_out_reg(irn, 0);
+ noreg = ia32_new_NoReg_gp(cg);
+ noreg_fp = ia32_new_NoReg_xmm(cg);
+ nomem = new_rd_NoMem(cg->irg);
+ in1 = get_irn_n(irn, n_ia32_binary_left);
+ in2 = get_irn_n(irn, n_ia32_binary_right);
+ in1_reg = arch_get_irn_register(cg->arch_env, in1);
+ in2_reg = arch_get_irn_register(cg->arch_env, in2);
+ out_reg = get_ia32_out_reg(irn, 0);
irg = cg->irg;
block = get_nodes_block(irn);
if (out_reg != in2_reg)
return;
+ dbg = get_irn_dbg_info(irn);
+
/* generate the neg src2 */
- if(mode_is_float(mode)) {
+ if (is_ia32_xSub(irn)) {
int size;
ir_entity *entity;
+ ir_mode *op_mode = get_ia32_ls_mode(irn);
+
+ assert(get_irn_mode(irn) != mode_T);
- res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, in2, noreg_fp, nomem);
- size = get_mode_size_bits(mode);
+ res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, nomem, in2, noreg_fp);
+ size = get_mode_size_bits(op_mode);
entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
set_ia32_am_sc(res, entity);
set_ia32_op_type(res, ia32_AddrModeS);
- set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
- } else {
- res = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, in2, nomem);
- }
- arch_set_irn_register(cg->arch_env, res, in2_reg);
-
- /* add to schedule */
- sched_add_before(irn, res);
-
- /* generate the add */
- if (mode_is_float(mode)) {
- res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, res, in1, nomem);
- set_ia32_am_support(res, ia32_am_Source, ia32_am_binary);
- set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
- }
- else {
- res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, res, in1, nomem);
- set_ia32_am_support(res, ia32_am_Full, ia32_am_binary);
- set_ia32_commutative(res);
- }
-
- SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
- /* copy register */
- slots = get_ia32_slots(res);
- slots[0] = in2_reg;
+ set_ia32_ls_mode(res, op_mode);
- /* exchange the add and the sub */
- edges_reroute(irn, res, irg);
+ arch_set_irn_register(cg->arch_env, res, in2_reg);
- /* add to schedule */
- sched_add_before(irn, res);
+ /* add to schedule */
+ sched_add_before(irn, res);
- /* remove the old sub */
- sched_remove(irn);
- arity = get_irn_arity(irn);
- for(i = 0; i < arity; ++i) {
- set_irn_n(irn, i, new_Bad());
- }
+ /* generate the add */
+ res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, nomem, res, in1);
+ set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
- DBG_OPT_SUB2NEGADD(irn, res);
-}
+ /* exchange the add and the sub */
+ edges_reroute(irn, res, irg);
-/**
- * Transforms a LEA into an Add or SHL if possible.
- * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
- */
-static void ia32_transform_lea_to_add_or_shl(ir_node *irn, ia32_code_gen_t *cg) {
- ia32_am_flavour_t am_flav;
- dbg_info *dbg = get_irn_dbg_info(irn);
- ir_graph *irg;
- ir_node *res = NULL;
- ir_node *nomem, *noreg, *base, *index, *op1, *op2;
- ir_node *block;
- long offs = 0;
- const arch_register_t *out_reg, *base_reg, *index_reg;
-
- /* must be a LEA */
- if (! is_ia32_Lea(irn))
- return;
+ /* add to schedule */
+ sched_add_before(irn, res);
+ } else {
+ ir_node *res_proj = NULL;
+ ir_node *flags_proj = NULL;
+ const ir_edge_t *edge;
+
+ if (get_irn_mode(irn) == mode_T) {
+ /* collect the Proj uses */
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ long pn = get_Proj_proj(proj);
+ if(pn == pn_ia32_Sub_res) {
+ assert(res_proj == NULL);
+ res_proj = proj;
+ } else {
+ assert(pn == pn_ia32_Sub_flags);
+ assert(flags_proj == NULL);
+ flags_proj = proj;
+ }
+ }
+ }
- am_flav = get_ia32_am_flavour(irn);
+ if (flags_proj == NULL) {
+ res = new_rd_ia32_Neg(dbg, irg, block, in2);
+ arch_set_irn_register(cg->arch_env, res, in2_reg);
- /* mustn't have a symconst */
- if (get_ia32_am_sc(irn) != NULL || get_ia32_frame_ent(irn) != NULL)
- return;
+ /* add to schedule */
+ sched_add_before(irn, res);
- if (am_flav == ia32_am_IS) {
- tarval *tv;
+ /* generate the add */
+ res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, res, in1);
+ arch_set_irn_register(cg->arch_env, res, out_reg);
+ set_ia32_commutative(res);
- /* Create a SHL */
- noreg = ia32_new_NoReg_gp(cg);
- nomem = new_rd_NoMem(cg->irg);
- index = get_irn_n(irn, 1);
- index_reg = arch_get_irn_register(cg->arch_env, index);
- out_reg = arch_get_irn_register(cg->arch_env, irn);
+ /* exchange the add and the sub */
+ edges_reroute(irn, res, irg);
- if (out_reg != index_reg)
- return;
+ /* add to schedule */
+ sched_add_before(irn, res);
+ } else {
+ ir_node *stc, *cmc, *not, *adc;
+ ir_node *adc_flags;
- /* ok, we can transform it */
- irg = cg->irg;
- block = get_nodes_block(irn);
-
- res = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, index, noreg, nomem);
- offs = get_ia32_am_scale(irn);
- tv = new_tarval_from_long(offs, mode_Iu);
- set_ia32_Immop_tarval(res, tv);
- arch_set_irn_register(cg->arch_env, res, out_reg);
- } else {
- /* only some LEAs can be transformed to an Add */
- if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_BI)
- return;
-
- noreg = ia32_new_NoReg_gp(cg);
- nomem = new_rd_NoMem(cg->irg);
- op1 = noreg;
- op2 = noreg;
- base = get_irn_n(irn, 0);
- index = get_irn_n(irn, 1);
- offs = get_ia32_am_offs_int(irn);
-
- out_reg = arch_get_irn_register(cg->arch_env, irn);
- base_reg = arch_get_irn_register(cg->arch_env, base);
- index_reg = arch_get_irn_register(cg->arch_env, index);
-
- irg = cg->irg;
- block = get_nodes_block(irn);
-
- switch(am_flav) {
- case ia32_am_B:
- case ia32_am_OB:
- /* out register must be same as base register */
- if (out_reg != base_reg)
- return;
-
- op1 = base;
- op2 = new_rd_ia32_Immediate(NULL, irg, block, NULL, 0, offs);
- arch_set_irn_register(cg->arch_env, op2,
- &ia32_gp_regs[REG_GP_NOREG]);
- break;
- case ia32_am_BI:
- assert(offs == 0);
- /* out register must be same as one in register */
- if (out_reg == base_reg) {
- op1 = base;
- op2 = index;
- } else if (out_reg == index_reg) {
- op1 = index;
- op2 = base;
- } else {
- /* in registers a different from out -> no Add possible */
- return;
- }
- break;
+ /*
+ * ARG, the above technique does NOT set the flags right.
+ * So, we must produce the following code:
+ * t1 = ~b
+ * t2 = a + ~b + Carry
+ * Complement Carry
+ *
+ * a + -b = a + (~b + 1) would set the carry flag IF a == b ...
+ */
+ not = new_rd_ia32_Not(dbg, irg, block, in2);
+ arch_set_irn_register(cg->arch_env, not, in2_reg);
+ sched_add_before(irn, not);
+
+ stc = new_rd_ia32_Stc(dbg, irg, block);
+ arch_set_irn_register(cg->arch_env, stc,
+ &ia32_flags_regs[REG_EFLAGS]);
+ sched_add_before(irn, stc);
+
+ adc = new_rd_ia32_Adc(dbg, irg, block, noreg, noreg, nomem, not,
+ in1, stc);
+ arch_set_irn_register(cg->arch_env, adc, out_reg);
+ sched_add_before(irn, adc);
+
+ set_irn_mode(adc, mode_T);
+ adc_flags = new_r_Proj(irg, block, adc, mode_Iu, pn_ia32_Adc_flags);
+ arch_set_irn_register(cg->arch_env, adc_flags,
+ &ia32_flags_regs[REG_EFLAGS]);
+
+ cmc = new_rd_ia32_Cmc(dbg, irg, block, adc_flags);
+ arch_set_irn_register(cg->arch_env, cmc,
+ &ia32_flags_regs[REG_EFLAGS]);
+ sched_add_before(irn, cmc);
+
+ exchange(flags_proj, cmc);
+ if (res_proj != NULL) {
+ set_Proj_pred(res_proj, adc);
+ set_Proj_proj(res_proj, pn_ia32_Adc_res);
+ }
- default:
- assert(0);
- break;
+ res = adc;
}
-
- res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem);
- arch_set_irn_register(cg->arch_env, res, out_reg);
- set_ia32_op_type(res, ia32_Normal);
- set_ia32_commutative(res);
}
SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
- /* add new ADD/SHL to schedule */
- sched_add_before(irn, res);
-
- DBG_OPT_LEA2ADD(irn, res);
-
- /* remove the old LEA */
+ /* remove the old sub */
sched_remove(irn);
+ kill_node(irn);
- /* exchange the Add and the LEA */
- exchange(irn, res);
+ DBG_OPT_SUB2NEGADD(irn, res);
}
static INLINE int need_constraint_copy(ir_node *irn) {
+ /* the 3 operand form of IMul needs no constraint copy */
+ if(is_ia32_IMul(irn)) {
+ ir_node *right = get_irn_n(irn, n_ia32_IMul_right);
+ if(is_ia32_Immediate(right))
+ return 0;
+ }
+
return ! is_ia32_Lea(irn) &&
! is_ia32_Conv_I2I(irn) &&
! is_ia32_Conv_I2I8Bit(irn) &&
- ! is_ia32_TestCMov(irn) &&
- ! is_ia32_CmpCMov(irn);
+ ! is_ia32_CMov(irn);
+}
+
+/**
+ * Returns the index of the "same" register.
+ * On the x86, we should have only one.
+ */
+static int get_first_same(const arch_register_req_t* req)
+{
+ const unsigned other = req->other_same;
+ int i;
+
+ for (i = 0; i < 32; ++i) {
+ if (other & (1U << i)) return i;
+ }
+ assert(! "same position not found");
+ return 32;
}
/**
const arch_register_t *out_reg, *in_reg;
int n_res, i;
ir_node *in_node, *block;
- ia32_op_type_t op_tp;
-
- if(!is_ia32_irn(node))
- return;
-
- /* some nodes are just a bit less efficient, but need no fixing if the
- * should be same requirement is not fulfilled */
- if(!need_constraint_copy(node))
- return;
reqs = get_ia32_out_req_all(node);
n_res = get_ia32_n_res(node);
ir_node *in[2];
ir_node *perm_proj0;
ir_node *perm_proj1;
+ ir_node *uses_out_reg;
const arch_register_req_t *req = reqs[i];
- const arch_register_class_t *class;
+ const arch_register_class_t *cls;
+ int uses_out_reg_pos;
if (!arch_register_req_is(req, should_be_same))
continue;
- same_pos = req->other_same;
+ same_pos = get_first_same(req);
/* get in and out register */
out_reg = get_ia32_out_reg(node, i);
/* unknowns can be changed to any register we want on emitting */
if (is_unknown_reg(in_reg))
continue;
- class = arch_register_get_class(in_reg);
- assert(class == arch_register_get_class(out_reg));
+ cls = arch_register_get_class(in_reg);
+ assert(cls == arch_register_get_class(out_reg));
/* check if any other input operands uses the out register */
arity = get_irn_arity(node);
- ir_node *uses_out_reg = NULL;
- int uses_out_reg_pos = -1;
+ uses_out_reg = NULL;
+ uses_out_reg_pos = -1;
for(i2 = 0; i2 < arity; ++i2) {
ir_node *in = get_irn_n(node, i2);
- const arch_register_t *in_reg = arch_get_irn_register(arch_env, in);
+ const arch_register_t *in_reg;
+
+ if(!mode_is_data(get_irn_mode(in)))
+ continue;
+
+ in_reg = arch_get_irn_register(arch_env, in);
if(in_reg != out_reg)
continue;
uses_out_reg_pos = i2;
}
- /* noone else is using the out reg, we can simply copy it
+ /* no-one else is using the out reg, we can simply copy it
* (the register can't be live since the operation will override it
* anyway) */
if(uses_out_reg == NULL) {
- ir_node *copy = be_new_Copy(class, irg, block, in_node);
+ ir_node *copy = be_new_Copy(cls, irg, block, in_node);
DBG_OPT_2ADDRCPY(copy);
/* destination is the out register */
}
/* for commutative nodes we can simply swap the left/right */
- if(is_ia32_commutative(node) && uses_out_reg_pos == 3) {
+ if (uses_out_reg_pos == n_ia32_binary_right && is_ia32_commutative(node)) {
ia32_swap_left_right(node);
DBG((dbg, LEVEL_1, "swapped left/right input of %+F to resolve "
"should be same constraint\n", node));
* after! the operation as we will override the register. */
in[0] = in_node;
in[1] = uses_out_reg;
- perm = be_new_Perm(class, irg, block, 2, in);
+ perm = be_new_Perm(cls, irg, block, 2, in);
perm_proj0 = new_r_Proj(irg, block, perm, get_irn_mode(in[0]), 0);
perm_proj1 = new_r_Proj(irg, block, perm, get_irn_mode(in[1]), 1);
}
}
}
-
- /* check xCmp: try to avoid unordered cmp */
- if ((is_ia32_xCmp(node) || is_ia32_xCmpCMov(node) || is_ia32_xCmpSet(node)) &&
- op_tp == ia32_Normal)
- {
- long pnc = get_ia32_pncode(node);
-
- if (pnc & pn_Cmp_Uo) {
- ir_node *tmp;
- int idx1 = 2, idx2 = 3;
-
- if (is_ia32_xCmpCMov(node)) {
- idx1 = 0;
- idx2 = 1;
- }
-
- /** Matze: TODO this looks wrong, I assume we should exchange
- * the proj numbers and not the inputs... */
-
- tmp = get_irn_n(node, idx1);
- set_irn_n(node, idx1, get_irn_n(node, idx2));
- set_irn_n(node, idx2, tmp);
-
- set_ia32_pncode(node, get_negated_pnc(pnc, mode_E));
- }
- }
}
/**
* Solution: Turn back this address mode into explicit Load + Operation.
*/
static void fix_am_source(ir_node *irn, void *env) {
- ia32_code_gen_t *cg = env;
- ir_node *base, *index, *noreg;
- const arch_register_t *reg_base, *reg_index;
+ ia32_code_gen_t *cg = env;
+ const arch_env_t *arch_env = cg->arch_env;
+ ir_node *base;
+ ir_node *index;
+ ir_node *noreg;
+ const arch_register_t *reg_base;
+ const arch_register_t *reg_index;
const arch_register_req_t **reqs;
- int n_res, i;
+ int n_res, i;
/* check only ia32 nodes with source address mode */
if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
if (get_ia32_am_arity(irn) != ia32_am_binary)
return;
- base = get_irn_n(irn, 0);
- index = get_irn_n(irn, 1);
+ base = get_irn_n(irn, n_ia32_base);
+ index = get_irn_n(irn, n_ia32_index);
- reg_base = arch_get_irn_register(cg->arch_env, base);
- reg_index = arch_get_irn_register(cg->arch_env, index);
+ reg_base = arch_get_irn_register(arch_env, base);
+ reg_index = arch_get_irn_register(arch_env, index);
reqs = get_ia32_out_req_all(irn);
noreg = ia32_new_NoReg_gp(cg);
for (i = 0; i < n_res; i++) {
if (arch_register_req_is(reqs[i], should_be_same)) {
/* get in and out register */
- const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
- int same_pos = reqs[i]->other_same;
-
- /*
- there is a constraint for the remaining operand
- and the result register is equal to base or index register
- */
- if (same_pos == 2 &&
- (out_reg == reg_base || out_reg == reg_index))
- {
- /* turn back address mode */
- ir_node *in_node = get_irn_n(irn, 2);
- const arch_register_t *in_reg = arch_get_irn_register(cg->arch_env, in_node);
- ir_node *block = get_nodes_block(irn);
- ir_mode *ls_mode = get_ia32_ls_mode(irn);
- ir_node *load;
- int pnres;
-
- if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_gp]) {
- load = new_rd_ia32_Load(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
- pnres = pn_ia32_Load_res;
- }
- else if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_xmm]) {
- load = new_rd_ia32_xLoad(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
- pnres = pn_ia32_xLoad_res;
- }
- else {
- panic("cannot turn back address mode for this register class");
- }
-
- /* copy address mode information to load */
- set_ia32_ls_mode(load, ls_mode);
- set_ia32_am_flavour(load, get_ia32_am_flavour(irn));
- set_ia32_op_type(load, ia32_AddrModeS);
- set_ia32_am_scale(load, get_ia32_am_scale(irn));
- set_ia32_am_sc(load, get_ia32_am_sc(irn));
- add_ia32_am_offs_int(load, get_ia32_am_offs_int(irn));
- set_ia32_frame_ent(load, get_ia32_frame_ent(irn));
-
- if (is_ia32_use_frame(irn))
- set_ia32_use_frame(load);
-
- /* insert the load into schedule */
- sched_add_before(irn, load);
-
- DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
-
- load = new_r_Proj(cg->irg, block, load, ls_mode, pnres);
- arch_set_irn_register(cg->arch_env, load, out_reg);
-
- /* insert the load result proj into schedule */
- sched_add_before(irn, load);
+ const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
+ int same_pos = get_first_same(reqs[i]);
+ ir_node *same_node = get_irn_n(irn, same_pos);
+ const arch_register_t *same_reg
+ = arch_get_irn_register(arch_env, same_node);
+ const arch_register_class_t *same_cls;
+ ir_graph *irg = cg->irg;
+ dbg_info *dbgi = get_irn_dbg_info(irn);
+ ir_node *block = get_nodes_block(irn);
+ ir_mode *proj_mode;
+ ir_node *load;
+ ir_node *load_res;
+ ir_node *mem;
+ int pnres;
+ int pnmem;
+
+			/* should_be_same constraint is fulfilled, nothing to do */
+ if(out_reg == same_reg)
+ continue;
- /* set the new input operand */
- set_irn_n(irn, 3, load);
+ /* we only need to do something if the out reg is the same as base
+ or index register */
+ if (out_reg != reg_base && out_reg != reg_index)
+ continue;
- /* this is a normal node now */
- set_irn_n(irn, 0, noreg);
- set_irn_n(irn, 1, noreg);
- set_ia32_op_type(irn, ia32_Normal);
+ /* turn back address mode */
+ same_cls = arch_register_get_class(same_reg);
+ mem = get_irn_n(irn, n_ia32_mem);
+ assert(get_irn_mode(mem) == mode_M);
+ if (same_cls == &ia32_reg_classes[CLASS_ia32_gp]) {
+ load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
+ pnres = pn_ia32_Load_res;
+ pnmem = pn_ia32_Load_M;
+ proj_mode = mode_Iu;
+ } else if (same_cls == &ia32_reg_classes[CLASS_ia32_xmm]) {
+ load = new_rd_ia32_xLoad(dbgi, irg, block, base, index, mem,
+ get_ia32_ls_mode(irn));
+ pnres = pn_ia32_xLoad_res;
+ pnmem = pn_ia32_xLoad_M;
+ proj_mode = mode_E;
+ } else {
+ panic("cannot turn back address mode for this register class");
+ }
- break;
+ /* copy address mode information to load */
+ set_ia32_op_type(load, ia32_AddrModeS);
+ ia32_copy_am_attrs(load, irn);
+
+ /* insert the load into schedule */
+ sched_add_before(irn, load);
+
+ DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
+
+ load_res = new_r_Proj(cg->irg, block, load, proj_mode, pnres);
+ arch_set_irn_register(cg->arch_env, load_res, out_reg);
+
+ /* set the new input operand */
+ set_irn_n(irn, n_ia32_binary_right, load_res);
+ if(get_irn_mode(irn) == mode_T) {
+ const ir_edge_t *edge, *next;
+ foreach_out_edge_safe(irn, edge, next) {
+ ir_node *node = get_edge_src_irn(edge);
+ int pn = get_Proj_proj(node);
+ if (pn == pn_ia32_res) {
+ exchange(node, irn);
+ } else if (pn == pn_ia32_mem) {
+ set_Proj_pred(node, load);
+ set_Proj_proj(node, pnmem);
+ }
+ }
+ set_irn_mode(irn, mode_Iu);
}
+
+ /* this is a normal node now */
+ set_irn_n(irn, n_ia32_base, noreg);
+ set_irn_n(irn, n_ia32_index, noreg);
+ set_ia32_op_type(irn, ia32_Normal);
+ break;
}
}
}
next = sched_next(irn);
/* check if there is a sub which need to be transformed */
- ia32_transform_sub_to_neg_add(irn, cg);
-
- /* transform a LEA into an Add if possible */
- ia32_transform_lea_to_add_or_shl(irn, cg);
+ if (is_ia32_Sub(irn) || is_ia32_xSub(irn)) {
+ ia32_transform_sub_to_neg_add(irn, cg);
+ }
}
/* second: insert copies and finish irg */
for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
next = sched_next(irn);
- assure_should_be_same_requirements(cg, irn);
+ if (is_ia32_irn(irn)) {
+ /* some nodes are just a bit less efficient, but need no fixing if the
+ * should be same requirement is not fulfilled */
+ if (need_constraint_copy(irn))
+ assure_should_be_same_requirements(cg, irn);
+ }
}
}