X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_finish.c;h=1e62ce5caa6d811a42cf7e49a553cc5fa6aac9bb;hb=b35afc79238e7731b8b3beecc69468cb719735f5;hp=267f384e8dc932e40b6ea19007e0c2d856e79e52;hpb=9d349270994845943db0c2f67a39cb88f21fc1b1;p=libfirm diff --git a/ir/be/ia32/ia32_finish.c b/ir/be/ia32/ia32_finish.c index 267f384e8..1e62ce5ca 100644 --- a/ir/be/ia32/ia32_finish.c +++ b/ir/be/ia32/ia32_finish.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -21,28 +21,26 @@ * @file * @brief This file implements functions to finalize the irg for emit. * @author Christian Wuerdig - * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include "irnode.h" #include "ircons.h" #include "irgmod.h" #include "irgwalk.h" #include "iredges.h" +#include "irprintf.h" #include "pdeq.h" #include "error.h" -#include "../bearch_t.h" -#include "../besched_t.h" -#include "../benode_t.h" +#include "bearch.h" +#include "besched.h" +#include "benode.h" #include "bearch_ia32_t.h" #include "ia32_finish.h" #include "ia32_new_nodes.h" -#include "ia32_map_regs.h" +#include "ia32_common_transform.h" #include "ia32_transform.h" #include "ia32_dbg_stat.h" #include "ia32_optimize.h" @@ -51,473 +49,390 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) /** - * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG. + * Transforms a Sub or xSub into Neg--Add iff OUT_REG != SRC1_REG && OUT_REG == SRC2_REG. * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION. */ -static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { +static void ia32_transform_sub_to_neg_add(ir_node *irn) +{ ir_graph *irg; ir_node *in1, *in2, *noreg, *nomem, *res; ir_node *noreg_fp, *block; - ir_mode *mode = get_irn_mode(irn); - dbg_info *dbg = get_irn_dbg_info(irn); - const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots; - int i, arity; + dbg_info *dbgi; + const arch_register_t *in1_reg, *in2_reg, *out_reg; - /* Return if AM node or not a Sub or xSub */ - if (!(is_ia32_Sub(irn) || is_ia32_xSub(irn)) || get_ia32_op_type(irn) != ia32_Normal) + /* fix_am will solve this for AddressMode variants */ + if (get_ia32_op_type(irn) != ia32_Normal) return; - noreg = ia32_new_NoReg_gp(cg); - noreg_fp = ia32_new_NoReg_fp(cg); - nomem = new_rd_NoMem(cg->irg); - in1 = get_irn_n(irn, 2); - in2 = get_irn_n(irn, 3); - in1_reg = arch_get_irn_register(cg->arch_env, in1); - in2_reg = arch_get_irn_register(cg->arch_env, in2); - out_reg = get_ia32_out_reg(irn, 0); - - irg = cg->irg; - block = get_nodes_block(irn); + irg = get_irn_irg(irn); + noreg = ia32_new_NoReg_gp(irg); + noreg_fp = ia32_new_NoReg_xmm(irg); + nomem = get_irg_no_mem(irg); + in1 = get_irn_n(irn, n_ia32_binary_left); + in2 = get_irn_n(irn, n_ia32_binary_right); + in1_reg = arch_get_irn_register(in1); + in2_reg = arch_get_irn_register(in2); + out_reg = arch_get_irn_register_out(irn, 0); + + if (out_reg == in1_reg) + return; /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */ - if (!REGS_ARE_EQUAL(out_reg, in2_reg)) + if (out_reg != in2_reg) return; + block = get_nodes_block(irn); + dbgi = get_irn_dbg_info(irn); + /* generate the neg src2 */ - if(mode_is_float(mode)) { + if (is_ia32_xSub(irn)) { int size; ir_entity *entity; + ir_mode *op_mode = get_ia32_ls_mode(irn); - res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, in2, noreg_fp, nomem); 
- size = get_mode_size_bits(mode); + assert(get_irn_mode(irn) != mode_T); + + res = new_bd_ia32_xXor(dbgi, block, noreg, noreg, nomem, in2, noreg_fp); + size = get_mode_size_bits(op_mode); entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN); set_ia32_am_sc(res, entity); set_ia32_op_type(res, ia32_AddrModeS); + set_ia32_ls_mode(res, op_mode); + + arch_set_irn_register(res, in2_reg); + + /* add to schedule */ + sched_add_before(irn, res); + + /* generate the add */ + res = new_bd_ia32_xAdd(dbgi, block, noreg, noreg, nomem, res, in1); set_ia32_ls_mode(res, get_ia32_ls_mode(irn)); } else { - res = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, in2, nomem); - } - arch_set_irn_register(cg->arch_env, res, in2_reg); + ir_node *flags_proj = NULL; + ir_node *carry; + + if (get_irn_mode(irn) == mode_T) { + /* collect the Proj uses */ + foreach_out_edge(irn, edge) { + ir_node *proj = get_edge_src_irn(edge); + long pn = get_Proj_proj(proj); + if (pn == pn_ia32_flags) { + assert(flags_proj == NULL); + flags_proj = proj; + break; + } + } + } - /* add to schedule */ - sched_add_before(irn, res); + if (is_ia32_Sbb(irn)) { + /* Feed borrow (in CF) as carry (via CMC) into NOT+ADC. */ + carry = get_irn_n(irn, n_ia32_Sbb_eflags); + carry = new_bd_ia32_Cmc(dbgi, block, carry); + goto carry; + } else if (flags_proj != 0) { + /* + * ARG, the above technique does NOT set the flags right. + * So, we must produce the following code: + * t1 = ~b + * t2 = a + ~b + Carry + * Complement Carry + * + * a + -b = a + (~b + 1) would set the carry flag wrong IFF both a and b are zero. + */ + ir_node *cmc; + ir_node *nnot; + ir_node *adc; + ir_node *adc_flags; + + carry = new_bd_ia32_Stc(dbgi, block); + +carry: + nnot = new_bd_ia32_Not(dbgi, block, in2); + arch_set_irn_register(nnot, in2_reg); + sched_add_before(irn, nnot); + + arch_set_irn_register(carry, &ia32_registers[REG_EFLAGS]); + sched_add_before(irn, carry); + + adc = new_bd_ia32_Adc(dbgi, block, noreg, noreg, nomem, nnot, in1, carry); + arch_set_irn_register(adc, out_reg); + set_ia32_commutative(adc); + + if (flags_proj != NULL) { + set_irn_mode(adc, mode_T); + adc_flags = new_r_Proj(adc, mode_Iu, pn_ia32_Adc_flags); + arch_set_irn_register(adc_flags, &ia32_registers[REG_EFLAGS]); + + cmc = new_bd_ia32_Cmc(dbgi, block, adc_flags); + arch_set_irn_register(cmc, &ia32_registers[REG_EFLAGS]); + sched_add_after(irn, cmc); + exchange(flags_proj, cmc); + } - /* generate the add */ - if (mode_is_float(mode)) { - res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, res, in1, nomem); - set_ia32_am_support(res, ia32_am_Source); - set_ia32_ls_mode(res, get_ia32_ls_mode(irn)); - } - else { - res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, res, in1, nomem); - set_ia32_am_support(res, ia32_am_Full); - set_ia32_commutative(res); - } + res = adc; + } else { + res = new_bd_ia32_Neg(dbgi, block, in2); + arch_set_irn_register(res, in2_reg); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn)); - /* copy register */ - slots = get_ia32_slots(res); - slots[0] = in2_reg; + /* add to schedule */ + sched_add_before(irn, res); - /* exchange the add and the sub */ - edges_reroute(irn, res, irg); + /* generate the add */ + res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, res, in1); + arch_set_irn_register(res, out_reg); + set_ia32_commutative(res); + } + } - /* add to schedule */ + /* exchange the add and the sub */ + edges_reroute(irn, res); sched_add_before(irn, res); + set_irn_mode(res, get_irn_mode(irn)); + + SET_IA32_ORIG_NODE(res, irn); + /* remove the 
old sub */ sched_remove(irn); - arity = get_irn_arity(irn); - for(i = 0; i < arity; ++i) { - set_irn_n(irn, i, new_Bad()); - } + kill_node(irn); DBG_OPT_SUB2NEGADD(irn, res); } -/** - * Transforms a LEA into an Add or SHL if possible. - * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION. - */ -static void ia32_transform_lea_to_add_or_shl(ir_node *irn, ia32_code_gen_t *cg) { - ia32_am_flavour_t am_flav; - int imm = 0; - dbg_info *dbg = get_irn_dbg_info(irn); - ir_graph *irg; - ir_node *res = NULL; - ir_node *nomem, *noreg, *base, *index, *op1, *op2; - ir_node *block; - int offs = 0; - const arch_register_t *out_reg, *base_reg, *index_reg; - - /* must be a LEA */ - if (! is_ia32_Lea(irn)) - return; +static inline int need_constraint_copy(ir_node *irn) +{ + /* TODO this should be determined from the node specification */ + switch (get_ia32_irn_opcode(irn)) { + case iro_ia32_IMul: { + /* the 3 operand form of IMul needs no constraint copy */ + ir_node *right = get_irn_n(irn, n_ia32_IMul_right); + return !is_ia32_Immediate(right); + } - am_flav = get_ia32_am_flavour(irn); + case iro_ia32_Lea: + case iro_ia32_Conv_I2I: + case iro_ia32_Conv_I2I8Bit: + case iro_ia32_CMovcc: + case iro_ia32_Minus64Bit: + return 0; - /* mustn't have a symconst */ - if (get_ia32_am_sc(irn) != NULL || get_ia32_frame_ent(irn) != NULL) - return; - - if (am_flav == ia32_am_IS) { - tarval *tv; + default: + return 1; + } +} - /* Create a SHL */ - noreg = ia32_new_NoReg_gp(cg); - nomem = new_rd_NoMem(cg->irg); - index = get_irn_n(irn, 1); - index_reg = arch_get_irn_register(cg->arch_env, index); - out_reg = arch_get_irn_register(cg->arch_env, irn); +/** + * Returns the index of the "same" register. + * On the x86, we should have only one. + */ +static int get_first_same(const arch_register_req_t* req) +{ + const unsigned other = req->other_same; + int i; - if (! REGS_ARE_EQUAL(out_reg, index_reg)) - return; + for (i = 0; i < 32; ++i) { + if (other & (1U << i)) return i; + } + panic("same position not found"); +} - /* ok, we can transform it */ - irg = cg->irg; - block = get_nodes_block(irn); +/** + * Insert copies for all ia32 nodes where the should_be_same requirement + * is not fulfilled. + * Transform Sub into Neg -- Add if IN2 == OUT + */ +static void assure_should_be_same_requirements(ir_node *node) +{ + const arch_register_t *out_reg, *in_reg; + int n_res, i; + ir_node *in_node, *block; - res = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, index, noreg, nomem); - offs = get_ia32_am_scale(irn); - tv = new_tarval_from_long(offs, mode_Iu); - set_ia32_Immop_tarval(res, tv); - arch_set_irn_register(cg->arch_env, res, out_reg); - } else { - /* only some LEAs can be transformed to an Add */ - if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_BI) - return; - - noreg = ia32_new_NoReg_gp(cg); - nomem = new_rd_NoMem(cg->irg); - op1 = noreg; - op2 = noreg; - base = get_irn_n(irn, 0); - index = get_irn_n(irn, 1); - - if (am_flav & ia32_O) { - offs = get_ia32_am_offs_int(irn); - } + n_res = arch_get_irn_n_outs(node); + block = get_nodes_block(node); - out_reg = arch_get_irn_register(cg->arch_env, irn); - base_reg = arch_get_irn_register(cg->arch_env, base); - index_reg = arch_get_irn_register(cg->arch_env, index); - - irg = cg->irg; - block = get_nodes_block(irn); - - switch(get_ia32_am_flavour(irn)) { - case ia32_am_B: - /* out register must be same as base register */ - if (! 
REGS_ARE_EQUAL(out_reg, base_reg)) - return; - - op1 = base; - break; - case ia32_am_OB: - /* out register must be same as base register */ - if (! REGS_ARE_EQUAL(out_reg, base_reg)) - return; - - op1 = base; - imm = 1; - break; - case ia32_am_BI: - /* out register must be same as one in register */ - if (REGS_ARE_EQUAL(out_reg, base_reg)) { - op1 = base; - op2 = index; - } - else if (REGS_ARE_EQUAL(out_reg, index_reg)) { - op1 = index; - op2 = base; - } - else { - /* in registers a different from out -> no Add possible */ - return; - } - default: - assert(0); - break; - } + /* check all OUT requirements, if there is a should_be_same */ + for (i = 0; i < n_res; i++) { + int i2, arity; + int same_pos; + ir_node *uses_out_reg; + const arch_register_req_t *req = arch_get_irn_register_req_out(node, i); + int uses_out_reg_pos; - res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem); - arch_set_irn_register(cg->arch_env, res, out_reg); - set_ia32_op_type(res, ia32_Normal); - set_ia32_commutative(res); + if (!arch_register_req_is(req, should_be_same)) + continue; - if (imm) { - tarval *tv = new_tarval_from_long(offs, mode_Iu); - set_ia32_Immop_tarval(res, tv); - } - } + same_pos = get_first_same(req); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn)); + /* get in and out register */ + out_reg = arch_get_irn_register_out(node, i); + in_node = get_irn_n(node, same_pos); + in_reg = arch_get_irn_register(in_node); - /* add new ADD/SHL to schedule */ - sched_add_before(irn, res); + /* requirement already fulfilled? */ + if (in_reg == out_reg) + continue; + assert(in_reg->reg_class == out_reg->reg_class); - DBG_OPT_LEA2ADD(irn, res); + /* check if any other input operands uses the out register */ + arity = get_irn_arity(node); + uses_out_reg = NULL; + uses_out_reg_pos = -1; + for (i2 = 0; i2 < arity; ++i2) { + ir_node *in = get_irn_n(node, i2); + const arch_register_t *other_in_reg; - /* remove the old LEA */ - sched_remove(irn); + if (!mode_is_data(get_irn_mode(in))) + continue; - /* exchange the Add and the LEA */ - exchange(irn, res); -} + other_in_reg = arch_get_irn_register(in); -static INLINE int need_constraint_copy(ir_node *irn) { - return ! is_ia32_Lea(irn) && - ! is_ia32_Conv_I2I(irn) && - ! is_ia32_Conv_I2I8Bit(irn) && - ! is_ia32_CmpCMov(irn) && - ! is_ia32_PsiCondCMov(irn) && - ! is_ia32_CmpSet(irn); -} + if (other_in_reg != out_reg) + continue; -/** - * Insert copies for all ia32 nodes where the should_be_same requirement - * is not fulfilled. 
- * Transform Sub into Neg -- Add if IN2 == OUT - */ -static void ia32_finish_node(ir_node *irn, void *env) { - ia32_code_gen_t *cg = env; - const arch_register_req_t **reqs; - const arch_register_t *out_reg, *in_reg, *in2_reg; - int n_res, i; - ir_node *copy, *in_node, *block, *in2_node; - ia32_op_type_t op_tp; - - if (is_ia32_irn(irn)) { - /* AM Dest nodes don't produce any values */ - op_tp = get_ia32_op_type(irn); - if (op_tp == ia32_AddrModeD) - goto end; - - reqs = get_ia32_out_req_all(irn); - n_res = get_ia32_n_res(irn); - block = get_nodes_block(irn); - - /* check all OUT requirements, if there is a should_be_same */ - if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn)) - { - for (i = 0; i < n_res; i++) { - if (arch_register_req_is(reqs[i], should_be_same)) { - int same_pos = reqs[i]->other_same; - - /* get in and out register */ - out_reg = get_ia32_out_reg(irn, i); - in_node = get_irn_n(irn, same_pos); - in_reg = arch_get_irn_register(cg->arch_env, in_node); - - /* don't copy ignore nodes */ - if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node)) - continue; - - /* check if in and out register are equal */ - if (! REGS_ARE_EQUAL(out_reg, in_reg)) { - /* in case of a commutative op: just exchange the in's */ - /* beware: the current op could be everything, so test for ia32 */ - /* commutativity first before getting the second in */ - if (is_ia32_commutative(irn)) { - in2_node = get_irn_n(irn, same_pos ^ 1); - in2_reg = arch_get_irn_register(cg->arch_env, in2_node); - - if (REGS_ARE_EQUAL(out_reg, in2_reg)) { - set_irn_n(irn, same_pos, in2_node); - set_irn_n(irn, same_pos ^ 1, in_node); - } - else - goto insert_copy; - } - else { -insert_copy: - DBG((dbg, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, same_pos)); - /* create copy from in register */ - copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node); - - DBG_OPT_2ADDRCPY(copy); - - /* destination is the out register */ - arch_set_irn_register(cg->arch_env, copy, out_reg); - - /* insert copy before the node into the schedule */ - sched_add_before(irn, copy); - - /* set copy as in */ - set_irn_n(irn, same_pos, copy); - } - } - } + if (uses_out_reg != NULL && in != uses_out_reg) { + panic("invalid register allocation"); } + uses_out_reg = in; + if (uses_out_reg_pos >= 0) + uses_out_reg_pos = -1; /* multiple inputs... */ + else + uses_out_reg_pos = i2; } - /* check xCmp: try to avoid unordered cmp */ - if ((is_ia32_xCmp(irn) || is_ia32_xCmpCMov(irn) || is_ia32_xCmpSet(irn)) && - op_tp == ia32_Normal && - ! is_ia32_ImmConst(irn) && ! 
is_ia32_ImmSymConst(irn)) - { - long pnc = get_ia32_pncode(irn); + /* no-one else is using the out reg, we can simply copy it + * (the register can't be live since the operation will override it + * anyway) */ + if (uses_out_reg == NULL) { + ir_node *copy = be_new_Copy(block, in_node); + DBG_OPT_2ADDRCPY(copy); - if (pnc & pn_Cmp_Uo) { - ir_node *tmp; - int idx1 = 2, idx2 = 3; + /* destination is the out register */ + arch_set_irn_register(copy, out_reg); - if (is_ia32_xCmpCMov(irn)) { - idx1 = 0; - idx2 = 1; - } + /* insert copy before the node into the schedule */ + sched_add_before(node, copy); - tmp = get_irn_n(irn, idx1); - set_irn_n(irn, idx1, get_irn_n(irn, idx2)); - set_irn_n(irn, idx2, tmp); + /* set copy as in */ + set_irn_n(node, same_pos, copy); - set_ia32_pncode(irn, get_negated_pnc(pnc, mode_E)); - } + DBG((dbg, LEVEL_1, + "created copy %+F for should be same argument at input %d of %+F\n", + copy, same_pos, node)); + continue; } + + /* for commutative nodes we can simply swap the left/right */ + if (uses_out_reg_pos == n_ia32_binary_right && is_ia32_commutative(node)) { + ia32_swap_left_right(node); + DBG((dbg, LEVEL_1, + "swapped left/right input of %+F to resolve should be same constraint\n", + node)); + continue; + } + + panic("Unresolved should_be_same constraint"); } -end: ; } /** * Following Problem: * We have a source address mode node with base or index register equal to - * result register. The constraint handler will insert a copy from the - * remaining input operand to the result register -> base or index is - * broken then. + * result register and unfulfilled should_be_same requirement. The constraint + * handler will insert a copy from the remaining input operand to the result + * register -> base or index is broken then. * Solution: Turn back this address mode into explicit Load + Operation. */ -static void fix_am_source(ir_node *irn, void *env) { - ia32_code_gen_t *cg = env; - ir_node *base, *index, *noreg; - const arch_register_t *reg_base, *reg_index; - const arch_register_req_t **reqs; - int n_res, i; +static void fix_am_source(ir_node *irn) +{ + int n_res, i; /* check only ia32 nodes with source address mode */ - if (! 
is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS) + if (!is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS) return; - /* no need to fix unary operations */ - if (get_irn_arity(irn) == 4) + /* only need to fix binary operations */ + if (get_ia32_am_support(irn) != ia32_am_binary) return; - base = get_irn_n(irn, 0); - index = get_irn_n(irn, 1); - - reg_base = arch_get_irn_register(cg->arch_env, base); - reg_index = arch_get_irn_register(cg->arch_env, index); - reqs = get_ia32_out_req_all(irn); - - noreg = ia32_new_NoReg_gp(cg); - - n_res = get_ia32_n_res(irn); + n_res = arch_get_irn_n_outs(irn); for (i = 0; i < n_res; i++) { - if (arch_register_req_is(reqs[i], should_be_same)) { - /* get in and out register */ - const arch_register_t *out_reg = get_ia32_out_reg(irn, i); - int same_pos = reqs[i]->other_same; - - /* - there is a constraint for the remaining operand - and the result register is equal to base or index register - */ - if (same_pos == 2 && - (REGS_ARE_EQUAL(out_reg, reg_base) || REGS_ARE_EQUAL(out_reg, reg_index))) - { - /* turn back address mode */ - ir_node *in_node = get_irn_n(irn, 2); - const arch_register_t *in_reg = arch_get_irn_register(cg->arch_env, in_node); - ir_node *block = get_nodes_block(irn); - ir_mode *ls_mode = get_ia32_ls_mode(irn); - ir_node *load; - int pnres; - - if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_gp]) { - load = new_rd_ia32_Load(NULL, cg->irg, block, base, index, get_irn_n(irn, 4)); - pnres = pn_ia32_Load_res; - } - else if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_xmm]) { - load = new_rd_ia32_xLoad(NULL, cg->irg, block, base, index, get_irn_n(irn, 4)); - pnres = pn_ia32_xLoad_res; - } - else { - panic("cannot turn back address mode for this register class"); - } - - /* copy address mode information to load */ - set_ia32_ls_mode(load, ls_mode); - set_ia32_am_flavour(load, get_ia32_am_flavour(irn)); - set_ia32_op_type(load, ia32_AddrModeS); - set_ia32_am_support(load, ia32_am_Source); - set_ia32_am_scale(load, get_ia32_am_scale(irn)); - set_ia32_am_sc(load, get_ia32_am_sc(irn)); - add_ia32_am_offs_int(load, get_ia32_am_offs_int(irn)); - set_ia32_frame_ent(load, get_ia32_frame_ent(irn)); - - if (is_ia32_use_frame(irn)) - set_ia32_use_frame(load); - - /* insert the load into schedule */ - sched_add_before(irn, load); - - DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load)); - - load = new_r_Proj(cg->irg, block, load, ls_mode, pnres); - arch_set_irn_register(cg->arch_env, load, out_reg); - - /* insert the load result proj into schedule */ - sched_add_before(irn, load); - - /* set the new input operand */ - set_irn_n(irn, 3, load); - - /* this is a normal node now */ - set_irn_n(irn, 0, noreg); - set_irn_n(irn, 1, noreg); - set_ia32_op_type(irn, ia32_Normal); - - break; - } - } + const arch_register_req_t *req = arch_get_irn_register_req_out(irn, i); + const arch_register_t *out_reg; + int same_pos; + ir_node *same_node; + const arch_register_t *same_reg; + ir_node *load_res; + + if (!arch_register_req_is(req, should_be_same)) + continue; + + /* get in and out register */ + out_reg = arch_get_irn_register_out(irn, i); + same_pos = get_first_same(req); + same_node = get_irn_n(irn, same_pos); + same_reg = arch_get_irn_register(same_node); + + /* should_be same constraint is fullfilled, nothing to do */ + if (out_reg == same_reg) + continue; + + /* we only need to do something if the out reg is the same as base + or index register */ + if 
(out_reg != arch_get_irn_register(get_irn_n(irn, n_ia32_base)) && + out_reg != arch_get_irn_register(get_irn_n(irn, n_ia32_index))) + continue; + + load_res = ia32_turn_back_am(irn); + arch_set_irn_register(load_res, out_reg); + + DBG((dbg, LEVEL_3, + "irg %+F: build back AM source for node %+F, inserted load %+F\n", + get_irn_irg(irn), irn, get_Proj_pred(load_res))); + break; } } /** * Block walker: finishes a block */ -static void ia32_finish_irg_walker(ir_node *block, void *env) { +static void ia32_finish_irg_walker(ir_node *block, void *env) +{ ir_node *irn, *next; + (void) env; /* first: turn back AM source if necessary */ for (irn = sched_first(block); ! sched_is_end(irn); irn = next) { next = sched_next(irn); - fix_am_source(irn, env); + fix_am_source(irn); } for (irn = sched_first(block); ! sched_is_end(irn); irn = next) { - ia32_code_gen_t *cg = env; - next = sched_next(irn); /* check if there is a sub which need to be transformed */ - ia32_transform_sub_to_neg_add(irn, cg); - - /* transform a LEA into an Add if possible */ - ia32_transform_lea_to_add_or_shl(irn, cg); + if (is_ia32_Sub(irn) || is_ia32_Sbb(irn) || is_ia32_xSub(irn)) { + ia32_transform_sub_to_neg_add(irn); + } } /* second: insert copies and finish irg */ for (irn = sched_first(block); ! sched_is_end(irn); irn = next) { next = sched_next(irn); - ia32_finish_node(irn, env); + if (is_ia32_irn(irn)) { + /* some nodes are just a bit less efficient, but need no fixing if the + * should be same requirement is not fulfilled */ + if (need_constraint_copy(irn)) + assure_should_be_same_requirements(irn); + } } } /** * Block walker: pushes all blocks on a wait queue */ -static void ia32_push_on_queue_walker(ir_node *block, void *env) { - waitq *wq = env; +static void ia32_push_on_queue_walker(ir_node *block, void *env) +{ + waitq *wq = (waitq*)env; waitq_put(wq, block); } @@ -525,15 +440,16 @@ static void ia32_push_on_queue_walker(ir_node *block, void *env) { /** * Add Copy nodes for not fulfilled should_be_equal constraints */ -void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) { +void ia32_finish_irg(ir_graph *irg) +{ waitq *wq = new_waitq(); /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */ irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq); while (! waitq_empty(wq)) { - ir_node *block = waitq_get(wq); - ia32_finish_irg_walker(block, cg); + ir_node *block = (ir_node*)waitq_get(wq); + ia32_finish_irg_walker(block, NULL); } del_waitq(wq); }
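
The Not/Stc/Adc/Cmc sequence built in ia32_transform_sub_to_neg_add above relies on the two's-complement identity a - b == a + ~b + 1, and on the fact that the carry produced by this add form is the complement of the borrow an x86 SUB would report in CF, which is why a final Cmc is scheduled when a flags Proj is in use. Below is a minimal standalone C sketch (independent of libFirm; not part of the patch, names are illustrative only) that checks both facts over a few boundary values:

/* standalone check of the identity used by the Sub -> Not/Stc/Adc/Cmc rewrite */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint32_t samples[] = {
		0u, 1u, 2u, 0x7fffffffu, 0x80000000u, 0xfffffffeu, 0xffffffffu
	};
	const size_t n = sizeof(samples) / sizeof(samples[0]);

	for (size_t i = 0; i < n; ++i) {
		for (size_t j = 0; j < n; ++j) {
			const uint32_t a = samples[i];
			const uint32_t b = samples[j];

			/* plain subtraction: x86 SUB sets CF to the borrow, i.e. a < b */
			const uint32_t diff   = a - b;
			const int      borrow = a < b;

			/* rewritten form: Stc feeds carry = 1, Adc computes a + ~b + carry */
			const uint64_t wide  = (uint64_t)a + (uint64_t)(uint32_t)~b + 1u;
			const uint32_t diff2 = (uint32_t)wide;
			const int      carry = (int)(wide >> 32);

			assert(diff == diff2);    /* same result value */
			assert(carry == !borrow); /* flags complemented -> final Cmc */
		}
	}
	printf("a - b == a + ~b + 1 and carry == !borrow for all sampled pairs\n");
	return 0;
}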
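
get_first_same() above recovers the should_be_same input position from the other_same bit mask by a linear scan; on ia32 exactly one bit is expected to be set. A small standalone sketch, assuming a GCC/Clang __builtin_ctz and a hypothetical helper name (not from the patch), showing the scan agrees with a count-trailing-zeros lookup:

#include <assert.h>
#include <stdio.h>

/* hypothetical helper: index of the first set bit, mirroring get_first_same() */
static int ctz_first_same(unsigned other_same)
{
	assert(other_same != 0 && "same position not found");
	return __builtin_ctz(other_same); /* GCC/Clang builtin */
}

/* the linear scan as written in the patch */
static int linear_first_same(unsigned other_same)
{
	for (int i = 0; i < 32; ++i) {
		if (other_same & (1U << i))
			return i;
	}
	assert(0 && "same position not found");
	return -1;
}

int main(void)
{
	for (int pos = 0; pos < 32; ++pos) {
		const unsigned mask = 1U << pos;
		assert(linear_first_same(mask) == ctz_first_same(mask));
	}
	printf("bit-scan variants agree for all single-bit masks\n");
	return 0;
}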