X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_finish.c;h=6bbbc64e26b4777e4a5f8533e1ddf96f2a2a981f;hb=bc7b5ee69d084e629590a6977b79a2fab7cd1aa1;hp=41841405156198dfa556a08b08e02ab04f821343;hpb=1a0620299ab1b46b3679369bbf28a02e6b5038ab;p=libfirm diff --git a/ir/be/ia32/ia32_finish.c b/ir/be/ia32/ia32_finish.c index 418414051..6bbbc64e2 100644 --- a/ir/be/ia32/ia32_finish.c +++ b/ir/be/ia32/ia32_finish.c @@ -23,9 +23,7 @@ * @author Christian Wuerdig * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include "irnode.h" #include "ircons.h" @@ -56,7 +54,8 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG. * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION. */ -static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { +static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) +{ ir_graph *irg; ir_node *in1, *in2, *noreg, *nomem, *res; ir_node *noreg_fp, *block; @@ -72,8 +71,8 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { nomem = new_rd_NoMem(cg->irg); in1 = get_irn_n(irn, n_ia32_binary_left); in2 = get_irn_n(irn, n_ia32_binary_right); - in1_reg = arch_get_irn_register(cg->arch_env, in1); - in2_reg = arch_get_irn_register(cg->arch_env, in2); + in1_reg = arch_get_irn_register(in1); + in2_reg = arch_get_irn_register(in2); out_reg = get_ia32_out_reg(irn, 0); irg = cg->irg; @@ -100,7 +99,7 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { set_ia32_op_type(res, ia32_AddrModeS); set_ia32_ls_mode(res, op_mode); - arch_set_irn_register(cg->arch_env, res, in2_reg); + arch_set_irn_register(res, in2_reg); /* add to schedule */ sched_add_before(irn, res); @@ -124,7 +123,7 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { foreach_out_edge(irn, edge) { ir_node *proj = get_edge_src_irn(edge); long pn = get_Proj_proj(proj); - if(pn == pn_ia32_Sub_res) { + if (pn == pn_ia32_Sub_res) { assert(res_proj == NULL); res_proj = proj; } else { @@ -137,14 +136,14 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { if (flags_proj == NULL) { res = new_rd_ia32_Neg(dbg, irg, block, in2); - arch_set_irn_register(cg->arch_env, res, in2_reg); + arch_set_irn_register(res, in2_reg); /* add to schedule */ sched_add_before(irn, res); /* generate the add */ res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, res, in1); - arch_set_irn_register(cg->arch_env, res, out_reg); + arch_set_irn_register(res, out_reg); set_ia32_commutative(res); /* exchange the add and the sub */ @@ -166,27 +165,24 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { * a + -b = a + (~b + 1) would set the carry flag IF a == b ... 
*/ not = new_rd_ia32_Not(dbg, irg, block, in2); - arch_set_irn_register(cg->arch_env, not, in2_reg); + arch_set_irn_register(not, in2_reg); sched_add_before(irn, not); stc = new_rd_ia32_Stc(dbg, irg, block); - arch_set_irn_register(cg->arch_env, stc, - &ia32_flags_regs[REG_EFLAGS]); + arch_set_irn_register(stc, &ia32_flags_regs[REG_EFLAGS]); sched_add_before(irn, stc); adc = new_rd_ia32_Adc(dbg, irg, block, noreg, noreg, nomem, not, in1, stc); - arch_set_irn_register(cg->arch_env, adc, out_reg); + arch_set_irn_register(adc, out_reg); sched_add_before(irn, adc); set_irn_mode(adc, mode_T); adc_flags = new_r_Proj(irg, block, adc, mode_Iu, pn_ia32_Adc_flags); - arch_set_irn_register(cg->arch_env, adc_flags, - &ia32_flags_regs[REG_EFLAGS]); + arch_set_irn_register(adc_flags, &ia32_flags_regs[REG_EFLAGS]); cmc = new_rd_ia32_Cmc(dbg, irg, block, adc_flags); - arch_set_irn_register(cg->arch_env, cmc, - &ia32_flags_regs[REG_EFLAGS]); + arch_set_irn_register(cmc, &ia32_flags_regs[REG_EFLAGS]); sched_add_before(irn, cmc); exchange(flags_proj, cmc); @@ -199,6 +195,8 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { } } + set_irn_mode(res, get_irn_mode(irn)); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn)); /* remove the old sub */ @@ -208,7 +206,7 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { DBG_OPT_SUB2NEGADD(irn, res); } -static INLINE int need_constraint_copy(ir_node *irn) +static inline int need_constraint_copy(ir_node *irn) { /* TODO this should be determined from the node specification */ switch (get_ia32_irn_opcode(irn)) { @@ -254,7 +252,6 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg, ir_node *node) { ir_graph *irg = cg->irg; - const arch_env_t *arch_env = cg->arch_env; const arch_register_req_t **reqs; const arch_register_t *out_reg, *in_reg; int n_res, i; @@ -285,7 +282,7 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg, /* get in and out register */ out_reg = get_ia32_out_reg(node, i); in_node = get_irn_n(node, same_pos); - in_reg = arch_get_irn_register(arch_env, in_node); + in_reg = arch_get_irn_register(in_node); /* requirement already fulfilled? */ if (in_reg == out_reg) @@ -300,23 +297,23 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg, arity = get_irn_arity(node); uses_out_reg = NULL; uses_out_reg_pos = -1; - for(i2 = 0; i2 < arity; ++i2) { + for (i2 = 0; i2 < arity; ++i2) { ir_node *in = get_irn_n(node, i2); const arch_register_t *in_reg; - if(!mode_is_data(get_irn_mode(in))) + if (!mode_is_data(get_irn_mode(in))) continue; - in_reg = arch_get_irn_register(arch_env, in); + in_reg = arch_get_irn_register(in); - if(in_reg != out_reg) + if (in_reg != out_reg) continue; - if(uses_out_reg != NULL && in != uses_out_reg) { + if (uses_out_reg != NULL && in != uses_out_reg) { panic("invalid register allocation"); } uses_out_reg = in; - if(uses_out_reg_pos >= 0) + if (uses_out_reg_pos >= 0) uses_out_reg_pos = -1; /* multiple inputs... 
*/ else uses_out_reg_pos = i2; @@ -325,12 +322,12 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg, /* no-one else is using the out reg, we can simply copy it * (the register can't be live since the operation will override it * anyway) */ - if(uses_out_reg == NULL) { + if (uses_out_reg == NULL) { ir_node *copy = be_new_Copy(cls, irg, block, in_node); DBG_OPT_2ADDRCPY(copy); /* destination is the out register */ - arch_set_irn_register(arch_env, copy, out_reg); + arch_set_irn_register(copy, out_reg); /* insert copy before the node into the schedule */ sched_add_before(node, copy); @@ -366,8 +363,8 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg, perm_proj0 = new_r_Proj(irg, block, perm, get_irn_mode(in[0]), 0); perm_proj1 = new_r_Proj(irg, block, perm, get_irn_mode(in[1]), 1); - arch_set_irn_register(arch_env, perm_proj0, out_reg); - arch_set_irn_register(arch_env, perm_proj1, in_reg); + arch_set_irn_register(perm_proj0, out_reg); + arch_set_irn_register(perm_proj1, in_reg); sched_add_before(node, perm); @@ -376,12 +373,12 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg, perm, same_pos, node, uses_out_reg)); /* use the perm results */ - for(i2 = 0; i2 < arity; ++i2) { + for (i2 = 0; i2 < arity; ++i2) { ir_node *in = get_irn_n(node, i2); - if(in == in_node) { + if (in == in_node) { set_irn_n(node, i2, perm_proj0); - } else if(in == uses_out_reg) { + } else if (in == uses_out_reg) { set_irn_n(node, i2, perm_proj1); } } @@ -396,139 +393,69 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg, * register -> base or index is broken then. * Solution: Turn back this address mode into explicit Load + Operation. */ -static void fix_am_source(ir_node *irn, void *env) +static void fix_am_source(ir_node *irn) { - ia32_code_gen_t *cg = env; - const arch_env_t *arch_env = cg->arch_env; - ir_node *base; - ir_node *index; - ir_node *noreg; - const arch_register_t *reg_base; - const arch_register_t *reg_index; const arch_register_req_t **reqs; int n_res, i; /* check only ia32 nodes with source address mode */ - if (! 
is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS) + if (!is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS) return; /* only need to fix binary operations */ if (get_ia32_am_support(irn) != ia32_am_binary) return; - base = get_irn_n(irn, n_ia32_base); - index = get_irn_n(irn, n_ia32_index); - - reg_base = arch_get_irn_register(arch_env, base); - reg_index = arch_get_irn_register(arch_env, index); - reqs = get_ia32_out_req_all(irn); - - noreg = ia32_new_NoReg_gp(cg); - + reqs = get_ia32_out_req_all(irn); n_res = get_ia32_n_res(irn); for (i = 0; i < n_res; i++) { - if (arch_register_req_is(reqs[i], should_be_same)) { - /* get in and out register */ - const arch_register_t *out_reg = get_ia32_out_reg(irn, i); - int same_pos = get_first_same(reqs[i]); - ir_node *same_node = get_irn_n(irn, same_pos); - const arch_register_t *same_reg - = arch_get_irn_register(arch_env, same_node); - const arch_register_class_t *same_cls; - ir_graph *irg = cg->irg; - dbg_info *dbgi = get_irn_dbg_info(irn); - ir_node *block = get_nodes_block(irn); - ir_mode *proj_mode; - ir_node *load; - ir_node *load_res; - ir_node *mem; - int pnres; - int pnmem; - - /* should_be same constraint is fullfilled, nothing to do */ - if(out_reg == same_reg) - continue; - - /* we only need to do something if the out reg is the same as base - or index register */ - if (out_reg != reg_base && out_reg != reg_index) - continue; - - /* turn back address mode */ - same_cls = arch_register_get_class(same_reg); - mem = get_irn_n(irn, n_ia32_mem); - assert(get_irn_mode(mem) == mode_M); - if (same_cls == &ia32_reg_classes[CLASS_ia32_gp]) { - load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem); - pnres = pn_ia32_Load_res; - pnmem = pn_ia32_Load_M; - proj_mode = mode_Iu; - } else if (same_cls == &ia32_reg_classes[CLASS_ia32_xmm]) { - load = new_rd_ia32_xLoad(dbgi, irg, block, base, index, mem, - get_ia32_ls_mode(irn)); - pnres = pn_ia32_xLoad_res; - pnmem = pn_ia32_xLoad_M; - proj_mode = mode_E; - } else { - panic("cannot turn back address mode for this register class"); - } + const arch_register_t *out_reg; + int same_pos; + ir_node *same_node; + const arch_register_t *same_reg; + ir_node *load_res; - /* copy address mode information to load */ - set_ia32_op_type(load, ia32_AddrModeS); - ia32_copy_am_attrs(load, irn); - if (is_ia32_is_reload(irn)) - set_ia32_is_reload(load); + if (!arch_register_req_is(reqs[i], should_be_same)) + continue; - /* insert the load into schedule */ - sched_add_before(irn, load); + /* get in and out register */ + out_reg = get_ia32_out_reg(irn, i); + same_pos = get_first_same(reqs[i]); + same_node = get_irn_n(irn, same_pos); + same_reg = arch_get_irn_register(same_node); - DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load)); + /* should_be same constraint is fullfilled, nothing to do */ + if (out_reg == same_reg) + continue; - load_res = new_r_Proj(cg->irg, block, load, proj_mode, pnres); - arch_set_irn_register(cg->arch_env, load_res, out_reg); + /* we only need to do something if the out reg is the same as base + or index register */ + if (out_reg != arch_get_irn_register(get_irn_n(irn, n_ia32_base)) && + out_reg != arch_get_irn_register(get_irn_n(irn, n_ia32_index))) + continue; - /* set the new input operand */ - if (is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) - set_irn_n(irn, n_ia32_binary_left, load_res); - else - set_irn_n(irn, n_ia32_binary_right, load_res); - if (get_irn_mode(irn) == mode_T) { - const ir_edge_t 
*edge, *next; - foreach_out_edge_safe(irn, edge, next) { - ir_node *node = get_edge_src_irn(edge); - int pn = get_Proj_proj(node); - if (pn == pn_ia32_res) { - exchange(node, irn); - } else if (pn == pn_ia32_mem) { - set_Proj_pred(node, load); - set_Proj_proj(node, pnmem); - } else { - panic("Unexpected Proj"); - } - } - set_irn_mode(irn, mode_Iu); - } + load_res = turn_back_am(irn); + arch_set_irn_register(load_res, out_reg); - /* this is a normal node now */ - set_irn_n(irn, n_ia32_base, noreg); - set_irn_n(irn, n_ia32_index, noreg); - set_ia32_op_type(irn, ia32_Normal); - break; - } + DBG((dbg, LEVEL_3, + "irg %+F: build back AM source for node %+F, inserted load %+F\n", + get_irn_irg(irn), irn, get_Proj_pred(load_res))); + break; } } /** * Block walker: finishes a block */ -static void ia32_finish_irg_walker(ir_node *block, void *env) { +static void ia32_finish_irg_walker(ir_node *block, void *env) +{ ia32_code_gen_t *cg = env; ir_node *irn, *next; /* first: turn back AM source if necessary */ for (irn = sched_first(block); ! sched_is_end(irn); irn = next) { next = sched_next(irn); - fix_am_source(irn, env); + fix_am_source(irn); } for (irn = sched_first(block); ! sched_is_end(irn); irn = next) { @@ -557,7 +484,8 @@ static void ia32_finish_irg_walker(ir_node *block, void *env) { /** * Block walker: pushes all blocks on a wait queue */ -static void ia32_push_on_queue_walker(ir_node *block, void *env) { +static void ia32_push_on_queue_walker(ir_node *block, void *env) +{ waitq *wq = env; waitq_put(wq, block); } @@ -566,7 +494,8 @@ static void ia32_push_on_queue_walker(ir_node *block, void *env) { /** * Add Copy nodes for not fulfilled should_be_equal constraints */ -void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) { +void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) +{ waitq *wq = new_waitq(); /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
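The Not/Stc/Adc/Cmc sequence built in the flags_proj branch of ia32_transform_sub_to_neg_add above rests on the identity a - b = a + ~b + 1: Stc supplies the "+ 1" as the carry into the Adc, and Cmc then flips the resulting carry, because Sub reports a borrow exactly where that addition does not carry out (the a == b case called out in the comment is where a plain Neg + Add would get the flag wrong). The following is a minimal, self-contained C sketch of that arithmetic, assuming 32-bit operands; the check()/main() helpers are illustrative only and are not part of the patch or of libfirm.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Emulate the generated sequence  not b; stc; adc b, a; cmc
 * and compare the value and the carry/borrow flag against an
 * ordinary subtraction a - b. */
static void check(uint32_t a, uint32_t b)
{
	uint32_t not_b = ~b;                       /* not b                     */
	uint64_t wide  = (uint64_t)a + not_b + 1;  /* adc, with CF set by stc   */
	uint32_t res   = (uint32_t)wide;
	int      carry = (int)(wide >> 32);        /* carry out of the adc      */
	int      cf    = !carry;                   /* cmc complements the carry */

	assert(res == a - b);      /* same result as sub          */
	assert(cf  == (a < b));    /* same borrow flag as sub     */
	printf("%" PRIu32 " - %" PRIu32 " = %" PRIu32 ", borrow=%d\n",
	       a, b, res, cf);
}

int main(void)
{
	check(5, 3);
	check(3, 5);
	check(7, 7);   /* the a == b case from the comment */
	return 0;
}

This also explains why the patch exchanges flags_proj with the Cmc node rather than with the Adc itself: only after the complement does the flag value match what the original Sub's flags consumer expects.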