X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbelower.c;h=5354b7f6a0008b9446d0a67d3a29b60e0f8416be;hb=945c6c2ceebef5e41c0486c31f49d2319cacb3da;hp=ab2559f5fddee99422dcdd02101284aa67125fb2;hpb=e0ab158779e10972a72ae7ad76de9af72dde3e24;p=libfirm diff --git a/ir/be/belower.c b/ir/be/belower.c index ab2559f5f..5354b7f6a 100644 --- a/ir/be/belower.c +++ b/ir/be/belower.c @@ -1,28 +1,14 @@ /* - * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. - * * This file is part of libFirm. - * - * This file may be distributed and/or modified under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation and appearing in the file LICENSE.GPL included in the - * packaging of this file. - * - * Licensees holding valid libFirm Professional Edition licenses may use - * this file in accordance with the libFirm Commercial License. - * Agreement provided with the Software. - * - * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE - * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE. + * Copyright (C) 2012 University of Karlsruhe. */ /** * @file - * @brief Performs lowering of perm nodes. Inserts copies to assure register constraints. + * @brief Performs lowering of perm nodes. Inserts copies to assure + * register constraints. * @author Christian Wuerdig * @date 14.12.2005 - * @version $Id$ */ #include "config.h" @@ -32,16 +18,17 @@ #include "debug.h" #include "xmalloc.h" #include "irnodeset.h" -#include "irnodemap.h" +#include "irnodehashmap.h" #include "irgmod.h" #include "iredges_t.h" #include "irgwalk.h" #include "array_t.h" -#include "bearch_t.h" +#include "bearch.h" +#include "beirg.h" #include "belower.h" -#include "benode_t.h" -#include "besched_t.h" +#include "benode.h" +#include "besched.h" #include "bestat.h" #include "bessaconstr.h" #include "beintlive_t.h" @@ -52,7 +39,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg;) DEBUG_ONLY(static firm_dbg_module_t *dbg_constr;) DEBUG_ONLY(static firm_dbg_module_t *dbg_permmove;) -/** Associates an ir_node with it's copy and CopyKeep. */ +/** Associates an ir_node with its copy and CopyKeep. */ typedef struct { ir_nodeset_t copies; /**< all non-spillable copies of this irn */ const arch_register_class_t *cls; @@ -60,19 +47,19 @@ typedef struct { /** Environment for constraints. */ typedef struct { - be_irg_t *birg; - ir_nodemap_t op_set; - struct obstack obst; + ir_graph *irg; + ir_nodehashmap_t op_set; + struct obstack obst; } constraint_env_t; /** Lowering walker environment. */ -typedef struct _lower_env_t { - be_irg_t *birg; - unsigned do_copy : 1; +typedef struct lower_env_t { + ir_graph *irg; + unsigned do_copy : 1; } lower_env_t; /** Holds a Perm register pair. */ -typedef struct _reg_pair_t { +typedef struct reg_pair_t { const arch_register_t *in_reg; /**< a perm IN register */ ir_node *in_node; /**< the in node to which the register belongs */ @@ -82,15 +69,13 @@ typedef struct _reg_pair_t { int checked; /**< indicates whether the pair was check for cycle or not */ } reg_pair_t; -typedef enum _perm_type_t { +typedef enum perm_type_t { PERM_CYCLE, PERM_CHAIN, - PERM_SWAP, - PERM_COPY } perm_type_t; /** Structure to represent cycles or chains in a Perm. 
*/ -typedef struct _perm_cycle_t { +typedef struct perm_cycle_t { const arch_register_t **elems; /**< the registers in the cycle */ int n_elems; /**< number of elements in the cycle */ perm_type_t type; /**< type (CHAIN or CYCLE) */ @@ -120,7 +105,8 @@ static int get_n_unchecked_pairs(reg_pair_t const *const pairs, int const n) * @param reg The register to look for * @return The corresponding node or NULL if not found */ -static ir_node *get_node_for_in_register(reg_pair_t *pairs, int n, const arch_register_t *reg) { +static ir_node *get_node_for_in_register(reg_pair_t *pairs, int n, const arch_register_t *reg) +{ int i; for (i = 0; i < n; i++) { @@ -142,7 +128,8 @@ static ir_node *get_node_for_in_register(reg_pair_t *pairs, int n, const arch_re * @param reg The register to look for * @return The corresponding node or NULL if not found */ -static ir_node *get_node_for_out_register(reg_pair_t *pairs, int n, const arch_register_t *reg) { +static ir_node *get_node_for_out_register(reg_pair_t *pairs, int n, const arch_register_t *reg) +{ int i; for (i = 0; i < n; i++) { @@ -164,7 +151,8 @@ static ir_node *get_node_for_out_register(reg_pair_t *pairs, int n, const arch_r * * @return The corresponding index in pairs or -1 if not found */ -static int get_pairidx_for_in_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) { +static int get_pairidx_for_in_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) +{ int i; for (i = 0; i < n; i++) { @@ -185,7 +173,8 @@ static int get_pairidx_for_in_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) * * @return The corresponding index in pairs or -1 if not found */ -static int get_pairidx_for_out_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) { +static int get_pairidx_for_out_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) +{ int i; for (i = 0; i < n; i++) { @@ -288,18 +277,15 @@ static void get_perm_cycle(perm_cycle_t *const cycle, static void lower_perm_node(ir_node *irn, lower_env_t *env) { const arch_register_class_t *const reg_class = arch_get_irn_register(get_irn_n(irn, 0))->reg_class; - ir_graph *const irg = get_irn_irg(irn); ir_node *const block = get_nodes_block(irn); int const arity = get_irn_arity(irn); - reg_pair_t *const pairs = alloca(arity * sizeof(pairs[0])); + reg_pair_t *const pairs = ALLOCAN(reg_pair_t, arity); int keep_perm = 0; int do_copy = env->do_copy; /* Get the schedule predecessor node to the perm. * NOTE: This works with auto-magic. If we insert the new copy/exchange * nodes after this node, everything should be ok. 
*/ ir_node * sched_point = sched_prev(irn); - const ir_edge_t * edge; - const ir_edge_t * next; int n; int i; @@ -310,7 +296,7 @@ static void lower_perm_node(ir_node *irn, lower_env_t *env) /* build the list of register pairs (in, out) */ n = 0; - foreach_out_edge_safe(irn, edge, next) { + foreach_out_edge_safe(irn, edge) { ir_node *const out = get_edge_src_irn(edge); long const pn = get_Proj_proj(out); ir_node *const in = get_irn_n(irn, pn); @@ -327,9 +313,9 @@ static void lower_perm_node(ir_node *irn, lower_env_t *env) pair = &pairs[n++]; pair->in_node = in; - pair->in_reg = arch_get_irn_register(in); + pair->in_reg = in_reg; pair->out_node = out; - pair->out_reg = arch_get_irn_register(out); + pair->out_reg = out_reg; pair->checked = 0; } @@ -380,9 +366,9 @@ static void lower_perm_node(ir_node *irn, lower_env_t *env) * IN_2 = in node with register i + 1 * OUT_1 = out node with register i + 1 * OUT_2 = out node with register i */ + ir_node *cpyxchg; if (cycle.type == PERM_CYCLE && !do_copy) { ir_node *in[2]; - ir_node *cpyxchg; in[0] = arg1; in[1] = arg2; @@ -418,14 +404,14 @@ static void lower_perm_node(ir_node *irn, lower_env_t *env) DBG((dbg, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n", irn, res1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name)); - cpyxchg = be_new_Perm(reg_class, irg, block, 2, in); + cpyxchg = be_new_Perm(reg_class, block, 2, in); if (i > 0) { /* cycle is not done yet */ int pidx = get_pairidx_for_in_regidx(pairs, n, cycle.elems[i]->index); /* create intermediate proj */ - res1 = new_r_Proj(irg, block, cpyxchg, get_irn_mode(res1), 0); + res1 = new_r_Proj(cpyxchg, get_irn_mode(res1), 0); /* set as in for next Perm */ pairs[pidx].in_node = res1; @@ -439,31 +425,23 @@ static void lower_perm_node(ir_node *irn, lower_env_t *env) arch_set_irn_register(res2, cycle.elems[i + 1]); arch_set_irn_register(res1, cycle.elems[i]); - /* insert the copy/exchange node in schedule after the magic schedule node (see above) */ - sched_add_after(sched_point, cpyxchg); - - DBG((dbg, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point)); - - /* set the new scheduling point */ - sched_point = res1; + DB((dbg, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point)); } else { - ir_node *cpyxchg; - - DBG((dbg, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n", + DB((dbg, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n", irn, arg1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name)); - cpyxchg = be_new_Copy(reg_class, irg, block, arg1); + cpyxchg = be_new_Copy(block, arg1); arch_set_irn_register(cpyxchg, cycle.elems[i + 1]); /* exchange copy node and proj */ exchange(res2, cpyxchg); + } - /* insert the copy/exchange node in schedule after the magic schedule node (see above) */ - sched_add_after(sched_point, cpyxchg); + /* insert the copy/exchange node in schedule after the magic schedule node (see above) */ + sched_add_after(sched_point, cpyxchg); - /* set the new scheduling point */ - sched_point = cpyxchg; - } + /* set the new scheduling point */ + sched_point = cpyxchg; } } @@ -479,7 +457,8 @@ static void lower_perm_node(ir_node *irn, lower_env_t *env) -static int has_irn_users(const ir_node *irn) { +static int has_irn_users(const ir_node *irn) +{ return get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL) != 0; } @@ -496,24 +475,24 @@ static ir_node *find_copy(ir_node *irn, ir_node *op) } } -static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) { 
- ir_graph *irg; - ir_nodemap_t *op_set; +static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) +{ + ir_nodehashmap_t *op_set; ir_node *block; const arch_register_class_t *cls; ir_node *keep, *cpy; op_copy_assoc_t *entry; - if (arch_irn_is(other_different, ignore) || + arch_register_req_t const *const req = arch_get_irn_register_req(other_different); + if (arch_register_req_is(req, ignore) || !mode_is_datab(get_irn_mode(other_different))) { - DBG((dbg_constr, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn)); + DB((dbg_constr, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn)); return; } - irg = be_get_birg_irg(env->birg); op_set = &env->op_set; block = get_nodes_block(irn); - cls = arch_get_irn_reg_class(other_different, -1); + cls = req->cls; /* Make a not spillable copy of the different node */ /* this is needed because the different irn could be */ @@ -523,42 +502,42 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, /* check if already exists such a copy in the schedule immediately before */ cpy = find_copy(skip_Proj(irn), other_different); if (! cpy) { - cpy = be_new_Copy(cls, irg, block, other_different); - be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill); - DBG((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different)); + cpy = be_new_Copy(block, other_different); + arch_set_irn_flags(cpy, arch_irn_flags_dont_spill); + DB((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different)); } else { - DBG((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different)); + DB((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different)); } /* Add the Keep resp. CopyKeep and reroute the users */ /* of the other_different irn in case of CopyKeep. */ if (has_irn_users(other_different)) { - keep = be_new_CopyKeep_single(cls, irg, block, cpy, irn, get_irn_mode(other_different)); - be_node_set_reg_class(keep, 1, cls); + keep = be_new_CopyKeep_single(block, cpy, irn); + be_node_set_reg_class_in(keep, 1, cls); } else { ir_node *in[2]; in[0] = irn; in[1] = cpy; - keep = be_new_Keep(cls, irg, block, 2, in); + keep = be_new_Keep(block, 2, in); } - DBG((dbg_constr, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy)); + DB((dbg_constr, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy)); /* insert copy and keep into schedule */ assert(sched_is_scheduled(irn) && "need schedule to assure constraints"); if (! sched_is_scheduled(cpy)) sched_add_before(skip_Proj(irn), cpy); - sched_add_after(irn, keep); + sched_add_after(skip_Proj(irn), keep); - /* insert the other different and it's copies into the map */ - entry = ir_nodemap_get(op_set, other_different); + /* insert the other different and its copies into the map */ + entry = ir_nodehashmap_get(op_copy_assoc_t, op_set, other_different); if (! 
entry) { - entry = obstack_alloc(&env->obst, sizeof(*entry)); + entry = OALLOC(&env->obst, op_copy_assoc_t); entry->cls = cls; ir_nodeset_init(&entry->copies); - ir_nodemap_insert(op_set, other_different, entry); + ir_nodehashmap_insert(op_set, other_different, entry); } /* insert copy */ @@ -577,8 +556,9 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, * @param skipped_irn if irn is a Proj node, its predecessor, else irn * @param env the constraint environment */ -static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env) { - const arch_register_req_t *req = arch_get_register_req(irn, -1); +static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env) +{ + const arch_register_req_t *req = arch_get_irn_register_req(irn); if (arch_register_req_is(req, must_be_different)) { const unsigned other = req->other_different; @@ -615,25 +595,15 @@ static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, con * @param block The block to be checked * @param walk_env The walker environment */ -static void assure_constraints_walker(ir_node *block, void *walk_env) { - ir_node *irn; +static void assure_constraints_walker(ir_node *block, void *walk_env) +{ + constraint_env_t *env = (constraint_env_t*)walk_env; sched_foreach_reverse(block, irn) { - ir_mode *mode = get_irn_mode(irn); - - if (mode == mode_T) { - const ir_edge_t *edge; - - foreach_out_edge(irn, edge) { - ir_node *proj = get_edge_src_irn(edge); - - mode = get_irn_mode(proj); - if (mode_is_datab(mode)) - assure_different_constraints(proj, irn, walk_env); - } - } else if (mode_is_datab(mode)) { - assure_different_constraints(irn, irn, walk_env); - } + be_foreach_value(irn, value, + if (mode_is_datab(get_irn_mode(value))) + assure_different_constraints(value, irn, env); + ); } } @@ -641,19 +611,16 @@ static void assure_constraints_walker(ir_node *block, void *walk_env) { * Melt all copykeeps pointing to the same node * (or Projs of the same node), copying the same operand. 
*/ -static void melt_copykeeps(constraint_env_t *cenv) { - be_irg_t *birg = cenv->birg; - ir_graph *irg = be_get_birg_irg(birg); - ir_nodemap_iterator_t map_iter; - ir_nodemap_entry_t map_entry; +static void melt_copykeeps(constraint_env_t *cenv) +{ + ir_nodehashmap_iterator_t map_iter; + ir_nodehashmap_entry_t map_entry; /* for all */ - foreach_ir_nodemap(&cenv->op_set, map_entry, map_iter) { - op_copy_assoc_t *entry = map_entry.data; + foreach_ir_nodehashmap(&cenv->op_set, map_entry, map_iter) { + op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data; int idx, num_ck; - ir_node *cp; struct obstack obst; - ir_nodeset_iterator_t iter; ir_node **ck_arr, **melt_arr; obstack_init(&obst); @@ -681,7 +648,6 @@ static void melt_copykeeps(constraint_env_t *cenv) { if (ck_arr[idx]) { int j, n_melt; ir_node **new_ck_in; - ir_node *new_ck; ir_node *sched_pt = NULL; n_melt = 1; @@ -689,7 +655,7 @@ static void melt_copykeeps(constraint_env_t *cenv) { ref_mode_T = skip_Proj(get_irn_n(ref, 1)); obstack_grow(&obst, &ref, sizeof(ref)); - DBG((dbg_constr, LEVEL_1, "Trying to melt %+F:\n", ref)); + DB((dbg_constr, LEVEL_1, "Trying to melt %+F:\n", ref)); /* check for copykeeps pointing to the same mode_T node as the reference copykeep */ for (j = 0; j < num_ck; ++j) { @@ -698,7 +664,7 @@ static void melt_copykeeps(constraint_env_t *cenv) { if (j != idx && cur_ck && skip_Proj(get_irn_n(cur_ck, 1)) == ref_mode_T) { obstack_grow(&obst, &cur_ck, sizeof(cur_ck)); ir_nodeset_remove(&entry->copies, cur_ck); - DBG((dbg_constr, LEVEL_1, "\t%+F\n", cur_ck)); + DB((dbg_constr, LEVEL_1, "\t%+F\n", cur_ck)); ck_arr[j] = NULL; ++n_melt; sched_remove(cur_ck); @@ -708,7 +674,7 @@ static void melt_copykeeps(constraint_env_t *cenv) { /* check, if we found some candidates for melting */ if (n_melt == 1) { - DBG((dbg_constr, LEVEL_1, "\tno candidate found\n")); + DB((dbg_constr, LEVEL_1, "\tno candidate found\n")); continue; } @@ -727,16 +693,14 @@ static void melt_copykeeps(constraint_env_t *cenv) { kill_node(melt_arr[j]); } + ir_node *const new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in); #ifdef KEEP_ALIVE_COPYKEEP_HACK - new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY); keep_alive(new_ck); -#else - new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref)); #endif /* KEEP_ALIVE_COPYKEEP_HACK */ /* set register class for all kept inputs */ for (j = 1; j <= n_melt; ++j) - be_node_set_reg_class(new_ck, j, entry->cls); + be_node_set_reg_class_in(new_ck, j, entry->cls); ir_nodeset_insert(&entry->copies, new_ck); @@ -748,7 +712,7 @@ static void melt_copykeeps(constraint_env_t *cenv) { } while (be_is_Keep(sched_pt) || be_is_CopyKeep(sched_pt)); sched_add_before(sched_pt, new_ck); - DBG((dbg_constr, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt)); + DB((dbg_constr, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt)); /* finally: kill the reference copykeep */ kill_node(ref); @@ -759,22 +723,16 @@ static void melt_copykeeps(constraint_env_t *cenv) { } } -/** - * Walks over all nodes to assure register constraints. 
- * - * @param birg The birg structure containing the irg - */ -void assure_constraints(be_irg_t *birg) { - ir_graph *irg = be_get_birg_irg(birg); - constraint_env_t cenv; - ir_node **nodes; - ir_nodemap_iterator_t map_iter; - ir_nodemap_entry_t map_entry; +void assure_constraints(ir_graph *irg) +{ + constraint_env_t cenv; + ir_nodehashmap_iterator_t map_iter; + ir_nodehashmap_entry_t map_entry; FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr"); - cenv.birg = birg; - ir_nodemap_init(&cenv.op_set); + cenv.irg = irg; + ir_nodehashmap_init(&cenv.op_set); obstack_init(&cenv.obst); irg_block_walk_graph(irg, NULL, assure_constraints_walker, &cenv); @@ -785,16 +743,12 @@ void assure_constraints(be_irg_t *birg) { melt_copykeeps(&cenv); /* for all */ - foreach_ir_nodemap(&cenv.op_set, map_entry, map_iter) { - op_copy_assoc_t *entry = map_entry.data; - int n; - ir_node *cp; - ir_nodeset_iterator_t iter; + foreach_ir_nodehashmap(&cenv.op_set, map_entry, map_iter) { + op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data; + size_t n = ir_nodeset_size(&entry->copies); + ir_node **nodes = ALLOCAN(ir_node*, n); be_ssa_construction_env_t senv; - n = ir_nodeset_size(&entry->copies); - nodes = alloca(n * sizeof(nodes[0])); - /* put the node in an array */ DBG((dbg_constr, LEVEL_1, "introduce copies for %+F ", map_entry.node)); @@ -807,8 +761,8 @@ void assure_constraints(be_irg_t *birg) { DB((dbg_constr, LEVEL_1, "\n")); - /* introduce the copies for the operand and it's copies */ - be_ssa_construction_init(&senv, birg); + /* introduce the copies for the operand and its copies */ + be_ssa_construction_init(&senv, irg); be_ssa_construction_add_copy(&senv, map_entry.node); be_ssa_construction_add_copies(&senv, nodes, n); be_ssa_construction_fix_users(&senv, map_entry.node); @@ -818,15 +772,13 @@ void assure_constraints(be_irg_t *birg) { /* so we transform unnecessary ones into Keeps. */ foreach_ir_nodeset(&entry->copies, cp, iter) { if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) { + int n = get_irn_arity(cp); ir_node *keep; - int n = get_irn_arity(cp); - keep = be_new_Keep(arch_get_irn_reg_class(cp, -1), - irg, get_nodes_block(cp), n, get_irn_in(cp) + 1); - sched_add_before(cp, keep); + keep = be_new_Keep(get_nodes_block(cp), n, get_irn_in(cp) + 1); + sched_replace(cp, keep); /* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */ - sched_remove(cp); kill_node(cp); } } @@ -834,12 +786,11 @@ void assure_constraints(be_irg_t *birg) { ir_nodeset_destroy(&entry->copies); } - ir_nodemap_destroy(&cenv.op_set); + ir_nodehashmap_destroy(&cenv.op_set); obstack_free(&cenv.obst, NULL); - be_liveness_invalidate(be_get_birg_liveness(birg)); + be_invalidate_live_sets(irg); } - /** * Push nodes that do not need to be permed through the Perm. * This is commonly a reload cascade at block ends. @@ -853,11 +804,10 @@ void assure_constraints(be_irg_t *birg) { * @return 1, if there is something left to perm over. * 0, if removed the complete perm. 
*/ -static int push_through_perm(ir_node *perm, lower_env_t *env) +static int push_through_perm(ir_node *perm) { ir_graph *irg = get_irn_irg(perm); ir_node *bl = get_nodes_block(perm); - ir_node *node; int arity = get_irn_arity(perm); int *map; int *proj_map; @@ -865,19 +815,15 @@ static int push_through_perm(ir_node *perm, lower_env_t *env) int n_moved; int new_size; ir_node *frontier = bl; - int i, n; - const ir_edge_t *edge; - ir_node *one_proj = NULL, *irn; - const arch_register_class_t *cls = NULL; - - DBG((dbg_permmove, LEVEL_1, "perm move %+F irg %+F\n", perm, irg)); + be_lv_t *lv = be_get_irg_liveness(irg); /* get some Proj and find out the register class of that Proj. */ - edge = get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL); - one_proj = get_edge_src_irn(edge); + ir_node *one_proj = get_edge_src_irn(get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL)); + const arch_register_class_t *cls = arch_get_irn_reg_class(one_proj); assert(is_Proj(one_proj)); - cls = arch_get_irn_reg_class(one_proj, -1); + + DB((dbg_permmove, LEVEL_1, "perm move %+F irg %+F\n", perm, irg)); /* Find the point in the schedule after which the * potentially movable nodes must be defined. @@ -887,26 +833,27 @@ static int push_through_perm(ir_node *perm, lower_env_t *env) * the former dead operand would be live now at the point of * the Perm, increasing the register pressure by one. */ - sched_foreach_reverse_from(sched_prev(perm), irn) { - for (i = get_irn_arity(irn) - 1; i >= 0; --i) { - ir_node *op = get_irn_n(irn, i); - if (arch_irn_consider_in_reg_alloc(cls, op) && - !values_interfere(env->birg, op, one_proj)) { + sched_foreach_reverse_before(perm, irn) { + be_foreach_use(irn, cls, in_req_, op, op_req_, + if (!be_values_interfere(lv, op, one_proj)) { frontier = irn; goto found_front; } - } + ); } found_front: - DBG((dbg_permmove, LEVEL_2, "\tfrontier: %+F\n", frontier)); + DB((dbg_permmove, LEVEL_2, "\tfrontier: %+F\n", frontier)); - node = sched_prev(perm); n_moved = 0; - while (!sched_is_begin(node)) { + for (;;) { + ir_node *const node = sched_prev(perm); + if (node == frontier) + break; + const arch_register_req_t *req; int input = -1; - ir_node *proj; + ir_node *proj = NULL; /* search if node is a INPUT of Perm */ foreach_out_edge(perm, edge) { @@ -922,16 +869,9 @@ found_front: /* it wasn't an input to the perm, we can't do anything more */ if (input < 0) break; - if (!sched_comes_after(frontier, node)) - break; if (arch_irn_is(node, modify_flags)) break; - if (is_Proj(node)) { - req = arch_get_register_req(get_Proj_pred(node), - -1 - get_Proj_proj(node)); - } else { - req = arch_get_register_req(node, -1); - } + req = arch_get_irn_register_req(node); if (req->type != arch_register_req_type_normal) break; for (i = get_irn_arity(node) - 1; i >= 0; --i) { @@ -953,29 +893,25 @@ found_front: arch_set_irn_register(node, arch_get_irn_register(proj)); /* reroute all users of the proj to the moved node. 
*/ - edges_reroute(proj, node, irg); - - /* and kill it */ - set_Proj_pred(proj, new_Bad()); - kill_node(proj); + exchange(proj, node); bitset_set(moved, input); n_moved++; - - node = sched_prev(node); } /* well, we could not push anything through the perm */ - if(n_moved == 0) + if (n_moved == 0) return 1; new_size = arity - n_moved; - if(new_size == 0) { + if (new_size == 0) { + sched_remove(perm); + kill_node(perm); return 0; } - map = alloca(new_size * sizeof(map[0])); - proj_map = alloca(arity * sizeof(proj_map[0])); + map = ALLOCAN(int, new_size); + proj_map = ALLOCAN(int, arity); memset(proj_map, -1, sizeof(proj_map[0])); n = 0; for (i = 0; i < arity; ++i) { @@ -1011,31 +947,23 @@ static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) if (!be_is_Perm(irn)) return; - perm_stayed = push_through_perm(irn, walk_env); + perm_stayed = push_through_perm(irn); if (perm_stayed) - lower_perm_node(irn, walk_env); + lower_perm_node(irn, (lower_env_t*)walk_env); } -/** - * Walks over all blocks in an irg and performs lowering need to be - * done after register allocation (e.g. perm lowering). - * - * @param birg The birg object - * @param do_copy 1 == resolve cycles with a free reg if available - */ -void lower_nodes_after_ra(be_irg_t *birg, int do_copy) { +void lower_nodes_after_ra(ir_graph *irg, int do_copy) +{ lower_env_t env; - ir_graph *irg; FIRM_DBG_REGISTER(dbg, "firm.be.lower"); FIRM_DBG_REGISTER(dbg_permmove, "firm.be.lower.permmove"); - env.birg = birg; + env.irg = irg; env.do_copy = do_copy; /* we will need interference */ - be_liveness_assure_chk(be_get_birg_liveness(birg)); + be_assure_live_chk(irg); - irg = be_get_birg_irg(birg); irg_walk_graph(irg, NULL, lower_nodes_after_ra_walker, &env); }
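
Editorial note on the technique this change touches: the core of lower_perm_node() is the decomposition of a Perm node into register chains and cycles — a chain is lowered into plain copies emitted from its dead end backwards, a cycle of n registers into n-1 two-register swaps (small 2-input Perms), or into copies when do_copy allows a free register. The stand-alone C sketch below illustrates that decomposition on bare register indices. It is an illustration under assumptions, not the libFirm API: N_REGS, emit_copy(), emit_swap() and lower_parallel_move() are made-up helpers that merely print what a backend would emit.

/* Sketch only (hypothetical helpers, not libFirm code): lowering a parallel
 * register move into chains (copies) and cycles (swaps). */
#include <stdio.h>

enum { N_REGS = 8, NONE = -1 };

static void emit_copy(int src, int dst) { printf("copy r%d -> r%d\n", src, dst); }
static void emit_swap(int a, int b)     { printf("swap r%d <-> r%d\n", a, b); }

/* perm[r] is the register that must receive the value currently in r,
 * or NONE if that value is dead after the parallel move. */
static void lower_parallel_move(const int perm[N_REGS])
{
    int src_of[N_REGS];            /* inverse mapping: which register feeds r */
    int handled[N_REGS] = { 0 };

    for (int r = 0; r < N_REGS; ++r)
        src_of[r] = NONE;
    for (int r = 0; r < N_REGS; ++r)
        if (perm[r] != NONE && perm[r] != r)
            src_of[perm[r]] = r;

    /* Chains: a register that is written but whose own value is dead ends a
     * chain. Walk back to the chain's head, emitting copies tail-first so no
     * still-needed value is overwritten. */
    for (int r = 0; r < N_REGS; ++r) {
        if (perm[r] != NONE || src_of[r] == NONE)
            continue;                          /* r is not the end of a chain */
        for (int dst = r; src_of[dst] != NONE; ) {
            int src = src_of[dst];
            emit_copy(src, dst);
            handled[src] = handled[dst] = 1;
            dst = src;
        }
    }

    /* What remains are pure cycles: a cycle of length n is realised with
     * n - 1 two-register swaps, or with copies through a scratch register
     * if one happens to be free. */
    for (int r = 0; r < N_REGS; ++r) {
        if (handled[r] || perm[r] == NONE || perm[r] == r)
            continue;
        for (int cur = perm[r]; cur != r; cur = perm[cur]) {
            emit_swap(r, cur);
            handled[cur] = 1;
        }
        handled[r] = 1;
    }
}

int main(void)
{
    /* r0 -> r1 -> r2 -> r0 is a cycle; r4 -> r5 -> r6 is a chain
     * (the old value of r6 is dead). */
    int perm[N_REGS] = { 1, 2, 0, NONE, 5, 6, NONE, NONE };
    lower_parallel_move(perm);
    return 0;
}

Running the sketch prints "copy r5 -> r6", "copy r4 -> r5", "swap r0 <-> r1", "swap r0 <-> r2": emitting a chain's copies from the dead end backwards is what lets the backend resolve it without a scratch register, which is the same reason lower_perm_node() only needs the do_copy path as an optional alternative for cycles.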