X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbelower.c;h=83b60f6b9e209cf285f73b8dce06b165d8c02ee2;hb=24d4d90a8a18fd1e2157787efc06fb3c9c7f2380;hp=dc93e6b57bed2183d1f9ad92f0fcd58a542f1cbd;hpb=15ad7ccd8dff64e1808e1d093d4a8d7cda5af33e;p=libfirm

diff --git a/ir/be/belower.c b/ir/be/belower.c
index dc93e6b57..83b60f6b9 100644
--- a/ir/be/belower.c
+++ b/ir/be/belower.c
@@ -19,10 +19,10 @@
 
 /**
  * @file
- * @brief       Performs lowering of perm nodes. Inserts copies to assure register constraints.
+ * @brief       Performs lowering of perm nodes. Inserts copies to assure
+ *              register constraints.
  * @author      Christian Wuerdig
  * @date        14.12.2005
- * @version     $Id$
  */
 #include "config.h"
 
@@ -32,7 +32,7 @@
 #include "debug.h"
 #include "xmalloc.h"
 #include "irnodeset.h"
-#include "irnodemap.h"
+#include "irnodehashmap.h"
 #include "irgmod.h"
 #include "iredges_t.h"
 #include "irgwalk.h"
@@ -60,9 +60,9 @@ typedef struct {
 
 /** Environment for constraints. */
 typedef struct {
-	ir_graph        *irg;
-	ir_nodemap_t     op_set;
-	struct obstack   obst;
+	ir_graph         *irg;
+	ir_nodehashmap_t op_set;
+	struct obstack   obst;
 } constraint_env_t;
 
 /** Lowering walker environment. */
@@ -455,7 +455,7 @@ static void lower_perm_node(ir_node *irn, lower_env_t *env)
 				DB((dbg, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n",
 					irn, arg1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));
 
-				cpyxchg = be_new_Copy(reg_class, block, arg1);
+				cpyxchg = be_new_Copy(block, arg1);
 				arch_set_irn_register(cpyxchg, cycle.elems[i + 1]);
 
 				/* exchange copy node and proj */
@@ -502,7 +502,7 @@ static ir_node *find_copy(ir_node *irn, ir_node *op)
 static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different,
                                          constraint_env_t *env)
 {
-	ir_nodemap_t                *op_set;
+	ir_nodehashmap_t            *op_set;
 	ir_node                     *block;
 	const arch_register_class_t *cls;
 	ir_node                     *keep, *cpy;
@@ -516,7 +516,7 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different,
 
 	op_set = &env->op_set;
 	block  = get_nodes_block(irn);
-	cls    = arch_get_irn_reg_class_out(other_different);
+	cls    = arch_get_irn_reg_class(other_different);
 
 	/* Make a not spillable copy of the different node   */
 	/* this is needed because the different irn could be */
@@ -526,8 +526,8 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different,
 	/* check if already exists such a copy in the schedule immediately before */
 	cpy = find_copy(skip_Proj(irn), other_different);
 	if (! cpy) {
-		cpy = be_new_Copy(cls, block, other_different);
-		arch_irn_set_flags(cpy, arch_irn_flags_dont_spill);
+		cpy = be_new_Copy(block, other_different);
+		arch_set_irn_flags(cpy, arch_irn_flags_dont_spill);
 		DB((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
 	} else {
 		DB((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));
@@ -536,7 +536,7 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different,
 	/* Add the Keep resp. CopyKeep and reroute the users */
 	/* of the other_different irn in case of CopyKeep.   */
 	if (has_irn_users(other_different)) {
-		keep = be_new_CopyKeep_single(cls, block, cpy, irn, get_irn_mode(other_different));
+		keep = be_new_CopyKeep_single(block, cpy, irn);
 		be_node_set_reg_class_in(keep, 1, cls);
 	} else {
 		ir_node *in[2];
@@ -555,13 +555,13 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different,
 	sched_add_after(skip_Proj(irn), keep);
 
 	/* insert the other different and its copies into the map */
-	entry = (op_copy_assoc_t*)ir_nodemap_get(op_set, other_different);
+	entry = (op_copy_assoc_t*)ir_nodehashmap_get(op_set, other_different);
 	if (! entry) {
 		entry = OALLOC(&env->obst, op_copy_assoc_t);
 
 		entry->cls = cls;
 		ir_nodeset_init(&entry->copies);
-		ir_nodemap_insert(op_set, other_different, entry);
+		ir_nodehashmap_insert(op_set, other_different, entry);
 	}
 
 	/* insert copy */
@@ -582,7 +582,7 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different,
  */
 static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env)
 {
-	const arch_register_req_t *req = arch_get_register_req_out(irn);
+	const arch_register_req_t *req = arch_get_irn_register_req(irn);
 
 	if (arch_register_req_is(req, must_be_different)) {
 		const unsigned other = req->other_different;
@@ -649,11 +649,11 @@ static void assure_constraints_walker(ir_node *block, void *walk_env)
  */
 static void melt_copykeeps(constraint_env_t *cenv)
 {
-	ir_nodemap_iterator_t map_iter;
-	ir_nodemap_entry_t    map_entry;
+	ir_nodehashmap_iterator_t map_iter;
+	ir_nodehashmap_entry_t    map_entry;
 
 	/* for all */
-	foreach_ir_nodemap(&cenv->op_set, map_entry, map_iter) {
+	foreach_ir_nodehashmap(&cenv->op_set, map_entry, map_iter) {
 		op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;
 		int idx, num_ck;
 		ir_node *cp;
@@ -733,10 +733,10 @@ static void melt_copykeeps(constraint_env_t *cenv)
 			}
 
 #ifdef KEEP_ALIVE_COPYKEEP_HACK
-			new_ck = be_new_CopyKeep(entry->cls, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY);
+			new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in);
 			keep_alive(new_ck);
#else
-			new_ck = be_new_CopyKeep(entry->cls, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref));
+			new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in);
 #endif /* KEEP_ALIVE_COPYKEEP_HACK */
 
 			/* set register class for all kept inputs */
@@ -766,14 +766,14 @@ static void melt_copykeeps(constraint_env_t *cenv)
 
 void assure_constraints(ir_graph *irg)
 {
-	constraint_env_t      cenv;
-	ir_nodemap_iterator_t map_iter;
-	ir_nodemap_entry_t    map_entry;
+	constraint_env_t          cenv;
+	ir_nodehashmap_iterator_t map_iter;
+	ir_nodehashmap_entry_t    map_entry;
 
 	FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr");
 
 	cenv.irg = irg;
-	ir_nodemap_init(&cenv.op_set);
+	ir_nodehashmap_init(&cenv.op_set);
 	obstack_init(&cenv.obst);
 
 	irg_block_walk_graph(irg, NULL, assure_constraints_walker, &cenv);
@@ -784,7 +784,7 @@ void assure_constraints(ir_graph *irg)
 	melt_copykeeps(&cenv);
 
 	/* for all */
-	foreach_ir_nodemap(&cenv.op_set, map_entry, map_iter) {
+	foreach_ir_nodehashmap(&cenv.op_set, map_entry, map_iter) {
 		op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;
 		size_t n = ir_nodeset_size(&entry->copies);
 		ir_node **nodes = ALLOCAN(ir_node*, n);
@@ -830,12 +830,11 @@ void assure_constraints(ir_graph *irg)
 		ir_nodeset_destroy(&entry->copies);
 	}
 
-	ir_nodemap_destroy(&cenv.op_set);
+	ir_nodehashmap_destroy(&cenv.op_set);
 	obstack_free(&cenv.obst, NULL);
 
-	be_liveness_invalidate(be_get_irg_liveness(irg));
+	be_invalidate_live_sets(irg);
 }
 
-
 /**
  * Push nodes that do not need to be permed through the Perm.
  * This is commonly a reload cascade at block ends.
@@ -867,7 +866,7 @@ static int push_through_perm(ir_node *perm)
 	/* get some Proj and find out the register class of that Proj. */
 	const ir_edge_t *edge = get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL);
 	ir_node *one_proj = get_edge_src_irn(edge);
-	const arch_register_class_t *cls = arch_get_irn_reg_class_out(one_proj);
+	const arch_register_class_t *cls = arch_get_irn_reg_class(one_proj);
 	assert(is_Proj(one_proj));
 
 	DB((dbg_permmove, LEVEL_1, "perm move %+F irg %+F\n", perm, irg));
@@ -900,7 +899,7 @@ found_front:
 	while (!sched_is_begin(node)) {
 		const arch_register_req_t *req;
 		int input = -1;
-		ir_node *proj;
+		ir_node *proj = NULL;
 
 		/* search if node is a INPUT of Perm */
 		foreach_out_edge(perm, edge) {
@@ -920,7 +919,7 @@ found_front:
 			break;
 		if (arch_irn_is(node, modify_flags))
 			break;
-		req = arch_get_register_req_out(node);
+		req = arch_get_irn_register_req(node);
 		if (req->type != arch_register_req_type_normal)
 			break;
 		for (i = get_irn_arity(node) - 1; i >= 0; --i) {
@@ -1014,7 +1013,7 @@ void lower_nodes_after_ra(ir_graph *irg, int do_copy)
 	env.do_copy = do_copy;
 
 	/* we will need interference */
-	be_liveness_assure_chk(be_get_irg_liveness(irg));
+	be_assure_live_chk(irg);
 
 	irg_walk_graph(irg, NULL, lower_nodes_after_ra_walker, &env);
 }
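
Below is a minimal usage sketch of the ir_nodehashmap interface that this patch migrates to, mirroring only the calls that appear in the hunks above (ir_nodehashmap_init/_get/_insert/_destroy and foreach_ir_nodehashmap). The demo_map_usage() helper and the demo_payload_t type are illustrative inventions, not part of the patch, and the sketch assumes it is built inside the libfirm source tree where "irnodehashmap.h" and the ir_node type are available.

#include "irnodehashmap.h"   /* renamed from "irnodemap.h" by this patch */

/* Illustrative payload type; belower.c stores an op_copy_assoc_t* instead. */
typedef struct demo_payload_t {
	int dummy;
} demo_payload_t;

/* Hypothetical helper showing the call pattern used in
 * gen_assure_different_pattern(), melt_copykeeps() and assure_constraints()
 * after this patch. */
static void demo_map_usage(ir_node *key, demo_payload_t *payload)
{
	ir_nodehashmap_t          map;
	ir_nodehashmap_iterator_t iter;
	ir_nodehashmap_entry_t    entry;
	demo_payload_t           *found;

	ir_nodehashmap_init(&map);

	/* lookup returns void*, hence the cast (cf. gen_assure_different_pattern) */
	found = (demo_payload_t*)ir_nodehashmap_get(&map, key);
	if (found == NULL)
		ir_nodehashmap_insert(&map, key, payload);

	/* iteration takes the map, an entry and an iterator (cf. melt_copykeeps) */
	foreach_ir_nodehashmap(&map, entry, iter) {
		(void)entry.data;   /* the stored pointer */
	}

	ir_nodehashmap_destroy(&map);
}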