From: Matthias Braun
Date: Wed, 7 Jun 2006 15:31:00 +0000 (+0000)
Subject: - Don't use a callback for deciding whether to spill phi nodes
X-Git-Url: http://nsz.repo.hu/git/?a=commitdiff_plain;h=2c52bb5cb8c25f23843c035fdad1efb80f6417a0;p=libfirm

- Don't use a callback for deciding whether to spill phi nodes
- Morgan now calls be_spill_phi when spilling phi nodes
- no need to use beinsn in the Morgan analysis phase; simply iterating over in_edges is enough
- fixed a bug in the Morgan spiller that marked some values as livethrough-unused which were not
---
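For context: after this change a spiller drives the spill environment roughly
as follows. This is only a sketch of the new interface; chordal_env, to_spill,
block and pos stand in for the spiller's own data, and the actual spill
decisions are elided.

    spill_env_t *senv = be_new_spill_env(chordal_env);

    /* for each value the spiller decided to spill: */
    if (is_Phi(to_spill))
        be_spill_phi(senv, to_spill);  /* mark the phi itself for (memory-phi) spilling */
    be_add_reload_on_edge(senv, to_spill, block, pos);

    be_insert_spills_reloads(senv);    /* materializes spills, reloads and phi copies */
    be_delete_spill_env(senv);
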
diff --git a/ir/be/bespill.c b/ir/be/bespill.c
index bb3ec838d..e1f93bb34 100644
--- a/ir/be/bespill.c
+++ b/ir/be/bespill.c
@@ -58,8 +58,7 @@ struct _spill_env_t {
 	set *spill_ctxs;
 	set *spills;       /**< all spill_info_t's, which must be placed */
 	pset *mem_phis;    /**< set of all special spilled phis. allocated and freed separately */
-	decide_irn_t is_spilled_phi;/**< callback func to decide if a phi needs special spilling */
-	void *data;        /**< data passed to all callbacks */
+	ir_node **copies;  /**< set of copies placed because of phi spills */
 	DEBUG_ONLY(firm_dbg_module_t *dbg;)
 };
 
@@ -104,29 +103,26 @@ void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
 )
 
 /* Creates a new spill environment. */
-spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env, decide_irn_t is_spilled_phi, void *data) {
+spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env) {
 	spill_env_t *env = xmalloc(sizeof(env[0]));
 	env->spill_ctxs  = new_set(cmp_spillctx, 1024);
 	env->spills      = new_set(cmp_spillinfo, 1024);
 	env->cls         = chordal_env->cls;
-	env->is_spilled_phi = is_spilled_phi;
-	env->data        = data;
 	env->chordal_env = chordal_env;
+	env->mem_phis    = pset_new_ptr_default();
+	env->copies      = NEW_ARR_F(ir_node*, 0);
 	obstack_init(&env->obst);
 	return env;
 }
 
-void be_set_is_spilled_phi(spill_env_t *env, decide_irn_t is_spilled_phi, void *data) {
-	env->is_spilled_phi = is_spilled_phi;
-	env->data = data;
-}
-
 /* Deletes a spill environment. */
-void be_delete_spill_env(spill_env_t *senv) {
-	del_set(senv->spill_ctxs);
-	del_set(senv->spills);
-	obstack_free(&senv->obst, NULL);
-	free(senv);
+void be_delete_spill_env(spill_env_t *env) {
	del_set(env->spill_ctxs);
+	del_set(env->spills);
+	del_pset(env->mem_phis);
+	DEL_ARR_F(env->copies);
+	obstack_free(&env->obst, NULL);
+	free(env);
 }
 
 /**
@@ -175,9 +171,77 @@ static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn)
 	}
 
 	ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
+	return ctx->spill;
 }
 
+/**
+ * Removes all copies introduced for phi-spills
+ */
+static void remove_copies(spill_env_t *env) {
+	int i;
+
+	for(i = 0; i < ARR_LEN(env->copies); ++i) {
+		ir_node *node = env->copies[i];
+		ir_node *src;
+		const ir_edge_t *edge, *ne;
+
+		assert(be_is_Copy(node));
+
+		src = be_get_Copy_op(node);
+		foreach_out_edge_safe(node, edge, ne) {
+			ir_node *user = get_edge_src_irn(edge);
+			int user_pos  = get_edge_src_pos(edge);
+
+			set_irn_n(user, user_pos, src);
+		}
+	}
+
+	ARR_SETLEN(ir_node*, env->copies, 0);
+}
+
+/**
+ * Inserts a copy (needed for spilled phi handling) of a value at the earliest
+ * possible location in a block. That is after the last use/def of the value or at
+ * the beginning of the block if there is no use/def.
+ */
+static ir_node *insert_copy(spill_env_t *env, ir_node *block, ir_node *value) {
+	ir_node* node;
+	ir_graph *irg = get_irn_irg(block);
+	ir_node *copy = be_new_Copy(env->cls, irg, block, value);
+
+	ARR_APP1(ir_node*, env->copies, copy);
+
+	// walk schedule backwards until we find a use/def, or until we have reached the first phi
+	// TODO we could also do this by iterating over all uses and checking the
+	// sched_get_time_step value. Need benchmarks to decide this...
+	sched_foreach_reverse(block, node) {
+		int i, arity;
+
+		if(is_Phi(node)) {
+			sched_add_after(node, copy);
+			goto placed;
+		}
+		if(value == node) {
+			sched_add_after(node, copy);
+			goto placed;
+		}
+		for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+			ir_node *arg = get_irn_n(node, i);
+			if(arg == value) {
+				sched_add_after(node, copy);
+				goto placed;
+			}
+		}
+	}
+	// we didn't find a use or a phi yet, so place the copy at the beginning of the block
+	sched_add_before(sched_first(block), copy);
+
+placed:
+
+	return copy;
+}
+
 /**
  * If the first usage of a Phi result would be out of memory
  * there is no sense in allocating a register for it.
@@ -190,8 +254,9 @@ static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn)
  *
  * @return a be_Spill node
  */
-static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set *already_visited_phis, bitset_t *bs) {
-	int i, n = get_irn_arity(phi);
+static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set *already_visited_phis, bitset_t *bs) {
+	int i;
+	int arity = get_irn_arity(phi);
 	ir_graph *irg = senv->chordal_env->irg;
 	ir_node *bl = get_nodes_block(phi);
 	ir_node **ins, *phi_spill;
@@ -202,11 +267,11 @@ static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn,
 	DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
 
 	/* build a new PhiM */
-	NEW_ARR_A(ir_node *, ins, n);
-	for (i = 0; i < n; ++i) {
+	NEW_ARR_A(ir_node *, ins, arity);
+	for (i = 0; i < arity; ++i) {
 		ins[i] = new_r_Bad(irg);
 	}
-	phi_spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);
+	phi_spill = new_r_Phi(senv->chordal_env->irg, bl, arity, ins, mode_M);
 	key.phi = phi;
 	key.spill = phi_spill;
 	set_insert(already_visited_phis, &key, sizeof(key), HASH_PTR(phi));
@@ -218,14 +283,14 @@ static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn,
 
 	/* if not found spill the phi */
 	if (! ctx->spill) {
 		/* collect all arguments of the phi */
-		for (i = 0; i < n; ++i) {
+		for (i = 0; i < arity; ++i) {
 			ir_node *arg = get_irn_n(phi, i);
 			ir_node *sub_res;
 			phi_spill_assoc_t *entry;
 
 			if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) {
 				if (! bitset_is_set(bs, get_irn_idx(arg)))
-					sub_res = be_spill_phi(senv, arg, ctx_irn, already_visited_phis, bs);
+					sub_res = spill_phi(senv, arg, ctx_irn, already_visited_phis, bs);
 				else {
 					/* we already visited the argument phi: get its spill */
 					key.phi = arg;
@@ -256,13 +321,13 @@
  * @return a be_Spill node
  */
 static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) {
-	ir_graph *irg = get_irn_irg(to_spill);
+	ir_graph *irg  = get_irn_irg(to_spill);
 	ir_node *res;
 
 	if (pset_find_ptr(senv->mem_phis, to_spill)) {
 		set *already_visited_phis = new_set(cmp_phi_spill_assoc, 10);
 		bitset_t *bs = bitset_alloca(get_irg_last_idx(irg));
-		res = be_spill_phi(senv, to_spill, to_spill, already_visited_phis, bs);
+		res = spill_phi(senv, to_spill, to_spill, already_visited_phis, bs);
 		del_set(already_visited_phis);
 	} else {
 		res = be_spill_irn(senv, to_spill, to_spill);
@@ -389,52 +454,49 @@ static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader)
 	return res;
 }
 
-/**
- * Walker: fills the mem_phis set by evaluating Phi nodes
- * using the is_spilled_phi() callback.
- */
-static void phi_walker(ir_node *irn, void *env) {
-	spill_env_t *senv = env;
-
-	if (is_Phi(irn)) {
-		const arch_env_t *arch = senv->chordal_env->birg->main_env->arch_env;
-		if (arch_irn_has_reg_class(arch, irn, 0, senv->cls) &&
-				senv->is_spilled_phi(irn, senv->data)) {
-			DBG((senv->dbg, LEVEL_1, " %+F\n", irn));
-			pset_insert_ptr(senv->mem_phis, irn);
-		}
-	}
+void be_spill_phi(spill_env_t *env, ir_node *node) {
+	assert(is_Phi(node));
+
+	pset_insert_ptr(env->mem_phis, node);
 }
 
-void be_insert_spills_reloads(spill_env_t *senv) {
-	const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
-	ir_node *irn;
+void be_insert_spills_reloads(spill_env_t *env) {
+	const arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
+	ir_node *node;
 	spill_info_t *si;
 
-	/* get all special spilled phis */
-	DBG((senv->dbg, LEVEL_1, "Mem-phis:\n"));
-	senv->mem_phis = pset_new_ptr_default();
-	irg_walk_graph(senv->chordal_env->irg, phi_walker, NULL, senv);
-
-	/* Add reloads for mem_phis */
-	/* BETTER: These reloads (1) should only be inserted, if they are really needed */
-	DBG((senv->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
-	for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
+	DBG((env->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
+	foreach_pset(env->mem_phis, node) {
 		const ir_edge_t *e;
-		DBG((senv->dbg, LEVEL_1, " Mem-phi %+F\n", irn));
-		foreach_out_edge(irn, e) {
+		int i, arity;
+
+		/* We have to place copy nodes in the predecessor blocks to temporarily
+		 * produce new values that get separate spill slots
+		 */
+		for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+			ir_node *pred_block = get_Block_cfgpred_block(get_nodes_block(node), i);
+			ir_node *arg = get_irn_n(node, i);
+			ir_node* copy = insert_copy(env, pred_block, arg);
+
+			set_irn_n(node, i, copy);
+		}
+
+		/* Add reloads for mem_phis */
+		/* BETTER: These reloads (1) should only be inserted if they are really needed */
+		DBG((env->dbg, LEVEL_1, " Mem-phi %+F\n", node));
+		foreach_out_edge(node, e) {
 			ir_node *user = e->src;
-			if (is_Phi(user) && !pset_find_ptr(senv->mem_phis, user)) {
-				ir_node *use_bl = get_nodes_block(user);
-				DBG((senv->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
-				be_add_reload_on_edge(senv, irn, use_bl, e->pos); /* (1) */
+			if (is_Phi(user) && !pset_find_ptr(env->mem_phis, user)) {
+				ir_node *use_bl = get_nodes_block(user);
+				DBG((env->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
+				be_add_reload_on_edge(env, node, use_bl, e->pos); /* (1) */
 			}
 		}
 	}
 
 	/* process each spilled node */
-	DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
-	for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
+	DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n"));
+	for(si = set_first(env->spills); si; si = set_next(env->spills)) {
 		reloader_t *rld;
 		ir_mode *mode = get_irn_mode(si->spilled_node);
 		//ir_node *value;
@@ -445,54 +507,56 @@ void be_insert_spills_reloads(spill_env_t *senv) {
 			ir_node *new_val;
 
 			/* the spill for this reloader */
-			ir_node *spill = be_spill_node(senv, si->spilled_node);
+			ir_node *spill = be_spill_node(env, si->spilled_node);
 
 #ifdef REMAT
-			if (check_remat_conditions(senv, spill, si->spilled_node, rld->reloader)) {
-				new_val = do_remat(senv, si->spilled_node, rld->reloader);
+			if (check_remat_conditions(env, spill, si->spilled_node, rld->reloader)) {
+				new_val = do_remat(env, si->spilled_node, rld->reloader);
 				//pdeq_putl(possibly_dead, spill);
 			}
 			else
#endif
 				/* do a reload */
-				new_val = be_reload(aenv, senv->cls, rld->reloader, mode, spill);
+				new_val = be_reload(arch_env, env->cls, rld->reloader, mode, spill);
 
-			DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
+			DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
 			pset_insert_ptr(values, new_val);
 		}
 
 		/* introduce copies, rewire the uses */
 		assert(pset_count(values) > 0 && "???");
 		pset_insert_ptr(values, si->spilled_node);
-		be_ssa_constr_set_ignore(senv->chordal_env->dom_front, values, senv->mem_phis);
+		be_ssa_constr_set_ignore(env->chordal_env->dom_front, values, env->mem_phis);
 
 		del_pset(values);
 	}
 
-	del_pset(senv->mem_phis);
+	remove_copies(env);
 
 	// reloads are placed now, but we might reuse the spill environment for further spilling decisions
-	del_set(senv->spills);
-	senv->spills = new_set(cmp_spillinfo, 1024);
+	del_set(env->spills);
+	env->spills = new_set(cmp_spillinfo, 1024);
 }
 
-void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
+void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) {
 	spill_info_t templ, *res;
 	reloader_t *rel;
 
+	assert(arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->cls, to_spill));
+
 	templ.spilled_node = to_spill;
 	templ.reloaders = NULL;
-	res = set_insert(senv->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
+	res = set_insert(env->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
 
-	rel = obstack_alloc(&senv->obst, sizeof(rel[0]));
+	rel = obstack_alloc(&env->obst, sizeof(rel[0]));
 	rel->reloader = before;
 	rel->next = res->reloaders;
 	res->reloaders = rel;
 }
 
-void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos) {
+void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *bl, int pos) {
 	ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
-	be_add_reload(senv, to_spill, insert_bl);
+	be_add_reload(env, to_spill, insert_bl);
 }
 
@@ -730,6 +794,7 @@ static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) {
 		be_set_Spill_entity(irn, spill_ent);
 	}
 
+	/* set final size of stack frame */
 	frame_align = get_type_alignment_bytes(frame);
 	set_type_size_bytes(frame, round_up2(offset, frame_align));
diff --git a/ir/be/bespill.h b/ir/be/bespill.h
index 72f1e2cd7..8cffc849e 100644
--- a/ir/be/bespill.h
+++ b/ir/be/bespill.h
@@ -19,7 +19,6 @@
 #include "bearch.h"
 
 typedef struct _spill_env_t spill_env_t;
-typedef int(*decide_irn_t)(const ir_node*, void*);
 
 /**
  * Creates a new spill environment.
@@ -28,12 +27,10 @@ typedef int(*decide_irn_t)(const ir_node*, void*);
- * @param is_spilled_phi a function that evaluates a phi node and returns true if it is a spilled phi node
- * @param data           context parameter for the is_spilled_phi function
  */
-spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal, decide_irn_t is_spilled_phi, void *data);
+spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal);
 
 /**
- * (re-)sets the is_spilled_phi callback
+ * Marks a phi node for spilling
  */
-void be_set_is_spilled_phi(spill_env_t *env, decide_irn_t is_spilled_phi, void *data);
+void be_spill_phi(spill_env_t *env, ir_node *node);
 
 /**
  * Deletes a spill environment.
diff --git a/ir/be/bespillappel.c b/ir/be/bespillappel.c
index 54d8f0b5b..803234c60 100644
--- a/ir/be/bespillappel.c
+++ b/ir/be/bespillappel.c
@@ -352,14 +352,6 @@ is_spilled(const spill_ilp_t * si, const live_range_t * lr)
 }
 #endif
 
-static int
-is_mem_phi(const ir_node * phi, void *data)
-{
-	spill_ilp_t *si = data;
-//	return is_spilled(si, get_use_head(si, phi)->closest_use);
-	return 0;
-}
-
 void
 be_spill_appel(const be_chordal_env_t * chordal_env)
 {
@@ -372,7 +364,7 @@ be_spill_appel(const be_chordal_env_t * chordal_env)
 	obstack_init(&obst);
 	si.chordal_env = chordal_env;
 	si.obst = &obst;
-	si.senv = be_new_spill_env(chordal_env, is_mem_phi, &si);
+	si.senv = be_new_spill_env(chordal_env);
 	si.cls = chordal_env->cls;
 	si.lpp = new_lpp(problem_name, lpp_minimize);
 	FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillappel");
diff --git a/ir/be/bespillbelady.c b/ir/be/bespillbelady.c
index 8d309baf5..b01c4bfc6 100644
--- a/ir/be/bespillbelady.c
+++ b/ir/be/bespillbelady.c
@@ -237,21 +237,10 @@ static INLINE void *new_block_info(struct obstack *ob) {
 #define get_block_info(blk) ((block_info_t *)get_irn_link(blk))
 #define set_block_info(blk, info) set_irn_link(blk, info)
 
-static int is_mem_phi(const ir_node *irn, void *data) {
-	workset_t *sws;
-	ir_node *blk = get_nodes_block(irn);
-
-	DBG((dbg, DBG_SPILL, "Is %+F a mem-phi?\n", irn));
-	sws = get_block_info(blk)->ws_start;
-	DBG((dbg, DBG_SPILL, " %d\n", !workset_contains(sws, irn)));
-	return !workset_contains(sws, irn);
-}
-
 /**
  * @return The distance to the next use
  *         or 0 if irn is an ignore node
  */
-
 static INLINE unsigned get_distance(belady_env_t *bel, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
 {
 	arch_irn_flags_t fl = arch_irn_get_flags(bel->arch, def);
@@ -260,7 +249,7 @@ static INLINE unsigned get_distance(belady_env_t *bel, const ir_node *from, unsi
 	if(!USES_IS_INIFINITE(dist) && (fl & (arch_irn_flags_ignore | arch_irn_flags_dont_spill)) != 0)
 		return 0;
 
-	return dist;
+	return dist + 1;
 }
 
 /**
@@ -293,6 +282,7 @@ static void displace(belady_env_t *bel, workset_t *new_vals, int is_usage) {
 			if (is_usage)
 				be_add_reload(bel->senv, val, bel->instr);
 		} else {
+			assert(is_usage && "Defined value already in workset?!?");
 			DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
 		}
 	}
@@ -344,9 +334,9 @@
 static void belady(ir_node *blk, void *env);
 
 /**
- * Inserts a spill of a value at the earliest possible location in a block.
- * That is after the last use of the value or at the beginning of the block if
- * there is no use
+ * Inserts a copy (needed for spilled phi handling) of a value at the earliest
+ * possible location in a block. That is after the last use/def of the value or at
+ * the beginning of the block if there is no use/def.
  */
 static ir_node *insert_copy(belady_env_t *env, ir_node *block, ir_node *value) {
 	ir_node* node;
@@ -355,8 +345,9 @@ static ir_node *insert_copy(belady_env_t *env, ir_node *block, ir_node *value) {
 
 	ARR_APP1(ir_node*, env->copies, copy);
 
-	// walk schedule backwards until we find a usage, or until we have reached the first phi
-	// TODO can we do this faster somehow? This makes insert_copy O(n) in block_size...
+	// walk schedule backwards until we find a use/def, or until we have reached the first phi
+	// TODO we could also do this by iterating over all uses and checking the
+	// sched_get_time_step value. Need benchmarks to decide this...
 	sched_foreach_reverse(block, node) {
 		int i, arity;
 
@@ -473,21 +464,11 @@ static block_info_t *compute_block_start_info(ir_node *blk, void *data) {
 	 * into the same spill slot.
 	 * After spilling these copies get deleted. */
 	for (i=workset_get_length(res->ws_start); isenv, irn);
 	}
 
 	obstack_free(&ob, NULL);
@@ -530,7 +511,6 @@ static void belady(ir_node *blk, void *env) {
 	sched_foreach(blk, irn) {
 		assert(workset_get_length(bel->ws) <= bel->n_regs && "Too much values in workset!");
 
-
 		/* projs are handled with the tuple value.
 		 * Phis are no real instr (see insert_starters())
 		 * instr_nr does not increase */
@@ -658,10 +638,9 @@ void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t
 	bel.ws = new_workset(&bel.ob, &bel);
 	bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
 	if(spill_env == NULL) {
-		bel.senv = be_new_spill_env(chordal_env, is_mem_phi, NULL);
+		bel.senv = be_new_spill_env(chordal_env);
 	} else {
 		bel.senv = spill_env;
-		be_set_is_spilled_phi(bel.senv, is_mem_phi, NULL);
 	}
 	DEBUG_ONLY(be_set_spill_env_dbg_module(bel.senv, dbg);)
 	bel.copies = NEW_ARR_F(ir_node*, 0);
diff --git a/ir/be/bespillmorgan.c b/ir/be/bespillmorgan.c
index 3e923b7d3..40a159f7b 100644
--- a/ir/be/bespillmorgan.c
+++ b/ir/be/bespillmorgan.c
@@ -16,7 +16,6 @@
 #include "bespill.h"
 #include "belive.h"
 #include "belive_t.h"
-#include "beinsn_t.h"
 #include "irgwalk.h"
 #include "besched.h"
 #include "beutil.h"
@@ -45,7 +44,6 @@ typedef struct _morgan_env_t {
 	// maximum safe register pressure
 	int registers_available;
 
-	be_insn_env_t insn_env;
 	spill_env_t *senv;
 	be_uses_t *uses;
@@ -139,12 +137,6 @@ static INLINE block_attr_t *get_block_attr(morgan_env_t *env, ir_node *block) {
 	return res;
 }
 
-static int is_mem_phi(const ir_node *node, void *data) {
-	// TODO what is this for?
-	return 0;
-}
-
 //---------------------------------------------------------------------------
 
 /**
@@ -180,23 +172,31 @@ static void free_loop_out_edges(morgan_env_t *env) {
 	}
 }
 
+/**
+ * Debugging help, shows all nodes in a (node-)bitset
+ */
+static void show_nodebitset(ir_graph* irg, bitset_t* bitset) {
+	int i;
+
+	bitset_foreach(bitset, i) {
+		ir_node* node = get_idx_irn(irg, i);
+		DBG((dbg, DBG_LIVE, "\t%+F\n", node));
+	}
+}
+
 /**
  * Construct the livethrough unused information for a block
  */
 static bitset_t *construct_block_livethrough_unused(morgan_env_t* env, ir_node* block) {
-	int i;
-	int node_idx;
-	ir_node *irn;
 	block_attr_t *block_attr = get_block_attr(env, block);
-
-	/*
-	 * This is the first block in a sequence, all variables that are livethrough this block are potential
-	 * candidates for livethrough_unused
-	 */
 	irn_live_t *li;
+	ir_node *node;
+
 	DBG((dbg, DBG_LIVE, "Processing block %d\n", get_irn_node_nr(block)));
 
 	// copy all live-outs into the livethrough_unused set
 	live_foreach(block, li) {
+		int node_idx;
+
 		if(!live_is_in(li) || !live_is_out(li))
 			continue;
 		if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, li->irn))
@@ -210,31 +210,19 @@ static bitset_t *construct_block_livethrough_unused(morgan_env_t* env, ir_node*
 	 * All values that are used within the block are not unused (and therefore not
 	 * livethrough_unused)
 	 */
-	sched_foreach(block, irn) {
-		be_insn_t *insn = be_scan_insn(&env->insn_env, irn);
+	sched_foreach(block, node) {
+		int i, arity;
 
-		for(i = insn->use_start; i < insn->n_ops; ++i) {
-			const be_operand_t *op = &insn->ops[i];
-			int idx = get_irn_idx(op->irn);
+		for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+			int idx = get_irn_idx(get_irn_n(node, i));
 			bitset_clear(block_attr->livethrough_unused, idx);
 		}
 	}
 
+	show_nodebitset(env->irg, block_attr->livethrough_unused);
 	return block_attr->livethrough_unused;
 }
 
-/**
- * Debugging help, shows all nodes in a (node-)bitset
- */
-static void show_nodebitset(ir_graph* irg, bitset_t* bitset) {
-	int i;
-
-	bitset_foreach(bitset, i) {
-		ir_node* node = get_idx_irn(irg, i);
-		DBG((dbg, DBG_LIVE, "\t%+F\n", node));
-	}
-}
-
 static bitset_t *construct_loop_livethrough_unused(morgan_env_t *env, ir_loop *loop) {
 	int i;
 	loop_attr_t* loop_attr = get_loop_attr(env, loop);
@@ -271,6 +259,7 @@ static bitset_t *construct_loop_livethrough_unused(morgan_env_t *env, ir_loop *l
 			break;
 		}
 	}
+	DBG((dbg, DBG_LIVE, "Done with loop %d\n", loop->loop_nr));
 
 	// remove all unused livethroughs that are remembered for this loop from child loops and blocks
 	for(i = 0; i < get_loop_n_elements(loop); ++i) {
@@ -438,6 +427,10 @@ static int reduce_register_pressure_in_loop(morgan_env_t *env, ir_loop *loop, in
 		ir_node *to_spill = get_idx_irn(env->irg, i);
 
 		for(edge = set_first(loop_attr->out_edges); edge != NULL; edge = set_next(loop_attr->out_edges)) {
+			if(is_Phi(to_spill)) {
+				be_spill_phi(env->senv, to_spill);
+			}
+
 			be_add_reload_on_edge(env->senv, to_spill, edge->block, edge->pos);
 		}
 	}
@@ -460,7 +453,7 @@ void be_spill_morgan(const be_chordal_env_t *chordal_env) {
 	env.arch = chordal_env->birg->main_env->arch_env;
 	env.irg  = chordal_env->irg;
 	env.cls  = chordal_env->cls;
-	env.senv = be_new_spill_env(chordal_env, is_mem_phi, NULL);
+	env.senv = be_new_spill_env(chordal_env);
 	DEBUG_ONLY(be_set_spill_env_dbg_module(env.senv, dbg);)
 
 	env.uses = be_begin_uses(env.irg, env.arch, env.cls);
 
 	env.registers_available = arch_count_non_ignore_regs(env.arch, env.cls);
-	be_insn_env_init(&env.insn_env, chordal_env->birg, chordal_env->cls, &env.phase.obst);
-
 	env.loop_attr_set = new_set(loop_attr_cmp, 5);
 	env.block_attr_set = new_set(block_attr_cmp, 20);
 
@@ -489,10 +480,11 @@ void be_spill_morgan(const be_chordal_env_t *chordal_env) {
 	reduce_register_pressure_in_loop(&env, get_irg_loop(env.irg), 0);
 
 	be_insert_spills_reloads(env.senv);
 
-	if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN)
+	if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
 		be_verify_schedule(env.irg);
-	else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT)
+	} else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
 		assert(be_verify_schedule(env.irg));
+	}
 
 	// cleanup
 	be_end_uses(env.uses);
@@ -502,6 +494,8 @@ void be_spill_morgan(const be_chordal_env_t *chordal_env) {
 	del_set(env.block_attr_set);
 
 	// fix the remaining places with too high register pressure with Belady's algorithm
+
+	// we have to remove dead nodes from the schedule so they don't confuse the liveness calculation
 	be_remove_dead_nodes_from_schedule(env.irg);
 	be_liveness(env.irg);
 	be_spill_belady_spill_env(chordal_env, env.senv);
diff --git a/ir/be/beuses.c b/ir/be/beuses.c
index f79400fd5..ed53fac54 100644
--- a/ir/be/beuses.c
+++ b/ir/be/beuses.c
@@ -48,16 +48,6 @@ struct _be_uses_t {
 	DEBUG_ONLY(firm_dbg_module_t *dbg;)
 };
 
-static INLINE unsigned sadd(unsigned a, unsigned b)
-{
-	return a + b;
-}
-
-static INLINE unsigned sdiv(unsigned a, unsigned b)
-{
-	return a / b;
-}
-
 static int cmp_use(const void *a, const void *b, size_t n)
 {
 	const be_use_t *p = a;
diff --git a/ir/be/beverify.c b/ir/be/beverify.c
index d231248e2..cbf675bf8 100644
--- a/ir/be/beverify.c
+++ b/ir/be/beverify.c
@@ -135,8 +135,8 @@ static void verify_schedule_walker(ir_node *block, void *data)
 				env->problem_found = 1;
 			}
 		} else {
-		non_phi_found = 1;
-	}
+			non_phi_found = 1;
+		}
 
 		// 2. Check for control flow changing nodes
 		if (is_cfop(node) && get_irn_opcode(node) != iro_Start) {
@@ -170,7 +170,7 @@ static void verify_schedule_walker(ir_node *block, void *data)
 	}
 	del_pset(uses);
 
-	/* check that all delay branches are used (at least with NOPs) */
+	/* check that all delay branches are filled (at least with NOPs) */
 	if (cfchange_found && delay_branches != 0) {
 		ir_fprintf(stderr, "Not all delay slots filled after jump (%d/%d) in block %+F (%s)\n",
 			block, get_irg_dump_name(env->irg));