X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespill.c;h=b50733a8c3efbe4aa7296577fb948390fdb71a8f;hb=80a6158fdd766f42ee6c508a773bc114ff1b61f3;hp=d87915646370355d0a02424189a4fef03e6c46d6;hpb=927eac90f1c28092a5406def6540bb9fae752360;p=libfirm diff --git a/ir/be/bespill.c b/ir/be/bespill.c index d87915646..b50733a8c 100644 --- a/ir/be/bespill.c +++ b/ir/be/bespill.c @@ -14,6 +14,8 @@ #include "irnode_t.h" #include "ircons_t.h" #include "iredges_t.h" +#include "irbackedge_t.h" +#include "irprintf.h" #include "ident_t.h" #include "type_t.h" #include "entity_t.h" @@ -31,9 +33,13 @@ #include "benode_t.h" #include "bechordal_t.h" #include "bejavacoal.h" +#include "benodesets.h" +#include "bespilloptions.h" +#include "bestatevent.h" -/* This enables re-computation of values. Current state: Unfinished and buggy. */ -#undef BUGGY_REMAT +// only rematerialise when costs are less than REMAT_COST_LIMIT +// TODO determine a good value here... +#define REMAT_COST_LIMIT 10 typedef struct _reloader_t reloader_t; @@ -43,18 +49,31 @@ struct _reloader_t { }; typedef struct _spill_info_t { + /** the value that should get spilled */ ir_node *spilled_node; + /** list of places where the value should get reloaded */ reloader_t *reloaders; + /** the spill node, or a PhiM node */ ir_node *spill; + /** if we had the value of a phi spilled before but not the phi itself then + * this field contains the spill for the phi value */ + ir_node *old_spill; + + /** the register class in which the reload should be placed */ + const arch_register_class_t *reload_cls; } spill_info_t; struct _spill_env_t { const arch_register_class_t *cls; + const arch_env_t *arch_env; const be_chordal_env_t *chordal_env; struct obstack obst; - set *spills; /**< all spill_info_t's, which must be placed */ - pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */ + be_irg_t *birg; + int spill_cost; /**< the cost of a single spill node */ + int reload_cost; /**< the cost of a reload node */ + set *spills; /**< all spill_info_t's, which must be placed */ + pset *mem_phis; /**< set of all special spilled phis. 
allocated and freed separately */ DEBUG_ONLY(firm_dbg_module_t *dbg;) }; @@ -68,12 +87,25 @@ static int cmp_spillinfo(const void *x, const void *y, size_t size) { return xx->spilled_node != yy->spilled_node; } +/** + * Returns spill info for a specific value (returns NULL if the info doesn't + * exist yet) + */ +static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) { + spill_info_t info; + int hash = nodeset_hash(value); + + info.spilled_node = value; + + return set_find(env->spills, &info, sizeof(info), hash); +} + /** * Returns spill info for a specific value (the value that is to be spilled) */ static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) { spill_info_t info, *res; - int hash = HASH_PTR(value); + int hash = nodeset_hash(value); info.spilled_node = value; res = set_find(env->spills, &info, sizeof(info), hash); @@ -81,6 +113,7 @@ static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) { if (res == NULL) { info.reloaders = NULL; info.spill = NULL; + info.old_spill = NULL; res = set_insert(env->spills, &info, sizeof(info), hash); } @@ -100,7 +133,12 @@ spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env) { env->spills = new_set(cmp_spillinfo, 1024); env->cls = chordal_env->cls; env->chordal_env = chordal_env; + env->birg = chordal_env->birg; + env->arch_env = env->chordal_env->birg->main_env->arch_env; env->mem_phis = pset_new_ptr_default(); + // TODO, ask backend about costs... + env->spill_cost = 8; + env->reload_cost = 5; obstack_init(&env->obst); return env; } @@ -113,21 +151,142 @@ void be_delete_spill_env(spill_env_t *env) { free(env); } +/* + * ____ _ ____ _ _ + * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___ + * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __| + * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \ + * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/ + * + */ + +void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before, const arch_register_class_t *reload_cls) { + spill_info_t *info; + reloader_t *rel; + + assert(arch_irn_consider_in_reg_alloc(env->arch_env, env->cls, to_spill)); + + info = get_spillinfo(env, to_spill); + + if(is_Phi(to_spill)) { + int i, arity; + + /* create spillinfos for the phi arguments */ + for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) { + ir_node *arg = get_irn_n(to_spill, i); + get_spillinfo(env, arg); + } + +#if 1 + // hackery... 
sometimes the morgan algo spilled the value of a phi, + // the belady algo decides later to spill the whole phi, then sees the + // spill node and adds a reload for that spill node, problem is the + // reload gets attach to that same spill (and is totally unnecessary) + if (info->old_spill != NULL && + (before == info->old_spill || value_dominates(before, info->old_spill))) { + printf("spilledphi hack was needed...\n"); + before = sched_next(info->old_spill); + } +#endif + } + + /* put reload into list */ + rel = obstack_alloc(&env->obst, sizeof(rel[0])); + rel->reloader = before; + rel->next = info->reloaders; + + info->reloaders = rel; + info->reload_cls = reload_cls; +} + +static ir_node *get_reload_insertion_point(ir_node *block, int pos) { + ir_node *predblock, *last; + + /* simply add the reload to the beginning of the block if we only have 1 predecessor + * (we don't need to check for phis as there can't be any in a block with only 1 pred) + */ + if(get_Block_n_cfgpreds(block) == 1) { + assert(!is_Phi(sched_first(block))); + return sched_first(block); + } + + /* We have to reload the value in pred-block */ + predblock = get_Block_cfgpred_block(block, pos); + last = sched_last(predblock); + + /* we might have projs and keepanys behind the jump... */ + while(is_Proj(last) || be_is_Keep(last)) { + last = sched_prev(last); + assert(!sched_is_end(last)); + } + + if(!is_cfop(last)) { + ir_graph *irg = get_irn_irg(block); + ir_node *startblock = get_irg_start_block(irg); + + last = sched_next(last); + // last node must be a cfop, only exception is the start block + assert(last == startblock); + } + + // add the reload before the (cond-)jump + return last; +} + +void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, + ir_node *block, int pos, const arch_register_class_t *reload_cls) +{ + ir_node *before = get_reload_insertion_point(block, pos); + be_add_reload(env, to_spill, before, reload_cls); +} + +void be_spill_phi(spill_env_t *env, ir_node *node) { + spill_info_t* spill; + int i, arity; + + assert(is_Phi(node)); + + pset_insert_ptr(env->mem_phis, node); + + // create spillinfos for the phi arguments + spill = get_spillinfo(env, node); + for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { + ir_node *arg = get_irn_n(node, i); + get_spillinfo(env, arg); + } + + // if we had a spill for the phi value before, then remove this spill from + // schedule, as we will remove it in the insert spill/reload phase + if(spill->spill != NULL && !is_Phi(spill->spill)) { + assert(spill->old_spill == NULL); + spill->old_spill = spill->spill; + spill->spill = NULL; + } +} + +/* + * ____ _ ____ _ _ _ + * / ___|_ __ ___ __ _| |_ ___ / ___| _ __ (_) | |___ + * | | | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __| + * | |___| | | __/ (_| | || __/ ___) | |_) | | | \__ \ + * \____|_| \___|\__,_|\__\___| |____/| .__/|_|_|_|___/ + * |_| + */ + /** * Schedules a node after an instruction. 
(That is the place after all projs and phis * that are scheduled after the instruction) + * This function also skips phi nodes at the beginning of a block */ static void sched_add_after_insn(ir_node *sched_after, ir_node *node) { ir_node *next = sched_next(sched_after); - while(!sched_is_end(next)) { - if(!is_Proj(next) && !is_Phi(next)) - break; + while(is_Proj(next) || is_Phi(next)) { next = sched_next(next); } + assert(next != NULL); if(sched_is_end(next)) { - next = sched_last(get_nodes_block(sched_after)); - sched_add_after(next, node); + sched_add_after(sched_last(get_nodes_block(sched_after)), node); } else { sched_add_before(next, node); } @@ -143,20 +302,29 @@ static void sched_add_after_insn(ir_node *sched_after, ir_node *node) { * @return a be_Spill node */ static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) { - const be_main_env_t *mainenv = env->chordal_env->birg->main_env; ir_node *to_spill = spillinfo->spilled_node; DBG((env->dbg, LEVEL_1, "%+F\n", to_spill)); /* Trying to spill an already spilled value, no need for a new spill * node then, we can simply connect to the same one for this reload + * + * (although rematerialization code should handle most of these cases + * this can still happen when spilling Phis) */ if(be_is_Reload(to_spill)) { spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem); return; } - spillinfo->spill = be_spill(mainenv->arch_env, to_spill); + if (arch_irn_is(env->arch_env, to_spill, dont_spill)) { + if (env->chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) + ir_fprintf(stderr, "Verify warning: spilling 'dont_spill' node %+F\n", to_spill); + else if (env->chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) + assert(0 && "Attempt to spill a node marked 'dont_spill'"); + } + + spillinfo->spill = be_spill(env->arch_env, to_spill); sched_add_after_insn(to_spill, spillinfo->spill); } @@ -196,6 +364,21 @@ static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) { set_irn_n(spillinfo->spill, i, arg_info->spill); } + + // rewire reloads from old_spill to phi + if(spillinfo->old_spill != NULL) { + const ir_edge_t *edge, *next; + ir_node *old_spill = spillinfo->old_spill; + + foreach_out_edge_safe(old_spill, edge, next) { + ir_node* reload = get_edge_src_irn(edge); + assert(be_is_Reload(reload) || is_Phi(reload)); + set_irn_n(reload, get_edge_src_pos(edge), spillinfo->spill); + } + set_irn_n(old_spill, be_pos_Spill_val, new_Bad()); + //sched_remove(old_spill); + spillinfo->old_spill = NULL; + } } /** @@ -219,129 +402,140 @@ static void spill_node(spill_env_t *env, spill_info_t *spillinfo) { } } -static INLINE ir_node *skip_projs(ir_node *node) { - while(is_Proj(node)) { - node = sched_next(node); - assert(!sched_is_end(node)); - } - - return node; -} +/* + * + * ____ _ _ _ _ + * | _ \ ___ _ __ ___ __ _| |_ ___ _ __(_) __ _| (_)_______ + * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_ / _ \ + * | _ < __/ | | | | | (_| | || __/ | | | (_| | | |/ / __/ + * |_| \_\___|_| |_| |_|\__,_|\__\___|_| |_|\__,_|_|_/___\___| + * + */ -#if 0 /** - * Searchs the schedule backwards until we reach the first use or def of a - * value or a phi. 
- * Returns the node after this node (so that you can do sched_add_before) + * Tests whether value @p arg is available before node @p reloader + * @returns 1 if value is available, 0 otherwise */ -static ir_node *find_last_use_def(spill_env_t *env, ir_node *block, ir_node *value) { - ir_node *node, *last; +static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader) { + if(is_Unknown(arg) || arg == new_NoMem()) + return 1; + + if(be_is_Spill(arg)) + return 1; + + if(arg == get_irg_frame(env->chordal_env->irg)) + return 1; + + // hack for now (happens when command should be inserted at end of block) + if(is_Block(reloader)) { + return 0; + } - last = NULL; - sched_foreach_reverse(block, node) { + /* the following test does not work while spilling, + * because the liveness info is not adapted yet to the effects of the + * additional spills/reloads. + * + * So we can only do this test for ignore registers (of our register class) + */ + if(arch_get_irn_reg_class(env->arch_env, arg, -1) == env->cls + && arch_irn_is(env->arch_env, arg, ignore)) { int i, arity; - if(is_Phi(node)) { - return last; - } - if(value == node) { - return skip_projs(last); + /* we want to remat before the insn reloader + * thus an arguments is alive if + * - it interferes with the reloaders result + * - or it is (last-) used by reloader itself + */ + if (values_interfere(env->birg->lv, reloader, arg)) { + return 1; } - for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { - ir_node *arg = get_irn_n(node, i); - if(arg == value) { - return skip_projs(last); - } + + arity = get_irn_arity(reloader); + for (i = 0; i < arity; ++i) { + ir_node *rel_arg = get_irn_n(reloader, i); + if (rel_arg == arg) + return 1; } - last = node; } - // simply return first node if no def or use found - return sched_first(block); + return 0; } -#endif - -#ifdef BUGGY_REMAT /** - * Check if a spilled node could be rematerialized. - * - * @param senv the spill environment - * @param spill the Spill node - * @param spilled the node that was spilled - * @param reloader a irn that requires a reload + * Checks whether the node can principally be rematerialized */ -static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) { - int pos, max; +static int is_remat_node(spill_env_t *env, ir_node *node) { + const arch_env_t *arch_env = env->arch_env; - /* check for 'normal' spill and general remat condition */ - if (!arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable)) - return 0; + assert(!be_is_Spill(node)); - /* check availability of original arguments */ - if (is_Block(reloader)) { + if(arch_irn_is(arch_env, node, rematerializable)) { + return 1; + } - /* we want to remat at the end of a block. - * thus all arguments must be alive at the end of the block - */ - for (pos=0, max=get_irn_arity(spilled); pos REMAT_COST_LIMIT if remat is not possible. 
+ */ +static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_node *reloader, int parentcosts) { + int i, arity; + int argremats; + int costs = 0; - if (values_interfere(reloader, arg)) - goto is_alive; + if(!is_remat_node(env, spilled)) + return REMAT_COST_LIMIT; - for (i=0, m=get_irn_arity(reloader); iarch_env, spilled); + } + if(parentcosts + costs >= REMAT_COST_LIMIT) { + return REMAT_COST_LIMIT; + } - /* arg is not alive before reloader */ - return 0; + argremats = 0; + for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) { + ir_node *arg = get_irn_n(spilled, i); -is_alive: ; + if(is_value_available(env, arg, reloader)) + continue; + // we have to rematerialize the argument as well... + if(argremats >= 1) { + /* we only support rematerializing 1 argument at the moment, + * so that we don't have to care about register pressure + */ + return REMAT_COST_LIMIT; } + argremats++; + costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs); + if(parentcosts + costs >= REMAT_COST_LIMIT) + return REMAT_COST_LIMIT; } - return 1; + return costs; } -#else /* BUGGY_REMAT */ +static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) { + int costs = check_remat_conditions_costs(env, spilled, reloader, 0); -/** - * A very simple rematerialization checker. - * - * @param senv the spill environment - * @param spill the Spill node - * @param spilled the node that was spilled - * @param reloader a irn that requires a reload - */ -static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) { - const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env; - - return get_irn_arity(spilled) == 0 && - arch_irn_is(aenv, spilled, rematerializable); + return costs < REMAT_COST_LIMIT; } -#endif /* BUGGY_REMAT */ - /** * Re-materialize a node. * @@ -349,137 +543,136 @@ static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node * * @param spilled the node that was spilled * @param reloader a irn that requires a reload */ -static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) { +static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) { + int i, arity; ir_node *res; - ir_node *bl = (is_Block(reloader)) ? 
reloader : get_nodes_block(reloader); + ir_node *bl; + ir_node **ins; + + if(is_Block(reloader)) { + bl = reloader; + } else { + bl = get_nodes_block(reloader); + } - /* recompute the value */ - res = new_ir_node(get_irn_dbg_info(spilled), senv->chordal_env->irg, bl, + ins = alloca(get_irn_arity(spilled) * sizeof(ins[0])); + for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) { + ir_node *arg = get_irn_n(spilled, i); + + if(is_value_available(env, arg, reloader)) { + ins[i] = arg; + } else { + ins[i] = do_remat(env, arg, reloader); + } + } + + /* create a copy of the node */ + res = new_ir_node(get_irn_dbg_info(spilled), env->chordal_env->irg, bl, get_irn_op(spilled), get_irn_mode(spilled), get_irn_arity(spilled), - get_irn_in(spilled) + 1); + ins); copy_node_attr(spilled, res); + new_backedge_info(res); + sched_reset(res); - DBG((senv->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader)); + DBG((env->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader)); /* insert in schedule */ - if (is_Block(reloader)) { - ir_node *insert = sched_skip(reloader, 0, sched_skip_cf_predicator, (void *) senv->chordal_env->birg->main_env->arch_env); - sched_add_after(insert, res); - } else { - sched_add_before(reloader, res); - } + sched_add_before(reloader, res); return res; } -void be_spill_phi(spill_env_t *env, ir_node *node) { - int i, arity; +int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) { + spill_info_t *spill_info; - assert(is_Phi(node)); + if(be_do_remats) { + // is the node rematerializable? + int costs = check_remat_conditions_costs(env, to_spill, before, 0); + if(costs < REMAT_COST_LIMIT) + return costs; + } - pset_insert_ptr(env->mem_phis, node); + // do we already have a spill? + spill_info = find_spillinfo(env, to_spill); + if(spill_info != NULL && spill_info->spill != NULL) + return env->reload_cost; - // create spillinfos for the phi arguments - get_spillinfo(env, node); - for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { - ir_node *arg = get_irn_n(node, i); - get_spillinfo(env, arg); - } + return env->spill_cost + env->reload_cost; +} + +int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) { + ir_node *before = get_reload_insertion_point(block, pos); + return be_get_reload_costs(env, to_spill, before); } +/* + * ___ _ ____ _ _ + * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___ + * | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __| + * | || | | \__ \ __/ | | |_ | _ < __/ | (_) | (_| | (_| \__ \ + * |___|_| |_|___/\___|_| \__| |_| \_\___|_|\___/ \__,_|\__,_|___/ + * + */ + void be_insert_spills_reloads(spill_env_t *env) { - const arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env; - spill_info_t *si; + const arch_env_t *arch_env = env->arch_env; + int remats = 0; + int reloads = 0; + int spills = 0; + spill_info_t *si; /* process each spilled node */ - DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n")); - for(si = set_first(env->spills); si; si = set_next(env->spills)) { + for (si = set_first(env->spills); si; si = set_next(env->spills)) { reloader_t *rld; - ir_mode *mode = get_irn_mode(si->spilled_node); - pset *values = pset_new_ptr(16); + ir_mode *mode = get_irn_mode(si->spilled_node); + pset *values = pset_new_ptr(16); /* go through all reloads for this spill */ - for(rld = si->reloaders; rld; rld = rld->next) { + for (rld = si->reloaders; rld; rld = rld->next) { ir_node *new_val; - if (check_remat_conditions(env, 
si->spilled_node, rld->reloader)) { + if (be_do_remats && check_remat_conditions(env, si->spilled_node, rld->reloader)) { new_val = do_remat(env, si->spilled_node, rld->reloader); - } else { + remats++; + } + else { /* make sure we have a spill */ - spill_node(env, si); - - /* do a reload */ - new_val = be_reload(arch_env, env->cls, rld->reloader, mode, si->spill); + if (si->spill == NULL) { + spill_node(env, si); + spills++; + } + + /* create a reload */ + new_val = be_reload(arch_env, si->reload_cls, rld->reloader, mode, si->spill); + reloads++; } DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader)); pset_insert_ptr(values, new_val); } - if(pset_count(values) > 0) { + if (pset_count(values) > 0) { /* introduce copies, rewire the uses */ pset_insert_ptr(values, si->spilled_node); - be_ssa_constr_set_ignore(env->chordal_env->dom_front, env->chordal_env->lv, values, env->mem_phis); + be_ssa_constr_set_ignore(env->birg->dom_front, env->birg->lv, values, env->mem_phis); } del_pset(values); - } - - // reloads are placed now, but we might reuse the spill environment for further spilling decisions - del_set(env->spills); - env->spills = new_set(cmp_spillinfo, 1024); -} - -void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) { - spill_info_t *info; - reloader_t *rel; - - assert(sched_is_scheduled(before)); - assert(arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->cls, to_spill)); - - info = get_spillinfo(env, to_spill); - - if(is_Phi(to_spill)) { - int i, arity; - // create spillinfos for the phi arguments - for(i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) { - ir_node *arg = get_irn_n(to_spill, i); - get_spillinfo(env, arg); - } - } - rel = obstack_alloc(&env->obst, sizeof(rel[0])); - rel->reloader = before; - rel->next = info->reloaders; - info->reloaders = rel; - be_liveness_add_missing(env->chordal_env->lv); -} - -void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) { - ir_node *predblock, *last; - - /* simply add the reload to the beginning of the block if we only have 1 predecessor - * (we don't need to check for phis as there can't be any in a block with only 1 pred) - */ - if(get_Block_n_cfgpreds(block) == 1) { - assert(!is_Phi(sched_first(block))); - be_add_reload(env, to_spill, sched_first(block)); - return; + si->reloaders = NULL; } - /* We have to reload the value in pred-block */ - predblock = get_Block_cfgpred_block(block, pos); - last = sched_last(predblock); - - /* we might have projs and keepanys behind the jump... */ - while(is_Proj(last) || be_is_Keep(last)) { - last = sched_prev(last); - assert(!sched_is_end(last)); +#ifdef FIRM_STATISTICS + if (be_stat_ev_is_active()) { + be_stat_ev("spill_spills", spills); + be_stat_ev("spill_reloads", reloads); + be_stat_ev("spill_remats", remats); } - assert(is_cfop(last)); +#endif /* FIRM_STATISTICS */ - // add the reload before the (cond-)jump - be_add_reload(env, to_spill, last); + be_remove_dead_nodes_from_schedule(env->chordal_env->irg); + //be_liveness_recompute(env->birg->lv); + be_invalidate_liveness(env->birg); }
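
Illustrative sketch (not part of the patch): the hunks above rework the spill interface so that callers pass the reload register class explicitly, announce spilled Phis via be_spill_phi(), and can query reload/remat costs before deciding. The following hypothetical driver shows how that interface is meant to be used; the function names and signatures (be_new_spill_env, be_spill_phi, be_add_reload, be_get_reload_costs, be_insert_spills_reloads, be_delete_spill_env) are taken from this diff, while example_spill_value() and its arguments are invented for illustration and assume the includes already present in bespill.c plus a fully set-up birg (schedule and liveness available).

static void example_spill_value(const be_chordal_env_t *chordal_env,
                                ir_node *value, ir_node *use_point,
                                const arch_register_class_t *cls)
{
	/* set up a spill environment for this chordal/register-class context */
	spill_env_t *senv = be_new_spill_env(chordal_env);

	/* optional: ask how expensive making 'value' available before 'use_point'
	 * would be; per this patch the answer is the remat costs when they stay
	 * below REMAT_COST_LIMIT, otherwise spill and/or reload costs */
	int costs = be_get_reload_costs(senv, value, use_point);
	(void) costs;

	/* Phis that should live in memory are announced separately, so that a
	 * PhiM can later be built from the spills of their arguments */
	if (is_Phi(value))
		be_spill_phi(senv, value);

	/* request that 'value' (which must belong to register class 'cls') is
	 * available again right before 'use_point'; the reload register class is
	 * now passed explicitly */
	be_add_reload(senv, value, use_point, cls);

	/* materialize all recorded spills, reloads and remats in the schedule */
	be_insert_spills_reloads(senv);
	be_delete_spill_env(senv);
}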