X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespill.c;h=2495cc7f3cd174c2696a2387cdf4e079f7fe9aa8;hb=d5975c9df2dee13abedf388f1d70397a2163b69f;hp=97f5a071f3aa365f2d4c2c7c2ed2a490a34aee71;hpb=6a6aa7649687baaf542da80d996525fb41ee585d;p=libfirm diff --git a/ir/be/bespill.c b/ir/be/bespill.c index 97f5a071f..2495cc7f3 100644 --- a/ir/be/bespill.c +++ b/ir/be/bespill.c @@ -1,8 +1,28 @@ +/* + * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * + * This file is part of libFirm. + * + * This file may be distributed and/or modified under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation and appearing in the file LICENSE.GPL included in the + * packaging of this file. + * + * Licensees holding valid libFirm Professional Edition licenses may use + * this file in accordance with the libFirm Commercial License. + * Agreement provided with the Software. + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. + */ + /** - * Author: Daniel Grund, Sebastian Hack - * Date: 29.09.2005 - * Copyright: (c) Universitaet Karlsruhe - * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE. + * @file + * @brief Main spill driver. + * @author Daniel Grund, Sebastian Hack, Matthias Braun + * @date 29.09.2005 + * @version $Id$ */ #ifdef HAVE_CONFIG_H #include "config.h" @@ -14,6 +34,8 @@ #include "irnode_t.h" #include "ircons_t.h" #include "iredges_t.h" +#include "irbackedge_t.h" +#include "irprintf.h" #include "ident_t.h" #include "type_t.h" #include "entity_t.h" @@ -21,225 +43,363 @@ #include "irgwalk.h" #include "array.h" #include "pdeq.h" +#include "execfreq.h" +#include "irnodeset.h" #include "belive_t.h" #include "besched_t.h" #include "bespill.h" +#include "belive_t.h" #include "benode_t.h" #include "bechordal_t.h" - -#define REMAT -/* This enables re-computation of values. Current state: Unfinished and buggy. */ -#undef BUGGY_REMAT +#include "bejavacoal.h" +#include "benodesets.h" +#include "bespilloptions.h" +#include "bestatevent.h" +#include "bessaconstr.h" +#include "beirg_t.h" +#include "beintlive_t.h" + +/* only rematerialise when costs are less than REMAT_COST_LIMIT */ +/* TODO determine a good value here... */ +#define REMAT_COST_LIMIT 10 typedef struct _reloader_t reloader_t; -typedef struct _spill_info_t spill_info_t; struct _reloader_t { reloader_t *next; - ir_node *reloader; + ir_node *reloader; + ir_node *rematted_node; + int allow_remat; /**< the node may be rematted instead of reloaded if global remat option is on */ }; -struct _spill_info_t { +typedef struct _spill_info_t { + /** the value that should get spilled */ ir_node *spilled_node; + /** list of places where the value should get reloaded */ reloader_t *reloaders; -}; -typedef struct _spill_ctx_t { - ir_node *spilled; /**< The spilled node. */ - ir_node *user; /**< The node this spill is for. */ - ir_node *spill; /**< The spill itself. 
*/ -} spill_ctx_t; + /** the spill node, or a PhiM node */ + ir_node *spill; + /** if we had the value of a phi spilled before but not the phi itself then + * this field contains the spill for the phi value */ + ir_node *old_spill; + + /** the register class in which the reload should be placed */ + const arch_register_class_t *reload_cls; +} spill_info_t; struct _spill_env_t { - const arch_register_class_t *cls; - const be_chordal_env_t *chordal_env; + const arch_env_t *arch_env; + ir_graph *irg; struct obstack obst; - set *spill_ctxs; - set *spills; /**< all spill_info_t's, which must be placed */ - pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */ - ir_node **copies; /**< set of copies placed because of phi spills */ + be_irg_t *birg; + int spill_cost; /**< the cost of a single spill node */ + int reload_cost; /**< the cost of a reload node */ + set *spills; /**< all spill_info_t's, which must be placed */ + ir_nodeset_t mem_phis; /**< set of all special spilled phis. allocated and freed separately */ + DEBUG_ONLY(firm_dbg_module_t *dbg;) }; -/* associated Phi -> Spill*/ -typedef struct _phi_spill_assoc_t { - ir_node *phi; - ir_node *spill; -} phi_spill_assoc_t; - /** - * Compare two Phi->Spill associations. + * Compare two spill infos. */ -static int cmp_phi_spill_assoc(const void *a, const void *b, size_t n) { - const phi_spill_assoc_t *p1 = a; - const phi_spill_assoc_t *p2 = b; - return p1->phi != p2->phi; +static int cmp_spillinfo(const void *x, const void *y, size_t size) { + const spill_info_t *xx = x; + const spill_info_t *yy = y; + return xx->spilled_node != yy->spilled_node; } /** - * compare two spill contexts. + * Returns spill info for a specific value (returns NULL if the info doesn't + * exist yet) */ -static int cmp_spillctx(const void *a, const void *b, size_t n) { - const spill_ctx_t *p = a; - const spill_ctx_t *q = b; - return p->user != q->user || p->spilled != q->spilled; +static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) { + spill_info_t info; + int hash = nodeset_hash(value); + + info.spilled_node = value; + + return set_find(env->spills, &info, sizeof(info), hash); } /** - * Compare two spill infos. + * Returns spill info for a specific value (the value that is to be spilled) */ -static int cmp_spillinfo(const void *x, const void *y, size_t size) { - const spill_info_t *xx = x; - const spill_info_t *yy = y; - return xx->spilled_node != yy->spilled_node; +static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) { + spill_info_t info, *res; + int hash = nodeset_hash(value); + + info.spilled_node = value; + res = set_find(env->spills, &info, sizeof(info), hash); + + if (res == NULL) { + info.reloaders = NULL; + info.spill = NULL; + info.old_spill = NULL; + info.reload_cls = NULL; + res = set_insert(env->spills, &info, sizeof(info), hash); + } + + return res; } -DEBUG_ONLY( +#ifdef DEBUG_libfirm /* Sets the debug module of a spill environment. */ void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) { env->dbg = dbg; } -) +#endif /* Creates a new spill environment. 
*/ -spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env) { +spill_env_t *be_new_spill_env(be_irg_t *birg) { spill_env_t *env = xmalloc(sizeof(env[0])); - env->spill_ctxs = new_set(cmp_spillctx, 1024); env->spills = new_set(cmp_spillinfo, 1024); - env->cls = chordal_env->cls; - env->chordal_env = chordal_env; - env->mem_phis = pset_new_ptr_default(); - env->copies = NEW_ARR_F(ir_node*, 0); + env->irg = be_get_birg_irg(birg); + env->birg = birg; + env->arch_env = birg->main_env->arch_env; + ir_nodeset_init(&env->mem_phis); + // TODO, ask backend about costs..., or even ask backend whether we should + // rematerialize... + env->spill_cost = 8; + env->reload_cost = 5; obstack_init(&env->obst); return env; } /* Deletes a spill environment. */ void be_delete_spill_env(spill_env_t *env) { - del_set(env->spill_ctxs); del_set(env->spills); - del_pset(env->mem_phis); - DEL_ARR_F(env->copies); + ir_nodeset_destroy(&env->mem_phis); obstack_free(&env->obst, NULL); free(env); } -/** - * Returns a spill context. If the context did not exists, create one. +/* + * ____ _ ____ _ _ + * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___ + * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __| + * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \ + * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/ * - * @param sc the set containing all spill contexts - * @param to_spill the node that should be spilled - * @param ctx_irn an user of the spilled node - * - * @return a spill context. */ -static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) { - spill_ctx_t templ; - templ.spilled = to_spill; - templ.user = ctx_irn; - templ.spill = NULL; +void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before, ir_node *rematted_node) { + spill_info_t *spill_info; + reloader_t *reloader; + + spill_info = get_spillinfo(env, to_spill); + + /* add the remat information */ + reloader = obstack_alloc(&env->obst, sizeof(reloader[0])); + reloader->next = spill_info->reloaders; + reloader->reloader = before; + reloader->rematted_node = rematted_node; + reloader->allow_remat = 1; - return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn))); + spill_info->reloaders = reloader; + + DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n", + to_spill, before)); } -/** - * Creates a spill. - * - * @param senv the spill environment - * @param irn the node that should be spilled - * @param ctx_irn an user of the spilled node - * - * @return a be_Spill node - */ -static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) { - spill_ctx_t *ctx; - const be_main_env_t *env = senv->chordal_env->birg->main_env; - DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn)); +void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before, + const arch_register_class_t *reload_cls, int allow_remat) +{ + spill_info_t *info; + reloader_t *rel; - // Has the value already been spilled? 
- ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn); - if(ctx->spill) - return ctx->spill; + info = get_spillinfo(env, to_spill); - /* Trying to spill an already spilled value, no need for a new spill - * node then, we can simply connect to the same one for this reload - */ - if(be_is_Reload(irn)) { - return get_irn_n(irn, be_pos_Reload_mem); + if (is_Phi(to_spill)) { + int i, arity; + + /* create spillinfos for the phi arguments */ + for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) { + ir_node *arg = get_irn_n(to_spill, i); + get_spillinfo(env, arg); + } + +#if 1 + // hackery... sometimes the morgan algo spilled the value of a phi, + // the belady algo decides later to spill the whole phi, then sees the + // spill node and adds a reload for that spill node, problem is the + // reload gets attach to that same spill (and is totally unnecessary) + if (info->old_spill != NULL && + (before == info->old_spill || value_dominates(before, info->old_spill))) + { + printf("spilledphi hack was needed...\n"); + before = sched_next(info->old_spill); + } +#endif } - ctx->spill = be_spill(env->arch_env, irn, ctx_irn); + /* put reload into list */ + rel = obstack_alloc(&env->obst, sizeof(rel[0])); + rel->next = info->reloaders; + rel->reloader = before; + rel->rematted_node = NULL; + rel->allow_remat = allow_remat; - return ctx->spill; + info->reloaders = rel; + assert(info->reload_cls == NULL || info->reload_cls == reload_cls); + info->reload_cls = reload_cls; + + DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n", + to_spill, before, allow_remat ? "" : " not")); } /** - * Removes all copies introduced for phi-spills + * Returns the point at which you can insert a node that should be executed + * before block @p block when coming from pred @p pos. */ -static void remove_copies(spill_env_t *env) { - int i; +static ir_node *get_block_insertion_point(ir_node *block, int pos) { + ir_node *predblock, *last; - for(i = 0; i < ARR_LEN(env->copies); ++i) { - ir_node *node = env->copies[i]; - ir_node *src; - const ir_edge_t *edge, *ne; + /* simply add the reload to the beginning of the block if we only have 1 + * predecessor. We don't need to check for phis as there can't be any in a + * block with only 1 pred. */ + if(get_Block_n_cfgpreds(block) == 1) { + assert(!is_Phi(sched_first(block))); + return sched_first(block); + } - assert(be_is_Copy(node)); + /* We have to reload the value in pred-block */ + predblock = get_Block_cfgpred_block(block, pos); + last = sched_last(predblock); - src = be_get_Copy_op(node); - foreach_out_edge_safe(node, edge, ne) { - ir_node *user = get_edge_src_irn(edge); - int user_pos = get_edge_src_pos(edge); + /* we might have projs and keepanys behind the jump... 
*/ + while(is_Proj(last) || be_is_Keep(last)) { + last = sched_prev(last); + assert(!sched_is_end(last)); + } - set_irn_n(user, user_pos, src); - } + if(!is_cfop(last)) { + last = sched_next(last); + // last node must be a cfop, only exception is the start block + assert(last == get_irg_start_block(get_irn_irg(block))); } - ARR_SETLEN(ir_node*, env->copies, 0); + // add the reload before the (cond-)jump + return last; } +void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos, + const arch_register_class_t *reload_cls, int allow_remat) +{ + ir_node *before = get_block_insertion_point(block, pos); + be_add_reload(env, to_spill, before, reload_cls, allow_remat); +} + +void be_spill_phi(spill_env_t *env, ir_node *node) { + spill_info_t* spill; + int i, arity; + + assert(is_Phi(node)); + + ir_nodeset_insert(&env->mem_phis, node); + + // create spillinfos for the phi arguments + spill = get_spillinfo(env, node); + for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { + ir_node *arg = get_irn_n(node, i); + get_spillinfo(env, arg); + } + + // if we had a spill for the phi value before, then remove this spill from + // schedule, as we will remove it in the insert spill/reload phase + if(spill->spill != NULL && !is_Phi(spill->spill)) { + assert(spill->old_spill == NULL); + spill->old_spill = spill->spill; + spill->spill = NULL; + } +} + +/* + * ____ _ ____ _ _ _ + * / ___|_ __ ___ __ _| |_ ___ / ___| _ __ (_) | |___ + * | | | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __| + * | |___| | | __/ (_| | || __/ ___) | |_) | | | \__ \ + * \____|_| \___|\__,_|\__\___| |____/| .__/|_|_|_|___/ + * |_| + */ + /** - * Inserts a copy (needed for spilled phi handling) of a value at the earliest - * possible location in a block. That is after the last use/def of the value or at - * the beginning of the block if there is no use/def. + * Schedules a node after an instruction. That is the place after all projs and + * phis that are scheduled after the instruction. This function also skips phi + * nodes at the beginning of a block */ -static ir_node *insert_copy(spill_env_t *env, ir_node *block, ir_node *value) { - ir_node* node; - ir_graph *irg = get_irn_irg(block); - ir_node *copy = be_new_Copy(env->cls, irg, block, value); +static void sched_add_after_insn(ir_node *sched_after, ir_node *node) { + ir_node *next = sched_next(sched_after); + while(is_Proj(next) || is_Phi(next)) { + next = sched_next(next); + } + assert(next != NULL); - ARR_APP1(ir_node*, env->copies, copy); + if(sched_is_end(next)) { + sched_add_after(sched_last(get_nodes_block(sched_after)), node); + } else { + sched_add_before(next, node); + } +} - // walk schedule backwards until we find a use, a def, or until we have reached the first phi - sched_foreach_reverse(block, node) { - int i, arity; +/** + * Creates a spill. + * + * @param senv the spill environment + * @param irn the node that should be spilled + * @param ctx_irn an user of the spilled node + * + * @return a be_Spill node + */ +static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) { + optimization_state_t opt; + ir_node *to_spill = spillinfo->spilled_node; - if(is_Phi(node)) { - sched_add_after(node, copy); - goto placed; - } - if(value == node) { - sched_add_after(node, copy); - goto placed; - } - for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { - ir_node *arg = get_irn_n(node, i); - if(arg == value) { - sched_add_after(node, copy); - goto placed; - } - } + DBG((env->dbg, LEVEL_1, "spilling %+F ... 
", to_spill)); + + /* Trying to spill an already spilled value, no need for a new spill + * node then, we can simply connect to the same one for this reload + * + * Normally reloads get simply rematerialized instead of spilled again; this + * can happen annyway when the reload is the pred of a phi to spill) + */ + if (be_is_Reload(to_spill)) { + spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem); + DB((env->dbg, LEVEL_1, "skip reload, using existing spill %+F\n", spillinfo->spill)); + return; } - // we didn't find a use or a phi yet, so place the copy at the beginning of the block - sched_add_before(sched_first(block), copy); -placed: + assert(!(arch_irn_is(env->arch_env, to_spill, dont_spill) + && "Attempt to spill a node marked 'dont_spill'")); + + /* some backends have virtual noreg/unknown nodes that are not scheduled */ + if(!sched_is_scheduled(to_spill)) { + spillinfo->spill = new_NoMem(); + return; + } - return copy; + + /* + We switch on optimizations here to get CSE. This is needed as some + backends have some extra spill phases and we want to make use of those + spills instead of creating new ones. + */ + save_optimization_state(&opt); + set_optimize(1); + spillinfo->spill = be_spill(env->arch_env, to_spill); + restore_optimization_state(&opt); + if (! sched_is_scheduled(spillinfo->spill)) { + DB((env->dbg, LEVEL_1, "add spill %+F after %+F\n", spillinfo->spill, to_spill)); + sched_add_after_insn(to_spill, spillinfo->spill); + } else { + DB((env->dbg, LEVEL_1, "re-using spill %+F after %+F\n", spillinfo->spill, to_spill)); + } } +static void spill_node(spill_env_t *env, spill_info_t *spillinfo); + /** * If the first usage of a Phi result would be out of memory * there is no sense in allocating a register for it. @@ -249,68 +409,55 @@ placed: * @param senv the spill environment * @param phi the Phi node that should be spilled * @param ctx_irn an user of the spilled node - * - * @return a be_Spill node */ -static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set *already_visited_phis, bitset_t *bs) { - int i; +static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) { + ir_node *phi = spillinfo->spilled_node; + int i; int arity = get_irn_arity(phi); - ir_graph *irg = senv->chordal_env->irg; - ir_node *bl = get_nodes_block(phi); - ir_node **ins, *phi_spill; - phi_spill_assoc_t key; - spill_ctx_t *ctx; + ir_node *block = get_nodes_block(phi); + ir_node **ins; assert(is_Phi(phi)); - DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn)); + DBG((env->dbg, LEVEL_1, "spilling Phi %+F:\n", phi)); /* build a new PhiM */ - NEW_ARR_A(ir_node *, ins, arity); - for (i = 0; i < arity; ++i) { - ins[i] = new_r_Bad(irg); + ins = alloca(sizeof(ir_node*) * arity); + for(i = 0; i < arity; ++i) { + ins[i] = get_irg_bad(env->irg); } - phi_spill = new_r_Phi(senv->chordal_env->irg, bl, arity, ins, mode_M); - key.phi = phi; - key.spill = phi_spill; - set_insert(already_visited_phis, &key, sizeof(key), HASH_PTR(phi)); - bitset_set(bs, get_irn_idx(phi)); - - /* search an existing spill for this context */ - ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn); - - /* if not found spill the phi */ - if (! ctx->spill) { - /* collect all arguments of the phi */ - for (i = 0; i < arity; ++i) { - ir_node *arg = get_irn_n(phi, i); - ir_node *sub_res; - phi_spill_assoc_t *entry; - - if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) { - // looping edge? - if(arg == phi) { - sub_res = phi_spill; - } else if (! 
bitset_is_set(bs, get_irn_idx(arg))) { - sub_res = spill_phi(senv, arg, ctx_irn, already_visited_phis, bs); - } else { - /* we already visited the argument phi: get it's spill */ - key.phi = arg; - key.spill = NULL; - entry = set_find(already_visited_phis, &key, sizeof(key), HASH_PTR(arg)); - assert(entry && "argument phi already visited, but no spill found?!?"); - sub_res = entry->spill; - assert(sub_res && "spill missing?!?"); - } - } - else - sub_res = be_spill_irn(senv, arg, ctx_irn); + spillinfo->spill = new_r_Phi(env->irg, block, arity, ins, mode_M); - set_irn_n(phi_spill, i, sub_res); - } + for(i = 0; i < arity; ++i) { + ir_node *arg = get_irn_n(phi, i); + spill_info_t *arg_info = get_spillinfo(env, arg); + + spill_node(env, arg_info); - ctx->spill = phi_spill; + set_irn_n(spillinfo->spill, i, arg_info->spill); + } + DBG((env->dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi, spillinfo->spill)); + + // rewire reloads from old_spill to phi + if (spillinfo->old_spill != NULL) { + const ir_edge_t *edge, *next; + ir_node *old_spill = spillinfo->old_spill; + + DBG((env->dbg, LEVEL_1, "old spill found, rewiring reloads:\n")); + + foreach_out_edge_safe(old_spill, edge, next) { + ir_node *reload = get_edge_src_irn(edge); + int pos = get_edge_src_pos(edge); + + DBG((env->dbg, LEVEL_1, "\tset input %d of %+F to %+F\n", pos, reload, spillinfo->spill)); + + assert(be_is_Reload(reload) || is_Phi(reload)); + set_irn_n(reload, pos, spillinfo->spill); + } + DBG((env->dbg, LEVEL_1, "\tset input of %+F to BAD\n", old_spill)); + set_irn_n(old_spill, be_pos_Spill_val, new_Bad()); + //sched_remove(old_spill); + spillinfo->old_spill = NULL; } - return ctx->spill; } /** @@ -318,545 +465,332 @@ static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set * * @param senv the spill environment * @param to_spill the node that should be spilled - * - * @return a be_Spill node */ -static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) { - ir_graph *irg = get_irn_irg(to_spill); - ir_node *res; - - if (pset_find_ptr(senv->mem_phis, to_spill)) { - set *already_visited_phis = new_set(cmp_phi_spill_assoc, 10); - bitset_t *bs = bitset_alloca(get_irg_last_idx(irg)); - res = spill_phi(senv, to_spill, to_spill, already_visited_phis, bs); - del_set(already_visited_phis); - } else { - res = be_spill_irn(senv, to_spill, to_spill); - } +static void spill_node(spill_env_t *env, spill_info_t *spillinfo) { + ir_node *to_spill; - return res; -} + // the node should be tagged for spilling already... + if(spillinfo->spill != NULL) + return; -#ifdef REMAT + to_spill = spillinfo->spilled_node; -#ifdef BUGGY_REMAT + if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) { + spill_phi(env, spillinfo); + } else { + spill_irn(env, spillinfo); + } +} -/** - * Check if a spilled node could be rematerialized. 
+/* + * + * ____ _ _ _ _ + * | _ \ ___ _ __ ___ __ _| |_ ___ _ __(_) __ _| (_)_______ + * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_ / _ \ + * | _ < __/ | | | | | (_| | || __/ | | | (_| | | |/ / __/ + * |_| \_\___|_| |_| |_|\__,_|\__\___|_| |_|\__,_|_|_/___\___| * - * @param senv the spill environment - * @param spill the Spill node - * @param spilled the node that was spilled - * @param reloader a irn that requires a reload */ -static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) { - int pos, max; - - /* check for 'normal' spill and general remat condition */ - if (!be_is_Spill(spill) || !arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable)) - return 0; - /* check availability of original arguments */ - if (is_Block(reloader)) { +/** + * Tests whether value @p arg is available before node @p reloader + * @returns 1 if value is available, 0 otherwise + */ +static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader) { + if(is_Unknown(arg) || arg == new_NoMem()) + return 1; - /* we want to remat at the end of a block. - * thus all arguments must be alive at the end of the block - */ - for (pos=0, max=get_irn_arity(spilled); posirg)) + return 1; - /* we want to remat before the insn reloader - * thus an arguments is alive if - * - it interferes with the reloaders result - * or - * - or it is (last-) used by reloader itself - */ - for (pos=0, max=get_irn_arity(spilled); posarch_env, arg, ignore)) { + return 1; + } -is_alive: ; +#if 0 + /* the following test does not work while spilling, + * because the liveness info is not adapted yet to the effects of the + * additional spills/reloads. + */ - } + /* we want to remat before the insn reloader + * thus an arguments is alive if + * - it interferes with the reloaders result + * - or it is (last-) used by reloader itself + */ + if (values_interfere(env->birg->lv, reloader, arg)) { + return 1; + } + arity = get_irn_arity(reloader); + for (i = 0; i < arity; ++i) { + ir_node *rel_arg = get_irn_n(reloader, i); + if (rel_arg == arg) + return 1; } +#endif - return 1; + return 0; } -#else /* BUGGY_REMAT */ - /** - * A very simple rematerialization checker. - * - * @param senv the spill environment - * @param spill the Spill node - * @param spilled the node that was spilled - * @param reloader a irn that requires a reload + * Checks whether the node can principally be rematerialized */ -static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) { - const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env; +static int is_remat_node(spill_env_t *env, ir_node *node) { + const arch_env_t *arch_env = env->arch_env; - return get_irn_arity(spilled) == 0 && - be_is_Spill(spill) && - arch_irn_is(aenv, spilled, rematerializable); -} + assert(!be_is_Spill(node)); -#endif /* BUGGY_REMAT */ + if(arch_irn_is(arch_env, node, rematerializable)) { + return 1; + } -#endif /* REMAT */ + if(be_is_StackParam(node)) + return 1; + + return 0; +} /** - * Re-materialize a node. + * Check if a node is rematerializable. 
This tests for the following conditions: * - * @param senv the spill environment - * @param spilled the node that was spilled - * @param reloader a irn that requires a reload + * - The node itself is rematerializable + * - All arguments of the node are available or also rematerialisable + * - The costs for the rematerialisation operation is less or equal a limit + * + * Returns the costs needed for rematerialisation or something + * > REMAT_COST_LIMIT if remat is not possible. */ -static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) { - ir_node *res; - ir_node *bl = (is_Block(reloader)) ? reloader : get_nodes_block(reloader); - - /* recompute the value */ - res = new_ir_node(get_irn_dbg_info(spilled), senv->chordal_env->irg, bl, - get_irn_op(spilled), - get_irn_mode(spilled), - get_irn_arity(spilled), - get_irn_in(spilled) + 1); - copy_node_attr(spilled, res); +static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_node *reloader, int parentcosts) { + int i, arity; + int argremats; + int costs = 0; - DBG((senv->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader)); + if(!is_remat_node(env, spilled)) + return REMAT_COST_LIMIT; - /* insert in schedule */ - if (is_Block(reloader)) { - ir_node *insert = sched_skip(reloader, 0, sched_skip_cf_predicator, (void *) senv->chordal_env->birg->main_env->arch_env); - sched_add_after(insert, res); + if(be_is_Reload(spilled)) { + costs += 2; } else { - sched_add_before(reloader, res); + costs += arch_get_op_estimated_cost(env->arch_env, spilled); + } + if(parentcosts + costs >= REMAT_COST_LIMIT) { + return REMAT_COST_LIMIT; } - return res; -} - -void be_spill_phi(spill_env_t *env, ir_node *node) { - assert(is_Phi(node)); - - pset_insert_ptr(env->mem_phis, node); -} - -void be_insert_spills_reloads(spill_env_t *env) { - const arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env; - ir_node *node; - spill_info_t *si; - - DBG((env->dbg, LEVEL_1, "Reloads for mem-phis:\n")); - foreach_pset(env->mem_phis, node) { - const ir_edge_t *e; - int i, arity; - - assert(is_Phi(node)); + argremats = 0; + for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) { + ir_node *arg = get_irn_n(spilled, i); - /* We have to place copy nodes in the predecessor blocks to temporarily - * produce new values that get separate spill slots - */ - for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { - ir_node *pred_block, *arg, *copy; + if(is_value_available(env, arg, reloader)) + continue; - /* Don't do anything for looping edges (there's no need - * and placing copies here breaks stuff as it suddenly - * generates new living values through the whole loop) + // we have to rematerialize the argument as well... 
+ if(argremats >= 1) { + /* we only support rematerializing 1 argument at the moment, + * so that we don't have to care about register pressure */ - arg = get_irn_n(node, i); - if(arg == node) - continue; - - pred_block = get_Block_cfgpred_block(get_nodes_block(node), i); - copy = insert_copy(env, pred_block, arg); - - set_irn_n(node, i, copy); + return REMAT_COST_LIMIT; } + argremats++; - /* Add reloads for mem_phis */ - /* BETTER: These reloads (1) should only be inserted, if they are really needed */ - DBG((env->dbg, LEVEL_1, " Mem-phi %+F\n", node)); - foreach_out_edge(node, e) { - ir_node *user = e->src; - if (is_Phi(user) && !pset_find_ptr(env->mem_phis, user)) { - ir_node *use_bl = get_nodes_block(user); - DBG((env->dbg, LEVEL_1, " non-mem-phi user %+F\n", user)); - be_add_reload_on_edge(env, node, use_bl, e->pos); /* (1) */ - } - } + costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs); + if(parentcosts + costs >= REMAT_COST_LIMIT) + return REMAT_COST_LIMIT; } - /* process each spilled node */ - DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n")); - for(si = set_first(env->spills); si; si = set_next(env->spills)) { - reloader_t *rld; - ir_mode *mode = get_irn_mode(si->spilled_node); - //ir_node *value; - pset *values = pset_new_ptr(16); - - /* go through all reloads for this spill */ - for(rld = si->reloaders; rld; rld = rld->next) { - ir_node *new_val; - - /* the spill for this reloader */ - ir_node *spill = be_spill_node(env, si->spilled_node); + return costs; +} -#ifdef REMAT - if (check_remat_conditions(env, spill, si->spilled_node, rld->reloader)) { - new_val = do_remat(env, si->spilled_node, rld->reloader); - //pdeq_putl(possibly_dead, spill); - } - else -#endif - /* do a reload */ - new_val = be_reload(arch_env, env->cls, rld->reloader, mode, spill); +static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) { + int costs = check_remat_conditions_costs(env, spilled, reloader, 0); - DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader)); - pset_insert_ptr(values, new_val); - } + return costs < REMAT_COST_LIMIT; +} - /* introduce copies, rewire the uses */ - assert(pset_count(values) > 0 && "???"); - pset_insert_ptr(values, si->spilled_node); - be_ssa_constr_set_ignore(env->chordal_env->dom_front, values, env->mem_phis); +/** + * Re-materialize a node. 
+ * + * @param senv the spill environment + * @param spilled the node that was spilled + * @param reloader a irn that requires a reload + */ +static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) { + int i, arity; + ir_node *res; + ir_node *bl; + ir_node **ins; - del_pset(values); + if(is_Block(reloader)) { + bl = reloader; + } else { + bl = get_nodes_block(reloader); } - remove_copies(env); + ins = alloca(get_irn_arity(spilled) * sizeof(ins[0])); + for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) { + ir_node *arg = get_irn_n(spilled, i); - // reloads are placed now, but we might reuse the spill environment for further spilling decisions - del_set(env->spills); - env->spills = new_set(cmp_spillinfo, 1024); -} + if(is_value_available(env, arg, reloader)) { + ins[i] = arg; + } else { + ins[i] = do_remat(env, arg, reloader); + } + } -void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) { - spill_info_t templ, *res; - reloader_t *rel; + /* create a copy of the node */ + res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl, + get_irn_op(spilled), + get_irn_mode(spilled), + get_irn_arity(spilled), + ins); + copy_node_attr(spilled, res); + new_backedge_info(res); + sched_reset(res); - assert(sched_is_scheduled(before)); - assert(arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->cls, to_spill)); + DBG((env->dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader)); - templ.spilled_node = to_spill; - templ.reloaders = NULL; - res = set_insert(env->spills, &templ, sizeof(templ), HASH_PTR(to_spill)); + /* insert in schedule */ + sched_add_before(reloader, res); - rel = obstack_alloc(&env->obst, sizeof(rel[0])); - rel->reloader = before; - rel->next = res->reloaders; - res->reloaders = rel; + return res; } -void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) { - ir_node *predblock, *last; +int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) { + spill_info_t *spill_info; - /* simply add the reload to the beginning of the block if we only have 1 predecessor - * (we don't need to check for phis as there can't be any in a block with only 1 pred) - */ - if(get_Block_n_cfgpreds(block) == 1) { - assert(!is_Phi(sched_first(block))); - be_add_reload(env, to_spill, sched_first(block)); - return; + if(be_do_remats) { + // is the node rematerializable? + int costs = check_remat_conditions_costs(env, to_spill, before, 0); + if(costs < REMAT_COST_LIMIT) + return costs; } - // We have to reload the value in pred-block - predblock = get_Block_cfgpred_block(block, pos); - last = sched_last(predblock); - // there should be exactly 1 jump at the end of the block - assert(is_cfop(last)); + // do we already have a spill? 
+ spill_info = find_spillinfo(env, to_spill); + if(spill_info != NULL && spill_info->spill != NULL) + return env->reload_cost; - // add the reload before the (cond-)jump - be_add_reload(env, to_spill, last); + return env->spill_cost + env->reload_cost; } -/**************************************** - - SPILL SLOT MANAGEMENT AND OPTS - -****************************************/ - -typedef struct _spill_slot_t { - unsigned size; - unsigned align; - pset *members; - ir_mode *largest_mode; /* the mode of all members with largest size */ -} spill_slot_t; - -typedef struct _ss_env_t { - struct obstack ob; - be_chordal_env_t *cenv; - pmap *slots; /* maps spill_contexts to spill_slots */ - pmap *types; /* maps modes to types */ - DEBUG_ONLY(firm_dbg_module_t *dbg;) -} ss_env_t; - +int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) { + ir_node *before = get_block_insertion_point(block, pos); + return be_get_reload_costs(env, to_spill, before); +} -/** - * Walker: compute the spill slots +/* + * ___ _ ____ _ _ + * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___ + * | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __| + * | || | | \__ \ __/ | | |_ | _ < __/ | (_) | (_| | (_| \__ \ + * |___|_| |_|___/\___|_| \__| |_| \_\___|_|\___/ \__,_|\__,_|___/ + * */ -static void compute_spill_slots_walker(ir_node *spill, void *env) { - ss_env_t *ssenv = env; - ir_node *ctx; - pmap_entry *entry; - spill_slot_t *ss; - if (!be_is_Spill(spill)) - return; - - /* check, if this spill is for a context already known */ - ctx = be_get_Spill_context(spill); - entry = pmap_find(ssenv->slots, ctx); - - if (!entry) { - struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env; - const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val); - ir_mode *largest_mode = arch_register_class_mode(cls); - - /* this is a new spill context */ - ss = obstack_alloc(&ssenv->ob, sizeof(*ss)); - ss->members = pset_new_ptr(8); - ss->largest_mode = largest_mode; - ss->size = get_mode_size_bytes(ss->largest_mode); - ss->align = arch_isa_get_reg_class_alignment(arch_env->isa, cls); - pmap_insert(ssenv->slots, ctx, ss); - } else { - /* values with the same spill_ctx must go into the same spill slot */ - ss = entry->value; - -#ifndef NDEBUG - /* ugly mega assert :-) */ - { - ir_node *irn; - struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env; - const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val); - int size = get_mode_size_bytes(arch_register_class_mode(cls)); - assert((int) ss->size == size && "Different sizes for the same spill slot are not allowed."); - for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) { - /* use values_interfere here, because it uses the dominance check, - which does work for values in memory */ - assert(!values_interfere(spill, irn) && "Spills for the same spill slot must not interfere!"); - } - } -#endif /* NDEBUG */ +void be_insert_spills_reloads(spill_env_t *env) { + const arch_env_t *arch_env = env->arch_env; + int remats = 0; + int reloads = 0; + int spills = 0; + spill_info_t *si; + ir_nodeset_iterator_t iter; + ir_node *node; + + /* create all phi-ms first, this is needed so, that phis, hanging on + spilled phis work correctly */ + foreach_ir_nodeset(&env->mem_phis, node, iter) { + spill_info_t *info = get_spillinfo(env, node); + spill_node(env, info); } - pset_insert_ptr(ss->members, spill); -} - -/** - * qsort compare function, sort 
spill slots by size. - */ -static int ss_sorter(const void *v1, const void *v2) { - const spill_slot_t **ss1 = (const spill_slot_t **)v1; - const spill_slot_t **ss2 = (const spill_slot_t **)v2; - return ((int) (*ss2)->size) - ((int) (*ss1)->size); -} + /* process each spilled node */ + for (si = set_first(env->spills); si; si = set_next(env->spills)) { + reloader_t *rld; + ir_node *spilled_node = si->spilled_node; + ir_mode *mode = get_irn_mode(spilled_node); + ir_node **copies = NEW_ARR_F(ir_node*, 0); + DBG((env->dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", spilled_node)); -/** - * This function should optimize the spill slots. - * - Coalescing of multiple slots - * - Ordering the slots - * - * Input slots are in @p ssenv->slots - * @p size The count of initial spill slots in @p ssenv->slots - * This also is the size of the preallocated array @p ass - * - * @return An array of spill slots @p ass in specific order - **/ -static void optimize_slots(ss_env_t *ssenv, int size, spill_slot_t *ass[]) { - int i, o, used_slots; - pmap_entry *entr; - - i=0; - pmap_foreach(ssenv->slots, entr) - ass[i++] = entr->value; - - /* Sort the array to minimize fragmentation and cache footprint. - Large slots come first */ - qsort(ass, size, sizeof(ass[0]), ss_sorter); - - /* For each spill slot: - - assign a new offset to this slot - - xor find another slot to coalesce with */ - used_slots = 0; - for (i=0; idbg, LEVEL_1, "Spill slot %d members:\n", i)); - for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members)) - DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1)); - - - for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */ - /* check inter-slot-pairs for interference */ - ir_node *n2; - for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members)) - for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members)) - if(values_interfere(n1, n2)) { - pset_break(ass[i]->members); - pset_break(ass[o]->members); - DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2)); - goto interf_detected; - } - - /* if we are here, there is no interference between ass[i] and ass[o] */ - tgt_slot = o; - -interf_detected: /*nothing*/ ; - } + /* go through all reloads for this spill */ + for (rld = si->reloaders; rld; rld = rld->next) { + ir_node *copy; /* a reload is a "copy" of the original value */ + + if (rld->rematted_node != NULL) { + copy = rld->rematted_node; + remats++; + sched_add_before(rld->reloader, copy); + } else if (be_do_remats && rld->allow_remat && + check_remat_conditions(env, spilled_node, rld->reloader)) { + copy = do_remat(env, spilled_node, rld->reloader); + remats++; + } else { + /* make sure we have a spill */ + if (si->spill == NULL) { + spill_node(env, si); + spills++; + } - /* now the members of ass[i] join the members of ass[tgt_slot] */ + /* create a reload */ + copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode, si->spill); + reloads++; + } - /* do we need a new slot? 
*/ - if (tgt_slot == -1) { - tgt_slot = used_slots; - used_slots++; + DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", + copy, spilled_node, rld->reloader)); + ARR_APP1(ir_node*, copies, copy); + } - /* init slot */ - if (tgt_slot != i) { - ass[tgt_slot]->size = ass[i]->size; - del_pset(ass[tgt_slot]->members); - ass[tgt_slot]->members = pset_new_ptr(8); + /* if we had any reloads or remats, then we need to reconstruct the + * SSA form for the spilled value */ + if (ARR_LEN(copies) > 0) { + be_ssa_construction_env_t senv; + /* be_lv_t *lv = be_get_birg_liveness(env->birg); */ + + be_ssa_construction_init(&senv, env->birg); + be_ssa_construction_add_copy(&senv, spilled_node); + be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies)); + be_ssa_construction_fix_users(&senv, spilled_node); + +#if 0 + /* no need to enable this as long as we invalidate liveness + after this function... */ + be_ssa_construction_update_liveness_phis(&senv); + be_liveness_update(spilled_node); + len = ARR_LEN(copies); + for(i = 0; i < len; ++i) { + be_liveness_update(lv, copies[i]); } +#endif + be_ssa_construction_destroy(&senv); } - /* copy the members to the target pset */ - /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */ - if (tgt_slot != i) - for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members)) - pset_insert_ptr(ass[tgt_slot]->members, n1); + DEL_ARR_F(copies); + si->reloaders = NULL; } -} -#define ALIGN_SPILL_AREA 16 -#define pset_foreach(pset, elm) for(elm=pset_first(pset); elm; elm=pset_next(pset)) - -/** - * Returns a spill type for a mode. Keep them in a map to reduce - * the number of types. - * - * @param types a map containing all created types - * @param ss the spill slot - * - * Note that type types should are identical for every mode. - * This rule might break if two different register classes return the same - * mode but different alignments. - */ -static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) { - pmap_entry *e = pmap_find(types, ss->largest_mode); - ir_type *res; - - if (! e) { - char buf[64]; - snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode)); - res = new_type_primitive(new_id_from_str(buf), ss->largest_mode); - set_type_alignment_bytes(res, ss->align); - pmap_insert(types, ss->largest_mode, res); - } else { - res = e->value; - assert(get_type_alignment_bytes(res) == (int)ss->align); +#ifdef FIRM_STATISTICS + if (be_stat_ev_is_active()) { + be_stat_ev("spill_spills", spills); + be_stat_ev("spill_reloads", reloads); + be_stat_ev("spill_remats", remats); } +#endif /* FIRM_STATISTICS */ - return res; -} - -/** - * Create spill slot entities on the frame type. 
- * - * @param ssenv the spill environment - * @param n number of spill slots - * @param ss array of spill slots - */ -static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) { - int i, offset, frame_align; - ir_type *frame = get_irg_frame_type(ssenv->cenv->irg); - - /* aligning by increasing frame size */ - offset = get_type_size_bits(frame) / 8; - offset = round_up2(offset, ALIGN_SPILL_AREA); - set_type_size_bytes(frame, -1); - - /* create entities and assign offsets according to size and alignment*/ - for (i = 0; i < n_slots; ++i) { - char buf[64]; - ident *name; - entity *spill_ent; - ir_node *irn; - - /* build entity */ - snprintf(buf, sizeof(buf), "spill_slot_%d", i); - name = new_id_from_str(buf); - - spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i])); - - /* align */ - offset = round_up2(offset, ss[i]->align); - /* set */ - set_entity_offset_bytes(spill_ent, offset); - /* next possible offset */ - offset += round_up2(ss[i]->size, ss[i]->align); - - pset_foreach(ss[i]->members, irn) - be_set_Spill_entity(irn, spill_ent); - } - - - /* set final size of stack frame */ - frame_align = get_type_alignment_bytes(frame); - set_type_size_bytes(frame, round_up2(offset, frame_align)); -} - -void be_compute_spill_offsets(be_chordal_env_t *cenv) { - ss_env_t ssenv; - spill_slot_t **ss; - int ss_size; - pmap_entry *pme; - - obstack_init(&ssenv.ob); - ssenv.cenv = cenv; - ssenv.slots = pmap_create(); - ssenv.types = pmap_create(); - FIRM_DBG_REGISTER(ssenv.dbg, "ir.be.spillslots"); - - /* Get initial spill slots */ - irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv); - - /* Build an empty array for optimized spill slots */ - ss_size = pmap_count(ssenv.slots); - ss = obstack_alloc(&ssenv.ob, ss_size * sizeof(*ss)); - optimize_slots(&ssenv, ss_size, ss); - - /* Integrate slots into the stack frame entity */ - assign_entities(&ssenv, ss_size, ss); - - /* Clean up */ - pmap_foreach(ssenv.slots, pme) - del_pset(((spill_slot_t *)pme->value)->members); - pmap_destroy(ssenv.slots); - pmap_destroy(ssenv.types); - obstack_free(&ssenv.ob, NULL); - - be_copy_entities_to_reloads(cenv->irg); + be_remove_dead_nodes_from_schedule(env->irg); + /* Matze: In theory be_ssa_construction should take care of the liveness... + * try to disable this again in the future */ + be_invalidate_liveness(env->birg); }
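
The interface reworked in this diff is meant to be driven by the individual spill algorithms (Belady, Morgan, ...). As a rough, non-authoritative orientation, the sketch below shows one possible way a client could use the calls whose signatures appear above (be_new_spill_env, be_spill_phi, be_add_reload, be_insert_spills_reloads, be_delete_spill_env). The driver function itself and its parameters (birg, value, phi, before, cls) are illustrative placeholders and are not part of this commit.

#include "bespill.h"

/* Hypothetical driver: 'value', 'phi', 'before' and 'cls' stand in for
 * whatever decisions a concrete spill algorithm has made. */
static void example_spill_driver(be_irg_t *birg, ir_node *value, ir_node *phi,
                                 ir_node *before,
                                 const arch_register_class_t *cls)
{
	spill_env_t *senv = be_new_spill_env(birg);

	/* mark a Phi whose result should live in memory; spill infos for its
	 * arguments are created as well (see be_spill_phi above) */
	be_spill_phi(senv, phi);

	/* request 'value' in a register again in front of 'before'; with
	 * be_do_remats enabled and allow_remat set, do_remat() may be used
	 * instead of a reload if check_remat_conditions_costs() stays below
	 * REMAT_COST_LIMIT */
	be_add_reload(senv, value, before, cls, /*allow_remat=*/ 1);

	/* materialize all collected spills, reloads and remats and rebuild
	 * SSA form for the affected values */
	be_insert_spills_reloads(senv);

	be_delete_spill_env(senv);
}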