+/*
+ * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
/**
- * Author: Daniel Grund, Sebastian Hack
- * Date: 29.09.2005
- * Copyright: (c) Universitaet Karlsruhe
- * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
+ * @file
+ * @brief implementation of the spill/reload placement abstraction layer
+ * @author Daniel Grund, Sebastian Hack, Matthias Braun
+ * @date 29.09.2005
+ * @version $Id$
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#include "irnode_t.h"
#include "ircons_t.h"
#include "iredges_t.h"
+#include "irbackedge_t.h"
+#include "irprintf.h"
#include "ident_t.h"
#include "type_t.h"
#include "entity_t.h"
#include "irgwalk.h"
#include "array.h"
#include "pdeq.h"
+#include "execfreq.h"
+#include "irnodeset.h"
+#include "bearch_t.h"
#include "belive_t.h"
#include "besched_t.h"
#include "bespill.h"
+#include "belive_t.h"
#include "benode_t.h"
#include "bechordal_t.h"
+#include "bejavacoal.h"
+#include "benodesets.h"
+#include "bespilloptions.h"
+#include "bestatevent.h"
+#include "bessaconstr.h"
+#include "beirg_t.h"
+#include "beintlive_t.h"
+#include "bemodule.h"
-#define REMAT
-/* This enables re-computation of values. Current state: Unfinished and buggy. */
-#undef BUGGY_REMAT
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-typedef struct _reloader_t reloader_t;
-typedef struct _spill_info_t spill_info_t;
+#define REMAT_COST_INFINITE 1000
-struct _reloader_t {
+typedef struct reloader_t reloader_t;
+struct reloader_t {
reloader_t *next;
- ir_node *reloader;
+ ir_node *reloader;
+ ir_node *rematted_node;
+	int              remat_cost_delta; /**< costs needed for rematerialization,
+ compared to placing a reload */
};
-struct _spill_info_t {
- ir_node *spilled_node;
- reloader_t *reloaders;
+typedef struct spill_info_t spill_info_t;
+struct spill_info_t {
+ ir_node *to_spill; /**< the value that should get spilled */
+ reloader_t *reloaders; /**< list of places where the value should get
+ reloaded */
+ ir_node *spill; /**< the spill node, or a PhiM node */
+ ir_node *old_spill; /**< if we had the value of a phi spilled before but
+ not the phi itself then this field contains the
+ spill for the phi value */
+	const arch_register_class_t *reload_cls; /**< the register class in which the
+ reload should be placed */
};
-typedef struct _spill_ctx_t {
- ir_node *spilled; /**< The spilled node. */
- ir_node *user; /**< The node this spill is for. */
- ir_node *spill; /**< The spill itself. */
-} spill_ctx_t;
-
-struct _spill_env_t {
- const arch_register_class_t *cls;
- const be_chordal_env_t *chordal_env;
- struct obstack obst;
- set *spill_ctxs;
- set *spills; /**< all spill_info_t's, which must be placed */
- pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */
- decide_irn_t is_spilled_phi;/**< callback func to decide if a phi needs special spilling */
- void *data; /**< data passed to all callbacks */
- DEBUG_ONLY(firm_dbg_module_t *dbg;)
+struct spill_env_t {
+ const arch_env_t *arch_env;
+ ir_graph *irg;
+ struct obstack obst;
+ be_irg_t *birg;
+ int spill_cost; /**< the cost of a single spill node */
+ int reload_cost; /**< the cost of a reload node */
+ set *spills; /**< all spill_info_t's, which must be
+ placed */
+ ir_nodeset_t mem_phis; /**< set of all spilled phis. */
+ ir_exec_freq *exec_freq;
+
+#ifdef FIRM_STATISTICS
+ unsigned spill_count;
+ unsigned reload_count;
+ unsigned remat_count;
+ unsigned spilled_phi_count;
+#endif
};
-/* associated Phi -> Spill*/
-typedef struct _phi_spill_assoc_t {
- ir_node *phi;
- ir_node *spill;
-} phi_spill_assoc_t;
-
/**
- * Compare two Phi->Spill associations.
+ * Compare two spill infos.
*/
-static int cmp_phi_spill_assoc(const void *a, const void *b, size_t n) {
- const phi_spill_assoc_t *p1 = a;
- const phi_spill_assoc_t *p2 = b;
- return p1->phi != p2->phi;
+static
+int cmp_spillinfo(const void *x, const void *y, size_t size)
+{
+ const spill_info_t *xx = x;
+ const spill_info_t *yy = y;
+ return xx->to_spill != yy->to_spill;
}
/**
- * compare two spill contexts.
+ * Returns spill info for a specific value (the value that is to be spilled)
*/
-static int cmp_spillctx(const void *a, const void *b, size_t n) {
- const spill_ctx_t *p = a;
- const spill_ctx_t *q = b;
- return p->user != q->user || p->spilled != q->spilled;
-}
+static
+spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
+{
+ spill_info_t info, *res;
+ int hash = nodeset_hash(value);
+
+ info.to_spill = value;
+ res = set_find(env->spills, &info, sizeof(info), hash);
+
+ if (res == NULL) {
+ info.reloaders = NULL;
+ info.spill = NULL;
+ info.old_spill = NULL;
+ info.reload_cls = NULL;
+ res = set_insert(env->spills, &info, sizeof(info), hash);
+ }
-/**
- * Compare two spill infos.
- */
-static int cmp_spillinfo(const void *x, const void *y, size_t size) {
- const spill_info_t *xx = x;
- const spill_info_t *yy = y;
- return xx->spilled_node != yy->spilled_node;
+ return res;
}
-DEBUG_ONLY(
-/* Sets the debug module of a spill environment. */
-void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
- env->dbg = dbg;
-}
-)
+spill_env_t *be_new_spill_env(be_irg_t *birg)
+{
+ const arch_env_t *arch_env = birg->main_env->arch_env;
-/* Creates a new spill environment. */
-spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env, decide_irn_t is_spilled_phi, void *data) {
spill_env_t *env = xmalloc(sizeof(env[0]));
- env->spill_ctxs = new_set(cmp_spillctx, 1024);
env->spills = new_set(cmp_spillinfo, 1024);
- env->cls = chordal_env->cls;
- env->is_spilled_phi = is_spilled_phi;
- env->data = data;
- env->chordal_env = chordal_env;
+ env->irg = be_get_birg_irg(birg);
+ env->birg = birg;
+ env->arch_env = arch_env;
+ ir_nodeset_init(&env->mem_phis);
+ env->spill_cost = arch_env->isa->spill_cost;
+ env->reload_cost = arch_env->isa->reload_cost;
+ env->exec_freq = be_get_birg_exec_freq(birg);
obstack_init(&env->obst);
+
+#ifdef FIRM_STATISTICS
+ env->spill_count = 0;
+ env->reload_count = 0;
+ env->remat_count = 0;
+ env->spilled_phi_count = 0;
+#endif
+
return env;
}
-void be_set_is_spilled_phi(spill_env_t *env, decide_irn_t is_spilled_phi, void *data) {
- env->is_spilled_phi = is_spilled_phi;
- env->data = data;
+void be_delete_spill_env(spill_env_t *env)
+{
+ del_set(env->spills);
+ ir_nodeset_destroy(&env->mem_phis);
+ obstack_free(&env->obst, NULL);
+ free(env);
}
-/* Deletes a spill environment. */
-void be_delete_spill_env(spill_env_t *senv) {
- del_set(senv->spill_ctxs);
- del_set(senv->spills);
- obstack_free(&senv->obst, NULL);
- free(senv);
+/*
+ * ____ _ ____ _ _
+ * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___
+ * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
+ * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \
+ * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
+ *
+ */
+
+void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
+ ir_node *rematted_node)
+{
+ spill_info_t *spill_info;
+ reloader_t *reloader;
+
+ spill_info = get_spillinfo(env, to_spill);
+
+ /* add the remat information */
+ reloader = obstack_alloc(&env->obst, sizeof(reloader[0]));
+ reloader->next = spill_info->reloaders;
+ reloader->reloader = before;
+ reloader->rematted_node = rematted_node;
+ reloader->remat_cost_delta = 0; /* We will never have a cost win over a
+ reload since we're not even allowed to
+ create a reload */
+
+ spill_info->reloaders = reloader;
+
+ DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
+ to_spill, before));
+}
+
+void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before,
+ const arch_register_class_t *reload_cls, int allow_remat)
+{
+ spill_info_t *info;
+ reloader_t *rel;
+
+ info = get_spillinfo(env, to_spill);
+
+ if (is_Phi(to_spill)) {
+ int i, arity;
+
+ /* create spillinfos for the phi arguments */
+ for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
+ ir_node *arg = get_irn_n(to_spill, i);
+ get_spillinfo(env, arg);
+ }
+
+#if 1
+ /* hackery... sometimes the morgan algo spilled the value of a phi,
+ * the belady algo decides later to spill the whole phi, then sees the
+ * spill node and adds a reload for that spill node, problem is the
+	 	 * reload gets attached to that same spill (and is totally unnecessary)
+ */
+ if (info->old_spill != NULL &&
+ (before == info->old_spill || value_dominates(before, info->old_spill)))
+ {
+ printf("spilledphi hack was needed...\n");
+ before = sched_next(info->old_spill);
+ }
+#endif
+ }
+
+ assert(!is_Proj(before) && !be_is_Keep(before));
+
+ /* put reload into list */
+ rel = obstack_alloc(&env->obst, sizeof(rel[0]));
+ rel->next = info->reloaders;
+ rel->reloader = before;
+ rel->rematted_node = NULL;
+ if(!allow_remat) {
+ rel->remat_cost_delta = REMAT_COST_INFINITE;
+ } else {
+ rel->remat_cost_delta = 0;
+ }
+
+ info->reloaders = rel;
+ assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
+ info->reload_cls = reload_cls;
+
+ DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
+ to_spill, before, allow_remat ? "" : " not"));
}
/**
- * Returns a spill context. If the context did not exists, create one.
- *
- * @param sc the set containing all spill contexts
- * @param to_spill the node that should be spilled
- * @param ctx_irn an user of the spilled node
- *
- * @return a spill context.
+ * Returns the point at which you can insert a node that should be executed
+ * before block @p block when coming from pred @p pos.
+ */
+static
+ir_node *get_block_insertion_point(ir_node *block, int pos)
+{
+ ir_node *predblock, *last;
+
+ /* simply add the reload to the beginning of the block if we only have 1
+ * predecessor. We don't need to check for phis as there can't be any in a
+ * block with only 1 pred. */
+ if(get_Block_n_cfgpreds(block) == 1) {
+ assert(!is_Phi(sched_first(block)));
+ return sched_first(block);
+ }
+
+ /* We have to reload the value in pred-block */
+ predblock = get_Block_cfgpred_block(block, pos);
+ last = sched_last(predblock);
+
+ /* we might have projs and keepanys behind the jump... */
+ while(is_Proj(last) || be_is_Keep(last)) {
+ last = sched_prev(last);
+ assert(!sched_is_end(last));
+ }
+
+ if(!is_cfop(last)) {
+ last = sched_next(last);
+ /* last node must be a cfop, only exception is the start block */
+ assert(last == get_irg_start_block(get_irn_irg(block)));
+ }
+
+ /* add the reload before the (cond-)jump */
+ return last;
+}
+
+void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
+ int pos, const arch_register_class_t *reload_cls,
+ int allow_remat)
+{
+ ir_node *before = get_block_insertion_point(block, pos);
+ be_add_reload(env, to_spill, before, reload_cls, allow_remat);
+}
+
+void be_spill_phi(spill_env_t *env, ir_node *node)
+{
+ spill_info_t* spill;
+ int i, arity;
+
+ assert(is_Phi(node));
+
+ ir_nodeset_insert(&env->mem_phis, node);
+
+ /* create spillinfos for the phi arguments */
+ spill = get_spillinfo(env, node);
+ for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+ ir_node *arg = get_irn_n(node, i);
+ get_spillinfo(env, arg);
+ }
+
+ /* if we had a spill for the phi value before, then remove this spill from
+ * schedule, as we will remove it in the insert spill/reload phase
+ */
+ if(spill->spill != NULL && !is_Phi(spill->spill)) {
+ assert(spill->old_spill == NULL);
+ spill->old_spill = spill->spill;
+ spill->spill = NULL;
+ }
+}
+
+/*
+ * ____ _ ____ _ _ _
+ * / ___|_ __ ___ __ _| |_ ___ / ___| _ __ (_) | |___
+ * | | | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
+ * | |___| | | __/ (_| | || __/ ___) | |_) | | | \__ \
+ * \____|_| \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
+ * |_|
*/
-static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
- spill_ctx_t templ;
- templ.spilled = to_spill;
- templ.user = ctx_irn;
- templ.spill = NULL;
+/**
+ * Schedules a node after an instruction. That is the place after all projs and
+ * phis that are scheduled after the instruction. This function also skips phi
+ * nodes at the beginning of a block
+ */
+static
+void sched_add_after_insn(ir_node *sched_after, ir_node *node)
+{
+ ir_node *next = sched_next(sched_after);
+ while(is_Proj(next) || is_Phi(next) || be_is_Keep(next)) {
+ next = sched_next(next);
+ }
+ assert(next != NULL);
- return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
+ if(sched_is_end(next)) {
+ sched_add_after(sched_last(get_nodes_block(sched_after)), node);
+ } else {
+ sched_add_before(next, node);
+ }
}
/**
*
* @return a be_Spill node
*/
-static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
- spill_ctx_t *ctx;
- const be_main_env_t *env = senv->chordal_env->birg->main_env;
- DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
+static
+void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
+{
+ optimization_state_t opt;
+ ir_node *to_spill = spillinfo->to_spill;
- // Has the value already been spilled?
- ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
- if(ctx->spill)
- return ctx->spill;
+ DBG((dbg, LEVEL_1, "spilling %+F ... ", to_spill));
/* Trying to spill an already spilled value, no need for a new spill
* node then, we can simply connect to the same one for this reload
+ *
+ * Normally reloads get simply rematerialized instead of spilled again; this
+	 * can happen anyway when the reload is the pred of a phi to spill.
*/
- if(be_is_Reload(irn)) {
- return get_irn_n(irn, be_pos_Reload_mem);
+ if (be_is_Reload(to_spill)) {
+ spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem);
+ DB((dbg, LEVEL_1, "skip reload, using existing spill %+F\n", spillinfo->spill));
+ return;
+ }
+
+ assert(!(arch_irn_is(env->arch_env, to_spill, dont_spill)
+ && "Attempt to spill a node marked 'dont_spill'"));
+
+ /* some backends have virtual noreg/unknown nodes that are not scheduled */
+ if(!sched_is_scheduled(to_spill)) {
+ spillinfo->spill = new_NoMem();
+ return;
}
- ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
- return ctx->spill;
+
+ /*
+ * We switch on optimizations here to get CSE. This is needed as the STA
+	 * backends have some extra spill phases and we want to make use of those
+ * spills instead of creating new ones.
+ */
+ save_optimization_state(&opt);
+ set_optimize(1);
+ spillinfo->spill = be_spill(env->arch_env, to_spill);
+ restore_optimization_state(&opt);
+ if (! sched_is_scheduled(spillinfo->spill)) {
+ DB((dbg, LEVEL_1, "add spill %+F after %+F\n", spillinfo->spill, to_spill));
+#ifdef FIRM_STATISTICS
+ env->spill_count++;
+#endif
+ sched_add_after_insn(to_spill, spillinfo->spill);
+ } else {
+ DB((dbg, LEVEL_1, "re-using spill %+F after %+F\n", spillinfo->spill, to_spill));
+ }
}
+static
+void spill_node(spill_env_t *env, spill_info_t *spillinfo);
+
/**
* If the first usage of a Phi result would be out of memory
* there is no sense in allocating a register for it.
* @param senv the spill environment
* @param phi the Phi node that should be spilled
* @param ctx_irn an user of the spilled node
- *
- * @return a be_Spill node
*/
-static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, unsigned visited_nr, set *already_visited_phis) {
- int i, n = get_irn_arity(phi);
- ir_graph *irg = senv->chordal_env->irg;
- ir_node *bl = get_nodes_block(phi);
- ir_node **ins, *phi_spill;
- phi_spill_assoc_t key;
- spill_ctx_t *ctx;
+static
+void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
+{
+ ir_node *phi = spillinfo->to_spill;
+ int i;
+ int arity = get_irn_arity(phi);
+ ir_node *block = get_nodes_block(phi);
+ ir_node **ins;
assert(is_Phi(phi));
- DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
+ DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
/* build a new PhiM */
- NEW_ARR_A(ir_node *, ins, n);
- for (i = 0; i < n; ++i) {
- ins[i] = new_r_Bad(irg);
+ ins = alloca(sizeof(ir_node*) * arity);
+ for(i = 0; i < arity; ++i) {
+ ins[i] = get_irg_bad(env->irg);
}
- phi_spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);
- key.phi = phi;
- key.spill = phi_spill;
- set_insert(already_visited_phis, &key, sizeof(key), HASH_PTR(phi));
-
- /* search an existing spill for this context */
- ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
-
- /* if not found spill the phi */
- if (! ctx->spill) {
- set_irn_visited(phi, visited_nr);
-
- /* collect all arguments of the phi */
- for (i = 0; i < n; ++i) {
- ir_node *arg = get_irn_n(phi, i);
- ir_node *sub_res;
- phi_spill_assoc_t *entry;
-
- if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) {
- if (get_irn_visited(arg) < visited_nr)
- sub_res = be_spill_phi(senv, arg, ctx_irn, visited_nr, already_visited_phis);
- else {
- /* we already visited the argument phi: get it's spill */
- key.phi = arg;
- key.spill = NULL;
- entry = set_find(already_visited_phis, &key, sizeof(key), HASH_PTR(arg));
- assert(entry && "argument phi already visited, but no spill found?!?");
- sub_res = entry->spill;
- assert(sub_res && "spill missing?!?");
- }
- }
- else
- sub_res = be_spill_irn(senv, arg, ctx_irn);
+ spillinfo->spill = new_r_Phi(env->irg, block, arity, ins, mode_M);
+#ifdef FIRM_STATISTICS
+ env->spilled_phi_count++;
+#endif
- set_irn_n(phi_spill, i, sub_res);
- }
+ for(i = 0; i < arity; ++i) {
+ ir_node *arg = get_irn_n(phi, i);
+ spill_info_t *arg_info = get_spillinfo(env, arg);
- ctx->spill = phi_spill;
+ spill_node(env, arg_info);
+
+ set_irn_n(spillinfo->spill, i, arg_info->spill);
+ }
+ DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi, spillinfo->spill));
+
+ /* rewire reloads from old_spill to phi */
+ if (spillinfo->old_spill != NULL) {
+ const ir_edge_t *edge, *next;
+ ir_node *old_spill = spillinfo->old_spill;
+
+ DBG((dbg, LEVEL_1, "old spill found, rewiring reloads:\n"));
+
+ foreach_out_edge_safe(old_spill, edge, next) {
+ ir_node *reload = get_edge_src_irn(edge);
+ int pos = get_edge_src_pos(edge);
+
+ DBG((dbg, LEVEL_1, "\tset input %d of %+F to %+F\n", pos, reload, spillinfo->spill));
+
+ assert(be_is_Reload(reload) || is_Phi(reload));
+ set_irn_n(reload, pos, spillinfo->spill);
+ }
+ DBG((dbg, LEVEL_1, "\tset input of %+F to BAD\n", old_spill));
+ set_irn_n(old_spill, be_pos_Spill_val, new_Bad());
+ /* sched_remove(old_spill); */
+ spillinfo->old_spill = NULL;
}
- return ctx->spill;
}
/**
*
* @param senv the spill environment
* @param to_spill the node that should be spilled
- *
- * @return a be_Spill node
*/
-static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill, unsigned visited_nr) {
- ir_graph *irg = get_irn_irg(to_spill);
- int save_optimize = get_optimize();
- int save_normalize = get_opt_normalize();
- set *already_visited_phis = new_set(cmp_phi_spill_assoc, 10);
- ir_node *res;
-
- /*
- * Disable optimization so that the phi functions do not
- * disappear.
- */
- set_optimize(0);
- set_opt_normalize(0);
-
- if (pset_find_ptr(senv->mem_phis, to_spill))
- res = be_spill_phi(senv, to_spill, to_spill, visited_nr, already_visited_phis);
- else
- res = be_spill_irn(senv, to_spill, to_spill);
+static
+void spill_node(spill_env_t *env, spill_info_t *spillinfo)
+{
+ ir_node *to_spill;
- del_set(already_visited_phis);
+ /* the node should be tagged for spilling already... */
+ if(spillinfo->spill != NULL)
+ return;
- /* reset the optimizations */
- set_optimize(save_optimize);
- set_opt_normalize(save_normalize);
+ to_spill = spillinfo->to_spill;
- return res;
+ if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
+ spill_phi(env, spillinfo);
+ } else {
+ spill_irn(env, spillinfo);
+ }
}
-#ifdef REMAT
-
-#ifdef BUGGY_REMAT
-
-/**
- * Check if a spilled node could be rematerialized.
+/*
+ *
+ * ____ _ _ _ _
+ * | _ \ ___ _ __ ___ __ _| |_ ___ _ __(_) __ _| (_)_______
+ * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_ / _ \
+ * | _ < __/ | | | | | (_| | || __/ | | | (_| | | |/ / __/
+ * |_| \_\___|_| |_| |_|\__,_|\__\___|_| |_|\__,_|_|_/___\___|
*
- * @param senv the spill environment
- * @param spill the Spill node
- * @param spilled the node that was spilled
- * @param reloader a irn that requires a reload
*/
-static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
- int pos, max;
- /* check for 'normal' spill and general remat condition */
- if (!be_is_Spill(spill) || !arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable))
- return 0;
+/**
+ * Tests whether value @p arg is available before node @p reloader
+ * @returns 1 if value is available, 0 otherwise
+ */
+static
+int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader)
+{
+ if(is_Unknown(arg) || arg == new_NoMem())
+ return 1;
- /* check availability of original arguments */
- if (is_Block(reloader)) {
+ if(be_is_Spill(arg))
+ return 1;
- /* we want to remat at the end of a block.
- * thus all arguments must be alive at the end of the block
- */
- for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
- ir_node *arg = get_irn_n(spilled, pos);
- if (!is_live_end(reloader, arg))
- return 0;
- }
+ if(arg == get_irg_frame(env->irg))
+ return 1;
- } else {
+ /* hack for now (happens when command should be inserted at end of block) */
+ if(is_Block(reloader)) {
+ return 0;
+ }
- /* we want to remat before the insn reloader
- * thus an arguments is alive if
- * - it interferes with the reloaders result
- * or
- * - or it is (last-) used by reloader itself
- */
- for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
- ir_node *arg = get_irn_n(spilled, pos);
- int i, m;
+ /*
+ * Ignore registers are always available
+ */
+ if(arch_irn_is(env->arch_env, arg, ignore)) {
+ return 1;
+ }
- if (values_interfere(reloader, arg))
- goto is_alive;
+ /* the following test does not work while spilling,
+ * because the liveness info is not adapted yet to the effects of the
+ * additional spills/reloads.
+ */
+#if 0
+ /* we want to remat before the insn reloader
+ * thus an arguments is alive if
+ * - it interferes with the reloaders result
+ * - or it is (last-) used by reloader itself
+ */
+ if (values_interfere(env->birg->lv, reloader, arg)) {
+ return 1;
+ }
- for (i=0, m=get_irn_arity(reloader); i<m; ++i) {
- ir_node *rel_arg = get_irn_n(reloader, i);
- if (rel_arg == arg)
- goto is_alive;
- }
+ arity = get_irn_arity(reloader);
+ for (i = 0; i < arity; ++i) {
+ ir_node *rel_arg = get_irn_n(reloader, i);
+ if (rel_arg == arg)
+ return 1;
+ }
+#endif
- /* arg is not alive before reloader */
- return 0;
+ return 0;
+}
-is_alive: ;
+/**
+ * Checks whether the node can principally be rematerialized
+ */
+static
+int is_remat_node(spill_env_t *env, ir_node *node)
+{
+ const arch_env_t *arch_env = env->arch_env;
- }
+ assert(!be_is_Spill(node));
- }
+ if(arch_irn_is(arch_env, node, rematerializable))
+ return 1;
- return 1;
+ return 0;
}
-#else /* BUGGY_REMAT */
-
/**
- * A very simple rematerialization checker.
+ * Check if a node is rematerializable. This tests for the following conditions:
*
- * @param senv the spill environment
- * @param spill the Spill node
- * @param spilled the node that was spilled
- * @param reloader a irn that requires a reload
+ * - The node itself is rematerializable
+ * - All arguments of the node are available or also rematerialisable
+ * - The costs for the rematerialisation operation is less or equal a limit
+ *
+ * Returns the costs needed for rematerialisation or something
+ * >= REMAT_COST_INFINITE if remat is not possible.
*/
-static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
- const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
+static
+int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled,
+ ir_node *reloader, int parentcosts)
+{
+ int i, arity;
+ int argremats;
+ int costs = 0;
+
+ if(!is_remat_node(env, spilled))
+ return REMAT_COST_INFINITE;
+
+ if(be_is_Reload(spilled)) {
+ costs += 2;
+ } else {
+ costs += arch_get_op_estimated_cost(env->arch_env, spilled);
+ }
+ if(parentcosts + costs >= env->reload_cost + env->spill_cost) {
+ return REMAT_COST_INFINITE;
+ }
- return get_irn_arity(spilled) == 0 &&
- be_is_Spill(spill) &&
- arch_irn_is(aenv, spilled, rematerializable);
-}
+ argremats = 0;
+ for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
+ ir_node *arg = get_irn_n(spilled, i);
+
+ if(is_value_available(env, arg, reloader))
+ continue;
-#endif /* BUGGY_REMAT */
+ /* we have to rematerialize the argument as well */
+ if(argremats >= 1) {
+ /* we only support rematerializing 1 argument at the moment,
+ * so that we don't have to care about register pressure
+ */
+ return REMAT_COST_INFINITE;
+ }
+ argremats++;
+
+ costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs);
+ if(parentcosts + costs >= env->reload_cost + env->spill_cost)
+ return REMAT_COST_INFINITE;
+ }
-#endif /* REMAT */
+ return costs;
+}
/**
* Re-materialize a node.
* @param spilled the node that was spilled
* @param reloader a irn that requires a reload
*/
-static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
+static
+ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
+{
+ int i, arity;
ir_node *res;
- ir_node *bl = (is_Block(reloader)) ? reloader : get_nodes_block(reloader);
-
- /* recompute the value */
- res = new_ir_node(get_irn_dbg_info(spilled), senv->chordal_env->irg, bl,
- get_irn_op(spilled),
- get_irn_mode(spilled),
- get_irn_arity(spilled),
- get_irn_in(spilled) + 1);
- copy_node_attr(spilled, res);
-
- DBG((senv->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader));
+ ir_node *bl;
+ ir_node **ins;
- /* insert in schedule */
- if (is_Block(reloader)) {
- ir_node *insert = sched_skip(reloader, 0, sched_skip_cf_predicator, (void *) senv->chordal_env->birg->main_env->arch_env);
- sched_add_after(insert, res);
+ if(is_Block(reloader)) {
+ bl = reloader;
} else {
- sched_add_before(reloader, res);
- }
-
- return res;
-}
-
-/**
- * Walker: fills the mem_phis set by evaluating Phi nodes
- * using the is_spilled_phi() callback.
- */
-static void phi_walker(ir_node *irn, void *env) {
- spill_env_t *senv = env;
-
- if (is_Phi(irn)) {
- const arch_env_t *arch = senv->chordal_env->birg->main_env->arch_env;
- if (arch_irn_has_reg_class(arch, irn, 0, senv->cls) &&
- senv->is_spilled_phi(irn, senv->data)) {
- DBG((senv->dbg, LEVEL_1, " %+F\n", irn));
- pset_insert_ptr(senv->mem_phis, irn);
- }
+ bl = get_nodes_block(reloader);
}
-}
-void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
- const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
- ir_graph *irg = senv->chordal_env->irg;
- unsigned visited_nr;
- ir_node *irn;
- spill_info_t *si;
-
- /* get all special spilled phis */
- DBG((senv->dbg, LEVEL_1, "Mem-phis:\n"));
- senv->mem_phis = pset_new_ptr_default();
- irg_walk_graph(senv->chordal_env->irg, phi_walker, NULL, senv);
-
- /* Add reloads for mem_phis */
- /* BETTER: These reloads (1) should only be inserted, if they are really needed */
- DBG((senv->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
- for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
- const ir_edge_t *e;
- DBG((senv->dbg, LEVEL_1, " Mem-phi %+F\n", irn));
- foreach_out_edge(irn, e) {
- ir_node *user = e->src;
- if (is_Phi(user) && !pset_find_ptr(senv->mem_phis, user)) {
- ir_node *use_bl = get_nodes_block(user);
- DBG((senv->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
- be_add_reload_on_edge(senv, irn, use_bl, e->pos); /* (1) */
- }
+ ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
+ for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
+ ir_node *arg = get_irn_n(spilled, i);
+
+ if(is_value_available(env, arg, reloader)) {
+ ins[i] = arg;
+ } else {
+ ins[i] = do_remat(env, arg, reloader);
+#ifdef FIRM_STATISTICS
+ /* don't count the recursive call as remat */
+ env->remat_count--;
+#endif
}
}
- visited_nr = get_irg_visited(irg) + 1;
- set_irg_visited(irg, visited_nr);
-
- /* process each spilled node */
- DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
- for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
- reloader_t *rld;
- ir_mode *mode = get_irn_mode(si->spilled_node);
- //ir_node *value;
- pset *values = pset_new_ptr(16);
-
- /* go through all reloads for this spill */
- for(rld = si->reloaders; rld; rld = rld->next) {
- ir_node *new_val;
+ /* create a copy of the node */
+ res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
+ get_irn_op(spilled), get_irn_mode(spilled),
+ get_irn_arity(spilled), ins);
+ copy_node_attr(spilled, res);
+ new_backedge_info(res);
+ sched_reset(res);
- /* the spill for this reloader */
- ir_node *spill = be_spill_node(senv, si->spilled_node, visited_nr);
+ DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
-#ifdef REMAT
- if (check_remat_conditions(senv, spill, si->spilled_node, rld->reloader)) {
- new_val = do_remat(senv, si->spilled_node, rld->reloader);
- //pdeq_putl(possibly_dead, spill);
- }
- else
+ /* insert in schedule */
+ sched_add_before(reloader, res);
+#ifdef FIRM_STATISTICS
+ env->remat_count++;
#endif
- /* do a reload */
- new_val = be_reload(aenv, senv->cls, rld->reloader, mode, spill);
- DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
- pset_insert_ptr(values, new_val);
- if(reload_set)
- pset_insert_ptr(reload_set, new_val);
- }
-
- /* introduce copies, rewire the uses */
- assert(pset_count(values) > 0 && "???");
- pset_insert_ptr(values, si->spilled_node);
- be_ssa_constr_set_ignore(senv->chordal_env->dom_front, values, senv->mem_phis);
-
- del_pset(values);
- }
-
- del_pset(senv->mem_phis);
+ return res;
+}
- be_remove_dead_nodes_from_schedule(senv->chordal_env->irg);
+double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *after)
+{
+ ir_node *block = get_nodes_block(after);
+ double freq = get_block_execfreq(env->exec_freq, block);
- // reloads are placed now, but we might reuse the spill environment for further spilling decisions
- del_set(senv->spills);
- senv->spills = new_set(cmp_spillinfo, 1024);
+ return env->spill_cost * freq;
}
-void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
- spill_info_t templ, *res;
- reloader_t *rel;
+double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
+{
+ ir_node *block = get_nodes_block(before);
+ double freq = get_block_execfreq(env->exec_freq, block);
- templ.spilled_node = to_spill;
- templ.reloaders = NULL;
- res = set_insert(senv->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
+ if(be_do_remats) {
+ /* is the node rematerializable? */
+ int costs = check_remat_conditions_costs(env, to_spill, before, 0);
+ if(costs < env->reload_cost)
+ return costs * freq;
+ }
- rel = obstack_alloc(&senv->obst, sizeof(rel[0]));
- rel->reloader = before;
- rel->next = res->reloaders;
- res->reloaders = rel;
+ return env->reload_cost * freq;
}
-void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos) {
- ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
- be_add_reload(senv, to_spill, insert_bl);
+double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
+ ir_node *block, int pos)
+{
+ ir_node *before = get_block_insertion_point(block, pos);
+ return be_get_reload_costs(env, to_spill, before);
}
+/*
+ * ___ _ ____ _ _
+ * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
+ * | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
+ * | || | | \__ \ __/ | | |_ | _ < __/ | (_) | (_| | (_| \__ \
+ * |___|_| |_|___/\___|_| \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
+ *
+ */
+void be_insert_spills_reloads(spill_env_t *env)
+{
+ const arch_env_t *arch_env = env->arch_env;
+ const ir_exec_freq *exec_freq = env->exec_freq;
+ spill_info_t *si;
+ ir_nodeset_iterator_t iter;
+ ir_node *node;
+
+	/* create all phi-ms first; this is needed so that phis hanging on
+	   spilled phis work correctly */
+ foreach_ir_nodeset(&env->mem_phis, node, iter) {
+ spill_info_t *info = get_spillinfo(env, node);
+ spill_node(env, info);
+ }
-/****************************************
-
- SPILL SLOT MANAGEMENT AND OPTS
-
-****************************************/
-
-typedef struct _spill_slot_t {
- unsigned size;
- unsigned align;
- pset *members;
- ir_mode *largest_mode; /* the mode of all members with largest size */
-} spill_slot_t;
-
-typedef struct _ss_env_t {
- struct obstack ob;
- be_chordal_env_t *cenv;
- pmap *slots; /* maps spill_contexts to spill_slots */
- pmap *types; /* maps modes to types */
- DEBUG_ONLY(firm_dbg_module_t *dbg;)
-} ss_env_t;
-
-
-/**
- * Walker: compute the spill slots
- */
-static void compute_spill_slots_walker(ir_node *spill, void *env) {
- ss_env_t *ssenv = env;
- ir_node *ctx;
- pmap_entry *entry;
- spill_slot_t *ss;
+ /* process each spilled node */
+ for (si = set_first(env->spills); si; si = set_next(env->spills)) {
+ reloader_t *rld;
+ ir_node *to_spill = si->to_spill;
+ ir_mode *mode = get_irn_mode(to_spill);
+ ir_node **copies = NEW_ARR_F(ir_node*, 0);
+		double all_remat_costs = 0; /* costs if we were to remat all reloaders */
+ int force_remat = 0;
+
+ DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
+
+ /* determine possibility of rematerialisations */
+ if(be_do_remats) {
+ for (rld = si->reloaders; rld != NULL; rld = rld->next) {
+ double freq;
+ int remat_cost;
+ int remat_cost_delta;
+ ir_node *block;
+ ir_node *reloader = rld->reloader;
+
+ if(rld->rematted_node != NULL) {
+ DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
+ rld->rematted_node, reloader));
+ continue;
+ }
+ if(rld->remat_cost_delta >= REMAT_COST_INFINITE) {
+ DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
+ reloader));
+ all_remat_costs = REMAT_COST_INFINITE;
+ continue;
+ }
- if (!be_is_Spill(spill))
- return;
+ remat_cost = check_remat_conditions_costs(env, to_spill,
+ reloader, 0);
+ if(remat_cost >= REMAT_COST_INFINITE) {
+ DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
+ reloader));
+ rld->remat_cost_delta = REMAT_COST_INFINITE;
+ all_remat_costs = REMAT_COST_INFINITE;
+ continue;
+ }
- /* check, if this spill is for a context already known */
- ctx = be_get_Spill_context(spill);
- entry = pmap_find(ssenv->slots, ctx);
-
- if (!entry) {
- struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
- const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
- ir_mode *largest_mode = arch_register_class_mode(cls);
-
- /* this is a new spill context */
- ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
- ss->members = pset_new_ptr(8);
- ss->largest_mode = largest_mode;
- ss->size = get_mode_size_bytes(ss->largest_mode);
- ss->align = arch_isa_get_reg_class_alignment(arch_env->isa, cls);
- pmap_insert(ssenv->slots, ctx, ss);
- } else {
- /* values with the same spill_ctx must go into the same spill slot */
- ss = entry->value;
+ remat_cost_delta = remat_cost - env->reload_cost;
+ rld->remat_cost_delta = remat_cost_delta;
+ block = get_nodes_block(reloader);
+ freq = get_block_execfreq(exec_freq, block);
+ all_remat_costs += remat_cost_delta * freq;
+ DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
+ "%d (rel %f)\n", reloader, remat_cost_delta,
+ remat_cost_delta * freq));
+ }
+ if(all_remat_costs < REMAT_COST_INFINITE) {
+ ir_node *block = get_nodes_block(to_spill);
+ double freq = get_block_execfreq(exec_freq, block);
+ /* we don't need the costs for the spill if we can remat
+ all reloaders */
+ all_remat_costs -= env->spill_cost * freq;
+
+ DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
+ env->spill_cost, env->spill_cost * freq));
+ }
-#ifndef NDEBUG
- /* ugly mega assert :-) */
- {
- ir_node *irn;
- struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
- const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
- int size = get_mode_size_bytes(arch_register_class_mode(cls));
- assert((int) ss->size == size && "Different sizes for the same spill slot are not allowed.");
- for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
- /* use values_interfere here, because it uses the dominance check,
- which does work for values in memory */
- assert(!values_interfere(spill, irn) && "Spills for the same spill slot must not interfere!");
+ if(all_remat_costs < 0) {
+ DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
+ all_remat_costs));
+ force_remat = 1;
}
}
-#endif /* NDEBUG */
- }
-
- pset_insert_ptr(ss->members, spill);
-}
-/**
- * qsort compare function, sort spill slots by size.
- */
-static int ss_sorter(const void *v1, const void *v2) {
- const spill_slot_t **ss1 = (const spill_slot_t **)v1;
- const spill_slot_t **ss2 = (const spill_slot_t **)v2;
- return ((int) (*ss2)->size) - ((int) (*ss1)->size);
-}
+ /* go through all reloads for this spill */
+ for (rld = si->reloaders; rld != NULL; rld = rld->next) {
+ ir_node *copy; /* a reload is a "copy" of the original value */
+
+ if (rld->rematted_node != NULL) {
+ copy = rld->rematted_node;
+ sched_add_before(rld->reloader, copy);
+ } else if (be_do_remats &&
+ (force_remat || rld->remat_cost_delta < 0)) {
+ copy = do_remat(env, to_spill, rld->reloader);
+ } else {
+ /* make sure we have a spill */
+ if (si->spill == NULL) {
+ spill_node(env, si);
+ }
+ /* create a reload */
+ copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode,
+ si->spill);
+#ifdef FIRM_STATISTICS
+ env->reload_count++;
+#endif
+ }
-/**
- * This function should optimize the spill slots.
- * - Coalescing of multiple slots
- * - Ordering the slots
- *
- * Input slots are in @p ssenv->slots
- * @p size The count of initial spill slots in @p ssenv->slots
- * This also is the size of the preallocated array @p ass
- *
- * @return An array of spill slots @p ass in specific order
- **/
-static void optimize_slots(ss_env_t *ssenv, int size, spill_slot_t *ass[]) {
- int i, o, used_slots;
- pmap_entry *entr;
-
- i=0;
- pmap_foreach(ssenv->slots, entr)
- ass[i++] = entr->value;
-
- /* Sort the array to minimize fragmentation and cache footprint.
- Large slots come first */
- qsort(ass, size, sizeof(ass[0]), ss_sorter);
-
- /* For each spill slot:
- - assign a new offset to this slot
- - xor find another slot to coalesce with */
- used_slots = 0;
- for (i=0; i<size; ++i) { /* for each spill slot */
- ir_node *n1;
- int tgt_slot = -1;
-
- DBG((ssenv->dbg, LEVEL_1, "Spill slot %d members:\n", i));
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1));
-
-
- for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */
- /* check inter-slot-pairs for interference */
- ir_node *n2;
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members))
- if(values_interfere(n1, n2)) {
- pset_break(ass[i]->members);
- pset_break(ass[o]->members);
- DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2));
- goto interf_detected;
- }
-
- /* if we are here, there is no interference between ass[i] and ass[o] */
- tgt_slot = o;
-
-interf_detected: /*nothing*/ ;
+ DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
+ copy, to_spill, rld->reloader));
+ ARR_APP1(ir_node*, copies, copy);
}
- /* now the members of ass[i] join the members of ass[tgt_slot] */
-
- /* do we need a new slot? */
- if (tgt_slot == -1) {
- tgt_slot = used_slots;
- used_slots++;
-
- /* init slot */
- if (tgt_slot != i) {
- ass[tgt_slot]->size = ass[i]->size;
- del_pset(ass[tgt_slot]->members);
- ass[tgt_slot]->members = pset_new_ptr(8);
+ /* if we had any reloads or remats, then we need to reconstruct the
+ * SSA form for the spilled value */
+ if (ARR_LEN(copies) > 0) {
+ be_ssa_construction_env_t senv;
+ /* be_lv_t *lv = be_get_birg_liveness(env->birg); */
+
+ be_ssa_construction_init(&senv, env->birg);
+ be_ssa_construction_add_copy(&senv, to_spill);
+ be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
+ be_ssa_construction_fix_users(&senv, to_spill);
+
+#if 0
+ /* no need to enable this as long as we invalidate liveness
+ after this function... */
+ be_ssa_construction_update_liveness_phis(&senv);
+ be_liveness_update(to_spill);
+ len = ARR_LEN(copies);
+ for(i = 0; i < len; ++i) {
+ be_liveness_update(lv, copies[i]);
}
+#endif
+ be_ssa_construction_destroy(&senv);
}
- /* copy the members to the target pset */
- /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */
- if (tgt_slot != i)
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- pset_insert_ptr(ass[tgt_slot]->members, n1);
+ DEL_ARR_F(copies);
+ si->reloaders = NULL;
}
-}
-#define ALIGN_SPILL_AREA 16
-#define pset_foreach(pset, elm) for(elm=pset_first(pset); elm; elm=pset_next(pset))
-
-/**
- * Returns a spill type for a mode. Keep them in a map to reduce
- * the number of types.
- *
- * @param types a map containing all created types
- * @param ss the spill slot
- *
- * Note that type types should are identical for every mode.
- * This rule might break if two different register classes return the same
- * mode but different alignments.
- */
-static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) {
- pmap_entry *e = pmap_find(types, ss->largest_mode);
- ir_type *res;
-
- if (! e) {
- char buf[64];
- snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode));
- res = new_type_primitive(new_id_from_str(buf), ss->largest_mode);
- set_type_alignment_bytes(res, ss->align);
- pmap_insert(types, ss->largest_mode, res);
- }
- else {
- res = e->value;
- assert(get_type_alignment_bytes(res) == (int)ss->align);
- }
- return res;
-}
-
-/**
- * Create spill slot entities on the frame type.
- *
- * @param ssenv the spill environment
- * @param n number of spill slots
- * @param ss array of spill slots
- */
-static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) {
- int i, offset, frame_align;
- ir_type *frame = get_irg_frame_type(ssenv->cenv->irg);
-
- /* aligning by increasing frame size */
- offset = get_type_size_bits(frame) / 8;
- offset = round_up2(offset, ALIGN_SPILL_AREA);
- set_type_size_bytes(frame, -1);
-
- /* create entities and assign offsets according to size and alignment*/
- for (i = 0; i < n_slots; ++i) {
- char buf[64];
- ident *name;
- entity *spill_ent;
- ir_node *irn;
-
- /* build entity */
- snprintf(buf, sizeof(buf), "spill_slot_%d", i);
- name = new_id_from_str(buf);
-
- spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]));
-
- /* align */
- offset = round_up2(offset, ss[i]->align);
- /* set */
- set_entity_offset_bytes(spill_ent, offset);
- /* next possible offset */
- offset += round_up2(ss[i]->size, ss[i]->align);
-
- pset_foreach(ss[i]->members, irn)
- be_set_Spill_entity(irn, spill_ent);
+#ifdef FIRM_STATISTICS
+ if (be_stat_ev_is_active()) {
+ be_stat_ev("spill_spills", env->spill_count);
+ be_stat_ev("spill_reloads", env->reload_count);
+ be_stat_ev("spill_remats", env->remat_count);
+ be_stat_ev("spill_spilled_phis", env->spilled_phi_count);
}
+#endif
- /* set final size of stack frame */
- frame_align = get_type_alignment_bytes(frame);
- set_type_size_bytes(frame, round_up2(offset, frame_align));
+ be_remove_dead_nodes_from_schedule(env->irg);
+ /* Matze: In theory be_ssa_construction should take care of the liveness...
+ * try to disable this again in the future */
+ be_invalidate_liveness(env->birg);
}
-void be_compute_spill_offsets(be_chordal_env_t *cenv) {
- ss_env_t ssenv;
- spill_slot_t **ss;
- int ss_size;
- pmap_entry *pme;
-
- obstack_init(&ssenv.ob);
- ssenv.cenv = cenv;
- ssenv.slots = pmap_create();
- ssenv.types = pmap_create();
- FIRM_DBG_REGISTER(ssenv.dbg, "ir.be.spillslots");
-
- /* Get initial spill slots */
- irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
-
- /* Build an empty array for optimized spill slots */
- ss_size = pmap_count(ssenv.slots);
- ss = obstack_alloc(&ssenv.ob, ss_size * sizeof(*ss));
- optimize_slots(&ssenv, ss_size, ss);
-
- /* Integrate slots into the stack frame entity */
- assign_entities(&ssenv, ss_size, ss);
-
- /* Clean up */
- pmap_foreach(ssenv.slots, pme)
- del_pset(((spill_slot_t *)pme->value)->members);
- pmap_destroy(ssenv.slots);
- pmap_destroy(ssenv.types);
- obstack_free(&ssenv.ob, NULL);
-
- be_copy_entities_to_reloads(cenv->irg);
+void be_init_spill(void)
+{
+ FIRM_DBG_REGISTER(dbg, "firm.be.spill");
}
+
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill);