}
}
+/* Count the registers of CLS that are not flagged "ignore", i.e. the
+ * registers actually usable by the register allocator. The env parameter
+ * is currently unused by the body. */
+int arch_count_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls)
+{
+ int i;
+ int result = 0;
+
+ for(i = 0; i < cls->n_regs; ++i) {
+ if(!arch_register_type_is(&cls->regs[i], ignore))
+ result++;
+ }
+
+ return result;
+}
+
int arch_is_register_operand(const arch_env_t *env,
const ir_node *irn, int pos)
{
*/
extern void arch_put_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls, bitset_t *bs);
+/**
+ * Return the number of registers in a register class which should not be
+ * ignored by the register allocator.
+ * @param env The architecture environment.
+ * @param cls The register class to consider
+ * @return The number of non-ignore registers in the register class
+ */
+extern int arch_count_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls);
+
/**
* Check, if a register is assignable to an operand of a node.
* @param env The architecture environment.
#include "beifg_impl.h"
#include "bespillbelady.h"
+#include "bespillmorgan.h"
#include "belower.h"
#ifdef WITH_ILP
#include "becopystat.h"
#include "becopyopt.h"
#include "bessadestr.h"
+#include "beverify.h"
void be_ra_chordal_check(be_chordal_env_t *chordal_env) {
obstack_free(&ob, NULL);
}
-static void check_pressure_walker(ir_node *bl, void *data)
-{
- be_chordal_env_t *env = data;
- int n_regs = arch_register_class_n_regs(env->cls);
- bitset_t *live = bitset_irg_malloc(env->irg);
- int step = 0;
- ir_node *irn;
- bitset_pos_t elm;
- irn_live_t *li;
- DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
-
- live_foreach(bl, li)
- if(live_is_end(li) && chordal_has_class(env, li->irn))
- bitset_add_irn(live, li->irn);
-
- DBG((dbg, LEVEL_1, "end set for %+F\n", bl));
- bitset_foreach_irn(env->irg, live, elm, irn)
- DBG((dbg, LEVEL_1, "\t%+F\n", irn));
-
- sched_foreach_reverse(bl, irn) {
- int pressure = bitset_popcnt(live);
- int idx = get_irn_idx(irn);
- int i, n;
-
- DBG((dbg, LEVEL_1, "%+10F@%+10F: pressure %d\n", bl, irn, pressure));
-
- if(pressure > n_regs) {
- ir_node *x;
- ir_printf("%+10F@%+10F: pressure to high: %d\n", bl, irn, pressure);
- bitset_foreach_irn(env->irg, live, elm, x)
- ir_fprintf(stderr, "\t%+10F\n", x);
- }
-
- if(chordal_has_class(env, irn)) {
- if(!bitset_is_set(live, idx))
- ir_fprintf(stderr, "%+F is defined but was not live\n", irn);
- bitset_remv_irn(live, irn);
- }
-
- for(i = 0, n = get_irn_arity(irn); i < n; i++) {
- ir_node *op = get_irn_n(irn, i);
- if(chordal_has_class(env, op) && !is_Phi(irn))
- bitset_add_irn(live, op);
- }
- step++;
- }
-}
-
-void be_check_pressure(const be_chordal_env_t *env)
-{
- irg_block_walk_graph(env->irg, check_pressure_walker, NULL, (void *) env);
-}
-
int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
{
if(env->ifg)
static be_ra_chordal_opts_t options = {
BE_CH_DUMP_NONE,
- BE_CH_SPILL_BELADY,
+ BE_CH_SPILL_MORGAN,
BE_CH_COPYMIN_HEUR1,
BE_CH_IFG_STD,
BE_CH_LOWER_PERM_SWAP,
#ifdef WITH_LIBCORE
static const lc_opt_enum_int_items_t spill_items[] = {
+ { "morgan", BE_CH_SPILL_MORGAN },
{ "belady", BE_CH_SPILL_BELADY },
#ifdef WITH_ILP
{ "ilp", BE_CH_SPILL_ILP },
/* spilling */
switch(options.spill_method) {
+ case BE_CH_SPILL_MORGAN:
+ be_spill_morgan(&chordal_env);
+ break;
case BE_CH_SPILL_BELADY:
be_spill_belady(&chordal_env);
break;
}
dump(BE_CH_DUMP_SPILL, irg, chordal_env.cls, "-spill", dump_ir_block_graph_sched);
be_abi_fix_stack_nodes(bi->abi);
- be_liveness(irg);
- be_check_pressure(&chordal_env);
+
+ DEBUG_ONLY(be_verify_schedule(irg);)
+ DEBUG_ONLY(be_verify_register_pressure(chordal_env.birg->main_env->arch_env, chordal_env.cls, irg);)
/* Color the graph. */
+ be_liveness(irg);
be_ra_chordal_color(&chordal_env);
dump(BE_CH_DUMP_CONSTR, irg, chordal_env.cls, "-color", dump_ir_block_graph_sched);
enum {
/* spill method */
- BE_CH_SPILL_BELADY = 1,
+ BE_CH_SPILL_BELADY = 3,
BE_CH_SPILL_ILP = 2,
+ BE_CH_SPILL_MORGAN = 1,
/* Dump flags */
BE_CH_DUMP_NONE = (1 << 0),
return insn;
}
-be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, be_irg_t *birg, const arch_register_class_t *cls, struct obstack *obst)
+be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, const be_irg_t *birg, const arch_register_class_t *cls, struct obstack *obst)
{
ie->aenv = birg->main_env->arch_env;
ie->cls = cls;
ie->obst = obst;
+ ie->ignore_colors = bitset_obstack_alloc(obst, cls->n_regs);
be_abi_put_ignore_regs(birg->abi, cls, ie->ignore_colors);
return ie;
}
be_insn_t *be_scan_insn(const be_insn_env_t *env, ir_node *irn);
-be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, be_irg_t *birg, const arch_register_class_t *cls, struct obstack *obst);
+be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, const be_irg_t *birg, const arch_register_class_t *cls, struct obstack *obst);
#endif /* _BEINSN_T_H */
DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));
)
+ /* Callers must break out of their scheduling loop before reaching the first Phi node. */
+ assert(!is_Phi(irn) && "liveness_transfer produces invalid results for phi nodes");
+
if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
ir_node *del = pset_remove_ptr(live, irn);
assert(irn == del);
#include "belower.h"
#include "beschedmris.h"
#include "bestat.h"
+#include "beverify.h"
#define DUMP_INITIAL (1 << 0)
#define DUMP_ABI (1 << 1)
/* instruction set architectures. */
static const lc_opt_enum_const_ptr_items_t isa_items[] = {
- { "firm", &firm_isa },
{ "ia32", &ia32_isa_if },
#if 0
{ "arm", &arm_isa_if },
list_sched(&birg, be_disable_mris);
dump(DUMP_SCHED, irg, "-sched", dump_ir_block_graph_sched);
+ DEBUG_ONLY(be_verify_schedule(birg.irg);)
+
be_do_stat_nodes(irg, "04 Schedule");
/* add Keeps for should_be_different constrained nodes */
arch_code_generator_after_ra(birg.cg);
be_abi_fix_stack_bias(birg.abi);
+ DEBUG_ONLY(be_verify_schedule(birg.irg);)
+
arch_code_generator_done(birg.cg);
dump(DUMP_FINAL, irg, "-end", dump_ir_extblock_graph_sched);
be_abi_free(birg.abi);
return res;
}
-ir_node *(be_get_Reload_mem)(const ir_node *irn)
+ir_node *be_get_Reload_mem(const ir_node *irn)
{
assert(be_is_Reload(irn));
return get_irn_n(irn, be_pos_Reload_mem);
}
+/* Return the frame operand (input at be_pos_Reload_frame) of a be_Reload
+ * node; asserts that irn really is a Reload. */
+ir_node *be_get_Reload_frame(const ir_node *irn)
+{
+ assert(be_is_Reload(irn));
+ return get_irn_n(irn, be_pos_Reload_frame);
+}
+
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
int i;
ir_node *be_get_Spill_context(const ir_node *irn);
+
+ir_node* be_get_Reload_mem(const ir_node *irn);
+ir_node* be_get_Reload_frame(const ir_node* irn);
+
/**
* Set the entities of a Reload to the ones of the Spill it is pointing to.
* @param irg The graph.
*/
static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
spill_ctx_t *ctx;
+ const be_main_env_t *env = senv->chordal_env->birg->main_env;
DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
+ // Has the value already been spilled?
ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
- if(!ctx->spill) {
- const be_main_env_t *env = senv->chordal_env->birg->main_env;
- ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
+ if(ctx->spill)
+ return ctx->spill;
+
+ /* Trying to spill an already spilled value, no need for a new spill
+ * node then, we can simply connect to the same one for this reload
+ */
+ if(be_is_Reload(irn)) {
+ return get_irn_n(irn, be_pos_Reload_mem);
}
+ ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
return ctx->spill;
}
* Spill a node.
*
* @param senv the spill environment
- * @param irn the node that should be spilled
- * @param ctx_irn an user of the spilled node
+ * @param to_spill the node that should be spilled
*
* @return a be_Spill node
*/
DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
possibly_dead = new_pdeq();
for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
+ int i;
reloader_t *rld;
ir_mode *mode = get_irn_mode(si->spilled_node);
+ ir_node *value;
pset *values = pset_new_ptr(16);
/* go through all reloads for this spill */
pset_insert_ptr(values, si->spilled_node);
be_ssa_constr_set_ignore(senv->chordal_env->dom_front, values, senv->mem_phis);
+ /* Remove reloads which are not used by anyone */
+ /* TODO: Better call a general garbage collection routine here... this here gets clunky
+ * and doesn't handle all cases (like memphis)
+ */
+ foreach_pset(values, value) {
+ if(get_irn_n_edges(value) == 0) {
+ sched_remove(value);
+ // remove the node from preds
+ if(be_is_Reload(value)) {
+ ir_node* spill = get_irn_n(value, be_pos_Reload_mem);
+ if(be_is_Spill(spill)) {
+ assert(be_is_Spill(spill));
+
+ set_irn_n(value, be_pos_Reload_mem, new_r_Bad(irg));
+ set_irn_n(value, be_pos_Reload_frame, new_r_Bad(irg));
+
+ // maybe the spill is not used anymore now either?
+ if(get_irn_n_edges(spill) == 0) {
+ sched_remove(spill);
+ set_irn_n(spill, be_pos_Spill_val, new_r_Bad(irg));
+ set_irn_n(spill, be_pos_Spill_frame, new_r_Bad(irg));
+ }
+ } else if(is_Phi(spill)) {
+ // TODO memphi
+ } else {
+ assert(0 && "Only spill or mem-phi expected here");
+ }
+ } else if(is_Phi(value)) {
+ for(i = 0; i < get_Phi_n_preds(value); ++i)
+ set_irn_n(value, i, new_r_Bad(irg));
+ } else {
+ assert(0);
+ }
+ }
+ }
+
del_pset(values);
}
}
del_pdeq(possibly_dead);
del_pset(senv->mem_phis);
+
+ // reloads are placed now, but we might reuse the spill environment for further spilling decisions
+ del_set(senv->spills);
+ senv->spills = new_set(cmp_spillinfo, 1024);
}
void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
#define DBG_START 16
#define DBG_SLOTS 32
#define DBG_TRACE 64
+#define DBG_WORKSET 128
#define DEBUG_LVL 0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
pset *used; /**< holds the values used (so far) in the current BB */
pset *copies; /**< holds all copies placed due to phi-spilling */
- spill_env_t *senv; /* see bespill.h */
+ spill_env_t *senv; /**< see bespill.h */
pset *reloads; /**< all reload nodes placed */
} belady_env_t;
int i;
/* check for current regclass */
if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
- DBG((dbg, DBG_DECIDE, "Dropped %+F\n", val));
+ DBG((dbg, DBG_WORKSET, "Dropped %+F\n", val));
return;
}
to_insert[demand++] = val;
if (is_usage)
be_add_reload(bel->senv, val, bel->instr);
- } else
+ } else {
DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
+ }
}
DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
len = workset_get_length(ws);
max_allowed = bel->n_regs - demand;
+ DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));
+
/* Only make more free room if we do not have enough */
if (len > max_allowed) {
/* get current next-use distance */
workset_remove(ws_start, irn);
DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
- } else
+ } else {
DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
+ }
}
/* kill the last 'demand' entries in the array */
DBG((dbg, DBG_START, "Living at start of %+F:\n", blk));
first = sched_first(blk);
count = 0;
- sched_foreach(blk, irn)
+ sched_foreach(blk, irn) {
if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, -1) == bel->cls) {
loc.irn = irn;
loc.time = get_distance(bel, first, 0, irn, 0);
count++;
} else
break;
+ }
- live_foreach(blk, li)
+ live_foreach(blk, li) {
if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, -1) == bel->cls) {
loc.irn = (ir_node *)li->irn;
loc.time = get_distance(bel, first, 0, li->irn, 0);
obstack_grow(&ob, &loc, sizeof(loc));
- DBG((dbg, DBG_START, " %+F:\n", irn));
+ DBG((dbg, DBG_START, " %+F:\n", li->irn));
count++;
}
+ }
starters = obstack_finish(&ob);
qsort(starters, count, sizeof(starters[0]), loc_compare);
continue;
/* check if irnb is in a register at end of pred */
- workset_foreach(wsp, irnp, iter2)
+ workset_foreach(wsp, irnp, iter2) {
if (irnb == irnp)
goto next_value;
+ }
/* irnb is in memory at the end of pred, so we have to reload it */
DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
}
void be_spill_belady(const be_chordal_env_t *chordal_env) {
+ be_spill_belady_spill_env(chordal_env, NULL);
+}
+
+void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t *spill_env) {
belady_env_t bel;
FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
bel.n_regs = arch_register_class_n_regs(bel.cls);
bel.ws = new_workset(&bel.ob, &bel);
bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
- bel.senv = be_new_spill_env(chordal_env, is_mem_phi, NULL);
+ if(spill_env == NULL) {
+ bel.senv = be_new_spill_env(chordal_env, is_mem_phi, NULL);
+ } else {
+ bel.senv = spill_env;
+ }
DEBUG_ONLY(be_set_spill_env_dbg_module(bel.senv, dbg);)
bel.reloads = pset_new_ptr_default();
bel.copies = pset_new_ptr_default();
/* clean up */
del_pset(bel.reloads);
- be_delete_spill_env(bel.senv);
+ if(spill_env == NULL)
+ be_delete_spill_env(bel.senv);
be_end_uses(bel.uses);
obstack_free(&bel.ob, NULL);
}
#include "bespill.h"
void be_spill_belady(const be_chordal_env_t *env);
-
+/// Same as be_spill_belady but reuses an existing spill environment
+void be_spill_belady_spill_env(const be_chordal_env_t *env, spill_env_t *spill_env);
#endif /*BESPILLBELADY_H_*/
--- /dev/null
+/*
+ * Author: Matthias Braun
+ * Date: 05.05.2006
+ * Copyright: (c) Universitaet Karlsruhe
+ * License: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
+ *
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "bespillmorgan.h"
+
+#include "bechordal.h"
+#include "bechordal_t.h"
+#include "bespill.h"
+#include "belive.h"
+#include "belive_t.h"
+#include "beinsn_t.h"
+#include "irgwalk.h"
+#include "besched.h"
+#include "beutil.h"
+#include "beuses.h"
+#include "interval_analysis.h"
+#include "irloop.h"
+#include "irloop_t.h"
+#include "irgraph.h"
+#include "irgraph_t.h"
+#include "irphase.h"
+#include "irphase_t.h"
+#include "irprintf.h"
+
+// remove me later
+#include "bespillbelady.h"
+
+#define DBG_LIVE 1
+#define DBG_PRESSURE 2
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
+
+/* Global state of the Morgan spiller, passed to all walkers/helpers. */
+typedef struct _morgan_env_t {
+ const arch_env_t *arch;
+ const arch_register_class_t *cls;
+ ir_graph *irg;
+ phase_t phase;
+ // maximum safe register pressure (number of non-ignore registers in cls)
+ int registers_available;
+
+ be_insn_env_t insn_env;
+ spill_env_t *senv;
+ be_uses_t *uses;
+
+ set *loop_attr_set;
+ set *block_attr_set;
+} morgan_env_t;
+
+/* A control-flow edge leaving a loop: destination block plus the index of
+ * the cf-predecessor that comes from inside the loop. */
+typedef struct _loop_out_edge_t {
+ ir_node *block;
+ int pos;
+} loop_out_edge_t;
+
+/* Per-loop analysis data (looked up via get_loop_attr). */
+typedef struct _loop_attr_t {
+ ir_loop *loop;
+ set *out_edges;
+ /// The set of all values that live through the loop and are not used
+ bitset_t *livethrough_unused;
+} loop_attr_t;
+
+/* Per-block analysis data, analogous to loop_attr_t. */
+typedef struct _block_attr_t {
+ ir_node *block;
+ bitset_t *livethrough_unused;
+} block_attr_t;
+
+//---------------------------------------------------------------------------
+
+/* set-compare callback for loop_out_edge_t: equal (0) iff both the block
+ * and the predecessor position match. */
+int loop_out_edge_cmp(const void* p1, const void* p2, size_t s) {
+ loop_out_edge_t *e1 = (loop_out_edge_t*) p1;
+ loop_out_edge_t *e2 = (loop_out_edge_t*) p2;
+
+ return e1->block != e2->block || e1->pos != e2->pos;
+}
+
+/* set-compare callback for loop_attr_t: keyed on the loop pointer only. */
+int loop_attr_cmp(const void *e1, const void *e2, size_t s) {
+ loop_attr_t *la1 = (loop_attr_t*) e1;
+ loop_attr_t *la2 = (loop_attr_t*) e2;
+
+ return la1->loop != la2->loop;
+}
+
+/* set-compare callback for block_attr_t: keyed on the block pointer only. */
+int block_attr_cmp(const void *e1, const void *e2, size_t s) {
+ block_attr_t *b1 = (block_attr_t*) e1;
+ block_attr_t *b2 = (block_attr_t*) e2;
+
+ return b1->block != b2->block;
+}
+
+/* Hash a loop_attr_t by its loop pointer. */
+static INLINE int loop_attr_hash(const loop_attr_t *a) {
+ return HASH_PTR(a->loop);
+}
+
+/* Hash a block_attr_t by its block pointer. */
+static INLINE int block_attr_hash(const block_attr_t *b) {
+ return HASH_PTR(b->block);
+}
+
+/* Hash a loop_out_edge_t by block pointer and position. */
+static INLINE int loop_out_edge_hash(const loop_out_edge_t *e) {
+ return HASH_PTR(e->block) ^ (e->pos * 31);
+}
+
+/* Look up the loop_attr_t for LOOP, lazily creating it on first access with
+ * an empty out-edge set and a livethrough bitset sized by the graph's node
+ * index range (allocated on the phase obstack, freed with the phase). */
+static INLINE loop_attr_t *get_loop_attr(morgan_env_t *env, ir_loop *loop) {
+ loop_attr_t l_attr, *res;
+ int hash;
+ l_attr.loop = loop;
+
+ hash = loop_attr_hash(&l_attr);
+ res = set_find(env->loop_attr_set, &l_attr, sizeof(l_attr), hash);
+
+ // create new loop_attr if none exists yet
+ if (!res) {
+ l_attr.out_edges = new_set(loop_out_edge_cmp, 1);
+ l_attr.livethrough_unused = bitset_obstack_alloc(&env->phase.obst, get_irg_last_idx(env->irg));
+ res = set_insert(env->loop_attr_set, &l_attr, sizeof(l_attr), hash);
+ }
+
+ return res;
+}
+
+/* Look up the block_attr_t for BLOCK, lazily creating it (with an empty
+ * livethrough bitset on the phase obstack) on first access. */
+static INLINE block_attr_t *get_block_attr(morgan_env_t *env, ir_node *block) {
+ block_attr_t b_attr, *res;
+ int hash;
+ b_attr.block = block;
+
+ hash = block_attr_hash(&b_attr);
+ res = set_find(env->block_attr_set, &b_attr, sizeof(b_attr), hash);
+
+ if(!res) {
+ b_attr.livethrough_unused = bitset_obstack_alloc(&env->phase.obst, get_irg_last_idx(env->irg));
+ res = set_insert(env->block_attr_set, &b_attr, sizeof(b_attr), hash);
+ }
+
+ return res;
+}
+
+/* Callback handed to be_new_spill_env(); presumably decides whether a Phi
+ * has to be spilled as a memory phi — TODO confirm against bespill.h.
+ * The Morgan spiller never requests that, hence the constant 0. */
+static int is_mem_phi(const ir_node *irn, void *data) {
+ // irn and data are intentionally unused
+ return 0;
+}
+
+//---------------------------------------------------------------------------
+
+/**
+ * Determine edges going out of a loop (= edges that go to a block that is not inside
+ * the loop or one of its subloops)
+ */
+static INLINE void construct_loop_out_edges(ir_node* block, void* e) {
+ morgan_env_t *env = (morgan_env_t*) e;
+ int n_cfgpreds = get_Block_n_cfgpreds(block);
+ int i;
+ ir_loop* loop = get_irn_loop(block);
+
+ /* For every cf-predecessor coming from a different loop that is at least
+ * as deep as ours, the edge pred->block leaves the predecessor's loop;
+ * record it in that (left) loop's out-edge set. */
+ for(i = 0; i < n_cfgpreds; ++i) {
+ ir_node* cfgpred = get_Block_cfgpred(block, i);
+ ir_node* cfgpred_block = get_nodes_block(cfgpred);
+ ir_loop* cfgpred_loop = get_irn_loop(cfgpred_block);
+ loop_attr_t *outedges = get_loop_attr(env, cfgpred_loop);
+
+ if(cfgpred_loop != loop && get_loop_depth(cfgpred_loop) >= get_loop_depth(loop)) {
+ loop_out_edge_t edge;
+ edge.block = block;
+ edge.pos = i;
+ set_insert(outedges->out_edges, &edge, sizeof(edge), loop_out_edge_hash(&edge));
+ }
+ }
+}
+
+/**
+ * Construct the livethrough unused information for a block
+ */
+static bitset_t *construct_block_livethrough_unused(morgan_env_t* env, ir_node* block) {
+ int i;
+ int node_idx;
+ ir_node *irn;
+ block_attr_t *block_attr = get_block_attr(env, block);
+
+ /*
+ * Start with every value of the relevant register class that lives
+ * through this block (live-in AND live-out); these are the candidates
+ * for livethrough_unused.
+ */
+ irn_live_t *li;
+
+ // collect all values that are live-in and live-out of the block
+ live_foreach(block, li) {
+ if(!live_is_in(li) || !live_is_out(li))
+ continue;
+ if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, li->irn))
+ continue;
+
+ node_idx = get_irn_idx(li->irn);
+ bitset_set(block_attr->livethrough_unused, node_idx);
+ }
+
+ /*
+ * All values that are used within the block are not unused (and therefore not
+ * livethrough_unused)
+ */
+ sched_foreach(block, irn) {
+ be_insn_t *insn = be_scan_insn(&env->insn_env, irn);
+
+ // clear the bit of every operand the instruction uses
+ for(i = insn->use_start; i < insn->n_ops; ++i) {
+ const be_operand_t *op = &insn->ops[i];
+ int idx = get_irn_idx(op->irn);
+ bitset_clear(block_attr->livethrough_unused, idx);
+ }
+ }
+
+ return block_attr->livethrough_unused;
+}
+
+/**
+ * Debugging help, shows all nodes in a (node-)bitset
+ */
+/* Debug helper: print every node whose index is set in the bitset via the
+ * DBG_LIVE channel. */
+static void show_nodebitset(ir_graph* irg, bitset_t* bitset) {
+ int i;
+
+ bitset_foreach(bitset, i) {
+ ir_node* node = get_idx_irn(irg, i);
+ DBG((dbg, DBG_LIVE, "\t%+F\n", node));
+ }
+}
+
+/* Recursively compute livethrough_unused for LOOP as the intersection of the
+ * livethrough_unused sets of all its blocks and sub-loops (i == 0 seeds the
+ * set with a copy; later elements are intersected in). Afterwards the values
+ * remembered at loop level are removed from the children's sets so each
+ * value is accounted for exactly once. Returns the loop's set. */
+static bitset_t *construct_loop_livethrough_unused(morgan_env_t *env, ir_loop *loop) {
+ int i;
+ loop_attr_t* loop_attr = get_loop_attr(env, loop);
+
+ DBG((dbg, DBG_LIVE, "Processing Loop %d\n", loop->loop_nr));
+ assert(get_loop_n_elements(loop) > 0);
+ for(i = 0; i < get_loop_n_elements(loop); ++i) {
+ loop_element elem = get_loop_element(loop, i);
+ switch (*elem.kind) {
+ case k_ir_node: {
+ bitset_t *livethrough_block_unused;
+ assert(is_Block(elem.node));
+ livethrough_block_unused = construct_block_livethrough_unused(env, elem.node);
+ if(i == 0) {
+ bitset_copy(loop_attr->livethrough_unused, livethrough_block_unused);
+ } else {
+ bitset_and(loop_attr->livethrough_unused, livethrough_block_unused);
+ }
+ break;
+ }
+ case k_ir_loop: {
+ bitset_t *livethrough_son_unused;
+
+ livethrough_son_unused = construct_loop_livethrough_unused(env, elem.son);
+ if(i == 0) {
+ bitset_copy(loop_attr->livethrough_unused, livethrough_son_unused);
+ } else {
+ bitset_and(loop_attr->livethrough_unused, livethrough_son_unused);
+ }
+ break;
+ }
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+ // remove all unused livethroughs that are remembered for this loop from child loops and blocks
+ for(i = 0; i < get_loop_n_elements(loop); ++i) {
+ const loop_element elem = get_loop_element(loop, i);
+
+ if(*elem.kind == k_ir_loop) {
+ loop_attr_t *son_attr = get_loop_attr(env, elem.son);
+ bitset_andnot(son_attr->livethrough_unused, loop_attr->livethrough_unused);
+
+ DBG((dbg, DBG_LIVE, "Livethroughs for loop %d:\n", loop->loop_nr));
+ show_nodebitset(env->irg, son_attr->livethrough_unused);
+ } else if(*elem.kind == k_ir_node) {
+ block_attr_t *block_attr = get_block_attr(env, elem.node);
+ bitset_andnot(block_attr->livethrough_unused, loop_attr->livethrough_unused);
+
+ DBG((dbg, DBG_LIVE, "Livethroughs for block %+F\n", elem.node));
+ show_nodebitset(env->irg, block_attr->livethrough_unused);
+ } else {
+ assert(0);
+ }
+ }
+
+ return loop_attr->livethrough_unused;
+}
+
+//---------------------------------------------------------------------------
+
+/**
+ * Reduce the register pressure within one block towards
+ * env->registers_available.
+ *
+ * Walks the schedule backwards tracking the maximal pressure, then decides
+ * how many spills of unused livethrough values are needed: loop-level
+ * livethroughs are preferred (spilled by the caller), the remainder is
+ * covered by spilling the block's own unused livethroughs around the block.
+ *
+ * @param loop_unused_spills_possible  number of unused-livethrough spills the
+ *                                     surrounding loops can still provide
+ * @return number of loop-level unused-livethrough spills actually needed
+ */
+static int reduce_register_pressure_in_block(morgan_env_t *env, ir_node* block, int loop_unused_spills_possible) {
+ int pressure;
+ ir_node *irn;
+ int max_pressure = 0;
+ int spills_needed;
+ int loop_unused_spills_needed;
+ block_attr_t *block_attr = get_block_attr(env, block);
+ int block_unused_spills_possible = bitset_popcnt(block_attr->livethrough_unused);
+ int unused_spills_possible = loop_unused_spills_possible + block_unused_spills_possible;
+ pset *live_nodes = pset_new_ptr_default();
+
+ be_liveness_end_of_block(env->arch, env->cls, block, live_nodes);
+ pressure = pset_count(live_nodes);
+
+ DBG((dbg, DBG_LIVE, "Reduce pressure to %d In Block %+F:\n", env->registers_available, block));
+
+ /*
+ * Walk over all irns in the schedule and check register pressure for each of them
+ */
+ sched_foreach_reverse(block, irn) {
+ // do we need more spills than possible with unused livethroughs?
+ // (renamed from spills_needed: it shadowed the outer variable)
+ int extra_spills_needed = pressure - unused_spills_possible - env->registers_available;
+ if(extra_spills_needed > 0) {
+ DBG((dbg, DBG_PRESSURE, "\tWARNING %d more spills needed at %+F\n", extra_spills_needed, irn));
+ // TODO further spills needed
+ //assert(0);
+ }
+ if(pressure > max_pressure) {
+ max_pressure = pressure;
+ }
+
+ /* Register pressure is only important until we reach the first phi (the rest of the block
+ * will only be phis.)
+ */
+ if(is_Phi(irn))
+ break;
+
+ // update pressure
+ {
+ int pressure_old = pressure;
+ be_liveness_transfer(env->arch, env->cls, irn, live_nodes);
+ pressure = pset_count(live_nodes);
+ DBG((dbg, DBG_PRESSURE, "\tPressure at %+F - before: %d after: %d\n", irn, pressure_old, pressure));
+ }
+ }
+
+ /*
+ * Calculate number of spills from loop_unused_spills_possible that we want to use,
+ * and spill unused livethroughs from the block if we still don't have enough registers
+ */
+ spills_needed = max_pressure - env->registers_available;
+ if(spills_needed < 0) {
+ loop_unused_spills_needed = 0;
+ } else if(spills_needed > loop_unused_spills_possible) {
+ int i, spills;
+ int block_unused_spills_needed;
+
+ loop_unused_spills_needed = loop_unused_spills_possible;
+ block_unused_spills_needed = spills_needed - loop_unused_spills_possible;
+ if(block_unused_spills_needed > block_unused_spills_possible) {
+ block_unused_spills_needed = block_unused_spills_possible;
+ }
+
+ spills = 0;
+ /*
+ * Spill/Reload unused livethroughs from the block
+ */
+ bitset_foreach(block_attr->livethrough_unused, i) {
+ ir_node *to_spill;
+ const ir_edge_t *edge;
+
+ if(spills >= block_unused_spills_needed)
+ break;
+
+ to_spill = get_idx_irn(env->irg, i);
+ foreach_block_succ(block, edge) {
+ DBG((dbg, DBG_PRESSURE, "Spilling node %+F around block %+F\n", to_spill, block));
+ be_add_reload_on_edge(env->senv, to_spill, edge->src, edge->pos);
+ }
+ /* BUGFIX: count this spill; previously spills was never incremented,
+ * so the break above never fired and every unused livethrough got
+ * spilled regardless of block_unused_spills_needed */
+ spills++;
+ }
+ } else {
+ loop_unused_spills_needed = spills_needed;
+ }
+
+ del_pset(live_nodes);
+
+ DBG((dbg, DBG_PRESSURE, "Unused spills for Block %+F needed: %d\n", block, loop_unused_spills_needed));
+ return loop_unused_spills_needed;
+}
+
+/**
+ * Reduce register pressure in a loop
+ *
+ * @param unused_spills_possible Number of spills from livethrough_unused variables possible in outer loops
+ * @return Number of spills of livethrough_unused variables needed in outer loops
+ */
+static int reduce_register_pressure_in_loop(morgan_env_t *env, ir_loop *loop, int outer_spills_possible) {
+ int i;
+ loop_attr_t* loop_attr = get_loop_attr(env, loop);
+ int spills_needed = 0;
+ int spills_possible = outer_spills_possible + bitset_popcnt(loop_attr->livethrough_unused);
+ int outer_spills_needed;
+
+ /* recurse over all blocks and sub-loops; the loop's need is the maximum
+ * over its elements (spills of livethroughs help every element) */
+ DBG((dbg, DBG_PRESSURE, "Reducing Pressure in loop %d\n", loop->loop_nr));
+ for(i = 0; i < get_loop_n_elements(loop); ++i) {
+ loop_element elem = get_loop_element(loop, i);
+ switch (*elem.kind) {
+ case k_ir_node: {
+ int needed;
+ assert(is_Block(elem.node));
+ needed = reduce_register_pressure_in_block(env, elem.node, spills_possible);
+ assert(needed <= spills_possible);
+ if(needed > spills_needed)
+ spills_needed = needed;
+ break;
+ }
+ case k_ir_loop: {
+ int needed = reduce_register_pressure_in_loop(env, elem.son, spills_possible);
+ assert(needed <= spills_possible);
+ if(needed > spills_needed)
+ spills_needed = needed;
+ break;
+ }
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+ // calculate number of spills needed in outer loop and spill
+ // unused livethrough nodes around this loop
+ if(spills_needed > outer_spills_possible) {
+ outer_spills_needed = outer_spills_possible;
+ spills_needed -= outer_spills_possible;
+
+ /* NOTE(review): the decremented spills_needed is never used below —
+ * this spills ALL of the loop's unused livethroughs on its out-edges
+ * instead of only spills_needed of them; confirm this is intended. */
+ bitset_foreach(loop_attr->livethrough_unused, i) {
+ loop_out_edge_t *edge;
+ ir_node *to_spill = get_idx_irn(env->irg, i);
+
+ for(edge = set_first(loop_attr->out_edges); edge != NULL; edge = set_next(loop_attr->out_edges)) {
+ be_add_reload_on_edge(env->senv, to_spill, edge->block, edge->pos);
+ }
+ }
+ } else {
+ outer_spills_needed = spills_needed;
+ }
+
+ return outer_spills_needed;
+}
+
+/* phase_init() callback: keep any existing per-node data unchanged and
+ * allocate nothing eagerly. */
+static void *init_phase_data(phase_t *phase, ir_node *irn, void *old) {
+ return old;
+}
+
+/* Walker state for dump_liveness_walker: the chordal env plus the open
+ * output file. */
+typedef struct _liveness_dump_env_t {
+ const be_chordal_env_t *chordal_env;
+ FILE *f;
+} liveness_dump_env_t;
+
+/**
+ * Pre-walker: dump liveness data to a file
+ */
+static void dump_liveness_walker(ir_node *bl, void *data)
+{
+ liveness_dump_env_t *env = (liveness_dump_env_t*) data;
+ FILE *f = env->f;
+ const irn_live_t *li;
+ ir_node* irn;
+ int in = 0, end = 0, out = 0;
+ int max_pressure = 0;
+ pset *live_nodes;
+
+ // count live-in/end/out values of the relevant register class
+ live_foreach(bl, li) {
+ const ir_node* irn = li->irn;
+ if(!arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->chordal_env->cls, irn))
+ continue;
+
+ if(live_is_in(li))
+ in++;
+ if(live_is_end(li))
+ end++;
+ if(live_is_out(li))
+ out++;
+ }
+
+ // compute maximal register pressure by walking the schedule backwards
+ live_nodes = pset_new_ptr_default();
+ be_liveness_end_of_block(env->chordal_env->birg->main_env->arch_env, env->chordal_env->cls, bl, live_nodes);
+ max_pressure = pset_count(live_nodes);
+ sched_foreach_reverse(bl, irn) {
+ int pressure;
+
+ // the remainder of the block (upwards) consists of phis only
+ if(is_Phi(irn))
+ break;
+
+ be_liveness_transfer(env->chordal_env->birg->main_env->arch_env, env->chordal_env->cls, irn, live_nodes);
+ pressure = pset_count(live_nodes);
+ if(pressure > max_pressure)
+ max_pressure = pressure;
+ }
+ del_pset(live_nodes);
+
+ // one summary line per block, then one line per live value
+ ir_fprintf(f, "%+20F (%d in) (%d end) (%d out) (max_pressure %d)\n", bl, in, end, out, max_pressure);
+ live_foreach(bl, li) {
+ const ir_node* irn = li->irn;
+ if(!arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->chordal_env->cls, irn))
+ continue;
+
+ ir_fprintf(f, "\t%+30F %4s %4s %4s\n",
+ irn,
+ live_is_in(li) ? "in" : "",
+ live_is_end(li) ? "end" : "",
+ live_is_out(li) ? "out" : "");
+ }
+}
+
+/* Write per-block liveness statistics of the current register class into
+ * "<irg>_<cls>_<name>-live.txt"; silently does nothing if the file cannot
+ * be opened. */
+static void dump_liveness_info(const be_chordal_env_t *chordal_env, const char* name) {
+ char buf[128];
+ liveness_dump_env_t env;
+
+ env.chordal_env = chordal_env;
+ ir_snprintf(buf, sizeof(buf), "%F_%s_%s-live.txt", chordal_env->irg, chordal_env->cls->name, name);
+ env.f = fopen(buf, "wt");
+ if(env.f == NULL)
+ return;
+
+ irg_block_walk_graph(chordal_env->irg, dump_liveness_walker, NULL, &env);
+ fclose(env.f);
+}
+
+
+/* Entry point of the Morgan spiller: analyse unused livethrough values per
+ * loop/block, spill them where the pressure exceeds the number of available
+ * registers, then let Belady's algorithm (reusing the same spill env) fix
+ * any remaining overpressure. */
+void be_spill_morgan(const be_chordal_env_t *chordal_env) {
+ morgan_env_t env;
+
+ FIRM_DBG_REGISTER(dbg, "ir.be.spillmorgan");
+ //firm_dbg_set_mask(dbg, DBG_LIVE | DBG_PRESSURE);
+
+ env.arch = chordal_env->birg->main_env->arch_env;
+ env.irg = chordal_env->irg;
+ env.cls = chordal_env->cls;
+ env.senv = be_new_spill_env(chordal_env, is_mem_phi, NULL);
+ DEBUG_ONLY(be_set_spill_env_dbg_module(env.senv, dbg);)
+ env.uses = be_begin_uses(env.irg, env.arch, env.cls);
+
+ phase_init(&env.phase, "spillmorgan", env.irg, PHASE_DEFAULT_GROWTH, init_phase_data);
+
+ // only non-ignore registers count towards the safe pressure limit
+ env.registers_available = arch_count_non_ignore_regs(env.arch, env.cls);
+
+ be_insn_env_init(&env.insn_env, chordal_env->birg, chordal_env->cls, &env.phase.obst);
+
+ env.loop_attr_set = new_set(loop_attr_cmp, 5);
+ env.block_attr_set = new_set(block_attr_cmp, 20);
+
+
+ /*-- Part1: Analysis --*/
+ be_liveness(env.irg);
+
+ // construct control flow loop tree
+ construct_cf_backedges(chordal_env->irg);
+
+ // construct loop out edges and livethrough_unused sets for loops and blocks
+ irg_block_walk_graph(chordal_env->irg, construct_loop_out_edges, NULL, &env);
+ construct_loop_livethrough_unused(&env, get_irg_loop(env.irg));
+
+ /*-- Part2: Transformation --*/
+
+ // reduce register pressure to number of available registers
+ reduce_register_pressure_in_loop(&env, get_irg_loop(env.irg), 0);
+
+ be_insert_spills_reloads(env.senv, NULL);
+
+ // cleanup
+ be_end_uses(env.uses);
+ be_dump(env.irg, "-spillmorgan", dump_ir_block_graph_sched);
+ del_set(env.loop_attr_set);
+ del_set(env.block_attr_set);
+
+ // liveness is recomputed after the graph was modified
+ be_liveness(env.irg);
+ dump_liveness_info(chordal_env, "spillmorgan");
+
+ // fix the remaining places with too high register pressure with Belady's algorithm
+ be_spill_belady_spill_env(chordal_env, env.senv);
+
+ be_liveness(env.irg);
+ dump_liveness_info(chordal_env, "spillcomplete");
+
+ be_delete_spill_env(env.senv);
+ phase_free(&env.phase);
+}
--- /dev/null
+/*
+ * Author: Matthias Braun
+ * Date: 05.05.2006
+ * Copyright: (c) Universitaet Karlsruhe
+ * License: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
+ */
+#ifndef BESPILLMORGAN_H_
+#define BESPILLMORGAN_H_
+
+#include "be_t.h"
+#include "bechordal.h"
+
+void be_spill_morgan(const be_chordal_env_t *env);
+
+#endif
sched_foreach_reverse(block, irn) {
int cnt;
+ if(is_Phi(irn))
+ break;
+
live_nodes = be_liveness_transfer(aenv, cls, irn, live_nodes);
cnt = pset_count(live_nodes);
const ir_node *bl;
const ir_node *irn;
unsigned next_use;
- int is_set;
} be_use_t;
struct _be_uses_t {
}
static INLINE be_use_t *get_or_set_use(be_uses_t *uses,
- const ir_node *bl, const ir_node *irn, unsigned next_use)
+ const ir_node *bl, const ir_node *def, unsigned next_use)
{
- unsigned hash = HASH_COMBINE(HASH_PTR(bl), HASH_PTR(irn));
+ unsigned hash = HASH_COMBINE(HASH_PTR(bl), HASH_PTR(def));
be_use_t templ;
+ be_use_t* result;
templ.bl = bl;
- templ.irn = irn;
- templ.next_use = next_use;
- templ.is_set = 0;
- return set_insert(uses->uses, &templ, sizeof(templ), hash);
+ templ.irn = def;
+ templ.next_use = be_get_next_use(uses, sched_first(bl), 0, def, 0);
+ result = set_insert(uses->uses, &templ, sizeof(templ), hash);
+
+ return result;
}
unsigned be_get_next_use(be_uses_t *uses, const ir_node *from,
static unsigned get_next_use_bl(be_uses_t *uses, const ir_node *bl,
const ir_node *def)
{
- be_use_t *u;
-
- u = get_or_set_use(uses, bl, def, 0);
- if (! u->is_set) {
- u->is_set = 1;
- u->next_use = USES_INFINITY;
- u->next_use = be_get_next_use(uses, sched_first(bl), 0, def, 0);
- }
+ be_use_t *u = get_or_set_use(uses, bl, def, 0);
+
return u->next_use;
}
*/
void be_dump(ir_graph *irg, const char *suffix, void (*dumper)(ir_graph *, const char *));
-
-#endif /* _BEUTIL_H */
-
/**
* Search for an irn in @p accept.
* The search is started at @p start_point_exclusive and continues upwards the dom-tree
* @return The first node out of accept if found. Else NULL is returned.
*/
ir_node *dom_up_search(pset *accept, ir_node *start_point_exclusive);
+
+#endif /* _BEUTIL_H */
--- /dev/null
+/*
+ * Author: Matthias Braun
+ * Date: 05.05.2006
+ * Copyright: (c) Universitaet Karlsruhe
+ * License: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
+ *
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "beverify.h"
+#include "belive.h"
+#include "besched.h"
+
+#include "irnode.h"
+#include "irgraph.h"
+#include "irgwalk.h"
+#include "irprintf.h"
+#include "irdump_t.h"
+
+/* Walker state for the register pressure verifier. */
+typedef struct be_verify_register_pressure_env_t_ {
+ const arch_env_t *arch_env; /* architecture environment (register info) */
+ const arch_register_class_t *cls; /* register class being checked */
+ int registers_available; /* number of non-ignore registers in cls */
+ int problem_found; /* set to 1 when pressure exceeds the limit */
+} be_verify_register_pressure_env_t;
+
+/*
+ * Block walker: recompute the register pressure (number of simultaneously
+ * live nodes of env->cls) at the end of the block and before every scheduled
+ * node, and emit a warning whenever it exceeds the available registers.
+ */
+static void verify_liveness_walker(ir_node *bl, void *data)
+{
+ be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t*) data;
+ int pressure;
+ pset *live_nodes = pset_new_ptr_default();
+ ir_node *irn;
+
+ // collect register pressure info
+ be_liveness_end_of_block(env->arch_env, env->cls, bl, live_nodes);
+ pressure = pset_count(live_nodes);
+ if(pressure > env->registers_available) {
+ ir_printf("Verify Warning: Register pressure too high at end of block %+F (%d/%d).\n",
+ bl, pressure, env->registers_available);
+ env->problem_found = 1;
+ }
+ /* Walk the schedule backwards, updating the live set per node. */
+ sched_foreach_reverse(bl, irn) {
+ int pressure;
+
+ /* Phis sit at the block start; the backwards walk stops at the first
+  * one encountered, so pressure among the Phis is not checked here. */
+ if(is_Phi(irn))
+ break;
+
+ be_liveness_transfer(env->arch_env, env->cls, irn, live_nodes);
+ pressure = pset_count(live_nodes);
+
+ if(pressure > env->registers_available) {
+ ir_printf("Verify Warning: Register pressure too high before %+F (in block %+F) (%d/%d).\n",
+ irn, bl, pressure, env->registers_available);
+ env->problem_found = 1;
+ }
+ }
+ del_pset(live_nodes);
+}
+
+/*
+ * Verify that the register pressure of class cls never exceeds the number of
+ * non-ignore registers anywhere in irg.  Liveness is (re)computed first.
+ * NOTE(review): problems are printed as warnings and then checked with a
+ * plain assert, so in NDEBUG builds a violation only prints and continues --
+ * consider returning env.problem_found to the caller instead.
+ */
+void be_verify_register_pressure(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_graph *irg)
+{
+ be_verify_register_pressure_env_t env;
+
+ be_liveness(irg);
+
+ env.arch_env = arch_env;
+ env.cls = cls;
+ env.registers_available = arch_count_non_ignore_regs(arch_env, cls);
+ env.problem_found = 0;
+
+ irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
+
+ assert(env.problem_found == 0);
+}
+
+/* Walker state for the schedule verifier. */
+typedef struct be_verify_schedule_env_t_ {
+ int problem_found; /* set to 1 on any schedule violation */
+ ir_graph *irg; /* graph being checked (used for dump names in warnings) */
+} be_verify_schedule_env_t;
+
+/*
+ * Block walker: check two scheduling invariants per block:
+ *  1. all Phi nodes come before any non-Phi node, and
+ *  2. no node is scheduled after a control-flow changing node.
+ */
+static void verify_schedule_walker(ir_node *bl, void *data)
+{
+ be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;
+ ir_node *irn;
+ int non_phi_found = 0;
+ int first_cfchange_found = 0;
+
+ /*
+ * Make sure that all phi nodes are scheduled at the beginning of the block, and that there
+ * are no nodes scheduled after a control flow changing node
+ */
+ sched_foreach(bl, irn) {
+ if(is_Phi(irn)) {
+ if(non_phi_found) {
+ ir_printf("Verify Warning: Phi node %+F scheduled after non-Phi nodes in block %+F (%s)\n",
+ irn, bl, get_irg_dump_name(env->irg));
+ env->problem_found = 1;
+ }
+ continue;
+ }
+
+ non_phi_found = 1;
+ /* NOTE(review): Start is excluded from the cfop test -- presumably
+  * because Start is a cfop that legitimately has a successor schedule;
+  * confirm this matches the scheduler's conventions.  Also note that a
+  * second cfop after the first one takes this branch and is therefore
+  * not itself reported. */
+ if(is_cfop(irn) && get_irn_opcode(irn) != iro_Start) {
+ first_cfchange_found = 1;
+ } else {
+ if(first_cfchange_found) {
+ ir_printf("Verify Warning: Node %+F scheduled after control flow changing node in block %+F (%s)\n",
+ irn, bl, get_irg_dump_name(env->irg));
+ env->problem_found = 1;
+ }
+ }
+ }
+}
+
+
+/*
+ * Run the per-block schedule sanity checks on all blocks of irg.
+ * NOTE(review): like be_verify_register_pressure, the final check is an
+ * assert and thus compiled out under NDEBUG.
+ */
+void be_verify_schedule(ir_graph *irg)
+{
+ be_verify_schedule_env_t env;
+
+ env.problem_found = 0;
+ env.irg = irg;
+
+ irg_block_walk_graph(irg, verify_schedule_walker, NULL, &env);
+
+ assert(env.problem_found == 0);
+}
--- /dev/null
+/**
+ * Author: Matthias Braun
+ * Date: 05.05.2006
+ * Copyright: (c) Universitaet Karlsruhe
+ * License: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
+ */
+
+/**
+ * @file beverify.h
+ *
+ * Various verify routines that check a scheduled graph for correctness
+ *
+ * @author Matthias Braun
+ */
+#ifndef BEVERIFY_H_
+#define BEVERIFY_H_
+
+#include "bechordal.h"
+
+/**
+ * Verifies, that the register pressure for a given register class doesn't exceed the limit
+ * of available registers.
+ *
+ * @param arch_env The architecture environment.
+ * @param cls      The register class to check.
+ * @param irg      The graph to check; liveness is recomputed internally.
+ */
+void be_verify_register_pressure(const arch_env_t *arch_env, const arch_register_class_t* cls, ir_graph *irg);
+
+/**
+ * Does some sanity checks on the schedule: Phis first in each block,
+ * nothing scheduled after a control flow change.
+ *
+ * @param irg The scheduled graph to check.
+ */
+void be_verify_schedule(ir_graph *irg);
+
+#endif