#include "obst.h"
#include "set.h"
#include "pset.h"
+#include "irprintf_t.h"
#include "irgraph.h"
#include "irnode.h"
#include "irmode.h"
#include "irgwalk.h"
#include "iredges_t.h"
#include "ircons_t.h"
+#include "irprintf.h"
#include "beutil.h"
#include "bearch.h"
#define DBG_START 16
#define DBG_SLOTS 32
#define DBG_TRACE 64
+#define DBG_WORKSET 128
#define DEBUG_LVL 0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
-static firm_dbg_module_t *dbg = NULL;
-
-#define MIN(a,b) (((a)<(b))?(a):(b))
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
typedef struct _workset_t workset_t;
ir_node *instr; /**< current instruction */
unsigned instr_nr; /**< current instruction number (relative to block start) */
pset *used; /**< holds the values used (so far) in the current BB */
- pset *copies; /**< holds all copies placed due to phi-spilling */
+ ir_node **copies; /**< holds all copies placed due to phi-spilling */
- spill_env_t *senv; /* see bespill.h */
- pset *reloads; /**< all reload nodes placed */
+ spill_env_t *senv; /**< see bespill.h */
} belady_env_t;
struct _workset_t {
int i;
/* check for current regclass */
if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
- DBG((dbg, DBG_DECIDE, "Dropped %+F\n", val));
+ DBG((dbg, DBG_WORKSET, "Dropped %+F\n", val));
return;
}
static INLINE unsigned get_distance(belady_env_t *bel, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
{
arch_irn_flags_t fl = arch_irn_get_flags(bel->arch, def);
- if((fl & (arch_irn_flags_ignore | arch_irn_flags_dont_spill)) != 0)
+ unsigned dist = be_get_next_use(bel->uses, from, from_step, def, skip_from_uses);
+
+ /* Values flagged ignore/dont_spill are pinned by reporting distance 0
+  * ("needed immediately"), so the workset never evicts them -- but only
+  * while they still have a real next use.  A value with infinite
+  * distance (no further use) keeps it, so dead pinned values can still
+  * be dropped from the workset.
+  * NOTE(review): the macro spelling USES_IS_INIFINITE is reproduced
+  * from the uses API as-is -- confirm against beuses.h. */
+ if(!USES_IS_INIFINITE(dist) && (fl & (arch_irn_flags_ignore | arch_irn_flags_dont_spill)) != 0)
return 0;
- else
- return be_get_next_use(bel->uses, from, from_step, def, skip_from_uses);
+
+ return dist;
}
/**
to_insert[demand++] = val;
if (is_usage)
be_add_reload(bel->senv, val, bel->instr);
- } else
+ } else {
DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
+ }
}
DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
-
/*
* 2. Make room for at least 'demand' slots
*/
len = workset_get_length(ws);
max_allowed = bel->n_regs - demand;
+ DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));
+
/* Only make more free room if we do not have enough */
if (len > max_allowed) {
/* get current next-use distance */
before its first usage, remove it from start workset */
for (i=max_allowed; i<ws->len; ++i) {
ir_node *irn = ws->vals[i].irn;
+
if (!pset_find_ptr(bel->used, irn)) {
ir_node *curr_bb = get_nodes_block(bel->instr);
workset_t *ws_start = get_block_info(curr_bb)->ws_start;
workset_remove(ws_start, irn);
DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
- } else
+ } else {
DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
+ }
}
/* kill the last 'demand' entries in the array */
static void belady(ir_node *blk, void *env);
+/**
+ * Inserts a copy of a value at the earliest possible location in a block.
+ * That is after the last use of the value or at the beginning of the block if
+ * there is no use.  The copy is recorded in env->copies so it can be removed
+ * again by remove_copies() once spilling is done.
+ */
+static ir_node *insert_copy(belady_env_t *env, ir_node *block, ir_node *value) {
+ ir_node* node;
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *copy = be_new_Copy(env->cls, irg, block, value);
+
+ /* remember the copy for later disposal */
+ ARR_APP1(ir_node*, env->copies, copy);
+
+ // walk schedule backwards until we find a usage, or until we have reached the first phi
+ // TODO can we do this faster somehow? This makes insert_copy O(n) in block_size...
+ sched_foreach_reverse(block, node) {
+ int i, arity;
+
+ /* never schedule the copy before the phi group at the block start */
+ if(is_Phi(node)) {
+ sched_add_after(node, copy);
+ goto placed;
+ }
+ /* reached the definition of the value itself: place right after it */
+ if(value == node) {
+ sched_add_after(node, copy);
+ goto placed;
+ }
+ /* does this node use the value? then it is the last use seen so far */
+ for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+ ir_node *arg = get_irn_n(node, i);
+ if(arg == value) {
+ sched_add_after(node, copy);
+ goto placed;
+ }
+ }
+ }
+ // we didn't find a use or a phi yet, so place the copy at the beginning of the block
+ sched_add_before(sched_first(block), copy);
+
+placed:
+
+ return copy;
+}
+
/**
* Collects all values live-in at block @p blk and all phi results in this block.
* Then it adds the best values (at most n_regs) to the blocks start_workset.
* their args to break interference and make it possible to spill them to the
* same spill slot.
*/
-static block_info_t *compute_block_start_info(ir_node *blk, void *env) {
- belady_env_t *bel = env;
+static block_info_t *compute_block_start_info(ir_node *blk, void *data) {
+ belady_env_t *env = data;
ir_node *irn, *first;
irn_live_t *li;
int i, count, ws_count;
return res;
/* Create the block info for this block. */
- res = new_block_info(&bel->ob);
+ res = new_block_info(&env->ob);
set_block_info(blk, res);
DBG((dbg, DBG_START, "Living at start of %+F:\n", blk));
first = sched_first(blk);
count = 0;
- sched_foreach(blk, irn)
- if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, -1) == bel->cls) {
+ sched_foreach(blk, irn) {
+ if (is_Phi(irn) && arch_get_irn_reg_class(env->arch, irn, -1) == env->cls) {
loc.irn = irn;
- loc.time = get_distance(bel, first, 0, irn, 0);
+ loc.time = get_distance(env, first, 0, irn, 0);
obstack_grow(&ob, &loc, sizeof(loc));
DBG((dbg, DBG_START, " %+F:\n", irn));
count++;
} else
break;
+ }
- live_foreach(blk, li)
- if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, -1) == bel->cls) {
+ live_foreach(blk, li) {
+ if (live_is_in(li) && arch_get_irn_reg_class(env->arch, li->irn, -1) == env->cls) {
loc.irn = (ir_node *)li->irn;
- loc.time = get_distance(bel, first, 0, li->irn, 0);
+ loc.time = get_distance(env, first, 0, li->irn, 0);
obstack_grow(&ob, &loc, sizeof(loc));
- DBG((dbg, DBG_START, " %+F:\n", irn));
+ DBG((dbg, DBG_START, " %+F:\n", li->irn));
count++;
}
+ }
starters = obstack_finish(&ob);
qsort(starters, count, sizeof(starters[0]), loc_compare);
/* if pred block has not been processed yet, do it now */
if (! pred_info) {
- belady(pred_blk, bel);
+ belady(pred_blk, env);
pred_info = get_block_info(pred_blk);
}
/* now we have an end_set of pred */
assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
- res->ws_start = workset_clone(&bel->ob, pred_info->ws_end);
+ res->ws_start = workset_clone(&env->ob, pred_info->ws_end);
} else
/* Else we want the start_set to be the values used 'the closest' */
{
/* Copy the best ones from starters to start workset */
- ws_count = MIN(count, bel->n_regs);
- res->ws_start = new_workset(&bel->ob, bel);
+ ws_count = MIN(count, env->n_regs);
+ res->ws_start = new_workset(&env->ob, env);
workset_bulk_fill(res->ws_start, ws_count, starters);
}
+
/* The phis of this block which are not in the start set have to be spilled later.
* Therefore we add temporary copies in the pred_blocks so the spills can spill
* into the same spill slot.
DBG((dbg, DBG_START, "For %+F:\n", irn));
for (max=get_irn_arity(irn), o=0; o<max; ++o) {
- ir_node *arg = get_irn_n(irn, o);
ir_node *pred_block = get_Block_cfgpred_block(get_nodes_block(irn), o);
- ir_node *cpy = be_new_Copy(bel->cls, irg, pred_block, arg);
- pset_insert_ptr(bel->copies, cpy);
- DBG((dbg, DBG_START, " place a %+F of %+F in %+F\n", cpy, arg, pred_block));
- sched_add_before(pred_block, cpy);
- set_irn_n(irn, o, cpy);
+ ir_node *arg = get_irn_n(irn, o);
+ ir_node* copy = insert_copy(env, pred_block, arg);
+
+ set_irn_n(irn, o, copy);
}
}
continue;
/* check if irnb is in a register at end of pred */
- workset_foreach(wsp, irnp, iter2)
+ workset_foreach(wsp, irnp, iter2) {
if (irnb == irnp)
goto next_value;
+ }
/* irnb is in memory at the end of pred, so we have to reload it */
DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
}
}
-/**
- * Removes all used reloads from bel->reloads.
- * The remaining nodes in bel->reloads will be removed from the graph.
- */
-static void rescue_used_reloads(ir_node *irn, void *env) {
- pset *rlds = (pset *)env;
- if (pset_find_ptr(rlds, irn))
- pset_remove_ptr(rlds, irn);
-}
-
/**
 * Removes all copies introduced for phi-spills
 */
-static void remove_copies(belady_env_t *bel) {
- ir_node *irn;
-
- for (irn = pset_first(bel->copies); irn; irn = pset_next(bel->copies)) {
- ir_node *src, *spill;
-
- assert(be_is_Copy(irn));
- assert(get_irn_n_edges(irn) == 1 && "This is not a copy introduced in 'compute_block_start_info()'. Who created it?");
-
- spill = get_irn_edge(get_irn_irg(irn), irn, 0)->src;
- assert(be_is_Spill(spill) && "This is not a copy introduced in 'compute_block_start_info()'. Who created it?");
-
- src = get_irn_n(irn, 0);
- set_irn_n(spill, 0, src);
- }
-}
-
-/**
- * Finds all unused reloads and remove them from the schedule
- * Also removes spills if they are not used anymore after removing reloads
- */
-static void remove_unused_reloads(ir_graph *irg, belady_env_t *bel) {
- ir_node *irn;
+static void remove_copies(belady_env_t *env) {
+ int i;
- irg_walk_graph(irg, rescue_used_reloads, NULL, bel->reloads);
- for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads)) {
- ir_node *spill;
- DBG((dbg, DBG_SPILL, "Removing %+F before %+F in %+F\n", irn, sched_next(irn), get_nodes_block(irn)));
+ /* reroute every remaining user of each copy back to the copied value;
+  * the copies themselves become dead and are reaped afterwards by
+  * be_remove_dead_nodes_from_schedule(). */
+ for(i = 0; i < ARR_LEN(env->copies); ++i) {
+ ir_node *node = env->copies[i];
+ ir_node *src;
+ const ir_edge_t *edge, *ne;
- spill = get_irn_n(irn, 0);
+ assert(be_is_Copy(node));
- /* remove reload */
- set_irn_n(irn, 0, new_Bad());
- sched_remove(irn);
+ src = be_get_Copy_op(node);
+ /* safe iteration: set_irn_n() below mutates the out-edge list */
+ foreach_out_edge_safe(node, edge, ne) {
+ ir_node *user = get_edge_src_irn(edge);
+ int user_pos = get_edge_src_pos(edge);
- /* if spill not used anymore, remove it too
-  * test of regclass is necessary since spill may be a phi-M */
- if (get_irn_n_edges(spill) == 0 && bel->cls == arch_get_irn_reg_class(bel->arch, spill, -1)) {
- set_irn_n(spill, 0, new_Bad());
- sched_remove(spill);
+ set_irn_n(user, user_pos, src);
+ }
	}
}
+/**
+ * Convenience entry point: runs belady spilling with an internally
+ * created (and internally destroyed) spill environment.
+ */
void be_spill_belady(const be_chordal_env_t *chordal_env) {
+ be_spill_belady_spill_env(chordal_env, NULL);
+}
+
+/**
+ * Runs the belady spilling heuristic on the register class of the given
+ * chordal environment.
+ *
+ * @param chordal_env  the chordal backend environment to spill for
+ * @param spill_env    an existing spill environment to reuse, or NULL to
+ *                     create (and destroy) one internally.  A caller-supplied
+ *                     environment is NOT freed here -- the caller keeps
+ *                     ownership.
+ */
+void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t *spill_env) {
belady_env_t bel;
- dbg = firm_dbg_register("ir.be.spillbelady");
+ FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
/* init belady env */
obstack_init(&bel.ob);
- bel.arch = chordal_env->birg->main_env->arch_env;
- bel.cls = chordal_env->cls;
- bel.n_regs = arch_register_class_n_regs(bel.cls);
- bel.ws = new_workset(&bel.ob, &bel);
- bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
- bel.senv = be_new_spill_env(dbg, chordal_env, is_mem_phi, NULL);
- bel.reloads = pset_new_ptr_default();
- bel.copies = pset_new_ptr_default();
+ bel.arch = chordal_env->birg->main_env->arch_env;
+ bel.cls = chordal_env->cls;
+ bel.n_regs = arch_register_class_n_regs(bel.cls);
+ bel.ws = new_workset(&bel.ob, &bel);
+ bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
+ /* create our own spill env only if the caller did not hand one in */
+ if(spill_env == NULL) {
+ bel.senv = be_new_spill_env(chordal_env, is_mem_phi, NULL);
+ } else {
+ bel.senv = spill_env;
+ be_set_is_spilled_phi(bel.senv, is_mem_phi, NULL);
+ }
+ DEBUG_ONLY(be_set_spill_env_dbg_module(bel.senv, dbg);)
+ bel.copies = NEW_ARR_F(ir_node*, 0);
DBG((dbg, LEVEL_1, "running on register class: %s\n", bel.cls->name));
be_clear_links(chordal_env->irg);
irg_block_walk_graph(chordal_env->irg, NULL, belady, &bel);
irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &bel);
- be_insert_spills_reloads(bel.senv, bel.reloads);
- remove_unused_reloads(chordal_env->irg, &bel);
+ be_insert_spills_reloads(bel.senv);
remove_copies(&bel);
+ DEL_ARR_F(bel.copies);
+
+ /* remove_copies() only reroutes users; sweep the now-dead copy nodes */
+ be_remove_dead_nodes_from_schedule(chordal_env->irg);
/* clean up */
- del_pset(bel.reloads);
- be_delete_spill_env(bel.senv);
+ /* only free the spill env if we created it ourselves */
+ if(spill_env == NULL)
+ be_delete_spill_env(bel.senv);
be_end_uses(bel.uses);
obstack_free(&bel.ob, NULL);
}