#define DBG_START 16
#define DBG_SLOTS 32
#define DBG_TRACE 64
+#define DBG_WORKSET 128
#define DEBUG_LVL 0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
-static firm_dbg_module_t *dbg = NULL;
-
-#define MIN(a,b) (((a)<(b))?(a):(b))
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
typedef struct _workset_t workset_t;
be_uses_t *uses; /**< env for the next-use magic */
ir_node *instr; /**< current instruction */
unsigned instr_nr; /**< current instruction number (relative to block start) */
- pset *used; /**< holds the values used (so far) in the current BB */
- pset *copies; /**< holds all copies placed due to phi-spilling */
+ pset *used;
- spill_env_t *senv; /* see bespill.h */
- pset *reloads; /**< all reload nodes placed */
+ spill_env_t *senv; /**< see bespill.h */
} belady_env_t;
struct _workset_t {
- belady_env_t *bel;
int len; /**< current length */
- loc_t vals[1]; /**< inlined array of the values/distances in this working set */
+ loc_t vals[0]; /**< inlined array of the values/distances in this working set */
};
void workset_print(const workset_t *w)
/**
* Alloc a new workset on obstack @p ob with maximum size @p max
*/
-static INLINE workset_t *new_workset(struct obstack *ob, belady_env_t *bel) {
+static INLINE workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
workset_t *res;
- size_t size = sizeof(*res) + (bel->n_regs-1)*sizeof(res->vals[0]);
+ size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
res = obstack_alloc(ob, size);
memset(res, 0, size);
- res->bel = bel;
return res;
}
/**
* Alloc a new instance on obstack and make it equal to @param ws
*/
-static INLINE workset_t *workset_clone(struct obstack *ob, workset_t *ws) {
+static INLINE workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
workset_t *res;
- size_t size = sizeof(*res) + (ws->bel->n_regs-1)*sizeof(res->vals[0]);
+ size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
res = obstack_alloc(ob, size);
memcpy(res, ws, size);
return res;
/**
* Do NOT alloc anything. Make @param tgt equal to @param src.
- * returns @param tgt for convinience
+ * returns @param tgt for convenience
*/
-static INLINE workset_t *workset_copy(workset_t *tgt, workset_t *src) {
- size_t size = sizeof(*src) + (src->bel->n_regs-1)*sizeof(src->vals[0]);
+static INLINE workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
+ size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
memcpy(tgt, src, size);
return tgt;
}
* @param count locations given at memory @param locs.
* Set the length of @param ws to count.
*/
-#define workset_bulk_fill(ws, count, locs) memcpy(&(ws)->vals[0], locs, ((ws)->len=count)*sizeof(locs[0]));
-
+static INLINE void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
+ workset->len = count;
+ memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
+}
/**
* Inserts the value @p val into the workset, iff it is not
* already contained. The workset must not be full.
*/
-static INLINE void workset_insert(workset_t *ws, ir_node *val) {
+static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
int i;
/* check for current regclass */
- if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
- DBG((dbg, DBG_DECIDE, "Dropped %+F\n", val));
+ if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
+ DBG((dbg, DBG_WORKSET, "Dropped %+F\n", val));
return;
}
return;
/* insert val */
- assert(ws->len < ws->bel->n_regs && "Workset already full!");
+ assert(ws->len < env->n_regs && "Workset already full!");
ws->vals[ws->len++].irn = val;
}
-/**
- * Inserts all values in array @p vals of length @p cnt
- * into the workset. There must be enough space for the
- * entries.
- */
-static INLINE void workset_bulk_insert(workset_t *ws, int cnt, ir_node **vals) {
- int i, o;
-
- for(o=0; o<cnt; ++o) {
- ir_node *val = vals[o];
- DBG((dbg, DBG_TRACE, "Bulk insert %+F\n", val));
- /* check for current regclass */
- if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
- DBG((dbg, DBG_TRACE, "Wrong reg class\n"));
- goto no_insert;
- }
-
- /* check if val is already contained */
- for(i=0; i<ws->len; ++i)
- if (ws->vals[i].irn == val) {
- DBG((dbg, DBG_TRACE, "Already contained\n"));
- goto no_insert;
- }
-
- /* insert val */
- assert(ws->len < ws->bel->n_regs && "Workset does not have enough room!");
- ws->vals[ws->len++].irn = val;
- DBG((dbg, DBG_TRACE, "Inserted\n"));
-
-no_insert:
- /*epsilon statement :)*/;
- }
-}
-
/**
* Removes all entries from this workset
*/
-#define workset_clear(ws) (ws)->len = 0;
+static INLINE void workset_clear(workset_t *ws) {
+ ws->len = 0;
+}
/**
 * Removes the value @p val from the workset if present.
 * The element order is not preserved: the removed slot is
 * overwritten with the last entry.
 */
static INLINE void workset_remove(workset_t *ws, ir_node *val) {
	int idx;

	for (idx = 0; idx < ws->len; ++idx) {
		if (ws->vals[idx].irn != val)
			continue;

		/* fill the hole with the last entry and shrink */
		ws->vals[idx] = ws->vals[--ws->len];
		return;
	}
}
static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
int i;
- for(i=0; i<ws->len; ++i)
+ for(i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val)
return 1;
+ }
+
return 0;
}
typedef struct _block_info_t {
	workset_t *ws_start;  /**< workset at the start of the block */
	workset_t *ws_end;    /**< workset at the end of the block */
	int processed;        /**< non-zero once belady() has fully handled this block */
} block_info_t;
block_info_t *res = obstack_alloc(ob, sizeof(*res));
res->ws_start = NULL;
res->ws_end = NULL;
+ res->processed = 0;
return res;
}
#define get_block_info(blk) ((block_info_t *)get_irn_link(blk))
#define set_block_info(blk, info) set_irn_link(blk, info)
-static int is_mem_phi(const ir_node *irn, void *data) {
- workset_t *sws;
- ir_node *blk = get_nodes_block(irn);
-
- DBG((dbg, DBG_SPILL, "Is %+F a mem-phi?\n", irn));
- sws = get_block_info(blk)->ws_start;
- DBG((dbg, DBG_SPILL, " %d\n", !workset_contains(sws, irn)));
- return !workset_contains(sws, irn);
-}
-
/**
- * @return The distance to the next use
- * Or 0 if irn is an ignore node
+ * @return The distance to the next use or 0 if irn has dont_spill flag set
*/
-
-static INLINE unsigned get_distance(belady_env_t *bel, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
+static INLINE unsigned get_distance(belady_env_t *env, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
{
- arch_irn_flags_t fl = arch_irn_get_flags(bel->arch, def);
- unsigned dist = be_get_next_use(bel->uses, from, from_step, def, skip_from_uses);
+ int flags = arch_irn_get_flags(env->arch, def);
+ unsigned dist = be_get_next_use(env->uses, from, from_step, def, skip_from_uses);
- if(!USES_IS_INIFINITE(dist) && (fl & (arch_irn_flags_ignore | arch_irn_flags_dont_spill)) != 0)
+ assert(! (flags & arch_irn_flags_ignore));
+ // we have to keep nonspillable nodes in the workingset
+ if(flags & arch_irn_flags_dont_spill)
return 0;
return dist;
* @p is_usage indicates that the values in new_vals are used (not defined)
* In this case reloads must be performed
*/
-static void displace(belady_env_t *bel, workset_t *new_vals, int is_usage) {
+static void displace(belady_env_t *env, workset_t *new_vals, int is_usage) {
ir_node *val;
int i, len, max_allowed, demand, iter;
- workset_t *ws = bel->ws;
- ir_node **to_insert = alloca(bel->n_regs * sizeof(*to_insert));
+ workset_t *ws = env->ws;
+ ir_node **to_insert = alloca(env->n_regs * sizeof(*to_insert));
/*
* 1. Identify the number of needed slots and the values to reload
workset_foreach(new_vals, val, iter) {
/* mark value as used */
if (is_usage)
- pset_insert_ptr(bel->used, val);
+ pset_insert_ptr(env->used, val);
if (!workset_contains(ws, val)) {
DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
to_insert[demand++] = val;
if (is_usage)
- be_add_reload(bel->senv, val, bel->instr);
- } else
+ be_add_reload(env->senv, val, env->instr);
+ } else {
+		assert(is_usage && "Defined value already in workset?!?");
DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
+ }
}
DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
-
/*
* 2. Make room for at least 'demand' slots
*/
len = workset_get_length(ws);
- max_allowed = bel->n_regs - demand;
+ max_allowed = env->n_regs - demand;
+
+ DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));
/* Only make more free room if we do not have enough */
if (len > max_allowed) {
/* get current next-use distance */
for (i=0; i<ws->len; ++i)
- workset_set_time(ws, i, get_distance(bel, bel->instr, bel->instr_nr, workset_get_val(ws, i), !is_usage));
+ workset_set_time(ws, i, get_distance(env, env->instr, env->instr_nr, workset_get_val(ws, i), !is_usage));
/* sort entries by increasing nextuse-distance*/
workset_sort(ws);
/* Logic for not needed live-ins: If a value is disposed
- before its first usage, remove it from start workset */
+ * before its first usage, remove it from start workset
+ * We don't do this for phis though
+ */
for (i=max_allowed; i<ws->len; ++i) {
ir_node *irn = ws->vals[i].irn;
- if (!pset_find_ptr(bel->used, irn)) {
- ir_node *curr_bb = get_nodes_block(bel->instr);
+
+ if(is_Phi(irn))
+ continue;
+
+ if (!pset_find_ptr(env->used, irn)) {
+ ir_node *curr_bb = get_nodes_block(env->instr);
workset_t *ws_start = get_block_info(curr_bb)->ws_start;
workset_remove(ws_start, irn);
DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
- } else
+ } else {
DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
+ }
}
/* kill the last 'demand' entries in the array */
/*
* 3. Insert the new values into the workset
*/
- workset_bulk_insert(bel->ws, demand, to_insert);
+ for(i = 0; i < demand; ++i)
+ workset_insert(env, env->ws, to_insert[i]);
}
static void belady(ir_node *blk, void *env);
-/**
- * Collects all values live-in at block @p blk and all phi results in this block.
- * Then it adds the best values (at most n_regs) to the blocks start_workset.
- * The phis among the remaining values get spilled: Introduce psudo-copies of
- * their args to break interference and make it possible to spill them to the
- * same spill slot.
+/*
+ * Computes set of live-ins for each block with multiple predecessors and
+ * places copies in the predecessors when phis get spilled
*/
-static block_info_t *compute_block_start_info(ir_node *blk, void *env) {
- belady_env_t *bel = env;
- ir_node *irn, *first;
+static void place_copy_walker(ir_node *block, void *data) {
+ belady_env_t *env = data;
+ block_info_t *block_info;
irn_live_t *li;
- int i, count, ws_count;
+ ir_node *first, *irn;
loc_t loc, *starters;
- ir_graph *irg = get_irn_irg(blk);
- struct obstack ob;
- block_info_t *res = get_block_info(blk);
+ int i, len, ws_count;
- /* Have we seen this block before? */
- if (res)
- return res;
-
- /* Create the block info for this block. */
- res = new_block_info(&bel->ob);
- set_block_info(blk, res);
-
-
- /* Get all values living at the block start sorted by next use*/
- obstack_init(&ob);
-
- DBG((dbg, DBG_START, "Living at start of %+F:\n", blk));
- first = sched_first(blk);
- count = 0;
- sched_foreach(blk, irn)
- if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, -1) == bel->cls) {
- loc.irn = irn;
- loc.time = get_distance(bel, first, 0, irn, 0);
- obstack_grow(&ob, &loc, sizeof(loc));
- DBG((dbg, DBG_START, " %+F:\n", irn));
- count++;
- } else
- break;
-
- live_foreach(blk, li)
- if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, -1) == bel->cls) {
- loc.irn = (ir_node *)li->irn;
- loc.time = get_distance(bel, first, 0, li->irn, 0);
- obstack_grow(&ob, &loc, sizeof(loc));
- DBG((dbg, DBG_START, " %+F:\n", irn));
- count++;
- }
-
- starters = obstack_finish(&ob);
- qsort(starters, count, sizeof(starters[0]), loc_compare);
+ if(get_Block_n_cfgpreds(block) == 1 && get_irg_start_block(get_irn_irg(block)) != block)
+ return;
+ block_info = new_block_info(&env->ob);
+ set_block_info(block, block_info);
- /* If we have only one predecessor, we want the start_set of blk to be the end_set of pred */
- if (get_Block_n_cfgpreds(blk) == 1 && blk != get_irg_start_block(get_irn_irg(blk))) {
- ir_node *pred_blk = get_Block_cfgpred_block(blk, 0);
- block_info_t *pred_info = get_block_info(pred_blk);
+ /* Collect all values living at start of block */
+ starters = NEW_ARR_F(loc_t, 0);
- /* if pred block has not been processed yet, do it now */
- if (! pred_info) {
- belady(pred_blk, bel);
- pred_info = get_block_info(pred_blk);
- }
+ DBG((dbg, DBG_START, "Living at start of %+F:\n", block));
+ first = sched_first(block);
+ sched_foreach(block, irn) {
+ if(!is_Phi(irn))
+ break;
+ if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, irn))
+ continue;
- /* now we have an end_set of pred */
- assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
- res->ws_start = workset_clone(&bel->ob, pred_info->ws_end);
+ loc.irn = irn;
+ loc.time = get_distance(env, first, 0, irn, 0);
+ ARR_APP1(loc_t, starters, loc);
+ DBG((dbg, DBG_START, " %+F:\n", irn));
+ }
- } else
+ live_foreach(block, li) {
+ if (!live_is_in(li) || !arch_irn_consider_in_reg_alloc(env->arch, env->cls, li->irn))
+ continue;
- /* Else we want the start_set to be the values used 'the closest' */
- {
- /* Copy the best ones from starters to start workset */
- ws_count = MIN(count, bel->n_regs);
- res->ws_start = new_workset(&bel->ob, bel);
- workset_bulk_fill(res->ws_start, ws_count, starters);
+ loc.irn = (ir_node *)li->irn;
+ loc.time = get_distance(env, first, 0, li->irn, 0);
+ ARR_APP1(loc_t, starters, loc);
+ DBG((dbg, DBG_START, " %+F:\n", li->irn));
}
+ // Sort start values by first use
+ qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);
- /* The phis of this block which are not in the start set have to be spilled later.
- * Therefore we add temporary copies in the pred_blocks so the spills can spill
- * into the same spill slot.
- * After spilling these copies get deleted. */
- for (i=workset_get_length(res->ws_start); i<count; ++i) {
- int o, max;
+ /* Copy the best ones from starters to start workset */
+ ws_count = MIN(ARR_LEN(starters), env->n_regs);
+ block_info->ws_start = new_workset(env, &env->ob);
+ workset_bulk_fill(block_info->ws_start, ws_count, starters);
+ /* The phis of this block which are not in the start set have to be spilled later. */
+ for (i = ws_count, len = ARR_LEN(starters); i < len; ++i) {
irn = starters[i].irn;
- if (!is_Phi(irn) || get_nodes_block(irn) != blk)
+ if (!is_Phi(irn) || get_nodes_block(irn) != block)
continue;
- DBG((dbg, DBG_START, "For %+F:\n", irn));
+ be_spill_phi(env->senv, irn);
+ }
- for (max=get_irn_arity(irn), o=0; o<max; ++o) {
- ir_node *arg = get_irn_n(irn, o);
- ir_node *pred_block = get_Block_cfgpred_block(get_nodes_block(irn), o);
- ir_node *cpy = be_new_Copy(bel->cls, irg, pred_block, arg);
- pset_insert_ptr(bel->copies, cpy);
- DBG((dbg, DBG_START, " place a %+F of %+F in %+F\n", cpy, arg, pred_block));
- sched_add_before(pred_block, cpy);
- set_irn_n(irn, o, cpy);
- }
+ DEL_ARR_F(starters);
+}
+
+/**
+ * Collects all values live-in at block @p blk and all phi results in this block.
+ * Then it adds the best values (at most n_regs) to the blocks start_workset.
+ * The phis among the remaining values get spilled: Introduce psudo-copies of
+ * their args to break interference and make it possible to spill them to the
+ * same spill slot.
+ */
+static block_info_t *compute_block_start_info(belady_env_t *env, ir_node *block) {
+ ir_node *pred_block;
+ block_info_t *res, *pred_info;
+
+ /* Have we seen this block before? */
+ res = get_block_info(block);
+ if (res)
+ return res;
+
+ /* Create the block info for this block. */
+ res = new_block_info(&env->ob);
+ set_block_info(block, res);
+
+ /* Use endset of predecessor block as startset */
+ assert(get_Block_n_cfgpreds(block) == 1 && block != get_irg_start_block(get_irn_irg(block)));
+ pred_block = get_Block_cfgpred_block(block, 0);
+ pred_info = get_block_info(pred_block);
+
+ /* if pred block has not been processed yet, do it now */
+ if (pred_info == NULL || pred_info->processed == 0) {
+ belady(pred_block, env);
+ pred_info = get_block_info(pred_block);
}
- obstack_free(&ob, NULL);
+ /* now we have an end_set of pred */
+ assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
+ res->ws_start = workset_clone(env, &env->ob, pred_info->ws_end);
+
return res;
}
* whether it is used from a register or is reloaded
* before the use.
*/
-static void belady(ir_node *blk, void *env) {
- belady_env_t *bel = env;
+static void belady(ir_node *block, void *data) {
+ belady_env_t *env = data;
workset_t *new_vals;
ir_node *irn;
int iter;
- block_info_t *blk_info;
+ block_info_t *block_info;
+
+ /* make sure we have blockinfo (with startset) */
+ block_info = get_block_info(block);
+ if (block_info == NULL)
+ block_info = compute_block_start_info(env, block);
/* Don't do a block twice */
- if (get_block_info(blk))
+ if(block_info->processed)
return;
/* get the starting workset for this block */
- blk_info = compute_block_start_info(blk, bel);
-
DBG((dbg, DBG_DECIDE, "\n"));
- DBG((dbg, DBG_DECIDE, "Decide for %+F\n", blk));
+ DBG((dbg, DBG_DECIDE, "Decide for %+F\n", block));
- workset_copy(bel->ws, blk_info->ws_start);
- DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", blk));
- workset_foreach(bel->ws, irn, iter)
+ workset_copy(env, env->ws, block_info->ws_start);
+ DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
+ workset_foreach(env->ws, irn, iter)
DBG((dbg, DBG_WSETS, " %+F\n", irn));
/* process the block from start to end */
DBG((dbg, DBG_WSETS, "Processing...\n"));
- bel->used = pset_new_ptr(32);
- bel->instr_nr = 0;
- new_vals = new_workset(&bel->ob, bel);
- sched_foreach(blk, irn) {
- assert(workset_get_length(bel->ws) <= bel->n_regs && "Too much values in workset!");
-
+ env->used = pset_new_ptr_default();
+ env->instr_nr = 0;
+ new_vals = new_workset(env, &env->ob);
+ sched_foreach(block, irn) {
+ int i, arity;
+ assert(workset_get_length(env->ws) <= env->n_regs && "Too much values in workset!");
/* projs are handled with the tuple value.
* Phis are no real instr (see insert_starters())
DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
/* set instruction in the workset */
- bel->instr = irn;
+ env->instr = irn;
/* allocate all values _used_ by this instruction */
workset_clear(new_vals);
- workset_bulk_insert(new_vals, get_irn_arity(irn)+1, get_irn_in(irn));
- displace(bel, new_vals, 1);
+ for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
+ workset_insert(env, new_vals, get_irn_n(irn, i));
+ }
+ displace(env, new_vals, 1);
/* allocate all values _defined_ by this instruction */
workset_clear(new_vals);
if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
ir_node *proj;
for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
- workset_insert(new_vals, proj);
+ workset_insert(env, new_vals, proj);
} else {
- workset_insert(new_vals, irn);
+ workset_insert(env, new_vals, irn);
}
- displace(bel, new_vals, 0);
+ displace(env, new_vals, 0);
- bel->instr_nr++;
+ env->instr_nr++;
}
- del_pset(bel->used);
+ del_pset(env->used);
/* Remember end-workset for this block */
- blk_info->ws_end = workset_clone(&bel->ob, bel->ws);
- DBG((dbg, DBG_WSETS, "End workset for %+F:\n", blk));
- workset_foreach(blk_info->ws_end, irn, iter)
+ block_info->ws_end = workset_clone(env, &env->ob, env->ws);
+ block_info->processed = 1;
+ DBG((dbg, DBG_WSETS, "End workset for %+F:\n", block));
+ workset_foreach(block_info->ws_end, irn, iter)
DBG((dbg, DBG_WSETS, " %+F\n", irn));
}
* about the set of live-ins. Thus we must adapt the
* live-outs to the live-ins at each block-border.
*/
-static void fix_block_borders(ir_node *blk, void *env) {
+static void fix_block_borders(ir_node *blk, void *data) {
+ belady_env_t *env = data;
workset_t *wsb;
- belady_env_t *bel = env;
int i, max, iter, iter2;
DBG((dbg, DBG_FIX, "\n"));
workset_foreach(wsb, irnb, iter) {
/* if irnb is a phi of the current block we reload
* the corresponding argument, else irnb itself */
- if(is_Phi(irnb) && blk == get_nodes_block(irnb))
+ if(is_Phi(irnb) && blk == get_nodes_block(irnb)) {
irnb = get_irn_n(irnb, i);
+ // we might have unknowns as argument for the phi
+ if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, irnb))
+ continue;
+ }
+
/* Unknowns are available everywhere */
if(get_irn_opcode(irnb) == iro_Unknown)
continue;
/* check if irnb is in a register at end of pred */
- workset_foreach(wsp, irnp, iter2)
+ workset_foreach(wsp, irnp, iter2) {
if (irnb == irnp)
goto next_value;
+ }
- /* irnb is in memory at the end of pred, so we have to reload it */
+	/* irnb is not in a register at the end of pred (i.e. it is in memory), so we have to reload it */
DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
- be_add_reload_on_edge(bel->senv, irnb, blk, i);
+ be_add_reload_on_edge(env->senv, irnb, blk, i);
next_value:
/*epsilon statement :)*/;
}
}
-/**
- * Removes all used reloads from bel->reloads.
- * The remaining nodes in bel->reloads will be removed from the graph.
- */
-static void rescue_used_reloads(ir_node *irn, void *env) {
- pset *rlds = (pset *)env;
- if (pset_find_ptr(rlds, irn))
- pset_remove_ptr(rlds, irn);
+void be_spill_belady(const be_chordal_env_t *chordal_env) {
+ be_spill_belady_spill_env(chordal_env, NULL);
}
-/**
- * Removes all copies introduced for phi-spills
- */
-static void remove_copies(belady_env_t *bel) {
- ir_node *irn;
-
- for (irn = pset_first(bel->copies); irn; irn = pset_next(bel->copies)) {
- ir_node *src, *user;
+void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t *spill_env) {
+ belady_env_t env;
- assert(be_is_Copy(irn));
- assert(get_irn_n_edges(irn) == 1 && "This is not a copy introduced in 'compute_block_start_info()'. Who created it?");
+ FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
+ //firm_dbg_set_mask(dbg, DBG_WSETS);
- user = get_irn_edge(get_irn_irg(irn), irn, 0)->src;
-
- src = get_irn_n(irn, be_pos_Copy_orig);
- set_irn_n(user, 0, src);
- }
-}
-
-/**
- * Finds all unused reloads and remove them from the schedule
- * Also removes spills if they are not used anymore after removing reloads
- */
-static void remove_unused_reloads(ir_graph *irg, belady_env_t *bel) {
- ir_node *irn;
-
- irg_walk_graph(irg, rescue_used_reloads, NULL, bel->reloads);
- for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads)) {
- ir_node *spill;
- DBG((dbg, DBG_SPILL, "Removing %+F before %+F in %+F\n", irn, sched_next(irn), get_nodes_block(irn)));
-
- if (be_is_Reload(irn))
- spill = get_irn_n(irn, be_pos_Reload_mem);
-
- /* remove reload */
- set_irn_n(irn, 0, new_Bad());
- sched_remove(irn);
-
- if (be_is_Reload(irn)) {
- /* if spill not used anymore, remove it too
- * test of regclass is necessary since spill may be a phi-M */
- if (get_irn_n_edges(spill) == 0 && bel->cls == arch_get_irn_reg_class(bel->arch, spill, -1)) {
- set_irn_n(spill, 0, new_Bad());
- sched_remove(spill);
- }
- }
+ /* init belady env */
+ obstack_init(&env.ob);
+ env.arch = chordal_env->birg->main_env->arch_env;
+ env.cls = chordal_env->cls;
+ env.n_regs = arch_count_non_ignore_regs(env.arch, env.cls);
+ env.ws = new_workset(&env, &env.ob);
+ env.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, env.cls);
+ if(spill_env == NULL) {
+ env.senv = be_new_spill_env(chordal_env);
+ } else {
+ env.senv = spill_env;
}
-}
+ DEBUG_ONLY(be_set_spill_env_dbg_module(env.senv, dbg);)
-void be_spill_belady(const be_chordal_env_t *chordal_env) {
- belady_env_t bel;
-
- FIRM_DBG_REGISTER(dbg, "ir.be.spillbelady");
+ DBG((dbg, LEVEL_1, "running on register class: %s\n", env.cls->name));
- /* init belady env */
- obstack_init(&bel.ob);
- bel.arch = chordal_env->birg->main_env->arch_env;
- bel.cls = chordal_env->cls;
- bel.n_regs = arch_register_class_n_regs(bel.cls);
- bel.ws = new_workset(&bel.ob, &bel);
- bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
- bel.senv = be_new_spill_env(dbg, chordal_env, is_mem_phi, NULL);
- bel.reloads = pset_new_ptr_default();
- bel.copies = pset_new_ptr_default();
-
- DBG((dbg, LEVEL_1, "running on register class: %s\n", bel.cls->name));
-
- /* do the work */
be_clear_links(chordal_env->irg);
- irg_block_walk_graph(chordal_env->irg, NULL, belady, &bel);
- irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &bel);
- be_insert_spills_reloads(bel.senv, bel.reloads);
- remove_unused_reloads(chordal_env->irg, &bel);
- remove_copies(&bel);
+ /* Decide which phi nodes will be spilled and place copies for them into the graph */
+ irg_block_walk_graph(chordal_env->irg, place_copy_walker, NULL, &env);
+ be_place_copies(env.senv);
+ /* Fix high register pressure with belady algorithm */
+ irg_block_walk_graph(chordal_env->irg, NULL, belady, &env);
+ /* belady was block-local, fix the global flow by adding reloads on the edges */
+ irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &env);
+ /* Insert spill/reload nodes into the graph and fix usages */
+ be_insert_spills_reloads(env.senv);
+
+ be_remove_dead_nodes_from_schedule(chordal_env->irg);
/* clean up */
- del_pset(bel.reloads);
- be_delete_spill_env(bel.senv);
- be_end_uses(bel.uses);
- obstack_free(&bel.ob, NULL);
+ if(spill_env == NULL)
+ be_delete_spill_env(env.senv);
+ be_end_uses(env.uses);
+ obstack_free(&env.ob, NULL);
}