{
int i;
- for(i = 0; i < w->len; ++i) {
+ for (i = 0; i < w->len; ++i) {
ir_fprintf(stderr, "%+F %d\n", w->vals[i].irn, w->vals[i].time);
}
}
/**
* Alloc a new workset on obstack @p ob with maximum size @p max
*/
-static inline workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
+static inline workset_t *new_workset(belady_env_t *env, struct obstack *ob)
+{
return OALLOCFZ(ob, workset_t, vals, env->n_regs);
}
/**
* Alloc a new instance on obstack and make it equal to @param ws
*/
-static inline workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
+static inline workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws)
+{
workset_t *res = OALLOCF(ob, workset_t, vals, env->n_regs);
memcpy(res, ws, sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]));
return res;
* Do NOT alloc anything. Make @param tgt equal to @param src.
* returns @param tgt for convenience
*/
-static inline workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
+static inline workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src)
+{
size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
memcpy(tgt, src, size);
return tgt;
* @param count locations given at memory @param locs.
* Set the length of @param ws to count.
*/
-static inline void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
+static inline void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs)
+{
workset->len = count;
memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
}
* Inserts the value @p val into the workset, iff it is not
* already contained. The workset must not be full.
*/
-static inline void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
+static inline void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val)
+{
int i;
/* check for current regclass */
if (!arch_irn_consider_in_reg_alloc(env->cls, val)) {
}
/* check if val is already contained */
- for(i=0; i<ws->len; ++i)
+ for (i=0; i<ws->len; ++i)
if (ws->vals[i].irn == val)
return;
/**
* Removes all entries from this workset
*/
-static inline void workset_clear(workset_t *ws) {
+static inline void workset_clear(workset_t *ws)
+{
ws->len = 0;
}
/**
* Removes the value @p val from the workset if present.
*/
-static inline void workset_remove(workset_t *ws, ir_node *val) {
+static inline void workset_remove(workset_t *ws, ir_node *val)
+{
int i;
- for(i=0; i<ws->len; ++i) {
+ for (i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val) {
ws->vals[i] = ws->vals[--ws->len];
return;
}
}
-static inline int workset_get_index(const workset_t *ws, const ir_node *val) {
+static inline int workset_get_index(const workset_t *ws, const ir_node *val)
+{
int i;
- for(i=0; i<ws->len; ++i) {
+ for (i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val)
return i;
}
/**
 * Iterates over all values in the workset.
 * @p ws The workset to iterate
 * @p v  A variable to put the current value in
 * @p i  An integer for internal use
 *
 * @p v is NULL once the iteration is past the end (i == ws->len).
 */
#define workset_foreach(ws, v, i) for (i = 0; \
		v = (i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \
		++i)
or NULL. */
} next_use_t;
-static void *next_use_init(ir_phase *phase, const ir_node *irn, void *old)
-{
- (void) phase;
- (void) irn;
- (void) old;
- return NULL;
-}
-
static void build_next_uses(block_info_t *bi)
{
ir_node *irn;
sched_renumber(bi->bl);
- phase_init(&bi->next_uses, "next uses", bi->bel->irg, PHASE_DEFAULT_GROWTH, next_use_init, NULL);
+ phase_init(&bi->next_uses, bi->bel->irg, phase_irn_init_default);
sched_foreach_reverse(bi->bl, irn) {
int i;
assert(!arch_irn_is_ignore(irn));
/* We have to keep non-spillable nodes in the working set */
- if(flags & arch_irn_flags_dont_spill)
+ if (flags & arch_irn_flags_dont_spill)
return 0;
if (!is_usage && use && use->step == curr_step)
* @p is_usage indicates that the values in new_vals are used (not defined)
* In this case reloads must be performed
*/
-static void displace(block_info_t *bi, workset_t *new_vals, int is_usage) {
+static void displace(block_info_t *bi, workset_t *new_vals, int is_usage)
+{
belady_env_t *env = bi->bel;
workset_t *ws = env->ws;
ir_node **to_insert = ALLOCAN(ir_node*, env->n_regs);
* whether it is used from a register or is reloaded
* before the use.
*/
-static void belady(belady_env_t *env, int id) {
+static void belady(belady_env_t *env, int id)
+{
block_info_t *block_info = new_block_info(env, id);
const ir_node *block = block_info->bl;
/* allocate all values _used_ by this instruction */
workset_clear(new_vals);
- for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
+ for (i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
workset_insert(env, new_vals, get_irn_n(irn, i));
}
DBG((dbg, DBG_DECIDE, "\t* uses\n"));
* augmenting the pressure. Note, that a variable is dead
* if it has no further use in this block and is *not* live end
*/
- for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
+ for (i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
ir_node *op = get_irn_n(irn, i);
next_use_t *use = get_current_use(block_info, op);
env->instr_nr++;
}
- phase_free(&block_info->next_uses);
+ phase_deinit(&block_info->next_uses);
/* Remember end-workset for this block */
block_info->ws_end = workset_clone(env, &env->ob, env->ws);
* In the transformed graph, the register pressure never exceeds the number
* of available registers.
*
- * @param birg The backend graph
+ * @param irg The graph
* @param cls The register class to spill
*/
-void be_spill_belady(be_irg_t *birg, const arch_register_class_t *cls)
+static void be_spill_belady(ir_graph *irg, const arch_register_class_t *cls)
{
- ir_graph *irg = be_get_birg_irg(birg);
belady_env_t env;
int i, n_regs;
/* some special classes contain only ignore regs, nothing to do then */
- n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
- if(n_regs == 0)
+ n_regs = cls->n_regs - be_put_ignore_regs(irg, cls, NULL);
+ if (n_regs == 0)
return;
be_clear_links(irg);
/* init belady env */
obstack_init(&env.ob);
env.irg = irg;
- env.arch = be_get_birg_arch_env(birg);
+ env.arch = be_get_irg_arch_env(irg);
env.cls = cls;
- env.lv = be_get_birg_liveness(birg);
+ env.lv = be_get_irg_liveness(irg);
env.dfs = env.lv->dfs;
env.n_regs = n_regs;
env.ws = new_workset(&env, &env.ob);
- env.senv = be_new_spill_env(birg);
- env.ef = be_get_birg_exec_freq(birg);
+ env.senv = be_new_spill_env(irg);
+ env.ef = be_get_irg_exec_freq(irg);
env.spilled = bitset_irg_obstack_alloc(&env.ob, irg);
env.extra_spilled = ir_nodeset_new(64);
env.n_blocks = 0;
obstack_free(&env.ob, NULL);
}
/* Module constructor registration moved before the function per the
 * surrounding patch's convention. */
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady2);

void be_init_spillbelady2(void)
{
	/* NOTE(review): be_grp appears unused in the visible lines — the
	 * option-table registration that consumes it was likely elided from
	 * this fragment; verify against the full file. */
	lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");

	be_register_spiller("belady2", &belady_spiller);
	FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady2");
}