#include "entity_t.h"
#include "debug.h"
#include "irgwalk.h"
+#include "array.h"
-#include "besched.h"
+#include "belive_t.h"
+#include "besched_t.h"
#include "bespill.h"
#include "benode_t.h"
#include "bechordal_t.h"
+#undef REMAT
+/* This enables re-computation of values. Current state: Unfinished and buggy. */
+#undef BUGGY_REMAT
+
typedef struct _reloader_t reloader_t;
typedef struct _spill_info_t spill_info_t;
} spill_ctx_t;
struct _spill_env_t {
- firm_dbg_module_t *dbg;
const arch_register_class_t *cls;
const be_chordal_env_t *chordal_env;
struct obstack obst;
set *spill_ctxs;
set *spills; /**< all spill_info_t's, which must be placed */
- pset *mem_phis; /**< set of all special spilled phis. allocated and freed seperately */
+ pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */
decide_irn_t is_mem_phi; /**< callback func to decide if a phi needs special spilling */
void *data; /**< data passed to all callbacks */
+ DEBUG_ONLY(firm_dbg_module_t *dbg;)
};
static int cmp_spillctx(const void *a, const void *b, size_t n) {
return ! (xx->spilled_node == yy->spilled_node);
}
-spill_env_t *be_new_spill_env(firm_dbg_module_t *dbg,
- const be_chordal_env_t *chordal_env,
- decide_irn_t is_mem_phi, void *data) {
+DEBUG_ONLY(
+void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
+ env->dbg = dbg;
+}
+);
- spill_env_t *env = malloc(sizeof(env[0]));
+spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env, decide_irn_t is_mem_phi, void *data) {
+ spill_env_t *env = xmalloc(sizeof(env[0]));
env->spill_ctxs = new_set(cmp_spillctx, 1024);
env->spills = new_set(cmp_spillinfo, 1024);
env->cls = chordal_env->cls;
- env->dbg = dbg;
env->is_mem_phi = is_mem_phi;
env->data = data;
env->chordal_env = chordal_env;
/* if not found spill the phi */
if(!ctx->spill) {
/* build a new PhiM with dummy in-array */
- ins = malloc(n * sizeof(ins[0]));
+ NEW_ARR_A(ir_node *, ins, n);
for(i=0; i<n; ++i)
ins[i] = new_r_Unknown(irg, mode_M);
ctx->spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);
- free(ins);
/* re-wire the phiM */
for(i=0; i<n; ++i) {
}
}
+#ifdef REMAT
+
+#ifdef BUGGY_REMAT
+
+/**
+ * Decide whether @p spilled can be rematerialized (recomputed) in front of
+ * @p reloader instead of being reloaded from its spill slot.
+ *
+ * Conditions checked:
+ *  - @p spill must be a 'normal' be_Spill node and the backend must flag
+ *    @p spilled as rematerializable.
+ *  - every argument of @p spilled must still be live at the remat point
+ *    (end of block if @p reloader is a Block, otherwise before @p reloader).
+ *
+ * NOTE(review): this is the BUGGY_REMAT variant — known unfinished/buggy
+ * per the comment at the top of the file.
+ *
+ * @param senv     the spill environment
+ * @param spill    the spill node belonging to @p spilled
+ * @param spilled  the spilled value that would be recomputed
+ * @param reloader the reload position (an insn, or a Block for end-of-block)
+ * @return non-zero if rematerialization is allowed, 0 otherwise
+ */
+static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
+	int pos, max;
+
+	/* check for 'normal' spill and general remat condition */
+	if (!be_is_Spill(spill) || !arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable))
+		return 0;
+
+	/* check availability of original arguments */
+	if (is_Block(reloader)) {
+
+		/* we want to remat at the end of a block.
+		 * thus all arguments must be alive at the end of the block
+		 */
+		for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
+			ir_node *arg = get_irn_n(spilled, pos);
+			if (!is_live_end(reloader, arg))
+				return 0;
+		}
+
+	} else {
+
+		/* we want to remat before the insn reloader.
+		 * thus an argument is alive if
+		 * - it interferes with the reloader's result, or
+		 * - it is (last-) used by reloader itself
+		 */
+		for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
+			ir_node *arg = get_irn_n(spilled, pos);
+			int i, m;
+
+			if (values_interfere(reloader, arg))
+				goto is_alive;
+
+			/* scan reloader's operands for a direct use of arg */
+			for (i=0, m=get_irn_arity(reloader); i<m; ++i) {
+				ir_node *rel_arg = get_irn_n(reloader, i);
+				if (rel_arg == arg)
+					goto is_alive;
+			}
+
+			/* arg is not alive before reloader */
+			return 0;
+
+is_alive: ;
+
+		}
+
+	}
+
+	return 1;
+}
+
+#else /* BUGGY_REMAT */
+
+/**
+ * Conservative remat check (non-BUGGY_REMAT variant): only allow
+ * rematerialization of operand-less nodes (e.g. constants), where no
+ * liveness analysis of arguments is needed.
+ *
+ * @param senv     the spill environment
+ * @param spill    the spill node belonging to @p spilled
+ * @param spilled  the spilled value that would be recomputed
+ * @param reloader the reload position (unused here)
+ * @return non-zero if rematerialization is allowed, 0 otherwise
+ */
+static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
+	const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
+
+	return get_irn_arity(spilled) == 0 &&
+		be_is_Spill(spill) &&
+		arch_irn_is(aenv, spilled, rematerializable);
+}
+
+#endif /* BUGGY_REMAT */
+
+/**
+ * Rematerialize @p spilled: create a copy of the node and schedule it
+ * in front of @p reloader (or at the end of @p reloader if it is a Block).
+ *
+ * @param senv     the spill environment
+ * @param spilled  the value to recompute
+ * @param reloader the position the recomputed value is needed at
+ * @return the newly created copy of @p spilled
+ */
+static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
+	ir_node *res;
+	ir_node *bl = (is_Block(reloader)) ? reloader : get_nodes_block(reloader);
+
+	/* recompute the value: clone op, mode, ins and attributes of the original */
+	res = new_ir_node(get_irn_dbg_info(spilled), senv->chordal_env->irg, bl,
+		get_irn_op(spilled),
+		get_irn_mode(spilled),
+		get_irn_arity(spilled),
+		get_irn_in(spilled));
+	copy_node_attr(spilled, res);
+
+	DBG((senv->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader));
+
+	/* insert in schedule */
+	if (is_Block(reloader)) {
+		/* end-of-block position: skip backwards over trailing control-flow
+		 * nodes so the remat lands before the block's branch */
+		ir_node *insert = sched_skip(reloader, 0, sched_skip_cf_predicator, (void *) senv->chordal_env->birg->main_env->arch_env);
+		sched_add_after(insert, res);
+	} else {
+		sched_add_before(reloader, res);
+	}
+
+	return res;
+}
+
+#endif
+
void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
- ir_graph *irg = senv->chordal_env->irg;
+ const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
+ ir_graph *irg = senv->chordal_env->irg;
ir_node *irn;
spill_info_t *si;
- struct obstack ob;
-
- obstack_init(&ob);
/* get all special spilled phis */
DBG((senv->dbg, LEVEL_1, "Mem-phis:\n"));
DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
reloader_t *rld;
- ir_node **reloads;
- int n_reloads = 0;
ir_mode *mode = get_irn_mode(si->spilled_node);
+ pset *values = pset_new_ptr(16);
/* go through all reloads for this spill */
for(rld = si->reloaders; rld; rld = rld->next) {
+ ir_node *new_val;
+
/* the spill for this reloader */
ir_node *spill = be_spill_node(senv, si->spilled_node);
- /* the reload */
- ir_node *bl = is_Block(rld->reloader) ? rld->reloader : get_nodes_block(rld->reloader);
- ir_node *reload = be_new_Reload(senv->cls, irg, bl, mode, spill);
+#ifdef REMAT
+ if (check_remat_conditions(senv, spill, si->spilled_node, rld->reloader))
+ new_val = do_remat(senv, si->spilled_node, rld->reloader);
+ else
+#endif
+ /* do a reload */
+ new_val = be_reload(aenv, senv->cls, rld->reloader, mode, spill);
- DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", reload, si->spilled_node, rld->reloader));
+ DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
+ pset_insert_ptr(values, new_val);
if(reload_set)
- pset_insert_ptr(reload_set, reload);
-
- /* remember the reaload */
- obstack_ptr_grow(&ob, reload);
- sched_add_before(rld->reloader, reload);
- n_reloads++;
+ pset_insert_ptr(reload_set, new_val);
}
- assert(n_reloads > 0);
- obstack_ptr_grow(&ob, si->spilled_node);
- reloads = obstack_finish(&ob);
- be_ssa_constr_ignore(senv->chordal_env->dom_front, n_reloads + 1, reloads, senv->mem_phis);
- obstack_free(&ob, reloads);
- }
+ /* introduce copies, rewire the uses */
+ assert(pset_count(values) > 0 && "???");
+ pset_insert_ptr(values, si->spilled_node);
+ be_ssa_constr_set_ignore(senv->chordal_env->dom_front, values, senv->mem_phis);
- obstack_free(&ob, NULL);
+ del_pset(values);
+ }
for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
int i, n;
} spill_slot_t;
typedef struct _ss_env_t {
- firm_dbg_module_t *dbg;
struct obstack ob;
be_chordal_env_t *cenv;
pmap *slots; /* maps spill_contexts to spill_slots */
- pmap *types; /* maps modes to types */
+ pmap *types; /* maps modes to types */
+ DEBUG_ONLY(firm_dbg_module_t *dbg;)
} ss_env_t;
-
+/**
+ * Walker: compute the spill slots
+ */
static void compute_spill_slots_walker(ir_node *spill, void *env) {
ss_env_t *ssenv = env;
ir_node *ctx;
entry = pmap_find(ssenv->slots, ctx);
if (!entry) {
+ struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
+ ir_node *spilled = get_irn_n(spill, be_pos_Spill_val);
+ const arch_register_t *reg = arch_get_irn_register(arch_env, spilled);
+ const arch_register_class_t *cls = arch_register_get_class(reg);
+ ir_mode *largest_mode = arch_register_class_mode(cls);
+
/* this is a new spill context */
ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
- ss->members = pset_new_ptr(8);
- ss->largest_mode = get_irn_mode(get_irn_n(spill, 0));
- ss->size = get_mode_size_bytes(ss->largest_mode);
- ss->align = ss->size; /* TODO Assumed for now */
+ ss->members = pset_new_ptr(8);
+ ss->largest_mode = largest_mode;
+ ss->size = get_mode_size_bytes(ss->largest_mode);
+ ss->align = arch_isa_get_reg_class_alignment(arch_env->isa, cls);
pmap_insert(ssenv->slots, ctx, ss);
} else {
ir_node *irn;
/* values with the same spill_ctx must go into the same spill slot */
ss = entry->value;
- assert(ss->size == (unsigned)get_mode_size_bytes(get_irn_mode(get_irn_n(spill, 0))) && "Different sizes for the same spill slot are not allowed yet.");
+ assert(ss->size == (unsigned)get_mode_size_bytes(get_irn_mode(get_irn_n(spill, be_pos_Spill_val))) && "Different sizes for the same spill slot are not allowed yet.");
for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
/* use values_interfere here, because it uses the dominance check,
which does work for values in memory */
pset_insert_ptr(ss->members, spill);
}
+/**
+ * qsort compare function, sort spill slots by size.
+ */
static int ss_sorter(const void *v1, const void *v2) {
const spill_slot_t *ss1 = v1;
const spill_slot_t *ss2 = v2;
/**
* Returns a spill type for a mode. Keep them in a map to reduce
* the number of types.
+ *
+ * @param types a map containing all created types
+ * @param ss the spill slot
+ *
+ * Note that the created spill types are identical for every mode.
+ * This rule might break if two different register classes return the same
+ * mode but different alignments.
*/
-static ir_type *get_spill_type(pmap *types, ir_mode *mode) {
- pmap_entry *e = pmap_find(types, mode);
+static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) {
+ pmap_entry *e = pmap_find(types, ss->largest_mode);
ir_type *res;
if (! e) {
char buf[64];
- snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(mode));
- res = new_type_primitive(new_id_from_str(buf), mode);
- pmap_insert(types, mode, res);
+ snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode));
+ res = new_type_primitive(new_id_from_str(buf), ss->largest_mode);
+ set_type_alignment_bytes(res, ss->align);
+ pmap_insert(types, ss->largest_mode, res);
}
- else
+ else {
res = e->value;
+ assert(get_type_alignment_bytes(res) == (int)ss->align);
+ }
return res;
}
-static void assign_entities(ss_env_t *ssenv, int n, spill_slot_t **ss) {
- int i, offset;
+/**
+ * Create spill slot entities on the frame type.
+ *
+ * @param ssenv the spill environment
+ * @param n number of spill slots
+ * @param ss array of spill slots
+ */
+static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) {
+ int i, offset, frame_align;
ir_type *frame = get_irg_frame_type(ssenv->cenv->irg);
/* aligning by increasing frame size */
set_type_size_bytes(frame, -1);
/* create entities and assign offsets according to size and alignment*/
- for (i=0; i<n; ++i) {
+ for (i = 0; i < n_slots; ++i) {
char buf[64];
ident *name;
entity *spill_ent;
snprintf(buf, sizeof(buf), "spill_slot_%d", i);
name = new_id_from_str(buf);
- spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]->largest_mode));
+ spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]));
/* align */
offset = round_up2(offset, ss[i]->align);
/* set */
set_entity_offset_bytes(spill_ent, offset);
/* next possible offset */
- offset += ss[i]->size;
+ offset += round_up2(ss[i]->size, ss[i]->align);
pset_foreach(ss[i]->members, irn)
be_set_Spill_entity(irn, spill_ent);
}
/* set final size of stack frame */
- set_type_size_bytes(frame, offset);
+ frame_align = get_type_alignment_bytes(frame);
+ set_type_size_bytes(frame, round_up2(offset, frame_align));
}
void be_compute_spill_offsets(be_chordal_env_t *cenv) {
ssenv.cenv = cenv;
ssenv.slots = pmap_create();
ssenv.types = pmap_create();
- ssenv.dbg = firm_dbg_register("ir.be.spillslots");
+ FIRM_DBG_REGISTER(ssenv.dbg, "ir.be.spillslots");
/* Get initial spill slots */
irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
/* Clean up */
pmap_foreach(ssenv.slots, pme)
- del_pset(((spill_slot_t *)pme->value)->members);
+ del_pset(((spill_slot_t *)pme->value)->members);
pmap_destroy(ssenv.slots);
- pmap_destroy(ssenv.types);
+ pmap_destroy(ssenv.types);
obstack_free(&ssenv.ob, NULL);
+
+ be_copy_entities_to_reloads(cenv->irg);
}