#include "benode_t.h"
#include "bechordal_t.h"
-#define REMAT
/* This enables re-computation of values. Current state: Unfinished and buggy. */
#undef BUGGY_REMAT
return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
}
+/**
+ * Schedules a node after an instruction. (That is the place after all projs and phis
+ * that are scheduled after the instruction)
+ *
+ * @param sched_after  the instruction to schedule @p node after
+ * @param node         the node to insert into the schedule
+ */
+static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
+	/* advance past all Proj/Phi nodes that directly follow the instruction */
+	ir_node *next = sched_next(sched_after);
+	while(!sched_is_end(next)) {
+		if(!is_Proj(next) && !is_Phi(next))
+			break;
+		next = sched_next(next);
+	}
+
+	if(sched_is_end(next)) {
+		/* only Projs/Phis until the end: append after the block's last node */
+		next = sched_last(get_nodes_block(sched_after));
+		sched_add_after(next, node);
+	} else {
+		sched_add_before(next, node);
+	}
+}
+
/**
* Creates a spill.
*
}
ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
+ sched_add_after_insn(irn, ctx->spill);
return ctx->spill;
}
ARR_SETLEN(ir_node*, env->copies, 0);
}
+/**
+ * Walks the schedule forward from @p node, skipping over any Proj nodes.
+ * Returns the first non-Proj node; asserts that the schedule does not
+ * end while still on a Proj.
+ */
+static INLINE ir_node *skip_projs(ir_node *node) {
+	while(is_Proj(node)) {
+		node = sched_next(node);
+		assert(!sched_is_end(node));
+	}
+
+	return node;
+}
+
/**
- * Inserts a copy (needed for spilled phi handling) of a value at the earliest
- * possible location in a block. That is after the last use/def of the value or at
- * the beginning of the block if there is no use/def.
+ * Searches the schedule backwards until we reach the first use or def of a
+ * value or a phi.
+ * Returns the node after this node (so that you can do sched_add_before)
*/
-static ir_node *insert_copy(spill_env_t *env, ir_node *block, ir_node *value) {
- ir_node* node;
- ir_graph *irg = get_irn_irg(block);
- ir_node *copy = be_new_Copy(env->cls, irg, block, value);
-
- ARR_APP1(ir_node*, env->copies, copy);
+static ir_node *find_last_use_def(spill_env_t *env, ir_node *block, ir_node *value) {
+ ir_node *node, *last;
- // walk schedule backwards until we find a use/def, or until we have reached the first phi
- // TODO we could also do this by iterating over all uses and checking the
- // sched_get_time_step value. Need benchmarks to decide this...
+ last = NULL;
sched_foreach_reverse(block, node) {
int i, arity;
if(is_Phi(node)) {
- sched_add_after(node, copy);
- goto placed;
+ return last;
}
if(value == node) {
- sched_add_after(node, copy);
- goto placed;
+ return skip_projs(last);
}
for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
ir_node *arg = get_irn_n(node, i);
if(arg == value) {
- sched_add_after(node, copy);
- goto placed;
+ return skip_projs(last);
}
}
+ last = node;
}
- // we didn't find a use or a phi yet, so place the copy at the beginning of the block
- sched_add_before(sched_first(block), copy);
-placed:
-
- return copy;
+ // simply return first node if no def or use found
+ return sched_first(block);
}
/**
phi_spill_assoc_t *entry;
if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) {
- if (! bitset_is_set(bs, get_irn_idx(arg)))
+ // looping edge?
+ if(arg == phi) {
+ sub_res = phi_spill;
+ } else if (! bitset_is_set(bs, get_irn_idx(arg))) {
sub_res = spill_phi(senv, arg, ctx_irn, already_visited_phis, bs);
- else {
+ } else {
/* we already visited the argument phi: get it's spill */
key.phi = arg;
key.spill = NULL;
sub_res = entry->spill;
assert(sub_res && "spill missing?!?");
}
- }
- else
+ } else {
sub_res = be_spill_irn(senv, arg, ctx_irn);
+ }
set_irn_n(phi_spill, i, sub_res);
}
return res;
}
-#ifdef REMAT
-
#ifdef BUGGY_REMAT
/**
* @param spilled the node that was spilled
* @param reloader a irn that requires a reload
*/
-static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
+static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
int pos, max;
/* check for 'normal' spill and general remat condition */
- if (!be_is_Spill(spill) || !arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable))
+ if (!arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable))
return 0;
/* check availability of original arguments */
* @param spilled the node that was spilled
* @param reloader a irn that requires a reload
*/
-static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
+static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
return get_irn_arity(spilled) == 0 &&
- be_is_Spill(spill) &&
arch_irn_is(aenv, spilled, rematerializable);
}
#endif /* BUGGY_REMAT */
-#endif /* REMAT */
-
/**
* Re-materialize a node.
*
return res;
}
+/**
+ * Inserts Copy nodes in the predecessor blocks of a spilled phi so that every
+ * phi argument becomes a separate value (and thus gets its own spill slot).
+ * The phi's inputs are rewired to the new copies.
+ *
+ * @param env   the spill environment
+ * @param node  the phi node to process (must be a Phi)
+ */
+static void place_copies_for_phi(spill_env_t *env, ir_node* node) {
+	int i, arity;
+
+	assert(is_Phi(node));
+
+	/* We have to place copy nodes in the predecessor blocks to temporarily
+	 * produce new values that get separate spill slots
+	 */
+	for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+		ir_node *pred_block, *arg, *copy, *insert_point;
+
+		/* Don't do anything for looping edges (there's no need
+		 * and placing copies here breaks stuff as it suddenly
+		 * generates new living values through the whole loop)
+		 */
+		arg = get_irn_n(node, i);
+		if(arg == node)
+			continue;
+
+		pred_block = get_Block_cfgpred_block(get_nodes_block(node), i);
+		copy = be_new_Copy(env->cls, get_irn_irg(arg), pred_block, arg);
+
+		/* remember the copy so it can be cleaned up/coalesced later */
+		ARR_APP1(ir_node*, env->copies, copy);
+		insert_point = find_last_use_def(env, pred_block, arg);
+		sched_add_before(insert_point, copy);
+
+		set_irn_n(node, i, copy);
+	}
+}
+
+/**
+ * Places copies for all spilled phi nodes that were collected in
+ * env->mem_phis (see be_spill_phi).
+ *
+ * @param env  the spill environment
+ */
+void be_place_copies(spill_env_t *env) {
+	ir_node *node;
+
+	foreach_pset(env->mem_phis, node) {
+		place_copies_for_phi(env, node);
+	}
+}
+
void be_spill_phi(spill_env_t *env, ir_node *node) {
+	spill_ctx_t *spill_ctx;
+
	assert(is_Phi(node));
+	/* record this phi as spilled-to-memory; copies/spills are placed later */
	pset_insert_ptr(env->mem_phis, node);
+
+	// remove spill context for this phi (if there was one)
+	spill_ctx = be_get_spill_ctx(env->spill_ctxs, node, node);
+	if(spill_ctx != NULL) {
+		spill_ctx->spill = NULL;
+	}
}
void be_insert_spills_reloads(spill_env_t *env) {
const arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
- ir_node *node;
+ //ir_node *node;
spill_info_t *si;
+#if 0
+	// Matze: This should be pointless as Belady's fix_block_borders
+	// should result in the same
DBG((env->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
foreach_pset(env->mem_phis, node) {
const ir_edge_t *e;
- int i, arity;
- /* We have to place copy nodes in the predecessor blocks to temporarily
- * produce new values that get separate spill slots
- */
- for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
- ir_node *pred_block = get_Block_cfgpred_block(get_nodes_block(node), i);
- ir_node *arg = get_irn_n(node, i);
- ir_node* copy = insert_copy(env, pred_block, arg);
-
- set_irn_n(node, i, copy);
- }
+ assert(is_Phi(node));
/* Add reloads for mem_phis */
/* BETTER: These reloads (1) should only be inserted, if they are really needed */
}
}
}
+#endif
/* process each spilled node */
DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n"));
for(si = set_first(env->spills); si; si = set_next(env->spills)) {
reloader_t *rld;
ir_mode *mode = get_irn_mode(si->spilled_node);
- //ir_node *value;
pset *values = pset_new_ptr(16);
/* go through all reloads for this spill */
for(rld = si->reloaders; rld; rld = rld->next) {
ir_node *new_val;
- /* the spill for this reloader */
- ir_node *spill = be_spill_node(env, si->spilled_node);
-
-#ifdef REMAT
- if (check_remat_conditions(env, spill, si->spilled_node, rld->reloader)) {
+ if (check_remat_conditions(env, si->spilled_node, rld->reloader)) {
new_val = do_remat(env, si->spilled_node, rld->reloader);
- //pdeq_putl(possibly_dead, spill);
- }
- else
-#endif
+ } else {
+ /* the spill for this reloader */
+ ir_node *spill = be_spill_node(env, si->spilled_node);
+
/* do a reload */
new_val = be_reload(arch_env, env->cls, rld->reloader, mode, spill);
+ }
DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
pset_insert_ptr(values, new_val);
spill_info_t templ, *res;
reloader_t *rel;
+ assert(sched_is_scheduled(before));
assert(arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->cls, to_spill));
templ.spilled_node = to_spill;
res->reloaders = rel;
}
-void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *bl, int pos) {
- ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
- be_add_reload(env, to_spill, insert_bl);
-}
+/**
+ * Adds a reload of @p to_spill along the control-flow edge given by
+ * (@p block, @p pos). With a single predecessor the reload is requested at
+ * the start of @p block itself; otherwise it is requested in the predecessor
+ * block, right before its final (cond-)jump.
+ *
+ * @param env       the spill environment
+ * @param to_spill  the value that needs to be reloaded
+ * @param block     the block the value is needed in
+ * @param pos       the predecessor index identifying the incoming edge
+ */
+void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
+	ir_node *predblock, *last;
+
+	/* simply add the reload to the beginning of the block if we only have 1 predecessor
+	 * (we don't need to check for phis as there can't be any in a block with only 1 pred)
+	 */
+	if(get_Block_n_cfgpreds(block) == 1) {
+		assert(!is_Phi(sched_first(block)));
+		be_add_reload(env, to_spill, sched_first(block));
+		return;
+	}
+
+	/* We have to reload the value in pred-block */
+	predblock = get_Block_cfgpred_block(block, pos);
+	last = sched_last(predblock);
+	/* we might have projs and keepanys behind the jump... */
+	while(is_Proj(last) || be_is_Keep(last)) {
+		last = sched_prev(last);
+		assert(!sched_is_end(last));
+	}
+	assert(is_cfop(last));
+	// add the reload before the (cond-)jump
+	be_add_reload(env, to_spill, last);
+}
/****************************************
- assign a new offset to this slot
- xor find another slot to coalesce with */
used_slots = 0;
- for (i=0; i<size; ++i) { /* for each spill slot */
+ for (i=0; i<size; ++i) {
+ /* for each spill slot */
ir_node *n1;
int tgt_slot = -1;
* mode but different alignments.
*/
static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) {
- pmap_entry *e = pmap_find(types, ss->largest_mode);
- ir_type *res;
+ pmap_entry *e = pmap_find(types, ss->largest_mode);
+ ir_type *res;
- if (! e) {
+ if (! e) {
char buf[64];
- snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode));
- res = new_type_primitive(new_id_from_str(buf), ss->largest_mode);
+ snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode));
+ buf[sizeof(buf) - 1] = '\0';
+ res = new_type_primitive(new_id_from_str(buf), ss->largest_mode);
set_type_alignment_bytes(res, ss->align);
- pmap_insert(types, ss->largest_mode, res);
- }
- else {
- res = e->value;
+ pmap_insert(types, ss->largest_mode, res);
+ } else {
+ res = e->value;
assert(get_type_alignment_bytes(res) == (int)ss->align);
}
- return res;
+
+ return res;
}
/**
*/
static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) {
int i, offset, frame_align;
- ir_type *frame = get_irg_frame_type(ssenv->cenv->irg);
+ ir_type *frame;
+
+ /* do not align the frame if no spill slots are needed */
+ if (n_slots <= 0)
+ return;
+
+ frame = get_irg_frame_type(ssenv->cenv->irg);
/* aligning by increasing frame size */
- offset = get_type_size_bits(frame) / 8;
+ offset = get_type_size_bytes(frame);
offset = round_up2(offset, ALIGN_SPILL_AREA);
- set_type_size_bytes(frame, -1);
/* create entities and assign offsets according to size and alignment*/
for (i = 0; i < n_slots; ++i) {
/* build entity */
snprintf(buf, sizeof(buf), "spill_slot_%d", i);
+ buf[sizeof(buf) - 1] = '\0';
name = new_id_from_str(buf);
spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]));