X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespill.c;h=aabdbfec2a009d245065cbd519af1a3f87948bf5;hb=d920844bd5cd3296b03bb281ce43782967e0ce65;hp=7640de586f385ea1a944def536720d8a1db9b5e6;hpb=44c68b33c3d7ca8ebf83e5b269fdf8fb21ee0c63;p=libfirm diff --git a/ir/be/bespill.c b/ir/be/bespill.c index 7640de586..aabdbfec2 100644 --- a/ir/be/bespill.c +++ b/ir/be/bespill.c @@ -144,6 +144,26 @@ static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_ir return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn))); } +/** + * Schedules a node after an instruction. (That is the place after all projs and phis + * that are scheduled after the instruction) + */ +static void sched_add_after_insn(ir_node *sched_after, ir_node *node) { + ir_node *next = sched_next(sched_after); + while(!sched_is_end(next)) { + if(!is_Proj(next) && !is_Phi(next)) + break; + next = sched_next(next); + } + + if(sched_is_end(next)) { + next = sched_last(get_nodes_block(sched_after)); + sched_add_after(next, node); + } else { + sched_add_before(next, node); + } +} + /** * Creates a spill. * @@ -171,6 +191,7 @@ static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) } ctx->spill = be_spill(env->arch_env, irn, ctx_irn); + sched_add_after_insn(irn, ctx->spill); return ctx->spill; } @@ -200,46 +221,44 @@ static void remove_copies(spill_env_t *env) { ARR_SETLEN(ir_node*, env->copies, 0); } +static INLINE ir_node *skip_projs(ir_node *node) { + while(is_Proj(node)) { + node = sched_next(node); + assert(!sched_is_end(node)); + } + + return node; +} + /** - * Inserts a copy (needed for spilled phi handling) of a value at the earliest - * possible location in a block. That is after the last use/def of the value or at - * the beginning of the block if there is no use/def. + * Searchs the schedule backwards until we reach the first use or def of a + * value or a phi. + * Returns the node before this node (so that you can do sched_add_before) */ -static ir_node *insert_copy(spill_env_t *env, ir_node *block, ir_node *value) { - ir_node* node; - ir_graph *irg = get_irn_irg(block); - ir_node *copy = be_new_Copy(env->cls, irg, block, value); +static ir_node *find_last_use_def(spill_env_t *env, ir_node *block, ir_node *value) { + ir_node *node, *last; - ARR_APP1(ir_node*, env->copies, copy); - - // walk schedule backwards until we find a use/def, or until we have reached the first phi - // TODO we could also do this by iterating over all uses and checking the - // sched_get_time_step value. Need benchmarks to decide this... + last = NULL; sched_foreach_reverse(block, node) { int i, arity; if(is_Phi(node)) { - sched_add_after(node, copy); - goto placed; + return last; } if(value == node) { - sched_add_after(node, copy); - goto placed; + return skip_projs(last); } for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { ir_node *arg = get_irn_n(node, i); if(arg == value) { - sched_add_after(node, copy); - goto placed; + return skip_projs(node); } } + last = node; } - // we didn't find a use or a phi yet, so place the copy at the beginning of the block - sched_add_before(sched_first(block), copy); - -placed: - return copy; + // simply return first node if no def or use found + return sched_first(block); } /** @@ -289,9 +308,12 @@ static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set phi_spill_assoc_t *entry; if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) { - if (! 
bitset_is_set(bs, get_irn_idx(arg))) + // looping edge? + if(arg == phi) { + sub_res = phi_spill; + } else if (! bitset_is_set(bs, get_irn_idx(arg))) { sub_res = spill_phi(senv, arg, ctx_irn, already_visited_phis, bs); - else { + } else { /* we already visited the argument phi: get it's spill */ key.phi = arg; key.spill = NULL; @@ -300,9 +322,9 @@ static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set sub_res = entry->spill; assert(sub_res && "spill missing?!?"); } - } - else + } else { sub_res = be_spill_irn(senv, arg, ctx_irn); + } set_irn_n(phi_spill, i, sub_res); } @@ -455,9 +477,35 @@ static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) } void be_spill_phi(spill_env_t *env, ir_node *node) { + int i, arity; + assert(is_Phi(node)); pset_insert_ptr(env->mem_phis, node); + + /* We have to place copy nodes in the predecessor blocks to temporarily + * produce new values that get separate spill slots + */ + for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { + ir_node *pred_block, *arg, *copy, *insert_point; + + /* Don't do anything for looping edges (there's no need + * and placing copies here breaks stuff as it suddenly + * generates new living values through the whole loop) + */ + arg = get_irn_n(node, i); + if(arg == node) + continue; + + pred_block = get_Block_cfgpred_block(get_nodes_block(node), i); + copy = be_new_Copy(env->cls, get_irn_irg(arg), pred_block, arg); + + ARR_APP1(ir_node*, env->copies, copy); + insert_point = find_last_use_def(env, pred_block, arg); + sched_add_before(insert_point, copy); + + set_irn_n(node, i, copy); + } } void be_insert_spills_reloads(spill_env_t *env) { @@ -468,18 +516,8 @@ void be_insert_spills_reloads(spill_env_t *env) { DBG((env->dbg, LEVEL_1, "Reloads for mem-phis:\n")); foreach_pset(env->mem_phis, node) { const ir_edge_t *e; - int i, arity; - - /* We have to place copy nodes in the predecessor blocks to temporarily - * produce new values that get separate spill slots - */ - for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { - ir_node *pred_block = get_Block_cfgpred_block(get_nodes_block(node), i); - ir_node *arg = get_irn_n(node, i); - ir_node *copy = insert_copy(env, pred_block, arg); - set_irn_n(node, i, copy); - } + assert(is_Phi(node)); /* Add reloads for mem_phis */ /* BETTER: These reloads (1) should only be inserted, if they are really needed */ @@ -499,7 +537,6 @@ void be_insert_spills_reloads(spill_env_t *env) { for(si = set_first(env->spills); si; si = set_next(env->spills)) { reloader_t *rld; ir_mode *mode = get_irn_mode(si->spilled_node); - //ir_node *value; pset *values = pset_new_ptr(16); /* go through all reloads for this spill */ @@ -512,9 +549,7 @@ void be_insert_spills_reloads(spill_env_t *env) { #ifdef REMAT if (check_remat_conditions(env, spill, si->spilled_node, rld->reloader)) { new_val = do_remat(env, si->spilled_node, rld->reloader); - //pdeq_putl(possibly_dead, spill); - } - else + } else #endif /* do a reload */ new_val = be_reload(arch_env, env->cls, rld->reloader, mode, spill); @@ -542,12 +577,9 @@ void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) { spill_info_t templ, *res; reloader_t *rel; + assert(sched_is_scheduled(before)); assert(arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->cls, to_spill)); - if(is_Phi(to_spill)) { - be_spill_phi(env, to_spill); - } - templ.spilled_node = to_spill; templ.reloaders = NULL; res = set_insert(env->spills, &templ, sizeof(templ), HASH_PTR(to_spill)); @@ -558,12 
+590,32 @@ void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) { res->reloaders = rel; } -void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *bl, int pos) { - ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos); - be_add_reload(env, to_spill, insert_bl); -} +void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) { + ir_node *predblock, *last; + /* simply add the reload to the beginning of the block if we only have 1 predecessor + * (we don't need to check for phis as there can't be any in a block with only 1 pred) + */ + if(get_Block_n_cfgpreds(block) == 1) { + assert(!is_Phi(sched_first(block))); + be_add_reload(env, to_spill, sched_first(block)); + return; + } + + /* We have to reload the value in pred-block */ + predblock = get_Block_cfgpred_block(block, pos); + last = sched_last(predblock); + + /* we might have projs and keepanys behind the jump... */ + while(is_Proj(last) || be_is_Keep(last)) { + last = sched_prev(last); + assert(!sched_is_end(last)); + } + assert(is_cfop(last)); + // add the reload before the (cond-)jump + be_add_reload(env, to_spill, last); +} /**************************************** @@ -741,21 +793,21 @@ interf_detected: /*nothing*/ ; * mode but different alignments. */ static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) { - pmap_entry *e = pmap_find(types, ss->largest_mode); - ir_type *res; + pmap_entry *e = pmap_find(types, ss->largest_mode); + ir_type *res; - if (! e) { + if (! e) { char buf[64]; - snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode)); - res = new_type_primitive(new_id_from_str(buf), ss->largest_mode); + snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode)); + res = new_type_primitive(new_id_from_str(buf), ss->largest_mode); set_type_alignment_bytes(res, ss->align); - pmap_insert(types, ss->largest_mode, res); - } - else { - res = e->value; + pmap_insert(types, ss->largest_mode, res); + } else { + res = e->value; assert(get_type_alignment_bytes(res) == (int)ss->align); } - return res; + + return res; } /** @@ -767,12 +819,17 @@ static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) { */ static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) { int i, offset, frame_align; - ir_type *frame = get_irg_frame_type(ssenv->cenv->irg); + ir_type *frame; + + /* do not align the frame if no spill slots are needed */ + if (n_slots <= 0) + return; + + frame = get_irg_frame_type(ssenv->cenv->irg); /* aligning by increasing frame size */ - offset = get_type_size_bits(frame) / 8; + offset = get_type_size_bytes(frame); offset = round_up2(offset, ALIGN_SPILL_AREA); - set_type_size_bytes(frame, -1); /* create entities and assign offsets according to size and alignment*/ for (i = 0; i < n_slots; ++i) {
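
Sketch (not part of the patch): the insertion-point search performed by the new
be_add_reload_on_edge() above can be read as a small helper of its own. The code
below only restates the loop from that hunk, assuming the libfirm backend
scheduling API exactly as it is used in this file (sched_last, sched_prev,
sched_is_end, is_Proj, be_is_Keep, is_cfop); the helper name
find_pre_jump_position is invented here for illustration.

static ir_node *find_pre_jump_position(ir_node *pred_block) {
	/* start at the last node scheduled in the predecessor block */
	ir_node *last = sched_last(pred_block);

	/* Projs and Keeps may be scheduled behind the jump; step over them so
	 * the reload ends up directly in front of the (cond-)jump itself */
	while (is_Proj(last) || be_is_Keep(last)) {
		last = sched_prev(last);
		assert(!sched_is_end(last));
	}

	/* the remaining node must be the control flow op that ends the block */
	assert(is_cfop(last));
	return last;
}

With such a helper, placing a reload on edge (block, pos) as done in the patch amounts to
be_add_reload(env, to_spill, find_pre_jump_position(get_Block_cfgpred_block(block, pos))).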