X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespill.c;h=43ed0d08136cb67d70dcc50f949731b6d0cb168e;hb=857cb493902e7d54651141fd600f980c383d5fe6;hp=7cc6bfd522c2b0d059962cb41d33563cee476fed;hpb=eea08e1ca13760ac00e2613014cff58855144ef4;p=libfirm
diff --git a/ir/be/bespill.c b/ir/be/bespill.c
index 7cc6bfd52..43ed0d081 100644
--- a/ir/be/bespill.c
+++ b/ir/be/bespill.c
@@ -28,7 +28,6 @@
 #include "benode_t.h"
 #include "bechordal_t.h"
 
-#define REMAT
 /* This enables re-computation of values. Current state: Unfinished and buggy. */
 #undef BUGGY_REMAT
 
@@ -144,6 +143,26 @@ static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_ir
 	return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
 }
 
+/**
+ * Schedules a node after an instruction. (That is, the place after all projs and phis
+ * that are scheduled after the instruction.)
+ */
+static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
+	ir_node *next = sched_next(sched_after);
+	while(!sched_is_end(next)) {
+		if(!is_Proj(next) && !is_Phi(next))
+			break;
+		next = sched_next(next);
+	}
+
+	if(sched_is_end(next)) {
+		next = sched_last(get_nodes_block(sched_after));
+		sched_add_after(next, node);
+	} else {
+		sched_add_before(next, node);
+	}
+}
+
 /**
  * Creates a spill.
  *
@@ -171,6 +190,7 @@ static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn)
 	}
 
 	ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
+	sched_add_after_insn(irn, ctx->spill);
 
 	return ctx->spill;
 }
@@ -200,44 +220,44 @@ static void remove_copies(spill_env_t *env) {
 	ARR_SETLEN(ir_node*, env->copies, 0);
 }
 
+static INLINE ir_node *skip_projs(ir_node *node) {
+	while(is_Proj(node)) {
+		node = sched_next(node);
+		assert(!sched_is_end(node));
+	}
+
+	return node;
+}
+
 /**
- * Inserts a copy (needed for spilled phi handling) of a value at the earliest
- * possible location in a block. That is after the last use/def of the value or at
- * the beginning of the block if there is no use/def.
+ * Searches the schedule backwards until we reach the first use or def of a
+ * value or a phi.
+ * Returns the node after this node (so that you can do sched_add_before) */ -static ir_node *insert_copy(spill_env_t *env, ir_node *block, ir_node *value) { - ir_node* node; - ir_graph *irg = get_irn_irg(block); - ir_node *copy = be_new_Copy(env->cls, irg, block, value); +static ir_node *find_last_use_def(spill_env_t *env, ir_node *block, ir_node *value) { + ir_node *node, *last; - ARR_APP1(ir_node*, env->copies, copy); - - // walk schedule backwards until we find a use, a def, or until we have reached the first phi + last = NULL; sched_foreach_reverse(block, node) { int i, arity; if(is_Phi(node)) { - sched_add_after(node, copy); - goto placed; + return last; } if(value == node) { - sched_add_after(node, copy); - goto placed; + return skip_projs(last); } for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { ir_node *arg = get_irn_n(node, i); if(arg == value) { - sched_add_after(node, copy); - goto placed; + return skip_projs(last); } } + last = node; } - // we didn't find a use or a phi yet, so place the copy at the beginning of the block - sched_add_before(sched_first(block), copy); - -placed: - return copy; + // simply return first node if no def or use found + return sched_first(block); } /** @@ -301,9 +321,9 @@ static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set sub_res = entry->spill; assert(sub_res && "spill missing?!?"); } - } - else + } else { sub_res = be_spill_irn(senv, arg, ctx_irn); + } set_irn_n(phi_spill, i, sub_res); } @@ -337,8 +357,6 @@ static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) { return res; } -#ifdef REMAT - #ifdef BUGGY_REMAT /** @@ -349,11 +367,11 @@ static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) { * @param spilled the node that was spilled * @param reloader a irn that requires a reload */ -static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) { +static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) { int pos, max; /* check for 'normal' spill and general remat condition */ - if (!be_is_Spill(spill) || !arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable)) + if (!arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable)) return 0; /* check availability of original arguments */ @@ -411,18 +429,15 @@ is_alive: ; * @param spilled the node that was spilled * @param reloader a irn that requires a reload */ -static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) { +static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) { const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env; return get_irn_arity(spilled) == 0 && - be_is_Spill(spill) && arch_irn_is(aenv, spilled, rematerializable); } #endif /* BUGGY_REMAT */ -#endif /* REMAT */ - /** * Re-materialize a node. 
 *
 * @param senv      the spill environment
 * @param spilled   the node that was spilled
 * @param reloader  a irn that requires a reload
 */
static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
@@ -455,44 +470,72 @@ static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader)
 	return res;
 }
 
+static void place_copies_for_phi(spill_env_t *env, ir_node* node) {
+	int i, arity;
+
+	assert(is_Phi(node));
+
+	/* We have to place copy nodes in the predecessor blocks to temporarily
+	 * produce new values that get separate spill slots
+	 */
+	for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+		ir_node *pred_block, *arg, *copy, *insert_point;
+
+		/* Don't do anything for looping edges (there's no need
+		 * and placing copies here breaks stuff as it suddenly
+		 * generates new living values through the whole loop)
+		 */
+		arg = get_irn_n(node, i);
+		if(arg == node)
+			continue;
+
+		pred_block = get_Block_cfgpred_block(get_nodes_block(node), i);
+		copy = be_new_Copy(env->cls, get_irn_irg(arg), pred_block, arg);
+
+		ARR_APP1(ir_node*, env->copies, copy);
+		insert_point = find_last_use_def(env, pred_block, arg);
+		sched_add_before(insert_point, copy);
+
+		set_irn_n(node, i, copy);
+	}
+}
+
+void be_place_copies(spill_env_t *env) {
+	ir_node *node;
+
+	foreach_pset(env->mem_phis, node) {
+		place_copies_for_phi(env, node);
+	}
+}
+
 void be_spill_phi(spill_env_t *env, ir_node *node) {
+	spill_ctx_t *spill_ctx;
+
 	assert(is_Phi(node));
 	pset_insert_ptr(env->mem_phis, node);
+
+	// remove spill context for this phi (if there was one)
+	spill_ctx = be_get_spill_ctx(env->spill_ctxs, node, node);
+	if(spill_ctx != NULL) {
+		spill_ctx->spill = NULL;
+	}
 }
 
 void be_insert_spills_reloads(spill_env_t *env) {
 	const arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
-	ir_node *node;
+	//ir_node *node;
 	spill_info_t *si;
 
+#if 0
+	// Matze: This should be pointless as belady's fix_block_borders
+	// should result in the same
 	DBG((env->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
 	foreach_pset(env->mem_phis, node) {
 		const ir_edge_t *e;
-		int i, arity;
 
 		assert(is_Phi(node));
 
-		/* We have to place copy nodes in the predecessor blocks to temporarily
-		 * produce new values that get separate spill slots
-		 */
-		for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
-			ir_node *pred_block, *arg, *copy;
-
-			/* Don't do anything for looping edges (there's no need
-			 * and placing copies here breaks stuff as it suddenly
-			 * generates new living values through the whole loop)
-			 */
-			arg = get_irn_n(node, i);
-			if(arg == node)
-				continue;
-
-			pred_block = get_Block_cfgpred_block(get_nodes_block(node), i);
-			copy = insert_copy(env, pred_block, arg);
-
-			set_irn_n(node, i, copy);
-		}
-
 		/* Add reloads for mem_phis */
 		/* BETTER: These reloads (1) should only be inserted, if they are really needed */
 		DBG((env->dbg, LEVEL_1, "  Mem-phi %+F\n", node));
@@ -505,31 +548,28 @@ void be_insert_spills_reloads(spill_env_t *env) {
 			}
 		}
 	}
+#endif
 
 	/* process each spilled node */
 	DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n"));
 	for(si = set_first(env->spills); si; si = set_next(env->spills)) {
 		reloader_t *rld;
 		ir_mode *mode = get_irn_mode(si->spilled_node);
-		//ir_node *value;
 		pset *values = pset_new_ptr(16);
 
 		/* go through all reloads for this spill */
 		for(rld = si->reloaders; rld; rld = rld->next) {
 			ir_node *new_val;
 
-			/* the spill for this reloader */
-			ir_node *spill = be_spill_node(env, si->spilled_node);
-
-#ifdef REMAT
-			if (check_remat_conditions(env, spill, si->spilled_node, rld->reloader)) {
+			if (check_remat_conditions(env, si->spilled_node, rld->reloader)) {
 				new_val = do_remat(env, si->spilled_node, rld->reloader);
-				//pdeq_putl(possibly_dead, spill);
-			}
-			else
-#endif
+			} else {
+				/* the
spill for this reloader */ + ir_node *spill = be_spill_node(env, si->spilled_node); + /* do a reload */ new_val = be_reload(arch_env, env->cls, rld->reloader, mode, spill); + } DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader)); pset_insert_ptr(values, new_val); @@ -573,16 +613,21 @@ void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, /* simply add the reload to the beginning of the block if we only have 1 predecessor * (we don't need to check for phis as there can't be any in a block with only 1 pred) */ - if(get_irn_arity(block) == 1) { + if(get_Block_n_cfgpreds(block) == 1) { assert(!is_Phi(sched_first(block))); be_add_reload(env, to_spill, sched_first(block)); return; } - // We have to reload the value in pred-block - predblock = get_nodes_block(get_irn_n(block, pos)); + /* We have to reload the value in pred-block */ + predblock = get_Block_cfgpred_block(block, pos); last = sched_last(predblock); - // there should be exactly 1 jump at the end of the block + + /* we might have projs and keepanys behind the jump... */ + while(is_Proj(last) || be_is_Keep(last)) { + last = sched_prev(last); + assert(!sched_is_end(last)); + } assert(is_cfop(last)); // add the reload before the (cond-)jump @@ -700,7 +745,8 @@ static void optimize_slots(ss_env_t *ssenv, int size, spill_slot_t *ass[]) { - assign a new offset to this slot - xor find another slot to coalesce with */ used_slots = 0; - for (i=0; ilargest_mode)); + buf[sizeof(buf) - 1] = '\0'; res = new_type_primitive(new_id_from_str(buf), ss->largest_mode); set_type_alignment_bytes(res, ss->align); pmap_insert(types, ss->largest_mode, res); @@ -791,12 +838,17 @@ static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) { */ static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) { int i, offset, frame_align; - ir_type *frame = get_irg_frame_type(ssenv->cenv->irg); + ir_type *frame; + + /* do not align the frame if no spill slots are needed */ + if (n_slots <= 0) + return; + + frame = get_irg_frame_type(ssenv->cenv->irg); /* aligning by increasing frame size */ - offset = get_type_size_bits(frame) / 8; + offset = get_type_size_bytes(frame); offset = round_up2(offset, ALIGN_SPILL_AREA); - set_type_size_bytes(frame, -1); /* create entities and assign offsets according to size and alignment*/ for (i = 0; i < n_slots; ++i) { @@ -807,6 +859,7 @@ static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) { /* build entity */ snprintf(buf, sizeof(buf), "spill_slot_%d", i); + buf[sizeof(buf) - 1] = '\0'; name = new_id_from_str(buf); spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]));
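
The patch changes the calling convention of the spill environment: placing the copies for the
arguments of spilled phis is no longer a side effect of be_insert_spills_reloads() but a separate
pass, be_place_copies(), which walks env->mem_phis and schedules one be_new_Copy() per non-looping
predecessor right before the node returned by find_last_use_def(). A spill algorithm driving this
interface is therefore expected to run the phases itself. The sketch below only illustrates the
assumed ordering; the driver name run_spill_phases and its placeholder parameters are hypothetical,
and only functions visible in this diff are used.

/* Hypothetical sketch of the assumed phase ordering; illustrative only. */
static void run_spill_phases(spill_env_t *env, ir_node *mem_phi,
                             ir_node *spilled_value, ir_node *reload_point)
{
	/* 1. mark phis whose values have to live in memory */
	be_spill_phi(env, mem_phi);

	/* 2. record where spilled values are needed again */
	be_add_reload(env, spilled_value, reload_point);

	/* 3. place copies for the phi arguments in the predecessor blocks
	 *    (the new pass introduced by this patch) */
	be_place_copies(env);

	/* 4. materialize the actual spill and reload nodes */
	be_insert_spills_reloads(env);
}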