X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespill.c;h=c2a1b6fdc9b5c32b38deb49afe089042ed4fd222;hb=482b61870b54c36cdc78dfee6ee144135319aa93;hp=449f8ba7a5ab071ed74e89e05d559403fb4043d2;hpb=fd1a2c6ca51ee2b6ff838581b79cf7a3c4553e36;p=libfirm

diff --git a/ir/be/bespill.c b/ir/be/bespill.c
index 449f8ba7a..c2a1b6fdc 100644
--- a/ir/be/bespill.c
+++ b/ir/be/bespill.c
@@ -14,6 +14,7 @@
 #include "irnode_t.h"
 #include "ircons_t.h"
 #include "iredges_t.h"
+#include "irbackedge_t.h"
 #include "irprintf.h"
 #include "ident_t.h"
 #include "type_t.h"
@@ -32,10 +33,13 @@
 #include "benode_t.h"
 #include "bechordal_t.h"
 #include "bejavacoal.h"
+#include "benodesets.h"
+#include "bespilloptions.h"
+#include "bestatevent.h"
 
 // only rematerialise when costs are less than REMAT_COST_LIMIT
 // TODO determine a good value here...
-#define REMAT_COST_LIMIT 80
+#define REMAT_COST_LIMIT 10
 
 typedef struct _reloader_t reloader_t;
 
@@ -45,10 +49,16 @@ struct _reloader_t {
 };
 
 typedef struct _spill_info_t {
+    /** the value that should get spilled */
     ir_node *spilled_node;
+    /** list of places where the value should get reloaded */
     reloader_t *reloaders;
 
+    /** the spill node, or a PhiM node */
     ir_node *spill;
+    /** if we had the value of a phi spilled before but not the phi itself then
+     * this field contains the spill for the phi value */
+    ir_node *old_spill;
 } spill_info_t;
 
 struct _spill_env_t {
@@ -56,8 +66,11 @@ struct _spill_env_t {
     const arch_env_t *arch_env;
     const be_chordal_env_t *chordal_env;
     struct obstack obst;
-    set *spills;        /**< all spill_info_t's, which must be placed */
-    pset *mem_phis;     /**< set of all special spilled phis. allocated and freed separately */
+    be_irg_t *birg;
+    int spill_cost;     /**< the cost of a single spill node */
+    int reload_cost;    /**< the cost of a reload node */
+    set *spills;        /**< all spill_info_t's, which must be placed */
+    pset *mem_phis;     /**< set of all special spilled phis. allocated and freed separately */
 
     DEBUG_ONLY(firm_dbg_module_t *dbg;)
 };
@@ -71,12 +84,25 @@ static int cmp_spillinfo(const void *x, const void *y, size_t size) {
     return xx->spilled_node != yy->spilled_node;
 }
 
+/**
+ * Returns spill info for a specific value (returns NULL if the info doesn't
+ * exist yet)
+ */
+static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) {
+    spill_info_t info;
+    int hash = nodeset_hash(value);
+
+    info.spilled_node = value;
+
+    return set_find(env->spills, &info, sizeof(info), hash);
+}
+
 /**
  * Returns spill info for a specific value (the value that is to be spilled)
  */
 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
     spill_info_t info, *res;
-    int hash = HASH_PTR(value);
+    int hash = nodeset_hash(value);
 
     info.spilled_node = value;
     res = set_find(env->spills, &info, sizeof(info), hash);
@@ -84,6 +110,7 @@ static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
     if (res == NULL) {
         info.reloaders = NULL;
         info.spill = NULL;
+        info.old_spill = NULL;
         res = set_insert(env->spills, &info, sizeof(info), hash);
     }
 
@@ -103,8 +130,12 @@ spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env) {
     env->spills      = new_set(cmp_spillinfo, 1024);
     env->cls         = chordal_env->cls;
     env->chordal_env = chordal_env;
+    env->birg        = chordal_env->birg;
     env->arch_env    = env->chordal_env->birg->main_env->arch_env;
     env->mem_phis    = pset_new_ptr_default();
+    // TODO, ask backend about costs...
+    env->spill_cost  = 8;
+    env->reload_cost = 5;
     obstack_init(&env->obst);
     return env;
 }
@@ -117,7 +148,7 @@ void be_delete_spill_env(spill_env_t *env) {
     free(env);
 }
 
-/**
+/*
  *  ____  _                  ____      _                 _
  * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
  * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
@@ -130,7 +161,6 @@ void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) {
     spill_info_t *info;
     reloader_t *rel;
 
-    assert(sched_is_scheduled(before));
     assert(arch_irn_consider_in_reg_alloc(env->arch_env, env->cls, to_spill));
 
     info = get_spillinfo(env, to_spill);
@@ -142,6 +172,18 @@ void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) {
             ir_node *arg = get_irn_n(to_spill, i);
             get_spillinfo(env, arg);
         }
+
+#if 1
+        // hackery... sometimes the morgan algo spilled the value of a phi,
+        // the belady algo decides later to spill the whole phi, then sees the
+        // spill node and adds a reload for that spill node; the problem is that
+        // the reload gets attached to that same spill (and is totally unnecessary)
+        if(info->old_spill != NULL &&
+                (before == info->old_spill || value_dominates(before, info->old_spill))) {
+            printf("spilledphi hack was needed...\n");
+            before = sched_next(info->old_spill);
+        }
+#endif
     }
 
     rel = obstack_alloc(&env->obst, sizeof(rel[0]));
@@ -150,7 +192,7 @@ void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) {
     info->reloaders = rel;
 }
 
-void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
+static ir_node *get_reload_insertion_point(ir_node *block, int pos) {
     ir_node *predblock, *last;
 
     /* simply add the reload to the beginning of the block if we only have 1 predecessor
     */
     if(get_Block_n_cfgpreds(block) == 1) {
         assert(!is_Phi(sched_first(block)));
-        be_add_reload(env, to_spill, sched_first(block));
-        return;
+        return sched_first(block);
     }
 
     /* We have to reload the value in pred-block */
@@ -171,13 +212,27 @@ void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
         last = sched_prev(last);
         assert(!sched_is_end(last));
     }
-    assert(is_cfop(last));
+
+    if(!is_cfop(last)) {
+        ir_graph *irg = get_irn_irg(block);
+        ir_node *startblock = get_irg_start_block(irg);
+
+        last = sched_next(last);
+        // last node must be a cfop, only exception is the start block
+        assert(last == startblock);
+    }
 
     // add the reload before the (cond-)jump
-    be_add_reload(env, to_spill, last);
+    return last;
+}
+
+void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
+    ir_node *before = get_reload_insertion_point(block, pos);
+    be_add_reload(env, to_spill, before);
 }
 
 void be_spill_phi(spill_env_t *env, ir_node *node) {
+    spill_info_t* spill;
     int i, arity;
 
     assert(is_Phi(node));
@@ -185,11 +240,19 @@ void be_spill_phi(spill_env_t *env, ir_node *node) {
     pset_insert_ptr(env->mem_phis, node);
 
     // create spillinfos for the phi arguments
-    get_spillinfo(env, node);
+    spill = get_spillinfo(env, node);
     for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
         ir_node *arg = get_irn_n(node, i);
         get_spillinfo(env, arg);
     }
+
+    // if we had a spill for the phi value before, then remove this spill from
+    // the schedule, as we will remove it in the insert spill/reload phase
+    if(spill->spill != NULL && !is_Phi(spill->spill)) {
+        assert(spill->old_spill == NULL);
+        spill->old_spill = spill->spill;
+        spill->spill = NULL;
+    }
 }
 
 /*
@@ -292,6 +355,21 @@ static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) {
         set_irn_n(spillinfo->spill, i, arg_info->spill);
     }
+
+    // rewire reloads from old_spill to phi
+    if(spillinfo->old_spill != NULL) {
+        const ir_edge_t *edge, *next;
+        ir_node *old_spill = spillinfo->old_spill;
+
+        foreach_out_edge_safe(old_spill, edge, next) {
+            ir_node* reload = get_edge_src_irn(edge);
+            assert(be_is_Reload(reload) || is_Phi(reload));
+            set_irn_n(reload, get_edge_src_pos(edge), spillinfo->spill);
+        }
+        set_irn_n(old_spill, be_pos_Spill_val, new_Bad());
+        //sched_remove(old_spill);
+        spillinfo->old_spill = NULL;
+    }
 }
 
 /**
@@ -339,13 +417,18 @@ static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader)
     if(arg == get_irg_frame(env->chordal_env->irg))
         return 1;
 
+    // hack for now (happens when command should be inserted at end of block)
+    if(is_Block(reloader)) {
+        return 0;
+    }
+
     /* the following test does not work while spilling,
     * because the liveness info is not adapted yet to the effects of the
     * additional spills/reloads.
     *
     * So we can only do this test for ignore registers (of our register class)
     */
-    if(arch_get_irn_reg_class(env->arch_env, arg, -1) == env->chordal_env->cls
+    if(arch_get_irn_reg_class(env->arch_env, arg, -1) == env->cls
        && arch_irn_is(env->arch_env, arg, ignore)) {
         int i, arity;
 
@@ -354,7 +437,7 @@ static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader)
         * - it interferes with the reloaders result
         * - or it is (last-) used by reloader itself
         */
-        if (values_interfere(env->chordal_env->lv, reloader, arg)) {
+        if (values_interfere(env->birg->lv, reloader, arg)) {
             return 1;
         }
 
@@ -377,15 +460,9 @@ static int is_remat_node(spill_env_t *env, ir_node *node) {
 
     assert(!be_is_Spill(node));
 
-    if(be_is_Reload(node))
-        return 1;
-
-    // TODO why does arch_irn_is say rematerializable anyway?
-    if(be_is_Barrier(node))
-        return 0;
-
-    if(arch_irn_is(arch_env, node, rematerializable))
+    if(arch_irn_is(arch_env, node, rematerializable)) {
         return 1;
+    }
 
     if(be_is_StackParam(node))
         return 1;
@@ -416,8 +493,9 @@ static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_n
     } else {
         costs += arch_get_op_estimated_cost(env->arch_env, spilled);
     }
-    if(parentcosts + costs >= REMAT_COST_LIMIT)
+    if(parentcosts + costs >= REMAT_COST_LIMIT) {
         return REMAT_COST_LIMIT;
+    }
 
     argremats = 0;
     for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
@@ -435,7 +513,6 @@ static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_n
         }
         argremats++;
 
-        // TODO can we get more accurate costs than +1?
         costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs);
         if(parentcosts + costs >= REMAT_COST_LIMIT)
             return REMAT_COST_LIMIT;
@@ -445,7 +522,7 @@ static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_n
 }
 
 static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
-    int costs = check_remat_conditions_costs(env, spilled, reloader, 1);
+    int costs = check_remat_conditions_costs(env, spilled, reloader, 0);
 
     return costs < REMAT_COST_LIMIT;
 }
 
@@ -460,9 +537,15 @@ static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *r
 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
     int i, arity;
     ir_node *res;
-    ir_node *bl = get_nodes_block(reloader);
+    ir_node *bl;
     ir_node **ins;
 
+    if(is_Block(reloader)) {
+        bl = reloader;
+    } else {
+        bl = get_nodes_block(reloader);
+    }
+
     ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
     for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
         ir_node *arg = get_irn_n(spilled, i);
@@ -481,16 +564,39 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
                       get_irn_arity(spilled), ins);
 
     copy_node_attr(spilled, res);
+    new_backedge_info(res);
 
     DBG((env->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader));
 
     /* insert in schedule */
-    assert(!is_Block(reloader));
     sched_add_before(reloader, res);
 
     return res;
 }
 
+int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) {
+    spill_info_t *spill_info;
+
+    if(be_do_remats) {
+        // is the node rematerializable?
+        int costs = check_remat_conditions_costs(env, to_spill, before, 0);
+        if(costs < REMAT_COST_LIMIT)
+            return costs;
+    }
+
+    // do we already have a spill?
+    spill_info = find_spillinfo(env, to_spill);
+    if(spill_info != NULL && spill_info->spill != NULL)
+        return env->reload_cost;
+
+    return env->spill_cost + env->reload_cost;
+}
+
+int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
+    ir_node *before = get_reload_insertion_point(block, pos);
+    return be_get_reload_costs(env, to_spill, before);
+}
+
 /*
  *  ___                     _     ____      _                 _
  * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
@@ -503,9 +609,11 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
 void be_insert_spills_reloads(spill_env_t *env) {
     const arch_env_t *arch_env = env->arch_env;
     spill_info_t *si;
+    int remats = 0;
+    int reloads = 0;
+    int spills = 0;
 
     /* process each spilled node */
-    DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n"));
     for(si = set_first(env->spills); si; si = set_next(env->spills)) {
         reloader_t *rld;
         ir_mode *mode = get_irn_mode(si->spilled_node);
@@ -515,14 +623,19 @@ void be_insert_spills_reloads(spill_env_t *env) {
         for(rld = si->reloaders; rld; rld = rld->next) {
             ir_node *new_val;
 
-            if (check_remat_conditions(env, si->spilled_node, rld->reloader)) {
+            if (be_do_remats && check_remat_conditions(env, si->spilled_node, rld->reloader)) {
                 new_val = do_remat(env, si->spilled_node, rld->reloader);
+                remats++;
             } else {
                 /* make sure we have a spill */
-                spill_node(env, si);
+                if(si->spill == NULL) {
+                    spill_node(env, si);
+                    spills++;
+                }
 
-                /* do a reload */
+                /* create a reload */
                 new_val = be_reload(arch_env, env->cls, rld->reloader, mode, si->spill);
+                reloads++;
             }
 
             DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
@@ -532,17 +645,21 @@ void be_insert_spills_reloads(spill_env_t *env) {
         if(pset_count(values) > 0) {
             /* introduce copies, rewire the uses */
             pset_insert_ptr(values, si->spilled_node);
-            be_ssa_constr_set_ignore(env->chordal_env->dom_front, env->chordal_env->lv, values, env->mem_phis);
+            be_ssa_constr_set_ignore(env->birg->dom_front, env->birg->lv, values, env->mem_phis);
         }
 
         del_pset(values);
+
+        si->reloaders = NULL;
     }
 
-    // reloads are placed now, but we might reuse the spill environment for further spilling decisions
-    del_set(env->spills);
-    env->spills = new_set(cmp_spillinfo, 1024);
+    if(be_stat_ev_is_active()) {
+        be_stat_ev("spill_spills", spills);
+        be_stat_ev("spill_reloads", reloads);
+        be_stat_ev("spill_remats", remats);
+    }
 
     be_remove_dead_nodes_from_schedule(env->chordal_env->irg);
-    //be_liveness_add_missing(env->chordal_env->lv);
-    be_liveness_recompute(env->chordal_env->lv);
+    //be_liveness_recompute(env->birg->lv);
+    be_invalidate_liveness(env->birg);
 }
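
Note on the cost model the patch introduces: be_get_reload_costs() prefers rematerialisation when check_remat_conditions_costs() stays below the (now lowered) REMAT_COST_LIMIT and remats are enabled via be_do_remats, charges only env->reload_cost when a spill node for the value already exists, and otherwise charges env->spill_cost plus env->reload_cost. The standalone sketch below merely restates that decision order for illustration; it is not libfirm code, and reload_query_t, estimated_reload_costs and REMAT_LIMIT are hypothetical stand-ins for the patch's spill_env_t fields, be_get_reload_costs and REMAT_COST_LIMIT.

/* Standalone sketch of the reload-cost decision added in this patch.
 * All names are illustrative stand-ins, not libfirm API. */
#include <stdio.h>

#define REMAT_LIMIT 10   /* mirrors REMAT_COST_LIMIT after this patch */

typedef struct {
    int remat_costs;     /* what the remat cost estimation would return */
    int already_spilled; /* non-zero if a spill node for the value exists */
    int spill_cost;      /* mirrors env->spill_cost (8 in the patch) */
    int reload_cost;     /* mirrors env->reload_cost (5 in the patch) */
} reload_query_t;

/* Estimated cost of making a value available again, in the same priority
 * order as be_get_reload_costs(): rematerialise if cheap enough, reload if
 * a spill already exists, otherwise pay for spill + reload. */
static int estimated_reload_costs(const reload_query_t *q)
{
    if (q->remat_costs < REMAT_LIMIT)
        return q->remat_costs;
    if (q->already_spilled)
        return q->reload_cost;
    return q->spill_cost + q->reload_cost;
}

int main(void)
{
    /* worst case: not rematerialisable, not spilled yet -> 8 + 5 = 13 */
    reload_query_t q = { REMAT_LIMIT, 0, 8, 5 };
    printf("estimated costs: %d\n", estimated_reload_costs(&q));
    return 0;
}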