X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespillutil.c;h=1b2023010159485d9ffee893937cb4d4e81bf6ae;hb=4f945762ec7e9a345cbf8a80ef7f4eb96a39a389;hp=01013bbef31ec477712276992e3243c7550a6e2a;hpb=e9c215cf4349b990b65b0cd9caf1978c3550ccaa;p=libfirm

diff --git a/ir/be/bespillutil.c b/ir/be/bespillutil.c
index 01013bbef..1b2023010 100644
--- a/ir/be/bespillutil.c
+++ b/ir/be/bespillutil.c
@@ -21,8 +21,7 @@
  * @file
  * @brief       implementation of the spill/reload placement abstraction layer
  * @author      Daniel Grund, Sebastian Hack, Matthias Braun
- * @date        29.09.2005
- * @version     $Id$
+ * @date        29.09.2005
  */
 #include "config.h"
@@ -54,9 +53,10 @@
 #include "belive_t.h"
 #include "benode.h"
 #include "bechordal_t.h"
-#include "bestatevent.h"
+#include "statev_t.h"
 #include "bessaconstr.h"
 #include "beirg.h"
+#include "beirgmod.h"
 #include "beintlive_t.h"
 #include "bemodule.h"
 #include "be_t.h"
@@ -92,26 +92,25 @@ struct spill_info_t {
 	double spill_costs;              /**< costs needed for spilling the value */
 	const arch_register_class_t *reload_cls; /** the register class in which the reload should be placed */
+	bool spilled_phi;                /* true when the whole Phi has been spilled and
+	                                    will be replaced with a PhiM. false if only the
+	                                    value of the Phi gets spilled */
 };
 struct spill_env_t {
 	const arch_env_t *arch_env;
 	ir_graph         *irg;
 	struct obstack    obst;
-	be_irg_t         *birg;
 	int               spill_cost;    /**< the cost of a single spill node */
 	int               reload_cost;   /**< the cost of a reload node */
 	set              *spills;        /**< all spill_info_t's, which must be placed */
-	ir_nodeset_t      mem_phis;      /**< set of all spilled phis. */
-	ir_exec_freq     *exec_freq;
+	spill_info_t    **mem_phis;      /**< set of all spilled phis. */
-#ifdef FIRM_STATISTICS
 	unsigned          spill_count;
 	unsigned          reload_count;
 	unsigned          remat_count;
 	unsigned          spilled_phi_count;
-#endif
 };
 /**
@@ -119,8 +118,8 @@ struct spill_env_t {
  */
 static int cmp_spillinfo(const void *x, const void *y, size_t size)
 {
-	const spill_info_t *xx = x;
-	const spill_info_t *yy = y;
+	const spill_info_t *xx = (const spill_info_t*)x;
+	const spill_info_t *yy = (const spill_info_t*)y;
 	(void) size;
 	return xx->to_spill != yy->to_spill;
@@ -135,40 +134,37 @@ static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
 	int hash = hash_irn(value);
 	info.to_spill = value;
-	res = set_find(env->spills, &info, sizeof(info), hash);
+	res = set_find(spill_info_t, env->spills, &info, sizeof(info), hash);
 	if (res == NULL) {
 		info.reloaders   = NULL;
 		info.spills      = NULL;
 		info.spill_costs = -1;
 		info.reload_cls  = NULL;
-		res = set_insert(env->spills, &info, sizeof(info), hash);
+		info.spilled_phi = false;
+		res = set_insert(spill_info_t, env->spills, &info, sizeof(info), hash);
 	}
 	return res;
 }
-spill_env_t *be_new_spill_env(be_irg_t *birg)
+spill_env_t *be_new_spill_env(ir_graph *irg)
 {
-	const arch_env_t *arch_env = birg->main_env->arch_env;
+	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
 	spill_env_t      *env      = XMALLOC(spill_env_t);
-	env->spills      = new_set(cmp_spillinfo, 1024);
-	env->irg         = be_get_birg_irg(birg);
-	env->birg        = birg;
+	env->spills      = new_set(cmp_spillinfo, 1024);
+	env->irg         = irg;
 	env->arch_env    = arch_env;
-	ir_nodeset_init(&env->mem_phis);
+	env->mem_phis    = NEW_ARR_F(spill_info_t*, 0);
 	env->spill_cost  = arch_env->spill_cost;
 	env->reload_cost = arch_env->reload_cost;
-	env->exec_freq   = be_get_birg_exec_freq(birg);
 	obstack_init(&env->obst);
-#ifdef FIRM_STATISTICS
 	env->spill_count       = 0;
 	env->reload_count      = 0;
 	env->remat_count       = 0;
 	env->spilled_phi_count = 0;
-#endif
 	return env;
 }
@@ -176,7 +172,7 @@ spill_env_t *be_new_spill_env(be_irg_t *birg)
 void be_delete_spill_env(spill_env_t *env)
 {
 	del_set(env->spills);
-	ir_nodeset_destroy(&env->mem_phis);
+	DEL_ARR_F(env->mem_phis);
 	obstack_free(&env->obst, NULL);
 	free(env);
 }
@@ -193,12 +189,11 @@ void be_delete_spill_env(spill_env_t *env)
 void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
 {
 	spill_info_t *spill_info = get_spillinfo(env, to_spill);
-	const ir_node *insn = skip_Proj_const(to_spill);
 	spill_t *spill;
 	spill_t *s;
 	spill_t *last;
-	assert(!arch_irn_is(insn, dont_spill));
+	assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
 	DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));
 	/* Just for safety make sure that we do not insert the spill in front of a phi */
@@ -207,16 +202,16 @@ void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
 	/* spills that are dominated by others are not needed */
 	last = NULL;
 	s    = spill_info->spills;
-	for( ; s != NULL; s = s->next) {
+	for ( ; s != NULL; s = s->next) {
 		/* no need to add this spill if it is dominated by another */
-		if(value_dominates(s->after, after)) {
+		if (value_dominates(s->after, after)) {
 			DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after));
 			return;
 		}
 		/* remove spills that we dominate */
-		if(value_dominates(after, s->after)) {
+		if (value_dominates(after, s->after)) {
 			DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after));
-			if(last != NULL) {
+			if (last != NULL) {
 				last->next         = s->next;
 			} else {
 				spill_info->spills = s->next;
@@ -234,38 +229,14 @@ void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
 	spill_info->spills = spill;
 }
-void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
-                  ir_node *rematted_node)
-{
-	spill_info_t *spill_info;
-	reloader_t *reloader;
-
-	spill_info = get_spillinfo(env, to_spill);
-
-	/* add the remat information */
-	reloader                   = OALLOC(&env->obst, reloader_t);
-	reloader->next             = spill_info->reloaders;
-	reloader->reloader         = before;
-	reloader->rematted_node    = rematted_node;
-	reloader->remat_cost_delta = 0; /* We will never have a cost win over a
-	                                   reload since we're not even allowed to
-	                                   create a reload */
-
-	spill_info->reloaders = reloader;
-
-	DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
-	     to_spill, before));
-}
-
 void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
 		ir_node *can_spill_after, const arch_register_class_t *reload_cls,
 		int allow_remat)
 {
 	spill_info_t *info;
 	reloader_t *rel;
-	const ir_node *insn = skip_Proj_const(to_spill);
-	assert(!arch_irn_is(insn, dont_spill));
+	assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
 	info = get_spillinfo(env, to_spill);
@@ -320,11 +291,16 @@ ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
 	return last;
 }
-static ir_node *skip_keeps_phis(ir_node *node)
+/**
+ * determine final spill position: it should be after all phis, keep nodes
+ * and behind nodes marked as prolog
+ */
+static ir_node *determine_spill_point(ir_node *node)
 {
-	while(true) {
+	node = skip_Proj(node);
+	while (true) {
 		ir_node *next = sched_next(node);
-		if(!is_Phi(next) && !be_is_Keep(next) && !be_is_CopyKeep(next))
+		if (!is_Phi(next) && !be_is_Keep(next) && !be_is_CopyKeep(next))
 			break;
 		node = next;
 	}
@@ -342,7 +318,7 @@ static ir_node *get_block_insertion_point(ir_node *block, int pos)
 	/* simply add the reload to the beginning of the block if we only have 1
 	 * predecessor. We don't need to check for phis as there can't be any in a
	 * block with only 1 pred. */
-	if(get_Block_n_cfgpreds(block) == 1) {
+	if (get_Block_n_cfgpreds(block) == 1) {
 		assert(!is_Phi(sched_first(block)));
 		return sched_first(block);
 	}
@@ -362,7 +338,7 @@ void be_add_reload_at_end(spill_env_t *env, ir_node *to_spill,
 }
 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
-		int pos, const arch_register_class_t *reload_cls,
+		int pos, const arch_register_class_t *reload_cls,
 		int allow_remat)
 {
 	ir_node *before = get_block_insertion_point(block, pos);
@@ -372,29 +348,29 @@ void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
 void be_spill_phi(spill_env_t *env, ir_node *node)
 {
 	ir_node *block;
-	spill_info_t* spill;
 	int i, arity;
+	spill_info_t *info;
 	assert(is_Phi(node));
-	ir_nodeset_insert(&env->mem_phis, node);
+	info              = get_spillinfo(env, node);
+	info->spilled_phi = true;
+	ARR_APP1(spill_info_t*, env->mem_phis, info);
 	/* create spills for the phi arguments */
 	block = get_nodes_block(node);
-	spill = get_spillinfo(env, node);
-	for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+	for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
 		ir_node *arg = get_irn_n(node, i);
 		ir_node *insert;
-		//get_spillinfo(env, arg);
 		/* some backends have virtual noreg/unknown nodes that are not scheduled
 		 * and simply always available. */
-		if(!sched_is_scheduled(arg)) {
+		if (!sched_is_scheduled(arg)) {
 			ir_node *pred_block = get_Block_cfgpred_block(block, i);
 			insert = be_get_end_of_block_insertion_point(pred_block);
 			insert = sched_prev(insert);
 		} else {
-			insert = skip_keeps_phis(arg);
+			insert = determine_spill_point(arg);
 		}
 		be_add_spill(env, arg, insert);
@@ -432,27 +408,23 @@ static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
 	/* some backends have virtual noreg/unknown nodes that are not scheduled
 	 * and simply always available. */
-	if(!sched_is_scheduled(insn)) {
+	if (!sched_is_scheduled(insn)) {
 		/* override spillinfos or create a new one */
-		spillinfo->spills->spill = new_NoMem();
+		ir_graph *irg = get_irn_irg(to_spill);
+		spillinfo->spills->spill = get_irg_no_mem(irg);
 		DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
 		return;
 	}
 	DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
 	spill = spillinfo->spills;
-	for( ; spill != NULL; spill = spill->next) {
+	for ( ; spill != NULL; spill = spill->next) {
 		ir_node *after = spill->after;
-		ir_node *block = get_block(after);
-
-		after = skip_keeps_phis(after);
+		after = determine_spill_point(after);
-		spill->spill = be_spill(block, to_spill);
-		sched_add_after(skip_Proj(after), spill->spill);
+		spill->spill = arch_env_new_spill(env->arch_env, to_spill, after);
 		DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
-#ifdef FIRM_STATISTICS
 		env->spill_count++;
-#endif
 	}
 	DBG((dbg, LEVEL_1, "\n"));
 }
@@ -488,22 +460,21 @@ static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
 	arity   = get_irn_arity(phi);
 	ins     = ALLOCAN(ir_node*, arity);
 	unknown = new_r_Unknown(irg, mode_M);
-	for(i = 0; i < arity; ++i) {
+	for (i = 0; i < arity; ++i) {
 		ins[i] = unknown;
 	}
 	/* override or replace spills list... */
 	spill         = OALLOC(&env->obst, spill_t);
-	spill->after  = skip_keeps_phis(phi);
-	spill->spill  = new_r_Phi(block, arity, ins, mode_M);
+	spill->after  = determine_spill_point(phi);
+	spill->spill  = be_new_Phi(block, arity, ins, mode_M, arch_no_register_req);
 	spill->next   = NULL;
+	sched_add_after(block, spill->spill);
 	spillinfo->spills = spill;
-#ifdef FIRM_STATISTICS
 	env->spilled_phi_count++;
-#endif
-	for(i = 0; i < arity; ++i) {
+	for (i = 0; i < arity; ++i) {
 		ir_node *arg = get_irn_n(phi, i);
 		spill_info_t *arg_info = get_spillinfo(env, arg);
@@ -524,15 +495,11 @@ static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
  */
 static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
 {
-	ir_node *to_spill;
 	/* node is already spilled */
-	if(spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
+	if (spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
 		return;
-	to_spill = spillinfo->to_spill;
-
-	if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
+	if (spillinfo->spilled_phi) {
 		spill_phi(env, spillinfo);
 	} else {
 		spill_irn(env, spillinfo);
@@ -556,24 +523,27 @@ static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
 static int is_value_available(spill_env_t *env, const ir_node *arg,
                               const ir_node *reloader)
 {
-	if(is_Unknown(arg) || arg == new_NoMem())
+	if (is_Unknown(arg) || is_NoMem(arg))
 		return 1;
-	if(be_is_Spill(skip_Proj_const(arg)))
+	if (be_is_Spill(skip_Proj_const(arg)))
 		return 1;
-	if(arg == get_irg_frame(env->irg))
+	if (arg == get_irg_frame(env->irg))
 		return 1;
 	(void)reloader;
+	if (get_irn_mode(arg) == mode_T)
+		return 0;
 	/*
 	 * Ignore registers are always available
 	 */
 	if (arch_irn_is_ignore(arg))
 		return 1;
-	return 0;
+	return 0;
 }
 /**
@@ -598,16 +568,16 @@ static int check_remat_conditions_costs(spill_env_t *env,
 	if (!arch_irn_is(insn, rematerializable))
 		return REMAT_COST_INFINITE;
-	if(be_is_Reload(insn)) {
+	if (be_is_Reload(insn)) {
 		costs += 2;
 	} else {
 		costs += arch_get_op_estimated_cost(insn);
 	}
-	if(parentcosts + costs >= env->reload_cost + env->spill_cost) {
+	if (parentcosts + costs >= env->reload_cost + env->spill_cost) {
 		return REMAT_COST_INFINITE;
 	}
 	/* never rematerialize a node which modifies the flags.
-	 * (would be better to test wether the flags are actually live at point
+	 * (would be better to test whether the flags are actually live at point
	 * reloader...)
	 */
 	if (arch_irn_is(insn, modify_flags)) {
 		return REMAT_COST_INFINITE;
 	}
 	argremats = 0;
-	for(i = 0, arity = get_irn_arity(insn); i < arity; ++i) {
+	for (i = 0, arity = get_irn_arity(insn); i < arity; ++i) {
 		ir_node *arg = get_irn_n(insn, i);
-		if(is_value_available(env, arg, reloader))
+		if (is_value_available(env, arg, reloader))
 			continue;
 		/* we have to rematerialize the argument as well */
 		++argremats;
-		if(argremats > 1) {
+		if (argremats > 1) {
 			/* we only support rematerializing 1 argument at the moment,
 			 * as multiple arguments could increase register pressure */
 			return REMAT_COST_INFINITE;
@@ -631,7 +601,7 @@ static int check_remat_conditions_costs(spill_env_t *env,
 		costs += check_remat_conditions_costs(env, arg, reloader,
 		                                      parentcosts + costs);
-		if(parentcosts + costs >= env->reload_cost + env->spill_cost)
+		if (parentcosts + costs >= env->reload_cost + env->spill_cost)
 			return REMAT_COST_INFINITE;
 	}
@@ -641,7 +611,7 @@
 /**
  * Re-materialize a node.
  *
- * @param senv      the spill environment
+ * @param env       the spill environment
  * @param spilled   the node that was spilled
 * @param reloader  a irn that requires a reload
 */
@@ -652,24 +622,22 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
 	ir_node *bl;
 	ir_node **ins;
-	if(is_Block(reloader)) {
+	if (is_Block(reloader)) {
 		bl = reloader;
 	} else {
 		bl = get_nodes_block(reloader);
 	}
 	ins = ALLOCAN(ir_node*, get_irn_arity(spilled));
-	for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
+	for (i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
 		ir_node *arg = get_irn_n(spilled, i);
-		if(is_value_available(env, arg, reloader)) {
+		if (is_value_available(env, arg, reloader)) {
 			ins[i] = arg;
 		} else {
 			ins[i] = do_remat(env, arg, reloader);
-#ifdef FIRM_STATISTICS
-			/* don't count the recursive call as remat */
-			env->remat_count--;
-#endif
+			/* don't count the argument rematerialization as an extra remat */
+			--env->remat_count;
 		}
 	}
@@ -677,9 +645,8 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
 	res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
 	                  get_irn_op(spilled), get_irn_mode(spilled),
 	                  get_irn_arity(spilled), ins);
-	copy_node_attr(spilled, res);
+	copy_node_attr(env->irg, spilled, res);
 	arch_env_mark_remat(env->arch_env, res);
-	new_backedge_info(res);
 	DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
@@ -687,9 +654,7 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
 		/* insert in schedule */
 		sched_reset(res);
 		sched_add_before(reloader, res);
-#ifdef FIRM_STATISTICS
-		env->remat_count++;
-#endif
+		++env->remat_count;
 	}
 	return res;
@@ -698,7 +663,7 @@
 double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
 {
 	ir_node *block = get_nodes_block(before);
-	double   freq  = get_block_execfreq(env->exec_freq, block);
+	double   freq  = get_block_execfreq(block);
 	(void) to_spill;
 	return env->spill_cost * freq;
@@ -707,10 +672,10 @@ double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
 unsigned be_get_reload_costs_no_weight(spill_env_t *env,
                                        const ir_node *to_spill,
                                        const ir_node *before)
 {
-	if(be_do_remats) {
+	if (be_do_remats) {
 		/* is the node rematerializable? */
 		unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
-		if(costs < (unsigned) env->reload_cost)
+		if (costs < (unsigned) env->reload_cost)
 			return costs;
 	}
@@ -719,13 +684,13 @@ unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
 double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
 {
-	ir_node *block = get_nodes_block(before);
-	double   freq  = get_block_execfreq(env->exec_freq, block);
+	ir_node *block = get_nodes_block(before);
+	double   freq  = get_block_execfreq(block);
-	if(be_do_remats) {
+	if (be_do_remats) {
 		/* is the node rematerializable? */
 		int costs = check_remat_conditions_costs(env, to_spill, before, 0);
-		if(costs < env->reload_cost)
+		if (costs < env->reload_cost)
 			return costs * freq;
 	}
@@ -745,6 +710,39 @@ double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
 	return be_get_reload_costs(env, to_spill, before);
 }
+ir_node *be_new_spill(ir_node *value, ir_node *after)
+{
+	ir_graph                    *irg       = get_irn_irg(value);
+	ir_node                     *frame     = get_irg_frame(irg);
+	const arch_register_class_t *cls       = arch_get_irn_reg_class(value);
+	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
+	ir_node                     *block     = get_block(after);
+	ir_node                     *spill
+		= be_new_Spill(cls, cls_frame, block, frame, value);
+
+	sched_add_after(after, spill);
+	return spill;
+}
+
+ir_node *be_new_reload(ir_node *value, ir_node *spill, ir_node *before)
+{
+	ir_graph *irg   = get_irn_irg(value);
+	ir_node  *frame = get_irg_frame(irg);
+	ir_node  *block = get_block(before);
+	const arch_register_class_t *cls       = arch_get_irn_reg_class(value);
+	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
+	ir_mode  *mode  = get_irn_mode(value);
+	ir_node  *reload;
+
+	assert(be_is_Spill(spill) || is_Phi(spill));
+	assert(get_irn_mode(spill) == mode_M);
+
+	reload = be_new_Reload(cls, cls_frame, block, frame, spill, mode);
+	sched_add_before(before, reload);
+
+	return reload;
+}
+
 /*
  * ___ _ ____ _ _
 * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
@@ -766,7 +764,7 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
 	double   spill_execfreq;
 	/* already calculated? */
-	if(spillinfo->spill_costs >= 0)
+	if (spillinfo->spill_costs >= 0)
 		return;
 	assert(!arch_irn_is(insn, dont_spill));
@@ -777,12 +775,13 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
 	 * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
 	 * predecessor (of a PhiM) but this test might match other things too...
 	 */
-	if(!sched_is_scheduled(insn)) {
+	if (!sched_is_scheduled(insn)) {
+		ir_graph *irg = get_irn_irg(to_spill);
 		/* override spillinfos or create a new one */
 		spill_t *spill = OALLOC(&env->obst, spill_t);
 		spill->after = NULL;
 		spill->next  = NULL;
-		spill->spill = new_NoMem();
+		spill->spill = get_irg_no_mem(irg);
 		spillinfo->spills      = spill;
 		spillinfo->spill_costs = 0;
@@ -792,25 +791,25 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
 	}
 	spill_block    = get_nodes_block(insn);
-	spill_execfreq = get_block_execfreq(env->exec_freq, spill_block);
+	spill_execfreq = get_block_execfreq(spill_block);
-	if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
+	if (spillinfo->spilled_phi) {
 		/* TODO calculate correct costs...
		 * (though we can't remat this node anyway so no big problem) */
 		spillinfo->spill_costs = env->spill_cost * spill_execfreq;
 		return;
 	}
-	if(spillinfo->spills != NULL) {
+	if (spillinfo->spills != NULL) {
 		spill_t *s;
 		double   spills_execfreq;
 		/* calculate sum of execution frequencies of individual spills */
 		spills_execfreq = 0;
 		s               = spillinfo->spills;
-		for( ; s != NULL; s = s->next) {
+		for ( ; s != NULL; s = s->next) {
 			ir_node *spill_block = get_block(s->after);
-			double   freq = get_block_execfreq(env->exec_freq, spill_block);
+			double   freq = get_block_execfreq(spill_block);
 			spills_execfreq += freq;
 		}
 		DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
 		    spills_execfreq * env->spill_cost,
@@ -820,7 +819,7 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
 		    spill_execfreq * env->spill_cost));
 		/* multi-/latespill is advantageous -> return*/
-		if(spills_execfreq < spill_execfreq) {
+		if (spills_execfreq < spill_execfreq) {
 			DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
 			spillinfo->spill_costs = spills_execfreq * env->spill_cost;
 			return;
@@ -829,7 +828,7 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
 	/* override spillinfos or create a new one */
 	spill        = OALLOC(&env->obst, spill_t);
-	spill->after = skip_keeps_phis(to_spill);
+	spill->after = determine_spill_point(to_spill);
 	spill->next  = NULL;
 	spill->spill = NULL;
@@ -874,35 +873,32 @@ void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
 void be_insert_spills_reloads(spill_env_t *env)
 {
-	const ir_exec_freq    *exec_freq = env->exec_freq;
-	spill_info_t          *si;
-	ir_nodeset_iterator_t  iter;
-	ir_node               *node;
+	size_t n_mem_phis = ARR_LEN(env->mem_phis);
+	size_t i;
-	BE_TIMER_PUSH(t_ra_spill_apply);
+	be_timer_push(T_RA_SPILL_APPLY);
 	/* create all phi-ms first, this is needed so, that phis, hanging on
	   spilled phis work correctly */
-	foreach_ir_nodeset(&env->mem_phis, node, iter) {
-		spill_info_t *info = get_spillinfo(env, node);
+	for (i = 0; i < n_mem_phis; ++i) {
+		spill_info_t *info = env->mem_phis[i];
 		spill_node(env, info);
 	}
 	/* process each spilled node */
-	for (si = set_first(env->spills); si; si = set_next(env->spills)) {
-		reloader_t *rld;
+	foreach_set(env->spills, spill_info_t, si) {
 		ir_node  *to_spill        = si->to_spill;
-		ir_mode  *mode            = get_irn_mode(to_spill);
 		ir_node **copies          = NEW_ARR_F(ir_node*, 0);
 		double    all_remat_costs = 0; /** costs when we would remat all nodes */
-		int       force_remat     = 0;
+		bool      force_remat     = false;
+		reloader_t *rld;
 		DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
 		determine_spill_costs(env, si);
 		/* determine possibility of rematerialisations */
-		if(be_do_remats) {
+		if (be_do_remats) {
 			/* calculate cost savings for each indivial value when it would be
			   rematted instead of reloaded */
 			for (rld = si->reloaders; rld != NULL; rld = rld->next) {
@@ -912,12 +908,12 @@ void be_insert_spills_reloads(spill_env_t *env)
 				ir_node *block;
 				ir_node *reloader = rld->reloader;
-				if(rld->rematted_node != NULL) {
+				if (rld->rematted_node != NULL) {
 					DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
 					     rld->rematted_node, reloader));
 					continue;
 				}
-				if(rld->remat_cost_delta >= REMAT_COST_INFINITE) {
+				if (rld->remat_cost_delta >= REMAT_COST_INFINITE) {
 					DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
 					     reloader));
 					all_remat_costs = REMAT_COST_INFINITE;
@@ -926,7 +922,7 @@ void be_insert_spills_reloads(spill_env_t *env)
 				remat_cost = check_remat_conditions_costs(env, to_spill,
 				                                          reloader, 0);
-				if(remat_cost >= REMAT_COST_INFINITE) {
+				if (remat_cost >= REMAT_COST_INFINITE) {
 					DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
 					     reloader));
 					rld->remat_cost_delta = REMAT_COST_INFINITE;
@@ -937,13 +933,13 @@ void be_insert_spills_reloads(spill_env_t *env)
 				remat_cost_delta      = remat_cost - env->reload_cost;
 				rld->remat_cost_delta = remat_cost_delta;
 				block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
-				freq                  = get_block_execfreq(exec_freq, block);
+				freq                  = get_block_execfreq(block);
 				all_remat_costs += remat_cost_delta * freq;
 				DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
 				     "%d (rel %f)\n", reloader, remat_cost_delta,
 				     remat_cost_delta * freq));
 			}
-			if(all_remat_costs < REMAT_COST_INFINITE) {
+			if (all_remat_costs < REMAT_COST_INFINITE) {
 				/* we don't need the costs for the spill if we can remat
 				   all reloaders */
 				all_remat_costs -= si->spill_costs;
 				DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
@@ -952,10 +948,10 @@ void be_insert_spills_reloads(spill_env_t *env)
 				     env->spill_cost, si->spill_costs));
 			}
-			if(all_remat_costs < 0) {
+			if (all_remat_costs < 0) {
 				DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
 				     all_remat_costs));
-				force_remat = 1;
+				force_remat = true;
 			}
 		}
@@ -976,11 +972,9 @@ void be_insert_spills_reloads(spill_env_t *env)
 				/* create a reload, use the first spill for now SSA
 				 * reconstruction for memory comes below */
 				assert(si->spills != NULL);
-				copy = be_reload(si->reload_cls, rld->reloader, mode,
-				                 si->spills->spill);
-#ifdef FIRM_STATISTICS
+				copy = arch_env_new_reload(env->arch_env, si->to_spill,
				                           si->spills->spill, rld->reloader);
 				env->reload_count++;
-#endif
 			}
 			DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
@@ -992,9 +986,9 @@ void be_insert_spills_reloads(spill_env_t *env)
 		 * SSA form for the spilled value */
 		if (ARR_LEN(copies) > 0) {
 			be_ssa_construction_env_t senv;
-			/* be_lv_t *lv = be_get_birg_liveness(env->birg); */
+			/* be_lv_t *lv = be_get_irg_liveness(env->irg); */
-			be_ssa_construction_init(&senv, env->birg);
+			be_ssa_construction_init(&senv, env->irg);
 			be_ssa_construction_add_copy(&senv, to_spill);
 			be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
 			be_ssa_construction_fix_users(&senv, to_spill);
@@ -1005,7 +999,7 @@ void be_insert_spills_reloads(spill_env_t *env)
 			be_ssa_construction_update_liveness_phis(&senv);
 			be_liveness_update(to_spill);
 			len = ARR_LEN(copies);
-			for(i = 0; i < len; ++i) {
+			for (i = 0; i < len; ++i) {
 				be_liveness_update(lv, copies[i]);
 			}
 #endif
@@ -1018,16 +1012,16 @@ void be_insert_spills_reloads(spill_env_t *env)
 			be_ssa_construction_env_t senv;
-			be_ssa_construction_init(&senv, env->birg);
+			be_ssa_construction_init(&senv, env->irg);
 			spill = si->spills;
-			for( ; spill != NULL; spill = spill->next) {
+			for ( ; spill != NULL; spill = spill->next) {
 				/* maybe we rematerialized the value and need no spill */
-				if(spill->spill == NULL)
+				if (spill->spill == NULL)
 					continue;
 				be_ssa_construction_add_copy(&senv, spill->spill);
 				spill_count++;
 			}
-			if(spill_count > 1) {
+			if (spill_count > 1) {
 				/* all reloads are attached to the first spill, fix them now */
 				be_ssa_construction_fix_users(&senv, si->spills->spill);
 			}
@@ -1046,16 +1040,15 @@ void be_insert_spills_reloads(spill_env_t *env)
 	/* Matze: In theory be_ssa_construction should take care of the liveness...
	 * try to disable this again in the future */
-	be_liveness_invalidate(env->birg->lv);
+	be_invalidate_live_sets(env->irg);
-	be_remove_dead_nodes_from_schedule(env->birg);
+	be_remove_dead_nodes_from_schedule(env->irg);
-	BE_TIMER_POP(t_ra_spill_apply);
+	be_timer_pop(T_RA_SPILL_APPLY);
 }
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill)
 void be_init_spill(void)
 {
 	FIRM_DBG_REGISTER(dbg, "firm.be.spill");
 }
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill);
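Usage sketch for the two helpers added in the hunk at old line 745 / new line 710, be_new_spill() and be_new_reload(). This is illustrative only and not part of the patch; "value", "def_point" and "use_point" are hypothetical nodes a spiller would already have at hand:

	/* place a Spill of "value" right after its definition point ... */
	ir_node *spill  = be_new_spill(value, def_point);
	/* ... and a Reload of the same value in front of a later user,
	 * fed by the spill's memory result */
	ir_node *reload = be_new_reload(value, spill, use_point);

As their definitions above show, both helpers derive the register class and the frame node from "value" and its graph, create the be_Spill/be_Reload node, and insert it into the schedule next to the given anchor node.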