X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespill.c;h=2746cb042f720e34510148de38fd997bf63eb7d2;hb=d7343573ce41f42a614410632b6aff56b4f30015;hp=c0b5f36fc28a9c0a24d07a0296c563c4d52b13fa;hpb=4f7329d7d8d3c676f076459a45a6d5ccf9c6fbba;p=libfirm diff --git a/ir/be/bespill.c b/ir/be/bespill.c index c0b5f36fc..2746cb042 100644 --- a/ir/be/bespill.c +++ b/ir/be/bespill.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -24,11 +24,10 @@ * @date 29.09.2005 * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include +#include #include "pset.h" #include "irnode_t.h" @@ -54,14 +53,13 @@ #include "belive_t.h" #include "benode_t.h" #include "bechordal_t.h" -#include "bejavacoal.h" -#include "benodesets.h" #include "bespilloptions.h" #include "bestatevent.h" #include "bessaconstr.h" #include "beirg_t.h" #include "beintlive_t.h" #include "bemodule.h" +#include "be_t.h" DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) @@ -80,7 +78,7 @@ struct reloader_t { typedef struct spill_t spill_t; struct spill_t { spill_t *next; - ir_node *before; /**< spill has to be placed before this node (or earlier) */ + ir_node *after; /**< spill has to be placed after this node (or earlier) */ ir_node *spill; }; @@ -134,7 +132,7 @@ static int cmp_spillinfo(const void *x, const void *y, size_t size) static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) { spill_info_t info, *res; - int hash = nodeset_hash(value); + int hash = hash_irn(value); info.to_spill = value; res = set_find(env->spills, &info, sizeof(info), hash); @@ -154,14 +152,14 @@ spill_env_t *be_new_spill_env(be_irg_t *birg) { const arch_env_t *arch_env = birg->main_env->arch_env; - spill_env_t *env = xmalloc(sizeof(env[0])); + spill_env_t *env = XMALLOC(spill_env_t); env->spills = new_set(cmp_spillinfo, 1024); env->irg = be_get_birg_irg(birg); env->birg = birg; env->arch_env = arch_env; ir_nodeset_init(&env->mem_phis); - env->spill_cost = arch_env->isa->spill_cost; - env->reload_cost = arch_env->isa->reload_cost; + env->spill_cost = arch_env->spill_cost; + env->reload_cost = arch_env->reload_cost; env->exec_freq = be_get_birg_exec_freq(birg); obstack_init(&env->obst); @@ -192,27 +190,31 @@ void be_delete_spill_env(spill_env_t *env) * */ -void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *before) +void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after) { spill_info_t *spill_info = get_spillinfo(env, to_spill); spill_t *spill; spill_t *s; spill_t *last; - DB((dbg, LEVEL_1, "Add spill of %+F before %+F\n", to_spill, before)); + assert(!arch_irn_is(to_spill, dont_spill)); + DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after)); + + /* Just for safety make sure that we do not insert the spill in front of a phi */ + assert(!is_Phi(sched_next(after))); /* spills that are dominated by others are not needed */ last = NULL; s = spill_info->spills; for( ; s != NULL; s = s->next) { /* no need to add this spill if it is dominated by another */ - if(value_dominates(s->before, before)) { - DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->before)); + if(value_dominates(s->after, after)) { + DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after)); return; } /* remove spills that we dominate */ - if(value_dominates(before, s->before)) { - DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->before)); + 
if(value_dominates(after, s->after)) { + DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after)); if(last != NULL) { last->next = s->next; } else { @@ -224,7 +226,7 @@ void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *before) } spill = obstack_alloc(&env->obst, sizeof(spill[0])); - spill->before = before; + spill->after = after; spill->next = spill_info->spills; spill->spill = NULL; @@ -261,6 +263,8 @@ void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before, spill_info_t *info; reloader_t *rel; + assert(!arch_irn_is(to_spill, dont_spill)); + info = get_spillinfo(env, to_spill); if (is_Phi(to_spill)) { @@ -303,7 +307,7 @@ ir_node *be_get_end_of_block_insertion_point(const ir_node *block) ir_node *last = sched_last(block); /* we might have keeps behind the jump... */ - while(be_is_Keep(last)) { + while (be_is_Keep(last)) { last = sched_prev(last); assert(!sched_is_end(last)); } @@ -314,6 +318,17 @@ ir_node *be_get_end_of_block_insertion_point(const ir_node *block) return last; } +static ir_node *skip_keeps_phis(ir_node *node) +{ + while(true) { + ir_node *next = sched_next(node); + if(!is_Phi(next) && !be_is_Keep(next)) + break; + node = next; + } + return node; +} + /** * Returns the point at which you can insert a node that should be executed * before block @p block when coming from pred @p pos. @@ -366,10 +381,20 @@ void be_spill_phi(spill_env_t *env, ir_node *node) block = get_nodes_block(node); spill = get_spillinfo(env, node); for(i = 0, arity = get_irn_arity(node); i < arity; ++i) { - ir_node *arg = get_irn_n(node, i); - ir_node *pred_block = get_Block_cfgpred_block(block, i); - ir_node *insert = be_get_end_of_block_insertion_point(pred_block); + ir_node *arg = get_irn_n(node, i); + ir_node *insert; //get_spillinfo(env, arg); + + /* some backends have virtual noreg/unknown nodes that are not scheduled + * and simply always available. */ + if(!sched_is_scheduled(arg)) { + ir_node *pred_block = get_Block_cfgpred_block(block, i); + insert = be_get_end_of_block_insertion_point(pred_block); + insert = sched_prev(insert); + } else { + insert = skip_keeps_phis(arg); + } + be_add_spill(env, arg, insert); } } @@ -383,15 +408,6 @@ void be_spill_phi(spill_env_t *env, ir_node *node) * |_| */ -static ir_node *skip_keeps_phis(ir_node *node) -{ - node = sched_next(node); - while(is_Phi(node) || be_is_Keep(node)) { - node = sched_next(node); - } - return node; -} - static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo); /** @@ -420,21 +436,17 @@ static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) return; } - DBG((dbg, LEVEL_1, "spilling %+F ... ", to_spill)); + DBG((dbg, LEVEL_1, "spilling %+F ... 
\n", to_spill)); spill = spillinfo->spills; for( ; spill != NULL; spill = spill->next) { - ir_node *block = get_block(spill->before); - ir_node *before = spill->before; + ir_node *after = spill->after; + ir_node *block = get_block(after); - /* place all spills before the reloads (as we can't guarantee the - * same order as the be_add_spill and be_add_reload calls */ - while(be_is_Reload(sched_prev(before))) { - before = sched_prev(before); - } + after = skip_keeps_phis(after); - spill->spill = be_spill(env->arch_env, block, to_spill); - sched_add_before(before, spill->spill); - DB((dbg, LEVEL_1, "\t%+F before %+F,", spill->spill, before)); + spill->spill = be_spill(block, to_spill); + sched_add_after(after, spill->spill); + DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after)); #ifdef FIRM_STATISTICS env->spill_count++; #endif @@ -471,15 +483,15 @@ static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) /* build a new PhiM */ arity = get_irn_arity(phi); - ins = alloca(sizeof(ir_node*) * arity); + ins = ALLOCAN(ir_node*, arity); unknown = new_r_Unknown(irg, mode_M); for(i = 0; i < arity; ++i) { ins[i] = unknown; } - /* override replace spills... */ + /* override or replace spills list... */ spill = obstack_alloc(&env->obst, sizeof(spill[0])); - spill->before = skip_keeps_phis(phi); + spill->after = skip_keeps_phis(phi); spill->spill = new_r_Phi(irg, block, arity, ins, mode_M); spill->next = NULL; @@ -550,39 +562,19 @@ static int is_value_available(spill_env_t *env, const ir_node *arg, if(arg == get_irg_frame(env->irg)) return 1; +#if 0 /* hack for now (happens when command should be inserted at end of block) */ - if(is_Block(reloader)) { + if(is_Block(reloader)) return 0; - } +#else + (void)reloader; +#endif /* * Ignore registers are always available */ - if(arch_irn_is(env->arch_env, arg, ignore)) { - return 1; - } - - /* the following test does not work while spilling, - * because the liveness info is not adapted yet to the effects of the - * additional spills/reloads. - */ -#if 0 - /* we want to remat before the insn reloader - * thus an arguments is alive if - * - it interferes with the reloaders result - * - or it is (last-) used by reloader itself - */ - if (values_interfere(env->birg->lv, reloader, arg)) { + if (arch_irn_is_ignore(arg)) return 1; - } - - arity = get_irn_arity(reloader); - for (i = 0; i < arity; ++i) { - ir_node *rel_arg = get_irn_n(reloader, i); - if (rel_arg == arg) - return 1; - } -#endif return 0; } @@ -590,13 +582,11 @@ static int is_value_available(spill_env_t *env, const ir_node *arg, /** * Checks whether the node can principally be rematerialized */ -static int is_remat_node(spill_env_t *env, const ir_node *node) +static int is_remat_node(const ir_node *node) { - const arch_env_t *arch_env = env->arch_env; - assert(!be_is_Spill(node)); - if(arch_irn_is(arch_env, node, rematerializable)) + if (arch_irn_is(node, rematerializable)) return 1; return 0; @@ -619,18 +609,22 @@ static int check_remat_conditions_costs(spill_env_t *env, int argremats; int costs = 0; - if(!is_remat_node(env, spilled)) + if (!is_remat_node(spilled)) return REMAT_COST_INFINITE; if(be_is_Reload(spilled)) { costs += 2; } else { - costs += arch_get_op_estimated_cost(env->arch_env, spilled); + costs += arch_get_op_estimated_cost(spilled); } if(parentcosts + costs >= env->reload_cost + env->spill_cost) { return REMAT_COST_INFINITE; } - if(arch_irn_is(env->arch_env, spilled, modify_flags)) { + /* never rematerialize a node which modifies the flags. 
+ * (would be better to test wether the flags are actually live at point + * reloader...) + */ + if (arch_irn_is(spilled, modify_flags)) { return REMAT_COST_INFINITE; } @@ -679,7 +673,7 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) bl = get_nodes_block(reloader); } - ins = alloca(get_irn_arity(spilled) * sizeof(ins[0])); + ins = ALLOCAN(ir_node*, get_irn_arity(spilled)); for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) { ir_node *arg = get_irn_n(spilled, i); @@ -699,6 +693,7 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) get_irn_op(spilled), get_irn_mode(spilled), get_irn_arity(spilled), ins); copy_node_attr(spilled, res); + arch_env_mark_remat(env->arch_env, res); new_backedge_info(res); DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader)); @@ -724,6 +719,19 @@ double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) return env->spill_cost * freq; } +unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill, + const ir_node *before) +{ + if(be_do_remats) { + /* is the node rematerializable? */ + unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0); + if(costs < (unsigned) env->reload_cost) + return costs; + } + + return env->reload_cost; +} + double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) { ir_node *block = get_nodes_block(before); @@ -775,7 +783,7 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo) if(spillinfo->spill_costs >= 0) return; - assert(! arch_irn_is(env->arch_env, to_spill, dont_spill)); + assert(!arch_irn_is(to_spill, dont_spill)); assert(!be_is_Reload(to_spill)); /* some backends have virtual noreg/unknown nodes that are not scheduled @@ -786,9 +794,9 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo) if(!sched_is_scheduled(to_spill)) { /* override spillinfos or create a new one */ spill_t *spill = obstack_alloc(&env->obst, sizeof(spill[0])); - spill->before = NULL; - spill->next = NULL; - spill->spill = new_NoMem(); + spill->after = NULL; + spill->next = NULL; + spill->spill = new_NoMem(); spillinfo->spills = spill; spillinfo->spill_costs = 0; @@ -811,17 +819,12 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo) spill_t *s; double spills_execfreq; - /* calculate sum of executaion frequencies of individual spills */ + /* calculate sum of execution frequencies of individual spills */ spills_execfreq = 0; s = spillinfo->spills; for( ; s != NULL; s = s->next) { - ir_node *spill_block = s->before; - double freq; - - if(!is_Block(spill_block)) { - spill_block = get_nodes_block(spill_block); - } - freq = get_block_execfreq(env->exec_freq, spill_block); + ir_node *spill_block = get_block(s->after); + double freq = get_block_execfreq(env->exec_freq, spill_block); spills_execfreq += freq; } @@ -839,24 +842,59 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo) } /* override spillinfos or create a new one */ - spill = obstack_alloc(&env->obst, sizeof(spill[0])); - spill->before = skip_keeps_phis(to_spill); - spill->next = NULL; - spill->spill = NULL; + spill = obstack_alloc(&env->obst, sizeof(spill[0])); + spill->after = skip_keeps_phis(to_spill); + spill->next = NULL; + spill->spill = NULL; spillinfo->spills = spill; spillinfo->spill_costs = spill_execfreq * env->spill_cost; DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill)); } +void 
make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn) +{ + const spill_info_t *si = get_spillinfo(env, irn); + ir_node *start_block = get_irg_start_block(get_irn_irg(irn)); + int n_blocks = get_Block_dom_max_subtree_pre_num(start_block); + bitset_t *reloads = bitset_alloca(n_blocks); + reloader_t *r; + spill_t *s; + + if (si == NULL) + return; + + /* Fill the bitset with the dominance pre-order numbers + * of the blocks the reloads are located in. */ + for (r = si->reloaders; r != NULL; r = r->next) { + ir_node *bl = get_nodes_block(r->reloader); + bitset_set(reloads, get_Block_dom_tree_pre_num(bl)); + } + + /* Now, cancel out all the blocks that are dominated by each spill. + * If the bitset is not empty after that, we have reloads that are + * not dominated by any spill. */ + for (s = si->spills; s != NULL; s = s->next) { + ir_node *bl = get_nodes_block(s->after); + int start = get_Block_dom_tree_pre_num(bl); + int end = get_Block_dom_max_subtree_pre_num(bl); + + bitset_clear_range(reloads, start, end); + } + + if (!bitset_is_empty(reloads)) + be_add_spill(env, si->to_spill, si->to_spill); +} + void be_insert_spills_reloads(spill_env_t *env) { - const arch_env_t *arch_env = env->arch_env; const ir_exec_freq *exec_freq = env->exec_freq; spill_info_t *si; ir_nodeset_iterator_t iter; ir_node *node; + BE_TIMER_PUSH(t_ra_spill_apply); + /* create all phi-ms first, this is needed so, that phis, hanging on spilled phis work correctly */ foreach_ir_nodeset(&env->mem_phis, node, iter) { @@ -952,7 +990,7 @@ void be_insert_spills_reloads(spill_env_t *env) /* create a reload, use the first spill for now SSA * reconstruction for memory comes below */ assert(si->spills != NULL); - copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode, + copy = be_reload(si->reload_cls, rld->reloader, mode, si->spills->spill); #ifdef FIRM_STATISTICS env->reload_count++; @@ -1025,6 +1063,8 @@ void be_insert_spills_reloads(spill_env_t *env) be_liveness_invalidate(env->birg->lv); be_remove_dead_nodes_from_schedule(env->birg); + + BE_TIMER_POP(t_ra_spill_apply); } void be_init_spill(void)
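
The dominance test added above in make_spill_locations_dominate_irn can be read as follows: every reload marks the dominance-tree pre-order number of its block in a bitset, every spill clears the contiguous pre-order range of the subtree its block dominates, and any bit left set afterwards is a reload that no spill dominates, which forces an extra spill right after the value's definition. Below is a minimal standalone sketch of that check; it is not libFirm API, and the pre-order numbers, block count and placement data are hypothetical example values using plain C arrays instead of libFirm's bitset_t.

/* Minimal sketch, assuming hypothetical dominance pre-order numbers:
 * a block is identified by its pre-order number, and the subtree it
 * dominates occupies the contiguous range [pre, max_subtree_pre).
 * This mirrors the bitset logic of make_spill_locations_dominate_irn. */
#include <stdbool.h>
#include <stdio.h>

#define N_BLOCKS 8

typedef struct {
	int pre;              /* pre-order number of the spill's block */
	int max_subtree_pre;  /* one past the last number it dominates */
} spill_loc_t;

/* Returns true if every reload block is dominated by at least one spill. */
static bool spills_dominate_reloads(const spill_loc_t *spills, int n_spills,
                                    const int *reload_pre, int n_reloads)
{
	bool reload_bit[N_BLOCKS] = { false };
	int  i, b;

	/* mark the blocks that contain reloads */
	for (i = 0; i < n_reloads; ++i)
		reload_bit[reload_pre[i]] = true;

	/* clear every block dominated by some spill */
	for (i = 0; i < n_spills; ++i)
		for (b = spills[i].pre; b < spills[i].max_subtree_pre; ++b)
			reload_bit[b] = false;

	/* any bit still set is a reload that no spill dominates */
	for (b = 0; b < N_BLOCKS; ++b)
		if (reload_bit[b])
			return false;
	return true;
}

int main(void)
{
	/* hypothetical example: one spill dominating blocks 2..5,
	 * reloads in blocks 3 and 6 */
	spill_loc_t spills[]  = { { 2, 6 } };
	int         reloads[] = { 3, 6 };

	printf("%s\n", spills_dominate_reloads(spills, 1, reloads, 2)
	               ? "all reloads dominated"
	               : "need a spill after the definition");
	return 0;
}

Numbering the dominance tree in pre-order with a max-subtree number turns "A dominates B" into a simple interval test, which is why a single bitset_clear_range per spill is enough in the patched code.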