X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespillbelady.c;h=768ece1d25f56daa14d35a8aa873f5f4e5c33e0e;hb=f2c2e45eb4e677fef5bf6a8e418b2a22441172d5;hp=137786f44ebc1fd845eec20ccb1cbb7e863238f2;hpb=e5fb67efb637315db0be7f976f8ebeaf0e44364e;p=libfirm diff --git a/ir/be/bespillbelady.c b/ir/be/bespillbelady.c index 137786f44..768ece1d2 100644 --- a/ir/be/bespillbelady.c +++ b/ir/be/bespillbelady.c @@ -24,9 +24,7 @@ * @date 20.09.2005 * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include @@ -41,8 +39,6 @@ #include "ircons_t.h" #include "irprintf.h" #include "irnodeset.h" -#include "xmalloc.h" -#include "pdeq.h" #include "beutil.h" #include "bearch_t.h" @@ -68,13 +64,9 @@ #define DBG_WORKSET 128 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) -/* factor to weight the different costs of reloading/rematerializing a node - (see bespill.h be_get_reload_costs_no_weight) */ -#define RELOAD_COST_FACTOR 10 - #define TIME_UNDEFINED 6666 -#define PLACE_SPILLS +//#define LOOK_AT_LOOPDEPTH /** * An association between a node and a point in time. @@ -91,7 +83,6 @@ typedef struct _workset_t { } workset_t; static struct obstack obst; -static const arch_env_t *arch_env; static const arch_register_class_t *cls; static const be_lv_t *lv; static be_loopana_t *loop_ana; @@ -102,9 +93,23 @@ static be_uses_t *uses; /**< env for the next-use magic */ static ir_node *instr; /**< current instruction */ static unsigned instr_nr; /**< current instruction number (relative to block start) */ -static ir_nodeset_t used; static spill_env_t *senv; /**< see bespill.h */ -static pdeq *worklist; +static ir_node **blocklist; + +static bool move_spills = true; +static bool respectloopdepth = true; +static bool improve_known_preds = true; +/* factor to weight the different costs of reloading/rematerializing a node + (see bespill.h be_get_reload_costs_no_weight) */ +static int remat_bonus = 10; + +static const lc_opt_table_entry_t options[] = { + LC_OPT_ENT_BOOL ("movespills", "try to move spills out of loops", &move_spills), + LC_OPT_ENT_BOOL ("respectloopdepth", "exprimental (outermost loop cutting)", &respectloopdepth), + LC_OPT_ENT_BOOL ("improveknownpreds", "experimental (known preds cutting)", &improve_known_preds), + LC_OPT_ENT_INT ("rematbonus", "give bonus to rematerialisable nodes", &remat_bonus), + LC_OPT_LAST +}; static int loc_compare(const void *a, const void *b) { @@ -176,7 +181,7 @@ static void workset_insert(workset_t *workset, ir_node *val, bool spilled) loc_t *loc; int i; /* check for current regclass */ - assert(arch_irn_consider_in_reg_alloc(arch_env, cls, val)); + assert(arch_irn_consider_in_reg_alloc(cls, val)); /* check if val is already contained */ for (i = 0; i < workset->len; ++i) { @@ -209,7 +214,7 @@ static void workset_clear(workset_t *workset) /** * Removes the value @p val from the workset if present. 
*/ -static INLINE void workset_remove(workset_t *workset, ir_node *val) +static inline void workset_remove(workset_t *workset, ir_node *val) { int i; for(i = 0; i < workset->len; ++i) { @@ -220,7 +225,7 @@ static INLINE void workset_remove(workset_t *workset, ir_node *val) } } -static INLINE const loc_t *workset_contains(const workset_t *ws, +static inline const loc_t *workset_contains(const workset_t *ws, const ir_node *val) { int i; @@ -271,27 +276,30 @@ static void *new_block_info(void) /** * @return The distance to the next use or 0 if irn has dont_spill flag set */ -static INLINE unsigned get_distance(ir_node *from, unsigned from_step, +static inline unsigned get_distance(ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses) { be_next_use_t use; - int flags = arch_irn_get_flags(arch_env, def); unsigned costs; unsigned time; - assert(! (flags & arch_irn_flags_ignore)); + assert(!arch_irn_is_ignore(def)); - use = be_get_next_use(uses, from, from_step, def, skip_from_uses); - if (USES_IS_INFINITE(use.time)) + use = be_get_next_use(uses, from, from_step, def, skip_from_uses); + time = use.time; + if (USES_IS_INFINITE(time)) return USES_INFINITY; /* We have to keep nonspillable nodes in the workingset */ - if (flags & arch_irn_flags_dont_spill) + if (arch_irn_get_flags(def) & arch_irn_flags_dont_spill) return 0; - costs = be_get_reload_costs_no_weight(senv, def, use.before); - assert(costs * RELOAD_COST_FACTOR < 1000); - time = use.time + 1000 - (costs * RELOAD_COST_FACTOR); + /* give some bonus to rematerialisable nodes */ + if (remat_bonus > 0) { + costs = be_get_reload_costs_no_weight(senv, def, use.before); + assert(costs * remat_bonus < 1000); + time += 1000 - (costs * remat_bonus); + } return time; } @@ -307,8 +315,8 @@ static INLINE unsigned get_distance(ir_node *from, unsigned from_step, */ static void displace(workset_t *new_vals, int is_usage) { - ir_node **to_insert = alloca(n_regs * sizeof(to_insert[0])); - bool *spilled = alloca(n_regs * sizeof(spilled[0])); + ir_node **to_insert = ALLOCAN(ir_node*, n_regs); + bool *spilled = ALLOCAN(bool, n_regs); ir_node *val; int i; int len; @@ -321,10 +329,6 @@ static void displace(workset_t *new_vals, int is_usage) workset_foreach(new_vals, val, iter) { bool reloaded = false; - /* mark value as used */ - if (is_usage) - ir_nodeset_insert(&used, val); - if (! workset_contains(ws, val)) { DB((dbg, DBG_DECIDE, " insert %+F\n", val)); if (is_usage) { @@ -351,10 +355,13 @@ static void displace(workset_t *new_vals, int is_usage) /* Only make more free room if we do not have enough */ if (spills_needed > 0) { -#ifndef PLACE_SPILLS - ir_node *curr_bb = get_nodes_block(instr); - workset_t *ws_start = get_block_info(curr_bb)->start_workset; -#endif + ir_node *curr_bb = NULL; + workset_t *ws_start = NULL; + + if (move_spills) { + curr_bb = get_nodes_block(instr); + ws_start = get_block_info(curr_bb)->start_workset; + } DB((dbg, DBG_DECIDE, " disposing %d values\n", spills_needed)); @@ -374,22 +381,15 @@ static void displace(workset_t *new_vals, int is_usage) DB((dbg, DBG_DECIDE, " disposing node %+F (%u)\n", val, workset_get_time(ws, i))); -#ifdef PLACE_SPILLS - if (!USES_IS_INFINITE(ws->vals[i].time) && !ws->vals[i].spilled) { - ir_node *after_pos = sched_prev(instr); - be_add_spill(senv, val, after_pos); - } -#endif - -#ifndef PLACE_SPILLS - /* Logic for not needed live-ins: If a value is disposed - * before its first use, remove it from start workset - * We don't do this for phis though */ - if (!is_Phi(val) && ! 
ir_nodeset_contains(&used, val)) { - workset_remove(ws_start, val); - DB((dbg, DBG_DECIDE, " (and removing %+F from start workset)\n", val)); + if (move_spills) { + if (!USES_IS_INFINITE(ws->vals[i].time) + && !ws->vals[i].spilled) { + ir_node *after_pos = sched_prev(instr); + DB((dbg, DBG_DECIDE, "Spill %+F after node %+F\n", val, + after_pos)); + be_add_spill(senv, val, after_pos); + } } -#endif } /* kill the last 'demand' entries in the array */ @@ -479,13 +479,13 @@ static loc_t to_take_or_not_to_take(ir_node* first, ir_node *node, loc.node = node; loc.spilled = false; - if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node)) { + if (!arch_irn_consider_in_reg_alloc(cls, node)) { loc.time = USES_INFINITY; return loc; } /* We have to keep nonspillable nodes in the workingset */ - if (arch_irn_get_flags(arch_env, node) & arch_irn_flags_dont_spill) { + if (arch_irn_get_flags(node) & arch_irn_flags_dont_spill) { loc.time = 0; DB((dbg, DBG_START, " %+F taken (dontspill node)\n", node, loc.time)); return loc; @@ -497,26 +497,25 @@ static loc_t to_take_or_not_to_take(ir_node* first, ir_node *node, assert(is_Phi(node)); loc.time = USES_INFINITY; DB((dbg, DBG_START, " %+F not taken (dead)\n", node)); - if (is_Phi(node)) { - be_spill_phi(senv, node); - } return loc; } loc.time = next_use.time; - if (available == AVAILABLE_EVERYWHERE) { - DB((dbg, DBG_START, " %+F taken (%u, live in all preds)\n", node, - loc.time)); - return loc; - } else if(available == AVAILABLE_NOWHERE) { - DB((dbg, DBG_START, " %+F not taken (%u, live in no pred)\n", node, - loc.time)); - loc.time = USES_INFINITY; - return loc; + if (improve_known_preds) { + if (available == AVAILABLE_EVERYWHERE) { + DB((dbg, DBG_START, " %+F taken (%u, live in all preds)\n", + node, loc.time)); + return loc; + } else if(available == AVAILABLE_NOWHERE) { + DB((dbg, DBG_START, " %+F not taken (%u, live in no pred)\n", + node, loc.time)); + loc.time = USES_INFINITY; + return loc; + } } - if (next_use.outermost_loop >= get_loop_depth(loop)) { + if (!respectloopdepth || next_use.outermost_loop >= get_loop_depth(loop)) { DB((dbg, DBG_START, " %+F taken (%u, loop %d)\n", node, loc.time, next_use.outermost_loop)); } else { @@ -524,6 +523,7 @@ static loc_t to_take_or_not_to_take(ir_node* first, ir_node *node, DB((dbg, DBG_START, " %+F delayed (outerdepth %d < loopdepth %d)\n", node, next_use.outermost_loop, get_loop_depth(loop))); } + return loc; } @@ -550,7 +550,7 @@ static void decide_start_workset(const ir_node *block) /* check predecessors */ arity = get_irn_arity(block); - pred_worksets = alloca(sizeof(pred_worksets[0]) * arity); + pred_worksets = ALLOCAN(workset_t*, arity); all_preds_known = true; for(i = 0; i < arity; ++i) { ir_node *pred_block = get_Block_cfgpred_block(block, i); @@ -577,6 +577,8 @@ static void decide_start_workset(const ir_node *block) if (! is_Phi(node)) break; + if (!arch_irn_consider_in_reg_alloc(cls, node)) + continue; if (all_preds_known) { available = available_in_all_preds(pred_worksets, arity, node, true); @@ -591,6 +593,8 @@ static void decide_start_workset(const ir_node *block) ARR_APP1(loc_t, delayed, loc); else ARR_APP1(loc_t, starters, loc); + } else { + be_spill_phi(senv, node); } } @@ -624,34 +628,39 @@ static void decide_start_workset(const ir_node *block) /* so far we only put nodes into the starters list that are used inside * the loop. 
If register pressure in the loop is low then we can take some * values and let them live through the loop */ + DB((dbg, DBG_START, "Loop pressure %d, taking %d delayed vals\n", + pressure, free_slots)); if (free_slots > 0) { qsort(delayed, ARR_LEN(delayed), sizeof(delayed[0]), loc_compare); - for (i = 0; i < ARR_LEN(delayed) && i < free_slots; ++i) { + for (i = 0; i < ARR_LEN(delayed) && free_slots > 0; ++i) { int p, arity; loc_t *loc = & delayed[i]; - /* don't use values which are dead in a known predecessors - * to not induce unnecessary reloads */ - arity = get_irn_arity(block); - for (p = 0; p < arity; ++p) { - ir_node *pred_block = get_Block_cfgpred_block(block, p); - block_info_t *pred_info = get_block_info(pred_block); - - if (pred_info == NULL) - continue; - - if (!workset_contains(pred_info->end_workset, loc->node)) { - DB((dbg, DBG_START, - " delayed %+F not live at pred %+F\n", loc->node, - pred_block)); - goto skip_delayed; + if (!is_Phi(loc->node)) { + /* don't use values which are dead in a known predecessors + * to not induce unnecessary reloads */ + arity = get_irn_arity(block); + for (p = 0; p < arity; ++p) { + ir_node *pred_block = get_Block_cfgpred_block(block, p); + block_info_t *pred_info = get_block_info(pred_block); + + if (pred_info == NULL) + continue; + + if (!workset_contains(pred_info->end_workset, loc->node)) { + DB((dbg, DBG_START, + " delayed %+F not live at pred %+F\n", loc->node, + pred_block)); + goto skip_delayed; + } } } DB((dbg, DBG_START, " delayed %+F taken\n", loc->node)); ARR_APP1(loc_t, starters, *loc); loc->node = NULL; + --free_slots; skip_delayed: ; } @@ -735,128 +744,36 @@ static void decide_start_workset(const ir_node *block) } } -#if 0 -static void decide_start_workset2(const ir_node *block) -{ - int arity; - workset_t **pred_worksets; - int p; - int i; - int len; - - /* check if all predecessors are known */ - arity = get_irn_arity(block); - pred_worksets = alloca(sizeof(pred_worksets[0]) * arity); - for (i = 0; i < arity; ++i) { - ir_node *pred_block = get_Block_cfgpred_block(block, i); - block_info_t *pred_info = get_block_info(pred_block); - - if (pred_info == NULL) { - /* not all predecessors known, use decide_start_workset */ - decide_start_workset(block); - return; - } - - pred_worksets[i] = pred_info->end_workset; - } - - /* we construct a new workset */ - workset_clear(ws); - - /* take values live in all pred blocks */ - len = workset_get_length(pred_worksets[0]); - for (p = 0; p < len; ++p) { - const loc_t *l = &pred_worksets[0]->vals[p]; - ir_node *value; - bool spilled = false; - - if (USES_IS_INFINITE(l->time)) - continue; - - /* value available in all preds? */ - value = l->node; - for (i = 1; i < arity; ++i) { - bool found = false; - workset_t *p_workset = pred_worksets[i]; - int p_len = workset_get_length(p_workset); - int p_i; - - for (p_i = 0; p_i < p_len; ++p_i) { - const loc_t *p_l = &p_workset->vals[p_i]; - if (p_l->node != value) - continue; - - found = true; - if (p_l->spilled) - spilled = true; - break; - } - - if (!found) - break; - } - - /* it was available in all preds */ - if (i >= arity) { - workset_insert(ws, value, spilled); - } - } - - /* Copy the best ones from starters to start workset */ - ws_count = MIN(ARR_LEN(starters), n_regs); - workset_bulk_fill(ws, ws_count, starters); -} -#endif - /** * For the given block @p block, decide for each values * whether it is used from a register or is reloaded * before the use. 
*/ -static void belady(ir_node *block) +static void process_block(ir_node *block) { workset_t *new_vals; ir_node *irn; int iter; block_info_t *block_info; - int i, arity; - int has_backedges = 0; - //int first = 0; - const ir_edge_t *edge; + int arity; /* no need to process a block twice */ - if (get_block_info(block) != NULL) { - return; - } + assert(get_block_info(block) == NULL); - /* check if all predecessor blocks are processed yet (though for backedges - * we have to make an exception as we can't process them first) */ + /* construct start workset */ arity = get_Block_n_cfgpreds(block); - for(i = 0; i < arity; ++i) { - ir_node *pred_block = get_Block_cfgpred_block(block, i); - block_info_t *pred_info = get_block_info(pred_block); - - if (pred_info == NULL) { - /* process predecessor first (it will be in the queue already) */ - if (!is_backedge(block, i)) { - return; - } - has_backedges = 1; - } - } - (void) has_backedges; if (arity == 0) { + /* no predecessor -> empty set */ workset_clear(ws); } else if (arity == 1) { + /* one predecessor, copy it's end workset */ ir_node *pred_block = get_Block_cfgpred_block(block, 0); block_info_t *pred_info = get_block_info(pred_block); assert(pred_info != NULL); workset_copy(ws, pred_info->end_workset); } else { - /* we need 2 heuristics here, for the case when all predecessor blocks - * are known and when some are backedges (and therefore can't be known - * yet) */ + /* multiple predecessors, do more advanced magic :) */ decide_start_workset(block); } @@ -876,7 +793,6 @@ static void belady(ir_node *block) /* process the block from start to end */ DB((dbg, DBG_WSETS, "Processing...\n")); - ir_nodeset_init(&used); instr_nr = 0; /* TODO: this leaks (into the obstack)... */ new_vals = new_workset(); @@ -898,7 +814,7 @@ static void belady(ir_node *block) workset_clear(new_vals); for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) { ir_node *in = get_irn_n(irn, i); - if (!arch_irn_consider_in_reg_alloc(arch_env, cls, in)) + if (!arch_irn_consider_in_reg_alloc(cls, in)) continue; /* (note that "spilled" is irrelevant here) */ @@ -913,12 +829,12 @@ static void belady(ir_node *block) foreach_out_edge(irn, edge) { ir_node *proj = get_edge_src_irn(edge); - if (!arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) + if (!arch_irn_consider_in_reg_alloc(cls, proj)) continue; workset_insert(new_vals, proj, false); } } else { - if (!arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) + if (!arch_irn_consider_in_reg_alloc(cls, irn)) continue; workset_insert(new_vals, irn, false); } @@ -926,7 +842,6 @@ static void belady(ir_node *block) instr_nr++; } - ir_nodeset_destroy(&used); /* Remember end-workset for this block */ block_info->end_workset = workset_clone(ws); @@ -934,12 +849,6 @@ static void belady(ir_node *block) workset_foreach(ws, irn, iter) DB((dbg, DBG_WSETS, " %+F (%u)\n", irn, workset_get_time(ws, iter))); - - /* add successor blocks into worklist */ - foreach_block_succ(block, edge) { - ir_node *succ = get_edge_src_irn(edge); - pdeq_putr(worklist, succ); - } } /** @@ -958,10 +867,14 @@ static void fix_block_borders(ir_node *block, void *data) DB((dbg, DBG_FIX, "\n")); DB((dbg, DBG_FIX, "Fixing %+F\n", block)); + arity = get_irn_arity(block); + /* can happen for endless loops */ + if (arity == 0) + return; + start_workset = get_block_info(block)->start_workset; /* process all pred blocks */ - arity = get_irn_arity(block); for (i = 0; i < arity; ++i) { ir_node *pred = get_Block_cfgpred_block(block, i); workset_t *pred_end_workset = 
get_block_info(pred)->end_workset; @@ -988,8 +901,7 @@ static void fix_block_borders(ir_node *block, void *data) if (found) continue; -#ifdef PLACE_SPILLS - if (be_is_live_in(lv, block, node) + if (move_spills && be_is_live_in(lv, block, node) && !pred_end_workset->vals[iter].spilled) { ir_node *insert_point; if (arity > 1) { @@ -1002,7 +914,6 @@ static void fix_block_borders(ir_node *block, void *data) insert_point)); be_add_spill(senv, node, insert_point); } -#endif } /* reload missing values in predecessors, add missing spills */ @@ -1017,16 +928,15 @@ static void fix_block_borders(ir_node *block, void *data) assert(!l->spilled); /* we might have unknowns as argument for the phi */ - if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node)) + if (!arch_irn_consider_in_reg_alloc(cls, node)) continue; } /* check if node is in a register at end of pred */ pred_loc = workset_contains(pred_end_workset, node); if (pred_loc != NULL) { -#ifdef PLACE_SPILLS /* we might have to spill value on this path */ - if (!pred_loc->spilled && l->spilled) { + if (move_spills && !pred_loc->spilled && l->spilled) { ir_node *insert_point = be_get_end_of_block_insertion_point(pred); insert_point = sched_prev(insert_point); @@ -1034,7 +944,6 @@ static void fix_block_borders(ir_node *block, void *data) insert_point)); be_add_spill(senv, node, insert_point); } -#endif } else { /* node is not in register at the end of pred -> reload it */ DB((dbg, DBG_FIX, " reload %+F\n", node)); @@ -1045,45 +954,61 @@ static void fix_block_borders(ir_node *block, void *data) } } +static void add_block(ir_node *block, void *data) +{ + (void) data; + ARR_APP1(ir_node*, blocklist, block); +} + static void be_spill_belady(be_irg_t *birg, const arch_register_class_t *rcls) { + int i; ir_graph *irg = be_get_birg_irg(birg); be_liveness_assure_sets(be_assure_liveness(birg)); + stat_ev_tim_push(); /* construct control flow loop tree */ if (! 
(get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) { construct_cf_backedges(irg); } + stat_ev_tim_pop("belady_time_backedges"); + stat_ev_tim_push(); be_clear_links(irg); + stat_ev_tim_pop("belady_time_clear_links"); + + ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK); /* init belady env */ + stat_ev_tim_push(); obstack_init(&obst); - arch_env = birg->main_env->arch_env; - cls = rcls; - lv = be_get_birg_liveness(birg); - n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL); - ws = new_workset(); - uses = be_begin_uses(irg, lv); - loop_ana = be_new_loop_pressure(birg); - senv = be_new_spill_env(birg); - worklist = new_pdeq(); - - pdeq_putr(worklist, get_irg_start_block(irg)); - - while(!pdeq_empty(worklist)) { - ir_node *block = pdeq_getl(worklist); - belady(block); + cls = rcls; + lv = be_get_birg_liveness(birg); + n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL); + ws = new_workset(); + uses = be_begin_uses(irg, lv); + loop_ana = be_new_loop_pressure(birg, cls); + senv = be_new_spill_env(birg); + blocklist = NEW_ARR_F(ir_node*, 0); + irg_block_edges_walk(get_irg_start_block(irg), NULL, add_block, NULL); + stat_ev_tim_pop("belady_time_init"); + + stat_ev_tim_push(); + /* walk blocks in reverse postorder */ + for (i = ARR_LEN(blocklist) - 1; i >= 0; --i) { + process_block(blocklist[i]); } - /* end block might not be reachable in endless loops */ - belady(get_irg_end_block(irg)); - - del_pdeq(worklist); + DEL_ARR_F(blocklist); + stat_ev_tim_pop("belady_time_belady"); + stat_ev_tim_push(); /* belady was block-local, fix the global flow by adding reloads on the * edges */ irg_block_walk_graph(irg, fix_block_borders, NULL, NULL); + stat_ev_tim_pop("belady_time_fix_borders"); + + ir_free_resources(irg, IR_RESOURCE_IRN_LINK); /* Insert spill/reload nodes into the graph and fix usages */ be_insert_spills_reloads(senv); @@ -1100,6 +1025,9 @@ void be_init_spillbelady(void) static be_spiller_t belady_spiller = { be_spill_belady }; + lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be"); + lc_opt_entry_t *belady_group = lc_opt_get_grp(be_grp, "belady"); + lc_opt_add_table(belady_group, options); be_register_spiller("belady", &belady_spiller); FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
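
Note on the traversal change in the hunks above: the patch drops the pdeq worklist (which re-queued successor blocks and had to special-case not-yet-processed backedge predecessors) in favour of collecting all blocks once and visiting them in reverse postorder, so every non-backedge predecessor is guaranteed to be processed before the block itself. The following condensed sketch merely restates that pattern using calls that already appear in the patch (irg_block_edges_walk, NEW_ARR_F, ARR_APP1, DEL_ARR_F, process_block); the wrapper function and the header names are illustrative assumptions, not part of the patch.

/* Sketch: postorder collection of blocks, then reverse iteration.
 * Registering add_block as the *post* walker yields a postorder list;
 * walking it backwards gives reverse postorder, as in be_spill_belady(). */
#include "irnode.h"     /* assumed header for ir_node */
#include "irgraph.h"    /* assumed header for get_irg_start_block() */
#include "iredges.h"    /* assumed header for irg_block_edges_walk() */
#include "array.h"      /* NEW_ARR_F / ARR_APP1 / DEL_ARR_F */

static void process_block(ir_node *block);  /* per-block Belady pass from the patch */

static ir_node **blocklist;

static void add_block(ir_node *block, void *data)
{
	(void) data;
	ARR_APP1(ir_node*, blocklist, block);
}

static void walk_blocks_reverse_postorder(ir_graph *irg)
{
	int i;

	blocklist = NEW_ARR_F(ir_node*, 0);
	/* post-walk over control-flow edges starting at the start block */
	irg_block_edges_walk(get_irg_start_block(irg), NULL, add_block, NULL);

	/* reverse postorder: all predecessors (except over backedges) of a
	 * block have been processed before the block itself */
	for (i = ARR_LEN(blocklist) - 1; i >= 0; --i) {
		process_block(blocklist[i]);
	}
	DEL_ARR_F(blocklist);
}

This ordering is what allows the old checks in belady()/process_block() for unprocessed predecessors and the explicit extra call for the end block to be removed; blocks unreachable over control-flow edges (e.g. behind endless loops) simply never enter the list, which is why fix_block_borders() now returns early for arity-0 blocks.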