2 * Author: Daniel Grund, Matthias Braun
4 * Copyright: (c) Universitaet Karlsruhe
 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irprintf_t.h"
29 #include "iredges_t.h"
35 #include "bespillbelady.h"
37 #include "besched_t.h"
41 #include "bechordal_t.h"
42 #include "bespilloptions.h"
/* Debug-mask bit for workset trace output; other DBG_* bits used below
 * (DBG_DECIDE, DBG_SPILL, DBG_START, DBG_FIX, DBG_WSETS) are defined in
 * elided lines of this file. */
#define DBG_WORKSET 128
/* Module-local debug handle; compiled in only for debug builds. */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/**
 * An association between a node and a point in time.
 */
typedef struct _loc_t {
	ir_node *irn;   /**< A node. */
	unsigned time;  /**< A use time (see beuses.h); smaller = needed sooner. */

/**
 * A working set: the values currently assumed to reside in registers.
 */
typedef struct _workset_t {
	int len;         /**< current length */
	loc_t vals[0];   /**< inlined array of the values/distances in this working set
	                      (zero-length-array idiom; allocated with capacity
	                      n_regs entries, see new_workset()) */
/**
 * Environment carried through all passes of the belady spiller for one
 * register class.
 * NOTE(review): further members referenced below (obstack 'ob', liveness
 * 'lv', pset 'used') are declared in elided lines — confirm against the
 * full file.
 */
typedef struct _belady_env_t {
	const arch_env_t *arch;              /**< backend architecture environment */
	const arch_register_class_t *cls;    /**< register class we are spilling for */

	int n_regs;          /**< number of regs in this reg-class (minus ignore regs) */

	workset_t *ws;       /**< the main workset used while processing a block. ob-allocated */
	be_uses_t *uses;     /**< env for the next-use magic */
	ir_node *instr;      /**< current instruction */
	unsigned instr_nr;   /**< current instruction number (relative to block start) */

	spill_env_t *senv;   /**< see bespill.h */
83 static int loc_compare(const void *a, const void *b)
87 return p->time - q->time;
/**
 * Debug helper: dump every value in workset @p w together with its
 * recorded use time to stderr, one entry per line.
 */
static INLINE void workset_print(const workset_t *w)
	for(i = 0; i < w->len; ++i) {
		/* NOTE(review): 'time' is unsigned; %u would be more accurate than %d
		   for very large (infinite) distances. */
		ir_fprintf(stderr, "%+F %d\n", w->vals[i].irn, w->vals[i].time);
/**
 * Alloc a new workset on obstack @p ob with maximum size @p max
 * (capacity used here is env->n_regs entries). The returned set is
 * zero-filled, i.e. empty (len == 0).
 */
static INLINE workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
	/* header plus env->n_regs inlined loc_t slots */
	size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
	res = obstack_alloc(ob, size);
	memset(res, 0, size);
/**
 * Alloc a new instance on obstack and make it equal to @param ws
 * (length and all value/time slots are copied).
 */
static INLINE workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
	size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
	res = obstack_alloc(ob, size);
	/* full copy including unused tail slots — size is capacity-based */
	memcpy(res, ws, size);
/**
 * Do NOT alloc anything. Make @param tgt equal to @param src.
 * returns @param tgt for convenience
 * (@p tgt must already have capacity for env->n_regs entries.)
 */
static INLINE workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
	size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
	memcpy(tgt, src, size);
/**
 * Overwrites the current content array of @param ws with the
 * @param count locations given at memory @param locs.
 * Set the length of @param ws to count.
 * (Caller guarantees count does not exceed the workset's capacity.)
 */
static INLINE void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
	workset->len = count;
	memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
/**
 * Inserts the value @p val into the workset, iff it is not
 * already contained. The workset must not be full.
 * Values of a foreign register class are silently skipped.
 */
static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
	/* check for current regclass */
	if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
		DBG((dbg, DBG_WORKSET, "Skipped %+F\n", val));

	/* check if val is already contained */
	for(i=0; i<ws->len; ++i)
		if (ws->vals[i].irn == val)

	/* insert val; its 'time' slot is filled in later (see displace()) */
	assert(ws->len < env->n_regs && "Workset already full!");
	ws->vals[ws->len++].irn = val;
/**
 * Removes all entries from this workset
 * (presumably just resets ws->len — body elided in this excerpt).
 */
static INLINE void workset_clear(workset_t *ws) {
/**
 * Removes the value @p val from the workset if present.
 * Order is not preserved: the last entry is moved into the freed slot.
 */
static INLINE void workset_remove(workset_t *ws, ir_node *val) {
	for(i=0; i<ws->len; ++i) {
		if (ws->vals[i].irn == val) {
			/* swap-with-last removal */
			ws->vals[i] = ws->vals[--ws->len];
/**
 * @return nonzero iff @p val is currently contained in workset @p ws.
 * Linear scan — worksets hold at most n_regs entries.
 */
static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
	for(i=0; i<ws->len; ++i) {
		if (ws->vals[i].irn == val)
/**
 * Iterates over all values in the working set.
 * @p ws The workset to iterate
 * @p v A variable to put the current value in
 * @p i An integer for internal use
 */
#define workset_foreach(ws, v, i) for(i=0; \
	v=(i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \

/* Field accessors; 'i' indexes the vals array. */
#define workset_set_time(ws, i, t) (ws)->vals[i].time=t
#define workset_get_time(ws, i) (ws)->vals[i].time
#define workset_set_length(ws, length) (ws)->len = length
#define workset_get_length(ws) ((ws)->len)
#define workset_get_val(ws, i) ((ws)->vals[i].irn)
/* Sorts entries by increasing next-use time via loc_compare.
 * NOTE(review): the trailing ';' inside the macro makes
 * "if (c) workset_sort(ws); else ..." a syntax error — consider removing it. */
#define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
/**
 * Per-block data for the belady pass.
 * NOTE(review): a 'processed' flag is used below; its declaration lies in
 * elided lines of this struct — confirm against the full file.
 */
typedef struct _block_info_t {
	workset_t *ws_start, *ws_end;   /**< worksets at block entry and exit */
/**
 * Allocates a fresh block_info on obstack @p ob with an empty start set.
 */
static INLINE void *new_block_info(struct obstack *ob) {
	block_info_t *res = obstack_alloc(ob, sizeof(*res));
	res->ws_start = NULL;
/* The block_info is stashed in the block node's link field. */
#define get_block_info(block) ((block_info_t *)get_irn_link(block))
#define set_block_info(block, info) set_irn_link(block, info)
/**
 * Computes the next-use distance of @p def as seen from @p from.
 * @return The distance to the next use or 0 if irn has dont_spill flag set
 * (so nonspillable nodes always look maximally attractive to keep).
 */
static INLINE unsigned get_distance(belady_env_t *env, ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
	int flags = arch_irn_get_flags(env->arch, def);

	/* ignore-registers never take part in spilling, so they must not appear */
	assert(! (flags & arch_irn_flags_ignore));

	use = be_get_next_use(env->uses, from, from_step, def, skip_from_uses);
	if(USES_IS_INFINITE(use.time))
		return USES_INFINITY;

	/* We have to keep nonspillable nodes in the workingset */
	if(flags & arch_irn_flags_dont_spill)
/**
 * Performs the actions necessary to grant the request that:
 * - new_vals can be held in registers
 * - as few as possible other values are disposed
 * - the worst values get disposed
 *
 * @p is_usage indicates that the values in new_vals are used (not defined)
 * In this case reloads must be performed
 */
static void displace(belady_env_t *env, workset_t *new_vals, int is_usage) {
	int i, len, max_allowed, demand, iter;
	workset_t *ws = env->ws;
	/* scratch list of the values that must newly enter the workset */
	ir_node **to_insert = alloca(env->n_regs * sizeof(*to_insert));

	/*
	 * 1. Identify the number of needed slots and the values to reload
	 */
	workset_foreach(new_vals, val, iter) {
		/* mark value as used */
		pset_insert_ptr(env->used, val);

		if (! workset_contains(ws, val)) {
			DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
			to_insert[demand++] = val;
			/* a used value not currently in a register must be reloaded */
			DBG((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, env->instr));
			be_add_reload(env->senv, val, env->instr, env->cls, 1);

			/* NOTE(review): '||' makes this assert always pass (a string
			   literal is nonzero); 'is_usage && "..."' was almost certainly
			   intended. Left as-is in this documentation pass. */
			assert(is_usage || "Defined value already in workset?!?");
			DBG((dbg, DBG_DECIDE, " skip %+F\n", val));

	DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));

	/*
	 * 2. Make room for at least 'demand' slots
	 */
	len = workset_get_length(ws);
	max_allowed = env->n_regs - demand;

	DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));

	/* Only make more free room if we do not have enough */
	if (len > max_allowed) {
		/* get current next-use distance */
		for (i = 0; i < ws->len; ++i) {
			unsigned dist = get_distance(env, env->instr, env->instr_nr, workset_get_val(ws, i), !is_usage);
			workset_set_time(ws, i, dist);

		/* sort entries by increasing nextuse-distance*/

		/*
		 * Logic for not needed live-ins: If a value is disposed
		 * before its first usage, remove it from start workset
		 * We don't do this for phis though
		 */
		for (i = max_allowed; i < ws->len; ++i) {
			ir_node *irn = ws->vals[i].irn;

			DBG((dbg, DBG_DECIDE, " disposing %+F (%u)\n", irn, workset_get_time(ws, i)));

			/* never used since block entry -> it need not be live-in at all */
			if (! pset_find_ptr(env->used, irn)) {
				ir_node *curr_bb = get_nodes_block(env->instr);
				workset_t *ws_start = get_block_info(curr_bb)->ws_start;
				workset_remove(ws_start, irn);

				DBG((dbg, DBG_DECIDE, " (and removing %+F from start workset)\n", irn));

		/* kill the last 'demand' entries in the array */
		workset_set_length(ws, max_allowed);

	/*
	 * 3. Insert the new values into the workset
	 */
	for (i = 0; i < demand; ++i)
		workset_insert(env, env->ws, to_insert[i]);
/* Forward declaration: compute_block_start_info() below recurses into
 * belady() to process an unvisited predecessor block first. */
static void belady(ir_node *block, void *env);
/**
 * Decides whether live-in candidate @p node deserves a slot in the start
 * workset of @p block. Returns a loc_t whose 'time' is the first-use
 * distance when the value is taken, or USES_INFINITY when it is not
 * (foreign register class, dead, or only used in deeper loops).
 */
static loc_t to_take_or_not_to_take(belady_env_t *env, ir_node* first, ir_node *node, ir_node *block, ir_loop *loop) {
	be_next_use_t next_use;

	loc.time = USES_INFINITY;

	/* foreign register class -> never in this workset */
	if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, node)) {
		loc.time = USES_INFINITY;

	/* We have to keep nonspillable nodes in the workingset */
	if(arch_irn_get_flags(env->arch, node) & arch_irn_flags_dont_spill) {
		/* NOTE(review): format has one %+F but two args (node, loc.time);
		   harmless, yet the extra argument is never printed. */
		DBG((dbg, DBG_START, "    %+F taken (dontspill node)\n", node, loc.time));

	next_use = be_get_next_use(env->uses, first, 0, node, 0);
	if(USES_IS_INFINITE(next_use.time)) {
		/* the nodes marked as live in shouldn't be dead, so it must be a phi */
		loc.time = USES_INFINITY;
		DBG((dbg, DBG_START, "    %+F not taken (dead)\n", node));
		assert(is_Phi(node));

	loc.time = next_use.time;

	/* only take the value if its first use is not in a deeper loop */
	if(next_use.outermost_loop >= get_loop_depth(loop)) {
		DBG((dbg, DBG_START, "    %+F taken (%u, loop %d)\n", node, loc.time, next_use.outermost_loop));

		loc.time = USES_INFINITY;
		DBG((dbg, DBG_START, "    %+F not taken (outerloopdepth %d < loopdetph %d)\n", node, next_use.outermost_loop, get_loop_depth(loop)));
/**
 * Computes set of live-ins for each block with multiple predecessors
 * and notifies spill algorithm which phis need to be spilled
 * (blocks with exactly one predecessor are handled lazily via
 * compute_block_start_info() instead).
 */
static void compute_live_ins(ir_node *block, void *data) {
	belady_env_t *env = data;
	block_info_t *block_info;
	ir_node *first, *irn;
	loc_t loc, *starters;
	int i, len, ws_count;
	ir_loop *loop = get_irn_loop(block);
	const be_lv_t *lv = env->lv;

	/* single-pred, non-start blocks inherit their start set from the pred */
	if(get_Block_n_cfgpreds(block) == 1 && get_irg_start_block(get_irn_irg(block)) != block)

	block_info = new_block_info(&env->ob);
	set_block_info(block, block_info);

	/* Collect all values living at start of block */
	starters = NEW_ARR_F(loc_t, 0);

	DBG((dbg, DBG_START, "Living at start of %+F:\n", block));
	first = sched_first(block);

	/* phase 1: rate the block's phi results */
	sched_foreach(block, irn) {

		loc = to_take_or_not_to_take(env, first, irn, block, loop);

		if(!USES_IS_INFINITE(loc.time)) {
			ARR_APP1(loc_t, starters, loc);
			/* phi not worth a register -> spill it right away */
			be_spill_phi(env->senv, irn);

	/* phase 2: rate the ordinary live-in values */
	be_lv_foreach(lv, block, be_lv_state_in, i) {
		ir_node *node = be_lv_get_irn(lv, block, i);

		loc = to_take_or_not_to_take(env, first, node, block, loop);

		if(!USES_IS_INFINITE(loc.time)) {
			ARR_APP1(loc_t, starters, loc);

	/* Sort start values by first use */
	qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);

	/* Copy the best ones from starters to start workset */
	ws_count = MIN(ARR_LEN(starters), env->n_regs);
	block_info->ws_start = new_workset(env, &env->ob);
	workset_bulk_fill(block_info->ws_start, ws_count, starters);

	/* The phis of this block which are not in the start set have to be spilled later. */
	len = ARR_LEN(starters);
	for (i = ws_count; i < len; ++i) {
		irn = starters[i].irn;
		/* only phis defined in this very block are spilled here */
		if (!is_Phi(irn) || get_nodes_block(irn) != block)

		be_spill_phi(env->senv, irn);
/**
 * Collects all values live-in at block @p block and all phi results in this block.
 * Then it adds the best values (at most n_regs) to the blocks start_workset.
 * The phis among the remaining values get spilled: Introduce pseudo-copies of
 * their args to break interference and make it possible to spill them to the
 * same spill slot (NOTE(review): sentence truncated in this excerpt — confirm).
 */
static block_info_t *compute_block_start_info(belady_env_t *env, ir_node *block) {
	block_info_t *res, *pred_info;

	/* Have we seen this block before? */
	res = get_block_info(block);

	/* Create the block info for this block. */
	res = new_block_info(&env->ob);
	set_block_info(block, res);

	/* Use endset of predecessor block as startset */
	assert(get_Block_n_cfgpreds(block) == 1 && block != get_irg_start_block(get_irn_irg(block)));
	pred_block = get_Block_cfgpred_block(block, 0);
	pred_info = get_block_info(pred_block);

	/* if pred block has not been processed yet, do it now */
	if (pred_info == NULL || pred_info->processed == 0) {
		belady(pred_block, env);
		pred_info = get_block_info(pred_block);

	/* now we have an end_set of pred */
	assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
	res->ws_start = workset_clone(env, &env->ob, pred_info->ws_end);
/**
 * For the given block @p block, decide for each values
 * whether it is used from a register or is reloaded
 * before the use. Runs the belady displacement over the whole schedule
 * and records the resulting end workset in the block's info.
 */
static void belady(ir_node *block, void *data) {
	belady_env_t *env = data;
	block_info_t *block_info;

	/* make sure we have blockinfo (with startset) */
	block_info = get_block_info(block);
	if (block_info == NULL)
		block_info = compute_block_start_info(env, block);

	/* Don't do a block twice */
	if(block_info->processed)

	/* get the starting workset for this block */
	DBG((dbg, DBG_DECIDE, "\n"));
	DBG((dbg, DBG_DECIDE, "Decide for %+F\n", block));

	workset_copy(env, env->ws, block_info->ws_start);
	DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
	workset_foreach(env->ws, irn, iter)
		DBG((dbg, DBG_WSETS, " %+F (%u)\n", irn, workset_get_time(env->ws, iter)));

	/* process the block from start to end */
	DBG((dbg, DBG_WSETS, "Processing...\n"));
	/* tracks which values were actually used since block entry (see displace) */
	env->used = pset_new_ptr_default();

	new_vals = new_workset(env, &env->ob);
	sched_foreach(block, irn) {

		assert(workset_get_length(env->ws) <= env->n_regs && "Too much values in workset!");

		/* projs are handled with the tuple value.
		 * Phis are no real instr (see insert_starters())
		 * instr_nr does not increase */
		if (is_Proj(irn) || is_Phi(irn)) {
			DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));

		DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));

		/* set instruction in the workset */

		/* allocate all values _used_ by this instruction */
		workset_clear(new_vals);
		for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
			workset_insert(env, new_vals, get_irn_n(irn, i));
		displace(env, new_vals, 1);

		/* allocate all values _defined_ by this instruction */
		workset_clear(new_vals);
		if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
			/* collect the Proj values following the tuple in the schedule */
			for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
				workset_insert(env, new_vals, proj);

			workset_insert(env, new_vals, irn);
		displace(env, new_vals, 0);

	/* Remember end-workset for this block */
	block_info->ws_end = workset_clone(env, &env->ob, env->ws);
	block_info->processed = 1;
	DBG((dbg, DBG_WSETS, "End workset for %+F:\n", block));
	workset_foreach(block_info->ws_end, irn, iter)
		DBG((dbg, DBG_WSETS, " %+F (%u)\n", irn, workset_get_time(block_info->ws_end, iter)));
/**
 * 'decide' is block-local and makes assumptions
 * about the set of live-ins. Thus we must adapt the
 * live-outs to the live-ins at each block-border:
 * every value expected in a register at block entry that is not in a
 * register at the end of some predecessor gets a reload on that edge.
 */
static void fix_block_borders(ir_node *block, void *data) {
	belady_env_t *env = data;
	ir_graph *irg = get_irn_irg(block);
	ir_node *startblock = get_irg_start_block(irg);
	int i, max, iter, iter2;

	/* the start block has no predecessors to fix */
	if(block == startblock)

	DBG((dbg, DBG_FIX, "\n"));
	DBG((dbg, DBG_FIX, "Fixing %+F\n", block));

	wsb = get_block_info(block)->ws_start;

	/* process all pred blocks */
	for (i=0, max=get_irn_arity(block); i<max; ++i) {
		ir_node *irnb, *irnp, *pred = get_Block_cfgpred_block(block, i);
		workset_t *wsp = get_block_info(pred)->ws_end;

		DBG((dbg, DBG_FIX, "  Pred %+F\n", pred));

		workset_foreach(wsb, irnb, iter) {
			/* if irnb is a phi of the current block we reload
			 * the corresponding argument, else irnb itself */
			if(is_Phi(irnb) && block == get_nodes_block(irnb)) {
				irnb = get_irn_n(irnb, i);

				/* we might have unknowns as argument for the phi */
				if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, irnb))

			/* Unknowns are available everywhere */
			if(get_irn_opcode(irnb) == iro_Unknown)

			/* check if irnb is in a register at end of pred */
			workset_foreach(wsp, irnp, iter2) {

			/* irnb is not in a register at the end of pred, so we have to reload it */
			DBG((dbg, DBG_FIX, "    reload %+F\n", irnb));
			DBG((dbg, DBG_SPILL, "Reload %+F before %+F,%d\n", irnb, block, i));
			be_add_reload_on_edge(env->senv, irnb, block, i, env->cls, 1);

			/*epsilon statement :)*/;
632 void be_spill_belady(be_irg_t *birg, const arch_register_class_t *cls) {
633 be_spill_belady_spill_env(birg, cls, NULL);
/**
 * Runs the full belady spilling pipeline on @p birg for class @p cls.
 * @param spill_env  an existing spill environment to reuse, or NULL to
 *                   create (and destroy) one internally.
 * NOTE(review): the local 'belady_env_t env;' declaration and the
 * assignment of env.cls (read below at the n_regs computation) are in
 * elided lines — confirm against the full file.
 */
void be_spill_belady_spill_env(be_irg_t *birg, const arch_register_class_t *cls, spill_env_t *spill_env) {
	ir_graph *irg = be_get_birg_irg(birg);

	FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
	//firm_dbg_set_mask(dbg, DBG_SPILL);

	be_assure_liveness(birg);
	/* construct control flow loop tree */
	if(! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
		construct_cf_backedges(irg);

	/* init belady env */
	obstack_init(&env.ob);
	env.arch = birg->main_env->arch_env;
	env.lv = be_get_birg_liveness(birg);
	/* ignore registers are not available for spilling decisions */
	env.n_regs = env.cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
	env.ws = new_workset(&env, &env.ob);
	env.uses = be_begin_uses(irg, env.lv);
	if(spill_env == NULL) {
		env.senv = be_new_spill_env(birg);

		env.senv = spill_env;

	DEBUG_ONLY(be_set_spill_env_dbg_module(env.senv, dbg);)

	/* Decide which phi nodes will be spilled and place copies for them into the graph */
	irg_block_walk_graph(irg, compute_live_ins, NULL, &env);
	/* Fix high register pressure with belady algorithm */
	irg_block_walk_graph(irg, NULL, belady, &env);
	/* belady was block-local, fix the global flow by adding reloads on the edges */
	irg_block_walk_graph(irg, fix_block_borders, NULL, &env);
	/* Insert spill/reload nodes into the graph and fix usages */
	be_insert_spills_reloads(env.senv);

	/* clean up — only destroy the spill env if we created it ourselves */
	if(spill_env == NULL)
		be_delete_spill_env(env.senv);
	be_end_uses(env.uses);
	obstack_free(&env.ob, NULL);
/**
 * Module constructor: registers the belady algorithm with the backend's
 * spiller registry under the name "belady".
 */
void be_init_spillbelady(void)
	static be_spiller_t belady_spiller = {

	be_register_spiller("belady", &belady_spiller);

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady);