2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Beladys spillalgorithm.
23 * @author Daniel Grund, Matthias Braun
34 #include "irprintf_t.h"
40 #include "iredges_t.h"
47 #include "bespillbelady.h"
49 #include "besched_t.h"
53 #include "bechordal_t.h"
54 #include "bespilloptions.h"
55 #include "beloopana.h"
66 #define DBG_WORKSET 128
67 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
70 * An association between a node and a point in time.
72 typedef struct _loc_t {
73 ir_node *irn; /**< A node. */
74 unsigned time; /**< A use time (see beuses.h). */
75 int reloaded_value; /**< the value is a reloaded value */
78 typedef struct _workset_t {
79 int len; /**< current length */
80 loc_t vals[0]; /**< inlined array of the values/distances in this working set */
/* Global environment for one run of the Belady spiller.
 * NOTE(review): this excerpt is truncated — fields referenced later in the
 * file (the obstack 'ob', the liveness info 'lv', the 'used' pset) are not
 * visible here; confirm against the full source. */
typedef struct _belady_env_t {
	const arch_env_t *arch;            /**< architecture environment */
	const arch_register_class_t *cls;  /**< register class being spilled for */
	be_loopana_t *loop_ana;            /**< loop pressure analysis results */
	int n_regs;          /** number of regs in this reg-class */
	workset_t *ws;       /**< the main workset used while processing a block. ob-allocated */
	be_uses_t *uses;     /**< env for the next-use magic */
	ir_node *instr;      /**< current instruction */
	unsigned instr_nr;   /**< current instruction number (relative to block start) */
	spill_env_t *senv;   /**< see bespill.h */
100 static int loc_compare(const void *a, const void *b)
104 return p->time - q->time;
108 static void workset_print(const workset_t *w)
112 for(i = 0; i < w->len; ++i) {
113 ir_fprintf(stderr, "%+F %d (%d)\n", w->vals[i].irn, w->vals[i].time, w->vals[i].reloaded_value);
115 /* avoid unused warning */
116 (void) workset_print;
120 * Alloc a new workset on obstack @p ob with maximum size @p max
122 static INLINE workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
124 size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
125 res = obstack_alloc(ob, size);
126 memset(res, 0, size);
131 * Alloc a new instance on obstack and make it equal to @param ws
133 static INLINE workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
135 size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
136 res = obstack_alloc(ob, size);
137 memcpy(res, ws, size);
142 * Do NOT alloc anything. Make @param tgt equal to @param src.
143 * returns @param tgt for convenience
145 static INLINE workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
146 size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
147 memcpy(tgt, src, size);
152 * Overwrites the current content array of @param ws with the
153 * @param count locations given at memory @param locs.
154 * Set the length of @param ws to count.
156 static INLINE void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
157 workset->len = count;
158 memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
162 * Inserts the value @p val into the workset, iff it is not
163 * already contained. The workset must not be full.
165 static INLINE void workset_insert(belady_env_t *env, workset_t *ws,
166 ir_node *val, int reloaded_value)
169 /* check for current regclass */
170 if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
171 //DBG((dbg, DBG_WORKSET, "Skipped %+F\n", val));
175 /* check if val is already contained */
176 for(i=0; i<ws->len; ++i) {
177 if (ws->vals[i].irn == val) {
178 if(!ws->vals[i].reloaded_value)
179 ws->vals[i].reloaded_value = reloaded_value;
185 assert(ws->len < env->n_regs && "Workset already full!");
186 ws->vals[ws->len].irn = val;
187 ws->vals[ws->len].reloaded_value = reloaded_value;
188 ws->vals[ws->len].time = 6666;
193 * Removes all entries from this workset
195 static INLINE void workset_clear(workset_t *ws) {
200 * Removes the value @p val from the workset if present.
202 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
204 for(i=0; i<ws->len; ++i) {
205 if (ws->vals[i].irn == val) {
206 ws->vals[i] = ws->vals[--ws->len];
212 static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
214 for(i=0; i<ws->len; ++i) {
215 if (ws->vals[i].irn == val)
/**
 * Iterates over all values in the working set.
 * @p ws The workset to iterate
 * @p v A variable to put the current value in
 * @p i An integer for internal use
 */
#define workset_foreach(ws, v, i) for(i=0; \
	v=(i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \
	++i)

/* Accessors for an entry's next-use time and the workset's length/values. */
#define workset_set_time(ws, i, t) (ws)->vals[i].time=t
#define workset_get_time(ws, i) (ws)->vals[i].time
#define workset_set_length(ws, length) (ws)->len = length
#define workset_get_length(ws) ((ws)->len)
#define workset_get_val(ws, i) ((ws)->vals[i].irn)
/* sort entries by ascending next-use time (see loc_compare) */
#define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
239 typedef struct _block_info_t {
240 workset_t *ws_start, *ws_end;
245 static INLINE void *new_block_info(struct obstack *ob) {
246 block_info_t *res = obstack_alloc(ob, sizeof(*res));
247 res->ws_start = NULL;
/* Store/retrieve the per-block info through the irn link field. */
#define get_block_info(block) ((block_info_t *)get_irn_link(block))
#define set_block_info(block, info) set_irn_link(block, info)
/**
 * Computes the next-use distance of @p def when standing at @p from.
 * @return The distance to the next use or 0 if irn has dont_spill flag set
 * NOTE(review): this excerpt is missing the declarations of 'use' and
 * 'time', several braces and the final return — code kept verbatim,
 * comments only added.
 */
static INLINE unsigned get_distance(belady_env_t *env, ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
	int flags = arch_irn_get_flags(env->arch, def);

	/* ignore-registers never take part in spilling */
	assert(! (flags & arch_irn_flags_ignore));

	use = be_get_next_use(env->uses, from, from_step, def, skip_from_uses);
	if(USES_IS_INFINITE(use.time))
		return USES_INFINITY;

	/* We have to keep nonspillable nodes in the workingset */
	if(flags & arch_irn_flags_dont_spill)

	/* penalize expensive-to-reload values so they are kept in registers */
	time += be_get_reload_costs_no_weight(env->senv, def, use.before) * 10;
/**
 * Performs the actions necessary to grant the request that:
 * - new_vals can be held in registers
 * - as few as possible other values are disposed
 * - the worst values get disposed
 *
 * @p is_usage indicates that the values in new_vals are used (not defined)
 * In this case reloads must be performed
 *
 * NOTE(review): this excerpt is missing the declarations of 'val' and the
 * initialization of 'demand', plus several closing braces — code kept
 * verbatim, comments only added.
 */
static void displace(belady_env_t *env, workset_t *new_vals, int is_usage) {
	int i, len, max_allowed, demand, iter;
	workset_t *ws = env->ws;
	ir_node **to_insert = alloca(env->n_regs * sizeof(*to_insert));

	/*
	 * 1. Identify the number of needed slots and the values to reload
	 */
	workset_foreach(new_vals, val, iter) {
		/* mark value as used */
		pset_insert_ptr(env->used, val);

		if (! workset_contains(ws, val)) {
			DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
			to_insert[demand++] = val;
			/* a used value not in the workset must be reloaded */
			DBG((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, env->instr));
			be_add_reload(env->senv, val, env->instr, env->cls, 1);
			DBG((dbg, DBG_DECIDE, " %+F already in workset\n", val));

	/*
	 * 2. Make room for at least 'demand' slots
	 */
	len = workset_get_length(ws);
	max_allowed = env->n_regs - demand;

	/* Only make more free room if we do not have enough */
	if (len > max_allowed) {
		DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));

		/* get current next-use distance */
		for (i = 0; i < ws->len; ++i) {
			unsigned dist = get_distance(env, env->instr, env->instr_nr, workset_get_val(ws, i), !is_usage);
			workset_set_time(ws, i, dist);

		/* sort entries by increasing nextuse-distance*/

		/*
		 * Logic for not needed live-ins: If a value is disposed
		 * before its first usage, remove it from start workset
		 * We don't do this for phis though
		 */
		for (i = max_allowed; i < ws->len; ++i) {
			ir_node *irn = ws->vals[i].irn;

			DBG((dbg, DBG_DECIDE, " disposing node %+F (%u)\n", irn,
			     workset_get_time(ws, i)));

			/* only finite-distance, non-reloaded values need a real spill */
			if(!USES_IS_INFINITE(ws->vals[i].time)
				&& !ws->vals[i].reloaded_value) {
				//be_add_spill(env->senv, irn, env->instr);

			/* value disposed before its first use: drop it from the
			 * block's start workset */
			if (! pset_find_ptr(env->used, irn)) {
				ir_node *curr_bb = get_nodes_block(env->instr);
				workset_t *ws_start = get_block_info(curr_bb)->ws_start;
				workset_remove(ws_start, irn);

				DBG((dbg, DBG_DECIDE, " (and removing %+F from start workset)\n", irn));

		/* kill the last 'demand' entries in the array */
		workset_set_length(ws, max_allowed);

	/*
	 * 3. Insert the new values into the workset
	 */
	for (i = 0; i < demand; ++i) {
		workset_insert(env, env->ws, to_insert[i], 1);
/* forward declaration: belady() is invoked recursively from
 * compute_block_start_info() for not-yet-processed predecessor blocks */
static void belady(ir_node *block, void *env);
/**
 * Decides whether a specific node should be in the start workset or not
 *
 * @param env belady environment
 * @param first first instruction of the block (distance reference point)
 * @param node the node to test
 * @param block the block of the node
 * @param loop the loop of the node
 *
 * NOTE(review): this excerpt is missing the 'loc' declaration, several
 * return statements and braces — code kept verbatim, comments only added.
 */
static loc_t to_take_or_not_to_take(belady_env_t *env, ir_node* first,
                                    ir_node *node, ir_node *block,
	be_next_use_t next_use;

	loc.time = USES_INFINITY;
	loc.reloaded_value = 0;

	/* values of another register class are never taken */
	if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, node)) {
		loc.time = USES_INFINITY;

	/* We have to keep nonspillable nodes in the workingset */
	if(arch_irn_get_flags(env->arch, node) & arch_irn_flags_dont_spill) {
		DBG((dbg, DBG_START, " %+F taken (dontspill node)\n", node, loc.time));

	next_use = be_get_next_use(env->uses, first, 0, node, 0);
	if(USES_IS_INFINITE(next_use.time)) {
		// the nodes marked as live in shouldn't be dead, so it must be a phi
		assert(is_Phi(node));
		loc.time = USES_INFINITY;
		DBG((dbg, DBG_START, " %+F not taken (dead)\n", node));
		/* a dead phi is spilled right away */
		be_spill_phi(env->senv, node);

	loc.time = next_use.time;

	/* next use happens inside a loop at least as deep: take it now,
	 * otherwise mark it pending (maybe taken later if slots remain) */
	if(next_use.outermost_loop >= get_loop_depth(loop)) {
		DBG((dbg, DBG_START, " %+F taken (%u, loop %d)\n", node, loc.time, next_use.outermost_loop));
		loc.time = USES_PENDING;
		DBG((dbg, DBG_START, " %+F delayed (outerloopdepth %d < loopdetph %d)\n", node, next_use.outermost_loop, get_loop_depth(loop)));
/**
 * Computes set of live-ins for each block with multiple predecessors
 * and notifies spill algorithm which phis need to be spilled
 *
 * Block-walker callback; @p data is the belady_env_t.
 *
 * NOTE(review): this excerpt is missing the 'pressure' declaration, an
 * early return for single-predecessor blocks, array cleanup (DEL_ARR_F)
 * and several braces — code kept verbatim, comments only added.
 */
static void compute_live_ins(ir_node *block, void *data) {
	belady_env_t *env = data;
	ir_loop *loop = get_irn_loop(block);
	const be_lv_t *lv = env->lv;
	block_info_t *block_info;
	ir_node *first, *irn;
	loc_t loc, *starters, *delayed;
	int i, len, ws_count;
	int free_slots, free_pressure_slots;

	/* single-predecessor non-start blocks inherit their workset from the
	 * predecessor instead (see compute_block_start_info) */
	if (get_Block_n_cfgpreds(block) == 1 && get_irg_start_block(get_irn_irg(block)) != block)

	block_info = new_block_info(&env->ob);
	set_block_info(block, block_info);

	/* Collect all values living at start of block */
	starters = NEW_ARR_F(loc_t, 0);
	delayed = NEW_ARR_F(loc_t, 0);

	DBG((dbg, DBG_START, "Living at start of %+F:\n", block));
	first = sched_first(block);

	/* check all Phis first */
	sched_foreach(block, irn) {
		loc = to_take_or_not_to_take(env, first, irn, block, loop);
		/* the phis can't be reloaded here (they just get defined) */
		loc.reloaded_value = 0;

		if (! USES_IS_INFINITE(loc.time)) {
			if (USES_IS_PENDING(loc.time))
				ARR_APP1(loc_t, delayed, loc);
				ARR_APP1(loc_t, starters, loc);

	/* check all Live-Ins */
	be_lv_foreach(lv, block, be_lv_state_in, i) {
		ir_node *node = be_lv_get_irn(lv, block, i);

		loc = to_take_or_not_to_take(env, first, node, block, loop);

		if (! USES_IS_INFINITE(loc.time)) {
			if (USES_IS_PENDING(loc.time))
				ARR_APP1(loc_t, delayed, loc);
				ARR_APP1(loc_t, starters, loc);

	/* bound the number of delayed values we may still take by both the
	 * free workset slots and the loop register pressure */
	pressure = be_get_loop_pressure(env->loop_ana, env->cls, loop);
	assert(ARR_LEN(delayed) <= (signed)pressure);
	free_slots = env->n_regs - ARR_LEN(starters);
	free_pressure_slots = env->n_regs - (pressure - ARR_LEN(delayed));
	free_slots = MIN(free_slots, free_pressure_slots);

	/* append nodes delayed due to loop structure until start set is full */
	for (i = 0; i < ARR_LEN(delayed) && i < free_slots; ++i) {
		DBG((dbg, DBG_START, " delayed %+F taken\n", delayed[i].irn));
		ARR_APP1(loc_t, starters, delayed[i]);
		delayed[i].irn = NULL;

	/* spill all delayed phis which didn't make it into start workset */
	for ( ; i < ARR_LEN(delayed); ++i) {
		ir_node *irn = delayed[i].irn;
		if (irn && is_Phi(irn) && get_nodes_block(irn) == block) {
			DBG((dbg, DBG_START, " spilling delayed phi %+F\n", irn));
			be_spill_phi(env->senv, irn);

	/* Sort start values by first use */
	qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);

	/* Copy the best ones from starters to start workset */
	ws_count = MIN(ARR_LEN(starters), env->n_regs);
	block_info->ws_start = new_workset(env, &env->ob);
	workset_bulk_fill(block_info->ws_start, ws_count, starters);

	/* The phis of this block which are not in the start set have to be spilled later. */
	len = ARR_LEN(starters);
	for (i = ws_count; i < len; ++i) {
		irn = starters[i].irn;
		if (! is_Phi(irn) || get_nodes_block(irn) != block)

		be_spill_phi(env->senv, irn);
/**
 * Collects all values live-in at block @p block and all phi results in this block.
 * Then it adds the best values (at most n_regs) to the blocks start_workset.
 * The phis among the remaining values get spilled: Introduce psudo-copies of
 * their args to break interference and make it possible to spill them to the
 * same spill slot.
 *
 * NOTE(review): this excerpt is missing the 'pred_block' declaration, the
 * early return when info already exists, and the final return — code kept
 * verbatim, comments only added.
 */
static block_info_t *compute_block_start_info(belady_env_t *env, ir_node *block) {
	block_info_t *res, *pred_info;

	/* Have we seen this block before? */
	res = get_block_info(block);

	/* Create the block info for this block. */
	res = new_block_info(&env->ob);
	set_block_info(block, res);

	/* Use endset of predecessor block as startset */
	assert(get_Block_n_cfgpreds(block) == 1 && block != get_irg_start_block(get_irn_irg(block)));
	pred_block = get_Block_cfgpred_block(block, 0);
	pred_info = get_block_info(pred_block);

	/* if pred block has not been processed yet, do it now */
	if (pred_info == NULL || pred_info->processed == 0) {
		belady(pred_block, env);
		pred_info = get_block_info(pred_block);

	/* now we have an end_set of pred */
	assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
	res->ws_start = workset_clone(env, &env->ob, pred_info->ws_end);
/**
 * For the given block @p block, decide for each values
 * whether it is used from a register or is reloaded
 * before the usage.
 *
 * Block-walker callback; @p data is the belady_env_t.
 *
 * NOTE(review): this excerpt is missing declarations of 'irn', 'new_vals',
 * 'iter', 'i', 'arity', several early returns/continues and braces, and
 * the del_pset for env->used — code kept verbatim, comments only added.
 */
static void belady(ir_node *block, void *data) {
	belady_env_t *env = data;
	block_info_t *block_info;

	/* make sure we have blockinfo (with startset) */
	block_info = get_block_info(block);
	if (block_info == NULL)
		block_info = compute_block_start_info(env, block);

	/* Don't do a block twice */
	if(block_info->processed)

	/* get the starting workset for this block */
	DBG((dbg, DBG_DECIDE, "\n"));
	DBG((dbg, DBG_DECIDE, "Decide for %+F\n", block));

	workset_copy(env, env->ws, block_info->ws_start);
	DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
	workset_foreach(env->ws, irn, iter)
		DBG((dbg, DBG_WSETS, " %+F (%u)\n", irn, workset_get_time(env->ws, iter)));

	/* process the block from start to end */
	DBG((dbg, DBG_WSETS, "Processing...\n"));
	env->used = pset_new_ptr_default();

	new_vals = new_workset(env, &env->ob);
	sched_foreach(block, irn) {
		assert(workset_get_length(env->ws) <= env->n_regs && "Too much values in workset!");

		/* projs are handled with the tuple value.
		 * Phis are no real instr (see insert_starters())
		 * instr_nr does not increase */
		if (is_Proj(irn) || is_Phi(irn)) {
			DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
		DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));

		/* set instruction in the workset */

		/* allocate all values _used_ by this instruction */
		workset_clear(new_vals);
		for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
			/* (note that reloaded_value is not interesting here) */
			workset_insert(env, new_vals, get_irn_n(irn, i), 0);
		displace(env, new_vals, 1);

		/* allocate all values _defined_ by this instruction */
		workset_clear(new_vals);
		if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
			const ir_edge_t *edge;

			foreach_out_edge(irn, edge) {
				ir_node *proj = get_edge_src_irn(edge);
				workset_insert(env, new_vals, proj, 0);
			workset_insert(env, new_vals, irn, 0);
		displace(env, new_vals, 0);

	/* Remember end-workset for this block */
	block_info->ws_end = workset_clone(env, &env->ob, env->ws);
	block_info->processed = 1;
	DBG((dbg, DBG_WSETS, "End workset for %+F:\n", block));
	workset_foreach(block_info->ws_end, irn, iter)
		DBG((dbg, DBG_WSETS, " %+F (%u)\n", irn, workset_get_time(block_info->ws_end, iter)));
/**
 * 'decide' is block-local and makes assumptions
 * about the set of live-ins. Thus we must adapt the
 * live-outs to the live-ins at each block-border.
 *
 * Block-walker callback; @p data is the belady_env_t.
 *
 * NOTE(review): this excerpt is missing declarations of 'i', 'arity',
 * 'iter', 'iter2', 'node', 'n2', 'found', the found-matching logic inside
 * the inner loop, and several braces — code kept verbatim, comments only
 * added.
 */
static void fix_block_borders(ir_node *block, void *data)
	ir_graph *irg = get_irn_irg(block);
	ir_node *startblock = get_irg_start_block(irg);
	belady_env_t *env = data;
	workset_t *start_workset;

	/* the start block has no predecessors to fix */
	if(block == startblock)

	DBG((dbg, DBG_FIX, "\n"));
	DBG((dbg, DBG_FIX, "Fixing %+F\n", block));

	start_workset = get_block_info(block)->ws_start;

	/* process all pred blocks */
	arity = get_irn_arity(block);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_Block_cfgpred_block(block, i);
		workset_t *workset_pred_end = get_block_info(pred)->ws_end;

		DBG((dbg, DBG_FIX, " Pred %+F\n", pred));

		/* spill all values not used anymore */
		workset_foreach(workset_pred_end, node, iter) {
			workset_foreach(start_workset, n2, iter2) {
				/* note that we do not look at phi inputs, because the values
				 * will be either live-end and need no spill or
				 * they have other users in which must be somewhere else in the
				 * workset */

			/* live-out but not live-in and not yet reloaded: spill it at
			 * the end of the predecessor */
			if(!found && be_is_live_out(env->lv, pred, node)
					&& !workset_pred_end->vals[iter].reloaded_value) {
				ir_node *insert_point
					= be_get_end_of_block_insertion_point(pred);
				DBG((dbg, DBG_SPILL, "Spill %+F before %+F\n", node,
				be_add_spill(env->senv, node, insert_point);

		/* reload missing values in predecessors */
		workset_foreach(start_workset, node, iter) {
			/* if node is a phi of the current block we reload
			 * the corresponding argument, else node itself */
			if(is_Phi(node) && block == get_nodes_block(node)) {
				node = get_irn_n(node, i);

				/* we might have unknowns as argument for the phi */
				if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, node))

			/* check if node is in a register at end of pred */
			if(workset_contains(workset_pred_end, node))

			/* node is not in memory at the end of pred -> reload it */
			DBG((dbg, DBG_FIX, " reload %+F\n", node));
			DBG((dbg, DBG_SPILL, "Reload %+F before %+F,%d\n", node, block, i));
			be_add_reload_on_edge(env->senv, node, block, i, env->cls, 1);
748 * Do spilling for a register class on a graph using the belady heuristic.
749 * In the transformed graph, the register pressure never exceeds the number
750 * of available registers.
752 * @param birg The backend graph
753 * @param cls The register class to spill
755 static void be_spill_belady(be_irg_t *birg, const arch_register_class_t *cls) {
756 be_spill_belady_spill_env(birg, cls, NULL);
/**
 * Runs the Belady spiller for @p cls on @p birg, optionally reusing an
 * externally supplied @p spill_env (a fresh one is created, and later
 * destroyed, when NULL is passed).
 *
 * NOTE(review): this excerpt is missing the declarations of 'env' and
 * 'n_regs' plus several braces — code kept verbatim, comments only added.
 */
void be_spill_belady_spill_env(be_irg_t *birg, const arch_register_class_t *cls, spill_env_t *spill_env) {
	ir_graph *irg = be_get_birg_irg(birg);

	/* ignore-registers are not available for allocation */
	n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
	be_liveness_assure_sets(be_assure_liveness(birg));

	/* construct control flow loop tree */
	if(! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
		construct_cf_backedges(irg);

	/* init belady env */
	obstack_init(&env.ob);
	env.arch = birg->main_env->arch_env;
	env.lv = be_get_birg_liveness(birg);
	env.ws = new_workset(&env, &env.ob);
	env.uses = be_begin_uses(irg, env.lv);
	env.loop_ana = be_new_loop_pressure(birg);
	if(spill_env == NULL) {
		env.senv = be_new_spill_env(birg);
		env.senv = spill_env;

	/* Decide which phi nodes will be spilled and place copies for them into the graph */
	irg_block_walk_graph(irg, compute_live_ins, NULL, &env);
	/* Fix high register pressure with belady algorithm */
	irg_block_walk_graph(irg, NULL, belady, &env);
	/* belady was block-local, fix the global flow by adding reloads on the edges */
	irg_block_walk_graph(irg, fix_block_borders, NULL, &env);

	/* clean up analysis state */
	be_end_uses(env.uses);
	be_free_loop_pressure(env.loop_ana);
	obstack_free(&env.ob, NULL);

	/* Insert spill/reload nodes into the graph and fix usages */
	be_insert_spills_reloads(env.senv);

	/* only destroy the spill env if we created it ourselves */
	if(spill_env == NULL)
		be_delete_spill_env(env.senv);
/* Module constructor: registers the belady spiller with the backend and
 * sets up its debug channel.
 * NOTE(review): the initializer of 'belady_spiller' is truncated in this
 * excerpt — code kept verbatim, comments only added. */
void be_init_spillbelady(void)
	static be_spiller_t belady_spiller = {
	be_register_spiller("belady", &belady_spiller);
	FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady);