2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Beladys spillalgorithm.
23 * @author Daniel Grund, Matthias Braun
34 #include "irprintf_t.h"
40 #include "iredges_t.h"
43 #include "irnodeset.h"
50 #include "besched_t.h"
54 #include "bechordal_t.h"
55 #include "bespilloptions.h"
56 #include "beloopana.h"
/* extra debug mask used for workset dumps */
#define DBG_WORKSET 128
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* factor to weight the different costs of reloading/rematerializing a node
   (see bespill.h be_get_reload_costs_no_weight) */
#define RELOAD_COST_FACTOR 10

/* sentinel use-time for freshly inserted workset entries (real times are
 * filled in by displace() before sorting) */
#define TIME_UNDEFINED 6666
/**
 * An association between a node and a point in time.
 */
typedef struct loc_t {
	unsigned time;   /**< A use time (see beuses.h). */
	bool spilled;    /**< the value was already spilled on this path */

/* The working set: the set of values assumed to be in registers.
 * NOTE(review): a C99 flexible array member "vals[]" would be preferable
 * to the GNU zero-length array extension used below. */
typedef struct _workset_t {
	int len;         /**< current length */
	loc_t vals[0];   /**< inlined array of the values/distances in this working set */
/* global environment shared by one spilling run (set up in be_spill_belady) */
static struct obstack obst;                    /* backing store for worksets/block infos */
static const arch_env_t *arch_env;
static const arch_register_class_t *cls;       /* register class currently spilled */
static const be_lv_t *lv;                      /* liveness information */
static be_loopana_t *loop_ana;                 /* loop register-pressure analysis */
static workset_t *ws;                          /**< the main workset used while
                                                    processing a block. */
static be_uses_t *uses;                        /**< env for the next-use magic */
static ir_node *instr;                         /**< current instruction */
static unsigned instr_nr;                      /**< current instruction number
                                                    (relative to block start) */
static ir_nodeset_t used;                      /* values already used in the current block */
static spill_env_t *senv;                      /**< see bespill.h */
static pdeq *worklist;                         /* queue of blocks still to process */
109 static int loc_compare(const void *a, const void *b)
113 return p->time - q->time;
/* Debug helper: dump every (value, use-time) pair of @p w to stderr. */
void workset_print(const workset_t *w)
	for(i = 0; i < w->len; ++i) {
		ir_fprintf(stderr, "%+F %d\n", w->vals[i].node, w->vals[i].time);
/**
 * Alloc a new workset on obstack @p ob with maximum size @p max
 */
static workset_t *new_workset(void)
	/* header plus one loc_t slot per allocatable register of the class */
	size_t size = sizeof(*res) + n_regs * sizeof(res->vals[0]);

	res = obstack_alloc(&obst, size);
	memset(res, 0, size);
/**
 * Alloc a new instance on obstack and make it equal to @param workset
 */
static workset_t *workset_clone(workset_t *workset)
	size_t size = sizeof(*res) + n_regs * sizeof(res->vals[0]);
	res = obstack_alloc(&obst, size);
	/* shallow copy is fine: entries only hold node pointers and times */
	memcpy(res, workset, size);
/**
 * Copy workset @param src to @param dest (both must have room for
 * n_regs entries).
 */
static void workset_copy(workset_t *dest, const workset_t *src)
	size_t size = sizeof(*src) + n_regs * sizeof(src->vals[0]);
	memcpy(dest, src, size);
/**
 * Overwrites the current content array of @param workset with the
 * @param count locations given at memory @param locs.
 * Set the length of @param workset to count.
 */
static void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs)
	workset->len = count;
	memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
/**
 * Inserts the value @p val into the workset, iff it is not
 * already contained. The workset must not be full.
 * @p spilled  records whether the value was already spilled on this path.
 */
static void workset_insert(workset_t *workset, ir_node *val, bool spilled)
	/* check for current regclass */
	assert(arch_irn_consider_in_reg_alloc(arch_env, cls, val));

	/* check if val is already contained */
	for (i = 0; i < workset->len; ++i) {
		loc = &workset->vals[i];
		if (loc->node == val) {

	/* insert val; the real use-time is assigned later by displace() */
	assert(workset->len < n_regs && "Workset already full!");
	loc = &workset->vals[workset->len];
	loc->spilled = spilled;
	loc->time = TIME_UNDEFINED;
/**
 * Removes all entries from this workset
 */
static void workset_clear(workset_t *workset)
/**
 * Removes the value @p val from the workset if present.
 */
static INLINE void workset_remove(workset_t *workset, ir_node *val)
	for(i = 0; i < workset->len; ++i) {
		if (workset->vals[i].node == val) {
			/* order is irrelevant here: overwrite with last entry and shrink */
			workset->vals[i] = workset->vals[--workset->len];
/* Returns the loc_t of @p val inside @p ws, or NULL if not contained. */
static INLINE const loc_t *workset_contains(const workset_t *ws,
	for (i = 0; i < ws->len; ++i) {
		if (ws->vals[i].node == val)
/**
 * Iterates over all values in the working set.
 * @p ws The workset to iterate
 * @p v A variable to put the current value in
 * @p i An integer for internal use
 */
#define workset_foreach(ws, v, i) for(i=0; \
	v=(i < ws->len) ? ws->vals[i].node : NULL, i < ws->len; \

/* accessors for the (value, next-use-time) pairs of a workset */
#define workset_set_time(ws, i, t) (ws)->vals[i].time=t
#define workset_get_time(ws, i) (ws)->vals[i].time
#define workset_set_length(ws, length) (ws)->len = length
#define workset_get_length(ws) ((ws)->len)
#define workset_get_val(ws, i) ((ws)->vals[i].node)
/* sort by increasing next-use distance (see loc_compare) */
#define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
/* per-block data: the worksets at block entry and block exit */
typedef struct _block_info_t
{
	workset_t *start_workset;
	workset_t *end_workset;
/* Allocates a zero-initialized block_info_t on the global obstack. */
static void *new_block_info(void)
	block_info_t *res = obstack_alloc(&obst, sizeof(res[0]));
	memset(res, 0, sizeof(res[0]));
/* the block_info is stashed in the block's link field */
#define get_block_info(block) ((block_info_t *)get_irn_link(block))
#define set_block_info(block, info) set_irn_link(block, info)
/**
 * @return The distance to the next use or 0 if irn has dont_spill flag set
 */
static INLINE unsigned get_distance(ir_node *from, unsigned from_step,
                                    const ir_node *def, int skip_from_uses)
	int flags = arch_irn_get_flags(arch_env, def);

	/* ignore-registers never take part in spilling, they must not get here */
	assert(! (flags & arch_irn_flags_ignore));

	use = be_get_next_use(uses, from, from_step, def, skip_from_uses);
	if(USES_IS_INFINITE(use.time))
		return USES_INFINITY;

	/* We have to keep nonspillable nodes in the workingset */
	if(flags & arch_irn_flags_dont_spill)

	/* bias the distance by reload costs: values that are expensive to
	 * reload get a smaller adjusted time, so they stay in the workset
	 * longer and cheap-to-reload values become the preferred victims */
	costs = be_get_reload_costs_no_weight(senv, def, use.before);
	assert(costs * RELOAD_COST_FACTOR < 1000);
	time = use.time + 1000 - (costs * RELOAD_COST_FACTOR);
/**
 * Performs the actions necessary to grant the request that:
 * - new_vals can be held in registers
 * - as few as possible other values are disposed
 * - the worst values get disposed
 *
 * @p is_usage indicates that the values in new_vals are used (not defined)
 * In this case reloads must be performed
 */
static void displace(workset_t *new_vals, int is_usage)
	ir_node **to_insert = alloca(n_regs * sizeof(to_insert[0]));
	bool *spilled = alloca(n_regs * sizeof(spilled[0]));

	/* 1. Identify the number of needed slots and the values to reload */
	workset_foreach(new_vals, val, iter) {
		bool reloaded = false;

		/* mark value as used */
		ir_nodeset_insert(&used, val);

		if (! workset_contains(ws, val)) {
			DB((dbg, DBG_DECIDE, " insert %+F\n", val));
			DB((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, instr));
			be_add_reload(senv, val, instr, cls, 1);
			DB((dbg, DBG_DECIDE, " %+F already in workset\n", val));

		/* remove the value from the current workset so it is not accidentally
		 * chosen as a victim below */
		workset_remove(ws, val);
		spilled[demand] = reloaded;
		to_insert[demand] = val;

	/* 2. Make room for at least 'demand' slots */
	len = workset_get_length(ws);
	spills_needed = len + demand - n_regs;
	assert(spills_needed <= len);

	/* Only make more free room if we do not have enough */
	if (spills_needed > 0) {
		ir_node *curr_bb = get_nodes_block(instr);
		workset_t *ws_start = get_block_info(curr_bb)->start_workset;

		DB((dbg, DBG_DECIDE, " disposing %d values\n", spills_needed));

		/* calculate current next-use distance for live values */
		for (i = 0; i < len; ++i) {
			ir_node *val = workset_get_val(ws, i);
			unsigned dist = get_distance(instr, instr_nr, val, !is_usage);
			workset_set_time(ws, i, dist);

		/* sort entries by increasing nextuse-distance */
		/* the victims are the entries with the largest distances (Belady) */
		for (i = len - spills_needed; i < len; ++i) {
			ir_node *val = ws->vals[i].node;

			DB((dbg, DBG_DECIDE, " disposing node %+F (%u)\n", val,
			workset_get_time(ws, i)));

			/* only emit a spill if the value is still used later and was
			 * not already spilled on this path */
			if(!USES_IS_INFINITE(ws->vals[i].time) && !ws->vals[i].spilled) {
				ir_node *after_pos = sched_prev(instr);
				be_add_spill(senv, val, after_pos);

			/* Logic for not needed live-ins: If a value is disposed
			 * before its first use, remove it from start workset
			 * We don't do this for phis though */
			if (!is_Phi(val) && ! ir_nodeset_contains(&used, val)) {
				workset_remove(ws_start, val);
				DB((dbg, DBG_DECIDE, " (and removing %+F from start workset)\n", val));

		/* kill the last 'demand' entries in the array */
		workset_set_length(ws, len - spills_needed);

	/* 3. Insert the new values into the workset */
	for (i = 0; i < demand; ++i) {
		ir_node *val = to_insert[i];

		workset_insert(ws, val, spilled[i]);
/** Decides whether a specific node should be in the start workset or not
 *
 * @param first  the first instruction of the block (next-use reference point)
 * @param node   the node to test
 * @param loop   the loop of the node
 * @return a loc_t whose time is USES_INFINITY (not taken), USES_PENDING
 *         (decision delayed) or the real next-use time (taken)
 */
static loc_t to_take_or_not_to_take(ir_node* first, ir_node *node,
	be_next_use_t next_use;

	loc.time = USES_INFINITY;

	/* values of a foreign register class never enter this workset */
	if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
		loc.time = USES_INFINITY;

	/* We have to keep nonspillable nodes in the workingset */
	if(arch_irn_get_flags(arch_env, node) & arch_irn_flags_dont_spill) {
		DB((dbg, DBG_START, " %+F taken (dontspill node)\n", node, loc.time));

	next_use = be_get_next_use(uses, first, 0, node, 0);
	if(USES_IS_INFINITE(next_use.time)) {
		/* the nodes marked as live in shouldn't be dead, so it must be a phi */
		assert(is_Phi(node));
		loc.time = USES_INFINITY;
		DB((dbg, DBG_START, " %+F not taken (dead)\n", node));
		be_spill_phi(senv, node);

	loc.time = next_use.time;

	/* take the value only if its next use is inside (or at least as deep
	 * as) the current loop, otherwise mark the decision as pending */
	if(next_use.outermost_loop >= get_loop_depth(loop)) {
		DB((dbg, DBG_START, " %+F taken (%u, loop %d)\n", node, loc.time,
		next_use.outermost_loop));
		loc.time = USES_PENDING;
		DB((dbg, DBG_START, " %+F delayed (outerdepth %d < loopdepth %d)\n",
		node, next_use.outermost_loop, get_loop_depth(loop)));
/**
 * Computes the start-workset for a block with multiple predecessors. We assume
 * that at least 1 of the predecessors is a back-edge which means we're at the
 * beginning of a loop. We try to reload as much values as possible now so they
 * don't get reloaded inside the loop.
 */
static void decide_start_workset(const ir_node *block)
	ir_loop *loop = get_irn_loop(block);
	int i, len, ws_count;
	int free_slots, free_pressure_slots;
	workset_t **pred_worksets;

	/* Collect all values living at start of block */
	starters = NEW_ARR_F(loc_t, 0);
	delayed = NEW_ARR_F(loc_t, 0);

	DB((dbg, DBG_START, "Living at start of %+F:\n", block));
	first = sched_first(block);

	/* check all Phis first */
	sched_foreach(block, node) {

		loc = to_take_or_not_to_take(first, node, loop);

		if (! USES_IS_INFINITE(loc.time)) {
			if (USES_IS_PENDING(loc.time))
				ARR_APP1(loc_t, delayed, loc);
				ARR_APP1(loc_t, starters, loc);

	/* check all Live-Ins */
	be_lv_foreach(lv, block, be_lv_state_in, i) {
		ir_node *node = be_lv_get_irn(lv, block, i);

		loc = to_take_or_not_to_take(first, node, loop);

		if (! USES_IS_INFINITE(loc.time)) {
			if (USES_IS_PENDING(loc.time))
				ARR_APP1(loc_t, delayed, loc);
				ARR_APP1(loc_t, starters, loc);

	/* how many slots are free after accounting for loop pressure? */
	pressure = be_get_loop_pressure(loop_ana, cls, loop);
	assert(ARR_LEN(delayed) <= (signed)pressure);
	free_slots = n_regs - ARR_LEN(starters);
	free_pressure_slots = n_regs - (pressure - ARR_LEN(delayed));
	free_slots = MIN(free_slots, free_pressure_slots);

	/* so far we only put nodes into the starters list that are used inside
	 * the loop. If register pressure in the loop is low then we can take some
	 * values and let them live through the loop */
	if (free_slots > 0) {
		/* prefer the delayed values with the earliest next use */
		qsort(delayed, ARR_LEN(delayed), sizeof(delayed[0]), loc_compare);

		for (i = 0; i < ARR_LEN(delayed) && i < free_slots; ++i) {
			loc_t *loc = & delayed[i];

			/* don't use values which are dead in a known predecessors
			 * to not induce unnecessary reloads */
			arity = get_irn_arity(block);
			for (p = 0; p < arity; ++p) {
				ir_node *pred_block = get_Block_cfgpred_block(block, p);
				block_info_t *pred_info = get_block_info(pred_block);

				if (pred_info == NULL)

				if (!workset_contains(pred_info->end_workset, loc->node)) {
					" delayed %+F not live at pred %+F\n", loc->node,

			DB((dbg, DBG_START, " delayed %+F taken\n", loc->node));
			ARR_APP1(loc_t, starters, *loc);

	/* spill phis (the actual phis not just their values) that are in this block
	 * but not in the start workset */
	for (i = ARR_LEN(delayed) - 1; i >= 0; --i) {
		ir_node *node = delayed[i].node;
		if(node == NULL || !is_Phi(node) || get_nodes_block(node) != block)

		DB((dbg, DBG_START, " spilling delayed phi %+F\n", node));
		be_spill_phi(senv, node);

	/* Sort start values by first use */
	qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);

	/* Copy the best ones from starters to start workset */
	ws_count = MIN(ARR_LEN(starters), n_regs);

	workset_bulk_fill(ws, ws_count, starters);

	/* spill phis (the actual phis not just their values) that are in this block
	 * but not in the start workset */
	len = ARR_LEN(starters);
	for (i = ws_count; i < len; ++i) {
		ir_node *node = starters[i].node;
		if (! is_Phi(node) || get_nodes_block(node) != block)

		DB((dbg, DBG_START, " spilling phi %+F\n", node));
		be_spill_phi(senv, node);

	/* determine spill status of the values: If there's 1 pred block (which
	 * is no backedge) where the value is spilled then we must set it to
	 * spilled here as well */
	arity = get_irn_arity(block);
	pred_worksets = alloca(sizeof(pred_worksets[0]) * arity);
	for(i = 0; i < arity; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		block_info_t *pred_info = get_block_info(pred_block);

		if(pred_info == NULL)
			pred_worksets[i] = NULL;
			pred_worksets[i] = pred_info->end_workset;

	for(i = 0; i < ws_count; ++i) {
		loc_t *loc = &ws->vals[i];
		ir_node *value = loc->node;

		/* phis from this block aren't spilled */
		if(get_nodes_block(value) == block) {
			assert(is_Phi(value));
			loc->spilled = false;

		/* determine if value was spilled on any predecessor */
		for(n = 0; n < arity; ++n) {
			workset_t *pred_workset = pred_worksets[n];

			if (pred_workset == NULL)

			p_len = workset_get_length(pred_workset);
			for(p = 0; p < p_len; ++p) {
				loc_t *l = &pred_workset->vals[p];

				if (l->node != value)

		loc->spilled = spilled;
/* Alternative start-workset heuristic for blocks where every predecessor
 * end-workset is already known; falls back to decide_start_workset()
 * otherwise.  NOTE(review): this function looks unfinished (see the TODO
 * below and the issues flagged inline). */
static void decide_start_workset2(const ir_node *block)
	workset_t **pred_worksets;

	/* check if all predecessors are known */
	arity = get_irn_arity(block);
	pred_worksets = alloca(sizeof(pred_worksets[0]) * arity);
	for (i = 0; i < arity; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		block_info_t *pred_info = get_block_info(pred_block);

		if (pred_info == NULL) {
			/* not all predecessors known, use decide_start_workset */
			decide_start_workset(block);

		pred_worksets[i] = pred_info->end_workset;

	/* take values live in all pred blocks */
	/* NOTE(review): "pred_workset" below appears to be a typo for
	 * "pred_worksets"; "p_len" and "value" also look undeclared here. */
	len = workset_get_length(pred_workset[0]);
	for (p = 0; p < p_len; ++p) {
		const loc_t *l = &pred_workset[0]->vals[p];
		bool spilled = false;

		if (USES_IS_INFINITE(l->time))

		/* value available in all preds? */
		for (i = 0; i < arity; ++i) {
			/* NOTE(review): should presumably read
			 * "workset_t *p_workset = pred_worksets[i];" — the current
			 * form assigns a workset_t** to a workset_t. */
			workset_t p_workset = &pred_worksets[i];
			int p_len = workset_get_length(p_workset);

			for (p_i = 0; p_i < p_len; ++p_i) {
				const loc_t *p_l = &p_workset->vals[p_i];
				if (p_l->node != value)

		/* it was available in all preds, TODO: insert spills... */
		workset_insert(ws, value, spilled);

	/* Copy the best ones from starters to start workset */
	ws_count = MIN(ARR_LEN(starters), n_regs);

	workset_bulk_fill(ws, ws_count, starters);
/**
 * For the given block @p block, decide for each values
 * whether it is used from a register or is reloaded
 * before the usage.
 */
static void belady(ir_node *block)
	block_info_t *block_info;
	int has_backedges = 0;
	const ir_edge_t *edge;

	/* no need to process a block twice */
	if(get_block_info(block) != NULL) {

	/* check if all predecessor blocks are processed yet (though for backedges
	 * we have to make an exception as we can't process them first) */
	arity = get_Block_n_cfgpreds(block);
	for(i = 0; i < arity; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		block_info_t *pred_info = get_block_info(pred_block);

		if(pred_info == NULL) {
			/* process predecessor first (it will be in the queue already) */
			if(!is_backedge(block, i)) {

	(void) has_backedges;
	} else if(arity == 1) {
		/* single predecessor: just continue with its end workset */
		ir_node *pred_block = get_Block_cfgpred_block(block, 0);
		block_info_t *pred_info = get_block_info(pred_block);

		assert(pred_info != NULL);
		workset_copy(ws, pred_info->end_workset);
		/* we need 2 heuristics here, for the case when all predecessor blocks
		 * are known and when some are backedges (and therefore can't be known
		 * yet) */
		decide_start_workset(block);

	DB((dbg, DBG_DECIDE, "\n"));
	DB((dbg, DBG_DECIDE, "Decide for %+F\n", block));

	block_info = new_block_info();
	set_block_info(block, block_info);

	DB((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
	workset_foreach(ws, irn, iter) {
		DB((dbg, DBG_WSETS, " %+F (%u)\n", irn,
		workset_get_time(ws, iter)));

	/* remember the start workset; fix_block_borders() needs it later */
	block_info->start_workset = workset_clone(ws);

	/* process the block from start to end */
	DB((dbg, DBG_WSETS, "Processing...\n"));
	ir_nodeset_init(&used);

	/* TODO: this leaks (into the obstack)... */
	new_vals = new_workset();
	sched_foreach(block, irn) {

		assert(workset_get_length(ws) <= n_regs);

		/* Phis are no real instr (see insert_starters()) */

		DB((dbg, DBG_DECIDE, " ...%+F\n", irn));

		/* set instruction in the workset */

		/* allocate all values _used_ by this instruction */
		workset_clear(new_vals);
		for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
			ir_node *in = get_irn_n(irn, i);
			if (!arch_irn_consider_in_reg_alloc(arch_env, cls, in))

			/* (note that "spilled" is irrelevant here) */
			workset_insert(new_vals, in, false);
		displace(new_vals, 1);

		/* allocate all values _defined_ by this instruction */
		workset_clear(new_vals);
		if (get_irn_mode(irn) == mode_T) {
			/* mode_T nodes define their values through Projs */
			const ir_edge_t *edge;

			foreach_out_edge(irn, edge) {
				ir_node *proj = get_edge_src_irn(edge);
				if (!arch_irn_consider_in_reg_alloc(arch_env, cls, proj))
				workset_insert(new_vals, proj, false);
			if (!arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
			workset_insert(new_vals, irn, false);
		displace(new_vals, 0);

	ir_nodeset_destroy(&used);

	/* Remember end-workset for this block */
	block_info->end_workset = workset_clone(ws);
	DB((dbg, DBG_WSETS, "End workset for %+F:\n", block));
	workset_foreach(ws, irn, iter)
		DB((dbg, DBG_WSETS, " %+F (%u)\n", irn,
		workset_get_time(ws, iter)));

	/* add successor blocks into worklist */
	foreach_block_succ(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		pdeq_putr(worklist, succ);
/**
 * 'decide' is block-local and makes assumptions
 * about the set of live-ins. Thus we must adapt the
 * live-outs to the live-ins at each block-border.
 */
static void fix_block_borders(ir_node *block, void *data)
	workset_t *start_workset;

	DB((dbg, DBG_FIX, "\n"));
	DB((dbg, DBG_FIX, "Fixing %+F\n", block));

	start_workset = get_block_info(block)->start_workset;

	/* process all pred blocks */
	arity = get_irn_arity(block);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_Block_cfgpred_block(block, i);
		workset_t *pred_end_workset = get_block_info(pred)->end_workset;

		DB((dbg, DBG_FIX, " Pred %+F\n", pred));

		/* spill all values not used anymore */
		workset_foreach(pred_end_workset, node, iter) {

			workset_foreach(start_workset, n2, iter2) {

			/* note that we do not look at phi inputs, because the values
			 * will be either live-end and need no spill or
			 * they have other users in which must be somewhere else in the
			 * block */

			if(be_is_live_in(lv, block, node)
			&& !pred_end_workset->vals[iter].spilled) {
				ir_node *insert_point;

				insert_point = be_get_end_of_block_insertion_point(pred);
				insert_point = sched_prev(insert_point);
				insert_point = block;

				DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
				be_add_spill(senv, node, insert_point);

		/* reload missing values in predecessors, add missing spills */
		workset_foreach(start_workset, node, iter) {
			const loc_t *l = &start_workset->vals[iter];
			const loc_t *pred_loc;

			/* if node is a phi of the current block we reload
			 * the corresponding argument, else node itself */
			if(is_Phi(node) && get_nodes_block(node) == block) {
				node = get_irn_n(node, i);

				/* we might have unknowns as argument for the phi */
				if(!arch_irn_consider_in_reg_alloc(arch_env, cls, node))

			/* check if node is in a register at end of pred */
			pred_loc = workset_contains(pred_end_workset, node);
			if (pred_loc != NULL) {
				/* we might have to spill value on this path */
				if (!pred_loc->spilled && l->spilled) {
					ir_node *insert_point
					= be_get_end_of_block_insertion_point(pred);
					insert_point = sched_prev(insert_point);
					DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
					be_add_spill(senv, node, insert_point);

				/* node is not in register at the end of pred -> reload it */
				DB((dbg, DBG_FIX, " reload %+F\n", node));
				DB((dbg, DBG_SPILL, "Reload %+F before %+F,%d\n", node, block, i));
				be_add_reload_on_edge(senv, node, block, i, cls, 1);
/* Main driver: run Belady spilling on @p birg for register class @p rcls. */
static void be_spill_belady(be_irg_t *birg, const arch_register_class_t *rcls)
	ir_graph *irg = be_get_birg_irg(birg);

	be_liveness_assure_sets(be_assure_liveness(birg));

	/* construct control flow loop tree */
	if(! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
		construct_cf_backedges(irg);

	/* init belady env */
	arch_env = birg->main_env->arch_env;
	lv = be_get_birg_liveness(birg);
	/* ignore-registers are not available for allocation */
	n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);

	uses = be_begin_uses(irg, lv);
	loop_ana = be_new_loop_pressure(birg);
	senv = be_new_spill_env(birg);
	worklist = new_pdeq();

	pdeq_putr(worklist, get_irg_start_block(irg));

	/* process blocks in (roughly) reverse-postorder via the work queue */
	while(!pdeq_empty(worklist)) {
		ir_node *block = pdeq_getl(worklist);

	/* end block might not be reachable in endless loops */
	belady(get_irg_end_block(irg));

	/* belady was block-local, fix the global flow by adding reloads on the
	 * block borders */
	irg_block_walk_graph(irg, fix_block_borders, NULL, NULL);

	/* Insert spill/reload nodes into the graph and fix usages */
	be_insert_spills_reloads(senv);

	/* clean up */
	be_delete_spill_env(senv);
	be_free_loop_pressure(loop_ana);
	obstack_free(&obst, NULL);
/* Module constructor: register this spiller as "belady" and set up the
 * debug module. */
void be_init_spillbelady(void)
	static be_spiller_t belady_spiller = {
	be_register_spiller("belady", &belady_spiller);
	FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady);