2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief MIN-algorithm with some twists (aka belady spiller v3)
23 * @author Matthias Braun
28 * - handle phis correctly, decide whether we should spill them ~ok
29 * - merge multiple start worksets of blocks ~ok
30 * - how to and when to do the tentative phase...
42 #include "iredges_t.h"
50 #include "bespillutil.h"
55 #define EXPENSIVE_CHECKS
58 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
60 typedef struct loop_edge_t loop_edge_t;
67 typedef struct loop_info_t loop_info_t;
70 loop_edge_t *entry_edges;
71 loop_edge_t *exit_edges;
72 size_t max_register_pressure;
75 typedef struct worklist_entry_t worklist_entry_t;
76 struct worklist_entry_t {
77 struct list_head head;
79 ir_node *reload_point;
80 ir_loop *unused_livethrough_loop;
83 #define foreach_worklist(entry, wl) list_for_each_entry(worklist_entry_t, entry, &(wl)->live_values, head)
85 typedef struct worklist_t worklist_t;
87 struct list_head live_values;
92 typedef struct block_info_t block_info_t;
94 worklist_t *start_worklist;
95 worklist_t *end_worklist;
98 /** The register class we currently run on. */
99 static const arch_register_class_t *cls;
100 static struct obstack obst;
101 static spill_env_t *senv;
102 static size_t n_regs;
103 static size_t max_register_pressure;
104 static bool tentative_mode;
105 static bool do_push_unused_livethroughs;
106 /** Execution frequency for the current graph. */
107 static ir_exec_freq *exec_freq;
108 static ir_visited_t worklist_visited;
110 static bool should_have_reached_fixpoint;
113 static worklist_t *new_worklist(void)
115 worklist_t *worklist = OALLOCZ(&obst, worklist_t);
117 INIT_LIST_HEAD(&worklist->live_values);
118 worklist->n_live_values = 0;
123 static void mark_irn_not_visited(ir_node *node)
125 set_irn_visited(node, get_irg_visited(current_ir_graph) - 1);
128 static bool worklist_contains(const ir_node *node)
130 return irn_visited(node);
133 static block_info_t *get_block_info(ir_node *block)
135 block_info_t *info = get_irn_link(block);
139 info = OALLOCZ(&obst, block_info_t);
140 set_irn_link(block, info);
144 static void deactivate_worklist(const worklist_t *worklist)
146 worklist_entry_t *entry;
148 foreach_worklist(entry, worklist) {
149 assert(worklist_contains(entry->value));
150 mark_irn_not_visited(entry->value);
151 set_irn_link(entry->value, NULL);
155 static void activate_worklist(const worklist_t *worklist)
157 worklist_entry_t *entry;
159 foreach_worklist(entry, worklist) {
160 assert(!worklist_contains(entry->value));
161 mark_irn_visited(entry->value);
162 set_irn_link(entry->value, entry);
167 * duplicate a worklist and directly activates it
169 static void fill_and_activate_worklist(worklist_t *new_worklist,
170 const worklist_t *worklist, ir_node *block, ir_node *succ_block,
173 ir_node *reload_point = NULL;
174 size_t n_live_values = 0;
175 worklist_entry_t *entry;
177 if (succ_block != NULL &&
178 (get_Block_n_cfgpreds(succ_block) > 1
179 || get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) > 1)) {
180 reload_point = be_get_end_of_block_insertion_point(block);
183 foreach_worklist(entry, worklist) {
184 ir_node *value = entry->value;
185 worklist_entry_t *new_entry;
187 if (new_worklist->n_live_values >= n_regs)
190 if (is_Phi(value) && get_nodes_block(value) == succ_block) {
191 value = get_Phi_pred(value, succ_pos);
193 /* can happen for unknown phi preds */
194 if (!arch_irn_consider_in_reg_alloc(cls, value))
198 if (irn_visited_else_mark(value))
201 new_entry = OALLOCZ(&obst, worklist_entry_t);
203 new_entry->value = value;
204 if (reload_point != NULL) {
205 new_entry->reload_point = reload_point;
207 new_entry->reload_point = entry->reload_point;
210 list_add_tail(&new_entry->head, &new_worklist->live_values);
213 set_irn_link(value, new_entry);
214 new_worklist->n_live_values++;
218 static worklist_t *duplicate_worklist(const worklist_t *worklist)
220 worklist_t *new_worklist;
221 struct list_head *entry;
223 new_worklist = OALLOCZ(&obst, worklist_t);
224 INIT_LIST_HEAD(&new_worklist->live_values);
225 new_worklist->n_live_values = worklist->n_live_values;
227 list_for_each(entry, &worklist->live_values) {
228 worklist_entry_t *wl_entry = list_entry(entry, worklist_entry_t, head);
229 worklist_entry_t *new_entry = OALLOC(&obst, worklist_entry_t);
231 memcpy(new_entry, wl_entry, sizeof(new_entry[0]));
232 list_add_tail(&new_entry->head, &new_worklist->live_values);
239 static void print_worklist(const worklist_t *worklist, int level)
241 struct list_head *entry;
243 DB((dbg, level, "%d values: ", worklist->n_live_values));
244 list_for_each(entry, &worklist->live_values) {
245 worklist_entry_t *wl_entry
246 = list_entry(entry, worklist_entry_t, head);
248 DB((dbg, level, "%+F ", wl_entry->value));
255 * Clear the link fields of a loop and all
258 * @param loop the root loop
260 static void clear_loop_info(ir_loop *loop)
262 int n_elements = get_loop_n_elements(loop);
266 for (i = 0; i < n_elements; ++i) {
267 loop_element element = get_loop_element(loop, i);
268 if (*element.kind != k_ir_loop)
271 clear_loop_info(element.son);
275 static loop_info_t *get_loop_info(ir_loop *loop)
277 loop_info_t *info = loop->link;
281 info = OALLOCZ(&obst, loop_info_t);
288 * constructs loop in+out edges when called in a block walker
290 static void construct_loop_edges(ir_node *block, void *data)
292 int n_cfgpreds = get_Block_n_cfgpreds(block);
293 ir_loop *loop = get_irn_loop(block);
298 for (i = 0; i < n_cfgpreds; ++i) {
299 ir_node *cfgpred_block = get_Block_cfgpred_block(block, i);
300 ir_loop *cfgpred_loop = get_irn_loop(cfgpred_block);
305 if (cfgpred_loop == loop)
308 /* critical edges are splitted, so we can't jump from 1 loop
309 * directly into another without going out of it */
310 assert(get_loop_depth(cfgpred_loop) != get_loop_depth(loop));
312 /* edge out of loop */
313 if (get_loop_depth(cfgpred_loop) > get_loop_depth(loop)) {
318 is_exit_edge = false;
323 /* this might be a jump out/in multiple loops */
325 loop_info_t *l_info = get_loop_info(l);
327 edge = OALLOCZ(&obst, loop_edge_t);
332 edge->next = l_info->exit_edges;
333 l_info->exit_edges = edge;
334 assert(get_loop_depth(loop) < get_loop_depth(l));
336 edge->next = l_info->entry_edges;
337 l_info->entry_edges = edge;
338 assert(get_loop_depth(cfgpred_loop) < get_loop_depth(l));
340 l = get_loop_outer_loop(l);
348 static void place_reload(worklist_entry_t *entry)
353 DB((dbg, LEVEL_1, "reload of %+F before %+F\n", entry->value,
354 entry->reload_point));
355 assert(entry->reload_point != NULL);
356 be_add_reload(senv, entry->value, entry->reload_point, cls, 1);
357 entry->reload_point = NULL;
361 * makes sure the worklist contains not more than n_regs - room_needed entries
363 static void make_room(worklist_t *worklist, size_t room_needed)
367 struct list_head *entry;
369 spills_needed = worklist->n_live_values + room_needed - n_regs;
370 if (spills_needed <= 0)
373 entry = worklist->live_values.next;
374 for (i = spills_needed; i > 0; --i) {
375 struct list_head *next = entry->next;
376 worklist_entry_t *wl_entry
377 = list_entry(entry, worklist_entry_t, head);
379 assert(worklist_contains(wl_entry->value));
380 mark_irn_not_visited(wl_entry->value);
381 place_reload(wl_entry);
386 worklist->n_live_values -= spills_needed;
390 * a value was used, so bring it to the back of the worklist (which might
391 * result in a spill of another value).
393 static void val_used(worklist_t *worklist, ir_node *value, ir_node *sched_point)
395 /* already in the worklist? move around, otherwise add at back */
396 worklist_entry_t *entry = get_irn_link(value);
398 assert(arch_irn_consider_in_reg_alloc(cls, value));
400 if (worklist_contains(value)) {
401 assert(entry != NULL);
403 list_del(&entry->head);
406 entry = OALLOCZ(&obst, worklist_entry_t);
407 entry->value = value;
408 set_irn_link(value, entry);
411 ++worklist->n_live_values;
412 mark_irn_visited(value);
415 entry->reload_point = sched_point;
416 list_add_tail(&entry->head, &worklist->live_values);
419 static void worklist_remove(worklist_t *worklist, ir_node *value)
421 worklist_entry_t *entry = get_irn_link(value);
422 assert(entry != NULL);
423 list_del(&entry->head);
424 --worklist->n_live_values;
426 assert(worklist_contains(value));
427 mark_irn_not_visited(value);
430 static void update_max_pressure(worklist_t *worklist)
432 if (worklist->n_live_values > max_register_pressure)
433 max_register_pressure = worklist->n_live_values;
436 static void do_spilling(ir_node *block, worklist_t *worklist)
440 assert(worklist != NULL);
442 sched_foreach_reverse(block, node) {
446 DB((dbg, LEVEL_2, "\t%+F... ", node));
447 update_max_pressure(worklist);
451 /* TODO: if we have some free registers, then we could decide to
452 * not spill some phis (but not for phis where at least 1 input is
455 /* we have to spill all phis that are not live */
456 sched_foreach_reverse_from(node, node2) {
457 assert(is_Phi(node2));
459 if (worklist_contains(node2))
461 if (!arch_irn_consider_in_reg_alloc(cls, node2))
465 be_spill_phi(senv, node2);
467 DB((dbg, LEVEL_2, "\n"));
471 /* remove values defined by this instruction from the workset. Values
472 * defined but not in the workset need free registers */
473 if (get_irn_mode(node) == mode_T) {
474 const ir_edge_t *edge;
476 foreach_out_edge(node, edge) {
477 ir_node *proj = get_edge_src_irn(edge);
478 if (!arch_irn_consider_in_reg_alloc(cls, proj))
480 if (worklist_contains(proj)) {
481 worklist_remove(worklist, proj);
486 } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
487 if (worklist_contains(node)) {
488 worklist_remove(worklist, node);
494 /* make sure we have enough free registers for the spills */
495 make_room(worklist, n_defs);
497 /* put all values used by the instruction into the workset */
498 arity = get_irn_arity(node);
499 for (i = 0; i < arity; ++i) {
500 ir_node *use = get_irn_n(node, i);
502 if (!arch_irn_consider_in_reg_alloc(cls, use))
505 val_used(worklist, use, node);
508 /* we might have too many values in the worklist now and need to spill
510 make_room(worklist, 0);
513 print_worklist(worklist, LEVEL_2);
514 DB((dbg, LEVEL_2, "\n"));
518 update_max_pressure(worklist);
521 DB((dbg, LEVEL_1, "worklist at begin of %+F:", block));
522 print_worklist(worklist, LEVEL_1);
523 DB((dbg, LEVEL_1, "\n"));
527 static bool worklists_equal(const worklist_t *wl1, const worklist_t *wl2)
529 const struct list_head *i1 = &wl1->live_values;
530 const struct list_head *i2 = &wl2->live_values;
532 for ( ; i1 != &wl1->live_values && i2 != &wl2->live_values;
533 i1 = i1->next, i2 = i2->next) {
534 worklist_entry_t *entry1 = list_entry(i1, worklist_entry_t, head);
535 worklist_entry_t *entry2 = list_entry(i2, worklist_entry_t, head);
537 if (entry1->value != entry2->value)
540 /* both lists at the end */
541 if (i1 != &wl1->live_values || i2 != &wl2->live_values)
547 static bool fill_start_worklist(worklist_t *new_worklist, ir_node *block)
549 double best_execfreq = -1;
550 worklist_t *best_worklist = NULL;
551 ir_node *best_succ_block = NULL;
553 const ir_edge_t *edge;
555 /* construct worklist */
556 foreach_block_succ(block, edge) {
557 ir_node *succ_block = get_edge_src_irn(edge);
558 double execfreq = get_block_execfreq(exec_freq, succ_block);
559 block_info_t *block_info;
560 worklist_t *succ_worklist;
562 if (execfreq < best_execfreq)
565 block_info = get_block_info(succ_block);
566 succ_worklist = block_info->start_worklist;
568 if (succ_worklist == NULL || succ_worklist->visited >= worklist_visited)
571 best_execfreq = execfreq;
572 best_worklist = succ_worklist;
573 best_succ_block = succ_block;
574 best_pos = get_edge_src_pos(edge);
577 if (best_worklist == NULL)
579 best_worklist->visited = worklist_visited;
581 fill_and_activate_worklist(new_worklist, best_worklist, block,
582 best_succ_block, best_pos);
586 static worklist_t *construct_start_worklist(ir_node *block)
588 worklist_t *worklist = new_worklist();
592 while (fill_start_worklist(worklist, block)) {
593 if (worklist->n_live_values >= n_regs)
600 static void process_block(ir_node *block, void *env)
602 block_info_t *block_info;
603 worklist_t *worklist;
607 DB((dbg, LEVEL_1, "Processing %+F\n", block));
609 worklist = construct_start_worklist(block);
610 block_info = get_block_info(block);
613 DB((dbg, LEVEL_1, "worklist at end of %+F:", block));
614 print_worklist(worklist, LEVEL_1);
615 DB((dbg, LEVEL_1, "\n"));
617 if (should_have_reached_fixpoint) {
618 assert(worklists_equal(block_info->end_worklist, worklist));
621 block_info->end_worklist = duplicate_worklist(worklist);
623 do_spilling(block, worklist);
624 deactivate_worklist(worklist);
627 if (should_have_reached_fixpoint) {
628 assert(worklists_equal(block_info->start_worklist, worklist));
631 block_info->start_worklist = worklist;
633 /* we shouldn't have any live values left at the start block */
634 n_preds = get_Block_n_cfgpreds(block);
635 //assert(n_preds != 0 || worklist->n_live_values == 0);
638 typedef union block_or_loop_t block_or_loop_t;
639 union block_or_loop_t {
644 static block_or_loop_t *loop_blocks;
645 static ir_loop *current_loop;
647 static void find_blocks(ir_node *block);
649 static void find_in_loop(ir_loop *loop, ir_node *entry)
651 loop_info_t *loop_info = get_loop_info(loop);
652 block_or_loop_t block_or_loop;
655 /* simply mark 1 block in the loop to indicate that the loop was already
657 ir_node *some_block = loop_info->entry_edges->block;
658 if (Block_block_visited(some_block))
661 block_or_loop.loop = loop;
662 ARR_APP1(block_or_loop_t, loop_blocks, block_or_loop);
666 /* we should be 1 of the entry blocks */
667 loop_edge_t *edge = loop_info->entry_edges;
669 for ( ; edge != NULL; edge = edge->next) {
670 if (edge->block == entry) {
681 /* check all loop successors */
682 for (edge = loop_info->exit_edges; edge != NULL; edge = edge->next) {
683 ir_node *succ = edge->block;
684 ir_loop *succ_loop = get_irn_loop(succ);
686 if (succ_loop == current_loop) {
689 assert(get_loop_depth(succ_loop) < get_loop_depth(current_loop));
694 static void find_blocks(ir_node *block)
696 const ir_edge_t *edge;
697 block_or_loop_t block_or_loop;
699 if (Block_block_visited(block))
702 block_or_loop.block = block;
704 ARR_APP1(block_or_loop_t, loop_blocks, block_or_loop);
705 mark_Block_block_visited(block);
707 foreach_block_succ(block, edge) {
708 ir_node *succ = get_edge_src_irn(edge);
710 /* is it part of the current loop? */
711 ir_loop *loop = get_irn_loop(succ);
712 if (loop != current_loop) {
714 if (get_loop_depth(loop) > get_loop_depth(current_loop)) {
715 find_in_loop(loop, succ);
717 /* parent loop: we're not interested in the block */
726 * append an entry to a worklist. WARNING: The entry must not already be in the
729 static void worklist_append(worklist_t *worklist, ir_node *value,
730 ir_node *reload_point,
731 ir_loop *unused_livethrough_loop)
733 worklist_entry_t *entry;
735 #ifdef EXPENSIVE_CHECKS
736 foreach_worklist(entry, worklist) {
737 assert(entry->value != value);
741 entry = OALLOCZ(&obst, worklist_entry_t);
742 entry->value = value;
743 entry->reload_point = reload_point;
744 entry->unused_livethrough_loop = unused_livethrough_loop;
745 list_add_tail(&entry->head, &worklist->live_values);
746 ++worklist->n_live_values;
747 assert(worklist->n_live_values <= n_regs);
750 static void push_unused_livethrough(loop_info_t *loop_info, ir_node *value)
755 /* add the value to all loop exit and entry blocks */
756 for (edge = loop_info->exit_edges; edge != NULL; edge = edge->next) {
758 = get_Block_cfgpred_block(edge->block, edge->pos);
759 const block_info_t *info = get_block_info(block);
760 worklist_t *worklist = info->end_worklist;
761 ir_node *reload_point = NULL;
763 if (worklist->visited >= worklist_visited)
765 worklist->visited = worklist_visited;
767 /* TODO: we need a smarter mechanism here, that makes the reloader place
768 * reload nodes on all loop exits... */
770 worklist_append(worklist, value, reload_point, loop_info->loop);
772 edge = loop_info->entry_edges;
773 for ( ; edge != NULL; edge = edge->next) {
774 ir_node *entry_block = edge->block;
775 const block_info_t *info = get_block_info(entry_block);
776 worklist_t *worklist = info->start_worklist;
778 ir_node *reload_point;
780 if (worklist->visited >= worklist_visited)
782 worklist->visited = worklist_visited;
784 pred_block = get_Block_cfgpred_block(entry_block, edge->pos);
785 reload_point = be_get_end_of_block_insertion_point(pred_block);
787 worklist_append(worklist, value, reload_point, loop_info->loop);
790 set_irn_link(value, NULL);
791 ++loop_info->max_register_pressure;
794 static void push_unused_livethroughs(loop_info_t *loop_info)
798 /* we can only push unused livethroughs if register pressure inside the loop
800 if (loop_info->max_register_pressure >= n_regs)
803 /* find unused livethroughs: register pressure in the loop was low enough
804 * which means that we had no spills which implies that at every point in
806 for (edge = loop_info->exit_edges; edge != NULL; edge = edge->next) {
807 ir_node *block = edge->block;
808 const block_info_t *info = get_block_info(block);
809 worklist_t *start_worklist = info->start_worklist;
811 const block_info_t *exit_info;
812 worklist_t *end_worklist;
813 struct list_head *entry;
815 if (start_worklist == NULL)
818 exit_block = get_Block_cfgpred_block(edge->block, edge->pos);
819 exit_info = get_block_info(exit_block);
820 end_worklist = exit_info->end_worklist;
822 activate_worklist(end_worklist);
823 /* all values contained in the start_worklist, which are not available
824 * in the end_worklist, must be unused livethroughs */
826 list_for_each(entry, &start_worklist->live_values) {
827 worklist_entry_t *wl_entry
828 = list_entry(entry, worklist_entry_t, head);
829 ir_node *value = wl_entry->value;
831 if (loop_info->max_register_pressure >= n_regs)
835 if (!worklist_contains(value)) {
836 /* add it to all loop exits */
837 DB((dbg, LEVEL_2, "Found unused livethrough: %+F (regpressure in loop %d)\n", value, loop_info->max_register_pressure));
838 push_unused_livethrough(loop_info, value);
839 /* the value should now be part of end_worklist, so mark it */
840 mark_irn_visited(value);
844 deactivate_worklist(end_worklist);
848 static void process_block_or_loop(const block_or_loop_t block_or_loop)
850 if (block_or_loop.loop->kind == k_ir_loop) {
851 loop_info_t *loop_info = get_loop_info(block_or_loop.loop);
853 if (do_push_unused_livethroughs)
854 push_unused_livethroughs(loop_info);
856 if (loop_info->max_register_pressure > max_register_pressure)
857 max_register_pressure = loop_info->max_register_pressure;
861 process_block(block_or_loop.block, NULL);
864 static void process_loop(ir_loop *loop)
866 int n_elements = get_loop_n_elements(loop);
868 loop_info_t *loop_info;
872 /* first handle all sub-loops */
873 for (i = 0; i < n_elements; ++i) {
874 loop_element element = get_loop_element(loop, i);
875 if (*element.kind != k_ir_loop)
878 process_loop(element.son);
881 /* create a postorder of the blocks */
882 loop_info = get_loop_info(loop);
883 edge = loop_info->entry_edges;
885 some_block = edge->block;
887 assert(loop == get_irg_loop(current_ir_graph));
888 some_block = get_irg_start_block(current_ir_graph);
891 loop_blocks = NEW_ARR_F(block_or_loop_t, 0);
894 ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
895 inc_irg_block_visited(current_ir_graph);
896 find_blocks(some_block);
897 /* for endless loops the end-block might be unreachable */
898 if (loop == get_irg_loop(current_ir_graph)) {
899 find_blocks(get_irg_end_block(current_ir_graph));
901 ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
903 DB((dbg, LEVEL_3, "Block List for loop %p\n", loop));
904 len = ARR_LEN(loop_blocks);
905 for (i = 0; i < len; ++i) {
906 block_or_loop_t block_or_loop = loop_blocks[i];
907 if (block_or_loop.loop->kind == k_ir_loop) {
908 DB((dbg, LEVEL_3, " L-%p", block_or_loop.loop));
910 DB((dbg, LEVEL_3, " B-%+F", block_or_loop.block));
913 DB((dbg, LEVEL_3, "\n"));
915 max_register_pressure = 0;
917 /* run1: tentative phase */
918 tentative_mode = true;
919 for (i = len-1; i >= 0; --i) {
920 process_block_or_loop(loop_blocks[i]);
923 /* run2: tentative phase - should reach fixpoint */
924 tentative_mode = true;
925 for (i = len-1; i >= 0; --i) {
926 process_block_or_loop(loop_blocks[i]);
930 /* run3: tentative phase - check fixpoint */
931 tentative_mode = true;
932 should_have_reached_fixpoint = true;
933 for (i = len-1; i >= 0; --i) {
934 process_block_or_loop(loop_blocks[i]);
936 should_have_reached_fixpoint = false;
939 /* run4: add spills/reloads */
940 tentative_mode = false;
941 do_push_unused_livethroughs = true;
942 for (i = len-1; i >= 0; --i) {
943 process_block_or_loop(loop_blocks[i]);
945 do_push_unused_livethroughs = false;
947 loop_info->max_register_pressure = max_register_pressure;
948 DB((dbg, LEVEL_2, "Regpressure in loop %p: %u\n", loop,
949 (unsigned) max_register_pressure));
951 DEL_ARR_F(loop_blocks);
954 static void fix_block_borders(ir_node *block, void *data)
956 block_info_t *block_info = get_block_info(block);
957 worklist_t *start_worklist = block_info->start_worklist;
958 int n_cfgpreds = get_Block_n_cfgpreds(block);
959 ir_loop *loop = get_irn_loop(block);
963 assert(start_worklist != NULL);
965 for (i = 0; i < n_cfgpreds; ++i) {
966 ir_node *pred_block = get_Block_cfgpred_block(block, i);
967 block_info_t *pred_block_info = get_block_info(pred_block);
968 worklist_t *end_worklist = pred_block_info->end_worklist;
969 ir_loop *pred_loop = get_irn_loop(pred_block);
970 bool is_loop_entry = false;
971 worklist_entry_t *entry;
973 assert(end_worklist != NULL);
975 if (get_loop_depth(pred_loop) < get_loop_depth(loop)) {
976 is_loop_entry = true;
979 /* reload missing values */
980 activate_worklist(end_worklist);
982 foreach_worklist(entry, start_worklist) {
983 ir_node *value = entry->value;
985 if (!is_loop_entry && entry->unused_livethrough_loop != NULL)
988 if (is_Phi(value) && get_nodes_block(value) == block) {
989 value = get_irn_n(value, i);
991 /* we might have unknowns as argument for the phi */
992 if (!arch_irn_consider_in_reg_alloc(cls, value))
995 if (worklist_contains(value))
997 be_add_reload_on_edge(senv, value, block, i, cls, 1);
1000 deactivate_worklist(end_worklist);
1005 * Run the spill algorithm.
1007 * @param birg the graph to run on
1008 * @param ncls the register class to run on
1010 static void be_spill_belady3(be_irg_t *birg, const arch_register_class_t *ncls)
1012 ir_graph *irg = be_get_birg_irg(birg);
1015 n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
1017 /* shortcut for register classes with ignore regs only */
1021 worklist_visited = 0;
1022 exec_freq = be_get_birg_exec_freq(birg);
1024 be_clear_links(irg);
1025 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK
1026 | IR_RESOURCE_LOOP_LINK);
1027 inc_irg_visited(irg);
1029 obstack_init(&obst);
1030 senv = be_new_spill_env(birg);
1032 assure_cf_loop(irg);
1033 clear_loop_info(get_irg_loop(irg));
1034 irg_block_walk_graph(irg, construct_loop_edges, NULL, NULL);
1036 process_loop(get_irg_loop(irg));
1038 irg_block_walk_graph(irg, fix_block_borders, NULL, NULL);
1040 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK
1041 | IR_RESOURCE_LOOP_LINK);
1043 be_insert_spills_reloads(senv);
1045 obstack_free(&obst, NULL);
1048 be_delete_spill_env(senv);
1051 void be_init_spillbelady3(void)
1053 static be_spiller_t belady3_spiller = {
1057 be_register_spiller("belady3", &belady3_spiller);
1058 FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady3");
1061 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady3);