2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief MIN-algorithm with some twists (aka belady spiller v3)
23 * @author Matthias Braun
28 * - handle phis correctly, decide whether we should spill them ~ok
29 * - merge multiple start worksets of blocks ~ok
30 * - how to and when to do the tentative phase...
44 #include "iredges_t.h"
52 #include "bespilloptions.h"
53 #include "besched_t.h"
/* Debug module handle for the belady3 spiller. */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* NOTE(review): this is a line-sampled excerpt; the struct bodies below are
 * missing their opening/closing braces and possibly further fields. */
58 typedef struct loop_edge_t loop_edge_t;
65 typedef struct loop_info_t loop_info_t;
/* per-loop bookkeeping: control-flow edges entering/leaving the loop and the
 * maximum register pressure observed inside it */
67 loop_edge_t *entry_edges;
68 loop_edge_t *exit_edges;
69 size_t max_register_pressure;
/* one worklist entry: a live value, the point a reload would be inserted at,
 * and a chain of further reload points on other paths */
72 typedef struct worklist_entry_t worklist_entry_t;
73 struct worklist_entry_t {
74 struct list_head head;
76 ir_node *reload_point;
77 worklist_entry_t *next_reload;
/* a worklist models the set of values currently assigned to registers */
80 typedef struct worklist_t worklist_t;
82 struct list_head live_values;
/* per-block info, attached via the block's link field: the worklists at
 * block entry and block exit */
86 typedef struct block_info_t block_info_t;
88 worklist_t *start_worklist;
89 worklist_t *end_worklist;
/* pass-global state */
92 static const arch_env_t *arch_env;
93 static const arch_register_class_t *cls;
94 static struct obstack obst;
95 static spill_env_t *senv;
97 static size_t max_register_pressure;
/* tentative mode: compute worklists only, do not emit spills/reloads yet */
98 static bool tentative_mode;
/* when set, process_block asserts that worklists no longer change */
99 static bool should_have_reached_fixpoint;
100 static ir_exec_freq *exec_freq;
/* Allocate a fresh, empty worklist on the pass obstack. */
102 static worklist_t *new_worklist(void)
104 worklist_t *worklist = obstack_alloc(&obst, sizeof(worklist[0]));
106 INIT_LIST_HEAD(&worklist->live_values);
107 worklist->n_live_values = 0;
/* Clear @p node's visited flag by setting its visited counter just below the
 * graph's current counter. */
112 static void mark_irn_not_visited(ir_node *node)
114 set_irn_visited(node, get_irg_visited(current_ir_graph) - 1);
/* A value is in the currently activated worklist iff its visited flag is
 * set (see activate_worklist/deactivate_worklist). */
117 static bool worklist_contains(const ir_node *node)
119 return irn_visited(node);
/* Return the block_info_t attached to @p block's link field, allocating and
 * zero-initializing it on first use. */
122 static block_info_t *get_block_info(ir_node *block)
124 block_info_t *info = get_irn_link(block);
128 info = obstack_alloc(&obst, sizeof(info[0]));
129 memset(info, 0, sizeof(info[0]));
130 set_irn_link(block, info);
/* Undo activate_worklist(): clear the visited flag and link field of every
 * value in @p worklist. */
134 static void deactivate_worklist(const worklist_t *worklist)
136 struct list_head *entry;
138 list_for_each(entry, &worklist->live_values) {
139 worklist_entry_t *wl_entry
140 = list_entry(entry, worklist_entry_t, head);
141 assert(worklist_contains(wl_entry->value));
142 mark_irn_not_visited(wl_entry->value);
143 set_irn_link(wl_entry->value, NULL);
/* Make @p worklist the active one: mark each contained value visited and
 * link the value to its worklist entry for O(1) lookup. */
147 static void activate_worklist(const worklist_t *worklist)
149 struct list_head *entry;
151 list_for_each(entry, &worklist->live_values) {
152 worklist_entry_t *wl_entry
153 = list_entry(entry, worklist_entry_t, head);
154 assert(!worklist_contains(wl_entry->value));
155 mark_irn_visited(wl_entry->value);
156 set_irn_link(wl_entry->value, wl_entry);
161 * duplicate a worklist and directly activates it
/* @p succ_block is the successor whose start worklist is being duplicated
 * into @p block; phis of succ_block are translated to their operand at
 * @p succ_pos. */
163 static worklist_t *duplicate_activate_worklist(const worklist_t *worklist,
164 ir_node *block, ir_node *succ_block, int succ_pos)
166 ir_node *reload_point = NULL;
167 size_t n_live_values = 0;
168 worklist_t *new_worklist;
169 struct list_head *entry;
/* if the edge block->succ_block is critical-ish (succ has multiple preds or
 * block has multiple succs) reloads must go at the end of block */
171 if (succ_block != NULL &&
172 (get_Block_n_cfgpreds(succ_block) > 1
173 || get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) > 1)) {
174 reload_point = be_get_end_of_block_insertion_point(block);
177 new_worklist = obstack_alloc(&obst, sizeof(new_worklist[0]));
178 INIT_LIST_HEAD(&new_worklist->live_values);
180 list_for_each(entry, &worklist->live_values) {
181 worklist_entry_t *wl_entry = list_entry(entry, worklist_entry_t, head);
182 ir_node *value = wl_entry->value;
/* translate phis of the successor block into the operand on our edge */
184 if (is_Phi(value) && get_nodes_block(value) == succ_block) {
185 value = get_Phi_pred(value, succ_pos);
187 /* can happen for unknown phi preds */
188 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, value))
192 worklist_entry_t *new_entry
193 = obstack_alloc(&obst, sizeof(new_entry[0]));
194 new_entry->value = value;
195 if (reload_point != NULL) {
196 new_entry->reload_point = reload_point;
198 new_entry->reload_point = wl_entry->reload_point;
200 new_entry->next_reload = NULL;
/* activate directly: mark visited + link, but only list it once */
202 if (irn_not_visited(value)) {
203 list_add_tail(&new_entry->head, &new_worklist->live_values);
206 mark_irn_visited(value);
207 set_irn_link(value, new_entry);
211 /* the only way we might visit a value again, is when we get it as
 * the operand of two different phis of succ_block */
213 assert(is_Phi(wl_entry->value)
214 && get_nodes_block(wl_entry->value) == succ_block);
218 new_worklist->n_live_values = n_live_values;
/* Create a shallow copy of @p worklist (entries are copied, the contained
 * values are not touched/activated). */
223 static worklist_t *duplicate_worklist(const worklist_t *worklist)
225 worklist_t *new_worklist;
226 struct list_head *entry;
228 new_worklist = obstack_alloc(&obst, sizeof(new_worklist[0]));
229 INIT_LIST_HEAD(&new_worklist->live_values);
230 new_worklist->n_live_values = worklist->n_live_values;
232 list_for_each(entry, &worklist->live_values) {
233 worklist_entry_t *wl_entry = list_entry(entry, worklist_entry_t, head);
234 worklist_entry_t *new_entry
235 = obstack_alloc(&obst, sizeof(new_entry[0]));
/* memcpy copies value/reload_point/next_reload; the list head is relinked
 * into the new list right after */
237 memcpy(new_entry, wl_entry, sizeof(new_entry[0]));
238 list_add_tail(&new_entry->head, &new_worklist->live_values);
/* Dump the values of @p worklist to the debug channel at @p level. */
245 static void print_worklist(const worklist_t *worklist, int level)
247 struct list_head *entry;
249 DB((dbg, level, "%d values: ", worklist->n_live_values));
250 list_for_each(entry, &worklist->live_values) {
251 worklist_entry_t *wl_entry
252 = list_entry(entry, worklist_entry_t, head);
254 DB((dbg, level, "%+F ", wl_entry->value));
/* Recursively clear the link field of @p loop and all its child loops so
 * get_loop_info() allocates fresh info. */
261 static void clear_loop_info(ir_loop *loop)
263 int n_elements = get_loop_n_elements(loop);
267 for (i = 0; i < n_elements; ++i) {
268 loop_element element = get_loop_element(loop, i);
269 if (*element.kind != k_ir_loop)
272 clear_loop_info(element.son);
/* Return the loop_info_t stored in @p loop's link field, allocating and
 * zeroing it on first use. */
276 static loop_info_t *get_loop_info(ir_loop *loop)
278 loop_info_t *info = loop->link;
282 info = obstack_alloc(&obst, sizeof(info[0]));
283 memset(info, 0, sizeof(info[0]));
289 * constructs loop in+out edges when called in a block walker
291 static void construct_loop_edges(ir_node *block, void *data)
293 int n_cfgpreds = get_Block_n_cfgpreds(block);
294 ir_loop *loop = get_irn_loop(block);
/* inspect every control-flow predecessor and record loop entries/exits */
299 for (i = 0; i < n_cfgpreds; ++i) {
300 ir_node *cfgpred_block = get_Block_cfgpred_block(block, i);
301 ir_loop *cfgpred_loop = get_irn_loop(cfgpred_block);
/* same loop: not a loop-border edge */
304 if (cfgpred_loop == loop)
307 /* critical edges are split, so we can't jump from 1 loop
308 * directly into another without going out of it */
309 assert(get_loop_depth(cfgpred_loop) != get_loop_depth(loop));
311 /* edge out of loop */
314 if (get_loop_depth(cfgpred_loop) > get_loop_depth(loop)) {
319 is_exit_edge = false;
324 /* this might be a jump out/in multiple loops */
/* walk the loop nest and attach the edge to every crossed loop */
326 loop_info_t *l_info = get_loop_info(l);
328 edge = obstack_alloc(&obst, sizeof(edge[0]));
329 memset(edge, 0, sizeof(edge[0]));
334 ir_fprintf(stderr, "Loop %p exit edge %+F:%d\n",
336 edge->next = l_info->exit_edges;
337 l_info->exit_edges = edge;
338 assert(get_loop_depth(loop) < get_loop_depth(l));
340 ir_fprintf(stderr, "Loop %p entry edge %+F:%d\n",
342 edge->next = l_info->entry_edges;
343 l_info->entry_edges = edge;
344 assert(get_loop_depth(cfgpred_loop) < get_loop_depth(l));
346 l = get_loop_outer_loop(l);
/* Emit reloads for @p entry at each recorded reload point, following the
 * next_reload chain; reload points are consumed (set to NULL). */
354 static void place_reload(worklist_entry_t *entry)
359 /* walk list of reload points */
361 DB((dbg, LEVEL_1, "reload of %+F before %+F\n", entry->value,
362 entry->reload_point));
363 assert(entry->reload_point != NULL);
364 be_add_reload(senv, entry->value, entry->reload_point, cls, 1);
365 entry->reload_point = NULL;
367 entry = entry->next_reload;
368 } while(entry != NULL);
373 * does stuff required for non-active worklists: spills all values not live
374 * in the active worklist; links live values to the current worklist
376 static void process_non_active_worklist(const worklist_t *worklist,
377 worklist_t *current_worklist)
379 struct list_head *entry;
381 list_for_each(entry, &worklist->live_values) {
382 worklist_entry_t *wl_entry = list_entry(entry, worklist_entry_t, head);
383 ir_node *value = wl_entry->value;
385 if (!worklist_contains(value)) {
386 /* if we still have room in the worklist, then we can simply take
388 if (current_worklist->n_live_values < n_regs) {
389 worklist_entry_t *new_entry
390 = obstack_alloc(&obst, sizeof(new_entry[0]));
392 new_entry->value = value;
393 new_entry->reload_point = wl_entry->reload_point;
394 new_entry->next_reload = wl_entry->next_reload;
395 set_irn_link(value, new_entry);
396 mark_irn_visited(value);
397 list_add(&new_entry->head, ¤t_worklist->live_values);
398 ++current_worklist->n_live_values;
400 /* value is not in current worklist, place reloads */
401 place_reload(wl_entry);
404 /* value is in current worklist, link it with the reload point
406 worklist_entry_t *active_entry = get_irn_link(value);
407 wl_entry->next_reload = active_entry->next_reload;
408 active_entry->next_reload = wl_entry;
415 * makes sure the worklist contains not more than n_regs - room_needed entries
417 static void make_room(worklist_t *worklist, size_t room_needed)
421 struct list_head *entry;
/* NOTE(review): the declaration of spills_needed is not visible in this
 * excerpt; the "<= 0" test below only works if it is a signed type —
 * verify against the full file. */
423 spills_needed = worklist->n_live_values + room_needed - n_regs;
424 if (spills_needed <= 0)
/* evict from the front of the list: these are the least recently used
 * values (val_used appends at the tail) */
427 entry = worklist->live_values.next;
428 for(i = spills_needed; i > 0; --i) {
429 struct list_head *next = entry->next;
430 worklist_entry_t *wl_entry
431 = list_entry(entry, worklist_entry_t, head);
433 assert(worklist_contains(wl_entry->value));
434 mark_irn_not_visited(wl_entry->value);
435 place_reload(wl_entry);
440 worklist->n_live_values -= spills_needed;
444 * a value was used, so bring it to the back of the worklist (which might
445 * result in a spill of another value).
447 static void val_used(worklist_t *worklist, ir_node *value, ir_node *sched_point)
449 assert(arch_irn_consider_in_reg_alloc(arch_env, cls, value));
451 /* already in the worklist? move around, otherwise add at back */
452 worklist_entry_t *entry = get_irn_link(value);
453 if (worklist_contains(value)) {
454 assert(entry != NULL);
456 list_del(&entry->head);
/* not contained: allocate a fresh entry and activate the value */
459 entry = obstack_alloc(&obst, sizeof(entry[0]));
460 entry->value = value;
461 set_irn_link(value, entry);
464 ++worklist->n_live_values;
465 mark_irn_visited(value);
/* (re)append at the tail: tail = most recently used */
468 entry->reload_point = sched_point;
469 entry->next_reload = NULL;
470 list_add_tail(&entry->head, &worklist->live_values);
/* Remove @p value from @p worklist (it must be contained) and clear its
 * visited flag. */
473 static void worklist_remove(worklist_t *worklist, ir_node *value)
475 worklist_entry_t *entry = get_irn_link(value);
476 assert(entry != NULL);
477 list_del(&entry->head);
478 --worklist->n_live_values;
480 assert(worklist_contains(value));
481 mark_irn_not_visited(value);
/* Track the maximum number of simultaneously live values seen so far. */
484 static void update_max_pressure(worklist_t *worklist)
486 if (worklist->n_live_values > max_register_pressure)
487 max_register_pressure = worklist->n_live_values;
/* Core MIN pass over one block: walk the schedule backwards, maintaining the
 * worklist of live values; defs free slots, uses (re)insert values, overflow
 * triggers spills via make_room(). @p worklist is the block's end worklist
 * and is transformed into its start worklist. */
490 static void do_spilling(ir_node *block, worklist_t *worklist)
494 assert(worklist != NULL);
496 sched_foreach_reverse(block, node) {
500 DB((dbg, LEVEL_2, "\t%+F... ", node));
501 update_max_pressure(worklist);
505 /* TODO: if we have some free registers, then we could decide to
506 * not spill some phis (but not for phis where at least 1 input is
509 /* we have to spill all phis that are not live */
510 sched_foreach_reverse_from(node, node2) {
511 assert(is_Phi(node2));
513 if (worklist_contains(node2))
515 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node2))
519 be_spill_phi(senv, node2);
521 DB((dbg, LEVEL_2, "\n"));
525 /* remove values defined by this instruction from the workset. Values
526 * defined but not in the workset need free registers */
527 if (get_irn_mode(node) == mode_T) {
528 const ir_edge_t *edge;
/* mode_T node: its results are the Projs */
530 foreach_out_edge(node, edge) {
531 ir_node *proj = get_edge_src_irn(edge);
532 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, proj))
534 if (worklist_contains(proj)) {
535 worklist_remove(worklist, proj);
540 } else if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
541 if (worklist_contains(node)) {
542 worklist_remove(worklist, node);
548 /* make sure we have enough free registers for the spills */
549 make_room(worklist, n_defs);
551 /* put all values used by the instruction into the workset */
552 arity = get_irn_arity(node);
553 for(i = 0; i < arity; ++i) {
554 ir_node *use = get_irn_n(node, i);
556 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, use))
559 val_used(worklist, use, node);
562 /* we might have too many values in the worklist now and need to spill
564 make_room(worklist, 0);
567 print_worklist(worklist, LEVEL_2);
568 DB((dbg, LEVEL_2, "\n"));
/* pressure at the block entry, after all instructions were processed */
572 update_max_pressure(worklist);
575 DB((dbg, LEVEL_1, "worklist at begin of %+F:", block));
576 print_worklist(worklist, LEVEL_1);
577 DB((dbg, LEVEL_1, "\n"));
581 static bool worklists_equal(const worklist_t *wl1, const worklist_t *wl2)
583 const struct list_head *i1 = &wl1->live_values;
584 const struct list_head *i2 = &wl2->live_values;
586 for ( ; i1 != &wl1->live_values && i2 != &wl2->live_values;
587 i1 = i1->next, i2 = i2->next) {
588 worklist_entry_t *entry1 = list_entry(i1, worklist_entry_t, head);
589 worklist_entry_t *entry2 = list_entry(i2, worklist_entry_t, head);
591 if (entry1->value != entry2->value)
594 /* both lists at the end */
595 if (i1 != &wl1->live_values || i2 != &wl2->live_values)
/* Process one block: pick the most frequently executed successor's start
 * worklist as our end worklist, place reloads for values live only in other
 * successors, run do_spilling() backwards through the block, and record the
 * resulting start worklist in the block info. */
601 static void process_block(ir_node *block, void *env)
603 block_info_t *block_info = NULL;
604 worklist_t *worklist = NULL;
605 double best_execfreq = -1;
606 ir_node *best_succ_block = NULL;
609 const ir_edge_t *edge;
612 DB((dbg, LEVEL_1, "Processing %+F\n", block));
614 /* construct worklist */
/* choose the successor with the highest execution frequency that already
 * has a start worklist computed */
615 foreach_block_succ(block, edge) {
616 ir_node *succ_block = get_edge_src_irn(edge);
617 double execfreq = get_block_execfreq(exec_freq, succ_block);
619 if (execfreq > best_execfreq) {
620 block_info_t *block_info = get_block_info(succ_block);
621 worklist_t *succ_worklist = block_info->start_worklist;
622 if (succ_worklist != NULL) {
623 best_execfreq = execfreq;
624 worklist = succ_worklist;
625 best_succ_block = succ_block;
626 best_pos = get_edge_src_pos(edge);
630 if (worklist == NULL) {
631 /* at least 1 successor block must have been processed unless it was
 * the end block (or we are in the tentative phase) */
633 assert(tentative_mode || block == get_irg_end_block(get_irn_irg(block)));
635 worklist = new_worklist();
637 worklist = duplicate_activate_worklist(worklist, block, best_succ_block,
640 /* handle this in fix_blocks later... */
642 /* now we could have live values in the succ worklists that are not
643 * live anymore in the worklist we picked. We need reloads for them. */
644 foreach_block_succ(block, edge) {
645 ir_node *succ_block = get_edge_src_irn(edge);
646 block_info_t *block_info = get_block_info(succ_block);
647 worklist_t *succ_worklist = block_info->start_worklist;
649 if (succ_block == best_succ_block)
652 process_non_active_worklist(succ_worklist, worklist);
657 block_info = get_block_info(block);
660 DB((dbg, LEVEL_1, "worklist at end of %+F:", block));
661 print_worklist(worklist, LEVEL_1);
662 DB((dbg, LEVEL_1, "\n"));
/* in the fixpoint-check phase the worklists must not change anymore */
664 if (should_have_reached_fixpoint) {
665 assert(worklists_equal(block_info->end_worklist, worklist));
668 block_info->end_worklist = duplicate_worklist(worklist);
670 do_spilling(block, worklist);
671 deactivate_worklist(worklist);
674 if (should_have_reached_fixpoint) {
675 assert(worklists_equal(block_info->start_worklist, worklist));
678 block_info->start_worklist = worklist;
680 /* we shouldn't have any live values left at the start block */
681 n_preds = get_Block_n_cfgpreds(block);
682 assert(n_preds != 0 || worklist->n_live_values == 0);
/* a tagged union element of the processing order: either a plain block or a
 * whole (already processed) loop */
685 typedef struct block_or_loop_t;
686 struct block_or_loop_t {
/* flexible array of blocks/loops of the loop currently being processed */
694 static block_or_loop_t *loop_blocks;
695 static ir_loop *current_loop;
697 static void find_blocks(ir_node *block);
/* Record inner loop @p loop (entered via block @p entry) once in the
 * loop_blocks list, then continue the walk at the loop's exit edges. */
699 static void find_in_loop(ir_loop *loop, ir_node *entry)
701 loop_info_t *loop_info = get_loop_info(loop);
703 /* simply mark 1 block in the loop to indicate that the loop was already
 * processed */
705 ir_node *some_block = loop_info->entry_edges->block;
706 if (Block_block_visited(some_block))
709 block_or_loop_t block_or_loop;
710 block_or_loop.v.loop = loop;
711 block_or_loop.is_loop = true;
712 ARR_APP1(block_or_loop_t, loop_blocks, block_or_loop);
716 /* we should be 1 of the entry blocks */
717 loop_edge_t *edge = loop_info->entry_edges;
719 for ( ; edge != NULL; edge = edge->next) {
720 if (edge->block == entry)
726 /* check all loop successors */
727 loop_edge_t *edge = loop_info->exit_edges;
728 for ( ; edge != NULL; edge = edge->next) {
729 ir_node *succ = edge->block;
730 ir_loop *succ_loop = get_irn_loop(succ);
732 if (succ_loop == current_loop) {
/* exits into an outer loop are handled when that loop is processed */
735 assert(get_loop_depth(succ_loop) < get_loop_depth(current_loop));
/* Depth-first walk over the blocks of current_loop, appending each block (or
 * a nested loop as one unit) to loop_blocks in pre-order. */
740 static void find_blocks(ir_node *block)
742 const ir_edge_t *edge;
744 if (Block_block_visited(block))
747 block_or_loop_t block_or_loop;
748 block_or_loop.v.block = block;
749 block_or_loop.is_loop = false;
751 ARR_APP1(block_or_loop_t, loop_blocks, block_or_loop);
752 mark_Block_block_visited(block);
754 foreach_block_succ(block, edge) {
755 ir_node *succ = get_edge_src_irn(edge);
757 /* is it part of the current loop? */
758 ir_loop *loop = get_irn_loop(succ);
759 if (loop != current_loop) {
/* deeper loop: recurse into it as a single unit */
761 if (get_loop_depth(loop) > get_loop_depth(current_loop)) {
762 find_in_loop(loop, succ);
764 /* parent loop: we're not interested in the block */
/* Dispatch one element of the processing order: inner loops only contribute
 * their (already computed) max register pressure; blocks are processed. */
772 static void process_block_or_loop(const block_or_loop_t *block_or_loop)
774 if (block_or_loop->is_loop) {
775 loop_info_t *loop_info = get_loop_info(block_or_loop->v.loop);
777 if (loop_info->max_register_pressure > max_register_pressure)
778 max_register_pressure = loop_info->max_register_pressure;
780 /* TODO: push unused livethroughs... */
783 process_block(block_or_loop->v.block, NULL);
/* Process @p loop bottom-up: first recurse into sub-loops, then build a
 * postorder of this loop's blocks, iterate them multiple times (tentative
 * runs until a fixpoint is reached) and finally emit spills/reloads. */
786 static void process_loop(ir_loop *loop)
788 /* first handle all sub-loops */
789 int n_elements = get_loop_n_elements(loop);
792 for (i = 0; i < n_elements; ++i) {
793 loop_element element = get_loop_element(loop, i);
794 if (*element.kind != k_ir_loop)
797 process_loop(element.son);
800 /* create a postorder of the blocks */
801 loop_info_t *loop_info = get_loop_info(loop);
802 loop_edge_t *edge = loop_info->entry_edges;
/* start the walk at some entry block; the outermost "loop" (the whole
 * graph) has no entry edges and starts at the start block */
805 some_block = edge->block;
807 assert(loop == get_irg_loop(current_ir_graph));
808 some_block = get_irg_start_block(current_ir_graph);
811 loop_blocks = NEW_ARR_F(block_or_loop_t,0);
814 ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
815 inc_irg_block_visited(current_ir_graph);
816 find_blocks(some_block);
817 /* for endless loops the end-block might be unreachable */
818 if (loop == get_irg_loop(current_ir_graph)) {
819 find_blocks(get_irg_end_block(current_ir_graph));
821 ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
/* NOTE(review): unconditional stderr dumps below look like leftover
 * debugging output — consider routing through DB()/DBG. */
823 fprintf(stderr, "Block List for loop %p\n", loop);
824 int len = ARR_LEN(loop_blocks);
825 for (i = 0; i < len; ++i) {
826 block_or_loop_t *block_or_loop = &loop_blocks[i];
827 if (block_or_loop->is_loop) {
828 ir_fprintf(stderr, " L-%p", block_or_loop->v.loop);
830 ir_fprintf(stderr, " B-%+F", block_or_loop->v.block);
833 fprintf(stderr, "\n");
835 max_register_pressure = 0;
837 /* run1: tentative phase */
838 tentative_mode = true;
839 for (i = len-1; i >= 0; --i) {
840 process_block_or_loop(&loop_blocks[i]);
843 /* run2: tentative phase - should reach fixpoint */
844 tentative_mode = true;
845 for (i = len-1; i >= 0; --i) {
846 process_block_or_loop(&loop_blocks[i]);
850 /* run3: tentative phase - check fixpoint */
851 tentative_mode = true;
852 should_have_reached_fixpoint = true;
853 for (i = len-1; i >= 0; --i) {
854 process_block_or_loop(&loop_blocks[i]);
856 should_have_reached_fixpoint = false;
859 /* run4: add spills/reloads */
860 tentative_mode = false;
861 for (i = len-1; i >= 0; --i) {
862 process_block_or_loop(&loop_blocks[i]);
865 loop_info->max_register_pressure = max_register_pressure;
866 ir_fprintf(stderr, "Regpressure in loop %p: %u\n", loop,
867 (unsigned) max_register_pressure);
869 DEL_ARR_F(loop_blocks);
/* Block walker: for every CFG predecessor, compare the predecessor's end
 * worklist with this block's start worklist and insert reloads on the edge
 * for start-live values the predecessor does not carry in registers. */
872 static void fix_block_borders(ir_node *block, void *data)
874 block_info_t *block_info = get_block_info(block);
875 worklist_t *start_worklist = block_info->start_worklist;
876 int n_cfgpreds = get_Block_n_cfgpreds(block);
880 assert(start_worklist != NULL);
882 for (i = 0; i < n_cfgpreds; ++i) {
883 ir_node *pred_block = get_Block_cfgpred_block(block, i);
884 block_info_t *pred_block_info = get_block_info(pred_block);
885 worklist_t *end_worklist = pred_block_info->end_worklist;
887 assert(end_worklist != NULL);
889 /* reload missing values */
890 struct list_head *entry;
892 activate_worklist(end_worklist);
894 list_for_each(entry, &start_worklist->live_values) {
895 worklist_entry_t *wl_entry
896 = list_entry(entry, worklist_entry_t, head);
897 ir_node *value = wl_entry->value;
/* phis of this block demand their operand on the edge from pred i */
899 if (is_Phi(value) && get_nodes_block(value) == block) {
900 value = get_irn_n(value, i);
902 /* we might have unknowns as argument for the phi */
903 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, value))
/* already in the predecessor's registers: nothing to do */
907 if (worklist_contains(value))
910 be_add_reload_on_edge(senv, value, block, i, cls, 1);
913 deactivate_worklist(end_worklist);
/* Entry point of the belady3 spiller for register class @p ncls: set up the
 * global pass state, build loop edge info, run the per-loop MIN algorithm,
 * fix block borders and materialize the collected spills/reloads. */
917 static void be_spill_belady3(be_irg_t *birg, const arch_register_class_t *ncls)
919 ir_graph *irg = be_get_birg_irg(birg);
922 n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
924 /* shortcut for register classes with ignore regs only */
928 arch_env = be_get_birg_arch_env(birg);
929 exec_freq = be_get_birg_exec_freq(birg);
/* we use visited flags, irn links and loop links throughout the pass */
932 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK
933 | IR_RESOURCE_LOOP_LINK);
934 inc_irg_visited(irg);
937 senv = be_new_spill_env(birg);
940 clear_loop_info(get_irg_loop(irg));
941 irg_block_walk_graph(irg, construct_loop_edges, NULL, NULL);
943 process_loop(get_irg_loop(current_ir_graph));
946 /* do a post-order walk over the CFG to make sure we have a maximum number
947 * of preds processed before entering a block */
/* NOTE(review): three identical tentative/final walks over the CFG; only
 * visible as repeated calls here because intermediate lines (mode flag
 * flips) are missing from this excerpt. */
949 irg_block_edges_walk(get_irg_start_block(irg), NULL, process_block, NULL);
952 irg_block_edges_walk(get_irg_start_block(irg), NULL, process_block, NULL);
955 irg_block_edges_walk(get_irg_start_block(irg), NULL, process_block, NULL);
958 irg_block_walk_graph(irg, fix_block_borders, NULL, NULL);
960 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK
961 | IR_RESOURCE_LOOP_LINK);
963 be_insert_spills_reloads(senv);
965 obstack_free(&obst, NULL);
968 be_delete_spill_env(senv);
/* Module constructor: register the belady3 spiller and its debug module. */
971 void be_init_spillbelady3(void)
973 static be_spiller_t belady3_spiller = {
977 be_register_spiller("belady3", &belady3_spiller);
978 FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady3");
981 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady3);