2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief MIN-algorithm with some twists (aka belady spiller v3)
23 * @author Matthias Braun
28 * - handle phis correctly, decide whether we should spill them ~ok
29 * - merge multiple start worksets of blocks ~ok
30 * - how to and when to do the tentative phase...
44 #include "iredges_t.h"
52 #include "bespilloptions.h"
53 #include "besched_t.h"
57 #define EXPENSIVE_CHECKS
60 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Forward declaration of a node in a singly linked list of loop entry/exit
 * edges (the struct body is elided in this excerpt). */
62 typedef struct loop_edge_t loop_edge_t;

/* Per-loop bookkeeping, cached in the loop's link pointer (see
 * get_loop_info()).  The struct header line is elided in this excerpt. */
69 typedef struct loop_info_t loop_info_t;
72 loop_edge_t *entry_edges;     /* control-flow edges jumping into the loop */
73 loop_edge_t *exit_edges;      /* control-flow edges leaving the loop */
74 size_t max_register_pressure; /* highest pressure observed inside the loop */

/* One live value tracked in a worklist. */
77 typedef struct worklist_entry_t worklist_entry_t;
78 struct worklist_entry_t {
79 struct list_head head;            /* chaining inside worklist_t */
81 ir_node *reload_point;            /* where to reload if this value is spilled */
82 ir_loop *unused_livethrough_loop; /* non-NULL for unused live-through values */

/* Ordered list of the currently live values.  make_room() evicts from the
 * front, val_used() (re)appends at the back, i.e. the list is kept in
 * least-recently-used-first order. */
85 typedef struct worklist_t worklist_t;
87 struct list_head live_values;

/* Per-block worklists at block entry and exit, attached to the block's link
 * field (see get_block_info()). */
92 typedef struct block_info_t block_info_t;
94 worklist_t *start_worklist;
95 worklist_t *end_worklist;

/* Module-global state for one spiller run (set up in be_spill_belady3()). */
98 static const arch_env_t *arch_env;
99 static const arch_register_class_t *cls;  /* register class being processed */
100 static struct obstack obst;               /* allocator for all temporary data */
101 static spill_env_t *senv;                 /* collects spill/reload requests */
102 static size_t n_regs;                     /* usable registers in cls */
103 static size_t max_register_pressure;
104 static bool tentative_mode;               /* true during fixpoint iterations */
105 static bool should_have_reached_fixpoint;
106 static bool do_push_unused_livethroughs;
107 static ir_exec_freq *exec_freq;
108 static ir_visited_t worklist_visited;     /* generation counter for worklist merging */
/* Allocate a fresh, empty worklist on the module obstack.
 * (The return statement is elided in this excerpt.) */
110 static worklist_t *new_worklist(void)
112 worklist_t *worklist = obstack_alloc(&obst, sizeof(worklist[0]));
113 memset(worklist, 0, sizeof(worklist[0]));
115 INIT_LIST_HEAD(&worklist->live_values);
116 worklist->n_live_values = 0;
/* Clear @p node's visited mark by setting its visited counter just below the
 * graph's current visited counter. */
121 static void mark_irn_not_visited(ir_node *node)
123 set_irn_visited(node, get_irg_visited(current_ir_graph) - 1);
/* A value counts as "contained in the active worklist" iff its visited flag
 * is set (see activate_worklist()/deactivate_worklist()). */
126 static bool worklist_contains(const ir_node *node)
128 return irn_visited(node);
/* Return the block_info_t cached in @p block's link field, allocating and
 * zero-initializing one on first use. */
131 static block_info_t *get_block_info(ir_node *block)
133 block_info_t *info = get_irn_link(block);
/* no info cached yet: create it (the NULL check is elided in this excerpt) */
137 info = obstack_alloc(&obst, sizeof(info[0]));
138 memset(info, 0, sizeof(info[0]));
139 set_irn_link(block, info);
/* Undo activate_worklist(): clear the visited mark and the link pointer of
 * every value in @p worklist. */
143 static void deactivate_worklist(const worklist_t *worklist)
145 struct list_head *entry;
147 list_for_each(entry, &worklist->live_values) {
148 worklist_entry_t *wl_entry
149 = list_entry(entry, worklist_entry_t, head);
150 assert(worklist_contains(wl_entry->value));
151 mark_irn_not_visited(wl_entry->value);
152 set_irn_link(wl_entry->value, NULL);
/* Mark every value in @p worklist visited and point its link field at its
 * worklist entry, so worklist_contains()/get_irn_link() work on it. */
156 static void activate_worklist(const worklist_t *worklist)
158 struct list_head *entry;
160 list_for_each(entry, &worklist->live_values) {
161 worklist_entry_t *wl_entry
162 = list_entry(entry, worklist_entry_t, head);
163 assert(!worklist_contains(wl_entry->value));
164 mark_irn_visited(wl_entry->value);
165 set_irn_link(wl_entry->value, wl_entry);
170 * duplicate a worklist and directly activates it
/* Copies at most n_regs entries from @p worklist (a successor's start
 * worklist) into @p new_worklist, translating phi outputs of @p succ_block
 * into the phi operand flowing over this edge, and marks each copied value
 * visited.  NOTE(review): the parameter list is truncated in this excerpt;
 * the last parameter is presumably the predecessor position succ_pos used
 * below -- confirm against the full source. */
172 static void fill_and_activate_worklist(worklist_t *new_worklist,
173 const worklist_t *worklist, ir_node *block, ir_node *succ_block,
176 ir_node *reload_point = NULL;
177 size_t n_live_values = 0;
178 struct list_head *entry;

/* if the successor merges several predecessors, or this block branches to
 * several successors, reloads cannot simply go into the successor; use the
 * end of the current block as the reload insertion point instead */
180 if (succ_block != NULL &&
181 (get_Block_n_cfgpreds(succ_block) > 1
182 || get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) > 1)) {
183 reload_point = be_get_end_of_block_insertion_point(block);

186 list_for_each(entry, &worklist->live_values) {
187 worklist_entry_t *wl_entry = list_entry(entry, worklist_entry_t, head);
188 ir_node *value = wl_entry->value;
189 worklist_entry_t *new_entry;

/* stop once the target worklist is full */
191 if (new_worklist->n_live_values >= n_regs)

/* phis of the successor block stand for the value coming in over the edge */
194 if (is_Phi(value) && get_nodes_block(value) == succ_block) {
195 value = get_Phi_pred(value, succ_pos);

197 /* can happen for unknown phi preds */
198 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, value))

/* already activated via another successor's worklist */
202 if (irn_visited(value))

205 new_entry = obstack_alloc(&obst, sizeof(new_entry[0]));
206 memset(new_entry, 0, sizeof(new_entry[0]));
208 new_entry->value = value;
209 if (reload_point != NULL) {
210 new_entry->reload_point = reload_point;
212 new_entry->reload_point = wl_entry->reload_point;

215 list_add_tail(&new_entry->head, &new_worklist->live_values);
218 mark_irn_visited(value);
219 set_irn_link(value, new_entry);
220 new_worklist->n_live_values++;
/* Deep-copy @p worklist: allocate a fresh worklist and a fresh copy of every
 * entry on the obstack; entry contents are copied verbatim (memcpy).
 * (The return statement is elided in this excerpt.) */
224 static worklist_t *duplicate_worklist(const worklist_t *worklist)
226 worklist_t *new_worklist;
227 struct list_head *entry;
229 new_worklist = obstack_alloc(&obst, sizeof(new_worklist[0]));
230 memset(new_worklist, 0, sizeof(new_worklist[0]));
231 INIT_LIST_HEAD(&new_worklist->live_values);
232 new_worklist->n_live_values = worklist->n_live_values;
234 list_for_each(entry, &worklist->live_values) {
235 worklist_entry_t *wl_entry = list_entry(entry, worklist_entry_t, head);
236 worklist_entry_t *new_entry
237 = obstack_alloc(&obst, sizeof(new_entry[0]));
239 memcpy(new_entry, wl_entry, sizeof(new_entry[0]));
240 list_add_tail(&new_entry->head, &new_worklist->live_values);
/* Dump the worklist's size and each live value to the debug output at the
 * given debug @p level. */
247 static void print_worklist(const worklist_t *worklist, int level)
249 struct list_head *entry;
251 DB((dbg, level, "%d values: ", worklist->n_live_values));
252 list_for_each(entry, &worklist->live_values) {
253 worklist_entry_t *wl_entry
254 = list_entry(entry, worklist_entry_t, head);
256 DB((dbg, level, "%+F ", wl_entry->value));
/* Recursively reset the cached loop info of @p loop and of all nested loops.
 * (The statement that actually clears the loop's link is elided in this
 * excerpt.) */
263 static void clear_loop_info(ir_loop *loop)
265 int n_elements = get_loop_n_elements(loop);
269 for (i = 0; i < n_elements; ++i) {
270 loop_element element = get_loop_element(loop, i);
/* only recurse into child loops, skip blocks and other element kinds */
271 if (*element.kind != k_ir_loop)
274 clear_loop_info(element.son);
/* Return the loop_info_t cached in @p loop's link, allocating a zeroed one
 * on first use.  (The NULL check, the link assignment and the return are
 * elided in this excerpt.) */
278 static loop_info_t *get_loop_info(ir_loop *loop)
280 loop_info_t *info = loop->link;
284 info = obstack_alloc(&obst, sizeof(info[0]));
285 memset(info, 0, sizeof(info[0]));
292 * constructs loop in+out edges when called in a block walker
/* For each control-flow predecessor edge of @p block whose endpoints lie in
 * different loops, record the edge as an entry and/or exit edge on every
 * loop level the edge crosses. */
294 static void construct_loop_edges(ir_node *block, void *data)
296 int n_cfgpreds = get_Block_n_cfgpreds(block);
297 ir_loop *loop = get_irn_loop(block);
302 for (i = 0; i < n_cfgpreds; ++i) {
303 ir_node *cfgpred_block = get_Block_cfgpred_block(block, i);
304 ir_loop *cfgpred_loop = get_irn_loop(cfgpred_block);
/* edge stays within a single loop: nothing to record */
309 if (cfgpred_loop == loop)
312 /* critical edges are splitted, so we can't jump from 1 loop
313 * directly into another without going out of it */
314 assert(get_loop_depth(cfgpred_loop) != get_loop_depth(loop));
316 /* edge out of loop */
317 if (get_loop_depth(cfgpred_loop) > get_loop_depth(loop)) {
322 is_exit_edge = false;
327 /* this might be a jump out/in multiple loops */
/* walk from the inner loop outwards (loop variable l, declaration elided),
 * attaching a freshly allocated edge record to each crossed loop */
329 loop_info_t *l_info = get_loop_info(l);
331 edge = obstack_alloc(&obst, sizeof(edge[0]));
332 memset(edge, 0, sizeof(edge[0]));
/* exit edge: prepend to the crossed loop's exit list */
337 edge->next = l_info->exit_edges;
338 l_info->exit_edges = edge;
339 assert(get_loop_depth(loop) < get_loop_depth(l));
/* entry edge: prepend to the crossed loop's entry list */
341 edge->next = l_info->entry_edges;
342 l_info->entry_edges = edge;
343 assert(get_loop_depth(cfgpred_loop) < get_loop_depth(l));
345 l = get_loop_outer_loop(l);
/* Request (via the spill environment) a reload of the entry's value right
 * before its recorded reload point, then clear the reload point.
 * NOTE(review): an early-out between the signature and the DB() call is
 * elided in this excerpt -- presumably a tentative_mode check; confirm. */
353 static void place_reload(worklist_entry_t *entry)
358 DB((dbg, LEVEL_1, "reload of %+F before %+F\n", entry->value,
359 entry->reload_point));
360 assert(entry->reload_point != NULL);
361 be_add_reload(senv, entry->value, entry->reload_point, cls, 1);
362 entry->reload_point = NULL;
366 * makes sure the worklist contains not more than n_regs - room_needed entries
/* Evicts values from the FRONT of the live-values list (the least recently
 * used end) until at most n_regs - room_needed entries remain; each evicted
 * value gets a reload placed via place_reload(). */
368 static void make_room(worklist_t *worklist, size_t room_needed)
372 struct list_head *entry;
/* spills_needed is declared on an elided line above; it must be a signed
 * type for the <= 0 test below to work */
374 spills_needed = worklist->n_live_values + room_needed - n_regs;
375 if (spills_needed <= 0)
/* start at the front = oldest live value */
378 entry = worklist->live_values.next;
379 for(i = spills_needed; i > 0; --i) {
380 struct list_head *next = entry->next;
381 worklist_entry_t *wl_entry
382 = list_entry(entry, worklist_entry_t, head);
384 assert(worklist_contains(wl_entry->value));
385 mark_irn_not_visited(wl_entry->value);
386 place_reload(wl_entry);
391 worklist->n_live_values -= spills_needed;
395 * a value was used, so bring it to the back of the worklist (which might
396 * result in a spill of another value).
/* @p sched_point is the using node; it becomes the reload point in case the
 * value is later evicted by make_room(). */
398 static void val_used(worklist_t *worklist, ir_node *value, ir_node *sched_point)
400 /* already in the worklist? move around, otherwise add at back */
401 worklist_entry_t *entry = get_irn_link(value);
403 assert(arch_irn_consider_in_reg_alloc(arch_env, cls, value));
405 if (worklist_contains(value)) {
406 assert(entry != NULL);
/* unlink so the entry can be re-appended at the back (LRU-style move) */
408 list_del(&entry->head);
/* value not live yet: create a fresh entry and mark the value live */
411 entry = obstack_alloc(&obst, sizeof(entry[0]));
412 memset(entry, 0, sizeof(entry[0]));
414 entry->value = value;
415 set_irn_link(value, entry);
418 ++worklist->n_live_values;
419 mark_irn_visited(value);
422 entry->reload_point = sched_point;
423 list_add_tail(&entry->head, &worklist->live_values);
/* Remove @p value from @p worklist: unlink its entry, decrement the live
 * count and clear the value's visited mark. */
426 static void worklist_remove(worklist_t *worklist, ir_node *value)
428 worklist_entry_t *entry = get_irn_link(value);
429 assert(entry != NULL);
430 list_del(&entry->head);
431 --worklist->n_live_values;
433 assert(worklist_contains(value));
434 mark_irn_not_visited(value);
/* Track the maximum register pressure seen so far in the module-global
 * max_register_pressure. */
437 static void update_max_pressure(worklist_t *worklist)
439 if (worklist->n_live_values > max_register_pressure)
440 max_register_pressure = worklist->n_live_values;
/* Core backward pass over one block: walk the schedule in reverse while
 * maintaining @p worklist as the set of live values.  Spills dead phis,
 * removes node definitions from the worklist, makes room for the node's
 * results, and marks used operands live (possibly evicting other values). */
443 static void do_spilling(ir_node *block, worklist_t *worklist)
447 assert(worklist != NULL);
449 sched_foreach_reverse(block, node) {
453 DB((dbg, LEVEL_2, "\t%+F... ", node));
454 update_max_pressure(worklist);
458 /* TODO: if we have some free registers, then we could decide to
459 * not spill some phis (but not for phis where at least 1 input is
462 /* we have to spill all phis that are not live */
463 sched_foreach_reverse_from(node, node2) {
464 assert(is_Phi(node2));
/* phi is live or not handled by this register class: keep it */
466 if (worklist_contains(node2))
468 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node2))
472 be_spill_phi(senv, node2);
474 DB((dbg, LEVEL_2, "\n"));
478 /* remove values defined by this instruction from the workset. Values
479 * defined but not in the workset need free registers */
480 if (get_irn_mode(node) == mode_T) {
481 const ir_edge_t *edge;
/* mode_T node: its results are the Proj users */
483 foreach_out_edge(node, edge) {
484 ir_node *proj = get_edge_src_irn(edge);
485 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, proj))
487 if (worklist_contains(proj)) {
488 worklist_remove(worklist, proj);
493 } else if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
494 if (worklist_contains(node)) {
495 worklist_remove(worklist, node);
501 /* make sure we have enough free registers for the spills */
/* n_defs counts results not found in the worklist (updates elided above) */
502 make_room(worklist, n_defs);
504 /* put all values used by the instruction into the workset */
505 arity = get_irn_arity(node);
506 for(i = 0; i < arity; ++i) {
507 ir_node *use = get_irn_n(node, i);
509 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, use))
512 val_used(worklist, use, node);
515 /* we might have too many values in the worklist now and need to spill
517 make_room(worklist, 0);
520 print_worklist(worklist, LEVEL_2);
521 DB((dbg, LEVEL_2, "\n"));
/* pressure at the block entry after processing all nodes */
525 update_max_pressure(worklist);
528 DB((dbg, LEVEL_1, "worklist at begin of %+F:", block));
529 print_worklist(worklist, LEVEL_1);
530 DB((dbg, LEVEL_1, "\n"));
/* Compare two worklists element-wise by value, in list order.
 * NOTE(review): both iterators start at the list heads; the advance to the
 * first real entry happens on a line elided in this excerpt (the for-loop
 * has an empty init clause) -- confirm against the full source. */
534 static bool worklists_equal(const worklist_t *wl1, const worklist_t *wl2)
536 const struct list_head *i1 = &wl1->live_values;
537 const struct list_head *i2 = &wl2->live_values;
539 for ( ; i1 != &wl1->live_values && i2 != &wl2->live_values;
540 i1 = i1->next, i2 = i2->next) {
541 worklist_entry_t *entry1 = list_entry(i1, worklist_entry_t, head);
542 worklist_entry_t *entry2 = list_entry(i2, worklist_entry_t, head);
/* first mismatching value decides inequality */
544 if (entry1->value != entry2->value)
547 /* both lists at the end */
/* unequal lengths: one iterator stopped before its list head was reached */
548 if (i1 != &wl1->live_values || i2 != &wl2->live_values)
/* Pick, among the successor blocks whose start worklist exists and has not
 * yet been merged this round (visited < worklist_visited), the one with the
 * highest execution frequency, and merge its worklist into @p new_worklist
 * via fill_and_activate_worklist().  Returns whether a successor was merged
 * (return statements elided in this excerpt). */
554 static bool fill_start_worklist(worklist_t *new_worklist, ir_node *block)
556 double best_execfreq = -1;
557 worklist_t *best_worklist = NULL;
558 ir_node *best_succ_block = NULL;
560 const ir_edge_t *edge;
562 /* construct worklist */
563 foreach_block_succ(block, edge) {
564 ir_node *succ_block = get_edge_src_irn(edge);
565 double execfreq = get_block_execfreq(exec_freq, succ_block);
566 block_info_t *block_info;
567 worklist_t *succ_worklist;
/* a better (more frequent) candidate was already found */
569 if (execfreq < best_execfreq)
572 block_info = get_block_info(succ_block);
573 succ_worklist = block_info->start_worklist;
/* successor not processed yet, or already merged in this round */
575 if (succ_worklist == NULL || succ_worklist->visited >= worklist_visited)
578 best_execfreq = execfreq;
579 best_worklist = succ_worklist;
580 best_succ_block = succ_block;
581 best_pos = get_edge_src_pos(edge);
584 if (best_worklist == NULL)
/* mark the chosen worklist as consumed for this merge round */
586 best_worklist->visited = worklist_visited;
588 fill_and_activate_worklist(new_worklist, best_worklist, block,
589 best_succ_block, best_pos);
/* Build the worklist at the end of @p block by repeatedly merging successor
 * start worklists, best execution frequency first, until the worklist is
 * full or no successor is left.  (The worklist_visited increment, the loop
 * break and the return are elided in this excerpt.) */
593 static worklist_t *construct_start_worklist(ir_node *block)
595 worklist_t *worklist = new_worklist();
599 while(fill_start_worklist(worklist, block)) {
600 if (worklist->n_live_values >= n_regs)
/* Process one block (used as a walker callback): derive its end worklist
 * from the successors, run the backward spilling pass, and store the
 * resulting start worklist in the block info.  In fixpoint-check mode the
 * recomputed worklists must equal the previously stored ones. */
607 static void process_block(ir_node *block, void *env)
609 block_info_t *block_info;
610 worklist_t *worklist;
614 DB((dbg, LEVEL_1, "Processing %+F\n", block));
616 worklist = construct_start_worklist(block);
617 block_info = get_block_info(block);
620 DB((dbg, LEVEL_1, "worklist at end of %+F:", block));
621 print_worklist(worklist, LEVEL_1);
622 DB((dbg, LEVEL_1, "\n"));
624 if (should_have_reached_fixpoint) {
625 assert(worklists_equal(block_info->end_worklist, worklist));
/* keep a snapshot: do_spilling() mutates the worklist in place */
628 block_info->end_worklist = duplicate_worklist(worklist);
630 do_spilling(block, worklist);
631 deactivate_worklist(worklist);
634 if (should_have_reached_fixpoint) {
635 assert(worklists_equal(block_info->start_worklist, worklist));
/* after do_spilling() the worklist describes the block entry */
638 block_info->start_worklist = worklist;
640 /* we shouldn't have any live values left at the start block */
641 n_preds = get_Block_n_cfgpreds(block);
642 //assert(n_preds != 0 || worklist->n_live_values == 0);
/* Element of the intra-loop processing order: either a basic block or a
 * nested loop (the union and remaining struct fields are elided in this
 * excerpt). */
645 typedef struct block_or_loop_t block_or_loop_t;
646 struct block_or_loop_t {

/* flexible array collecting the current loop's blocks/sub-loops in the order
 * find_blocks() discovers them */
654 static block_or_loop_t *loop_blocks;
/* the loop currently being processed by process_loop() */
655 static ir_loop *current_loop;

657 static void find_blocks(ir_node *block);
/* Handle discovery of a nested @p loop reached through block @p entry:
 * record the loop once in loop_blocks and continue the search at the loop's
 * exit successors that belong to current_loop. */
659 static void find_in_loop(ir_loop *loop, ir_node *entry)
661 loop_info_t *loop_info = get_loop_info(loop);
662 block_or_loop_t block_or_loop;
665 /* simply mark 1 block in the loop to indicate that the loop was already
/* the marker block: an arbitrary entry block of the loop */
667 ir_node *some_block = loop_info->entry_edges->block;
668 if (Block_block_visited(some_block))
671 block_or_loop.v.loop = loop;
672 block_or_loop.is_loop = true;
673 ARR_APP1(block_or_loop_t, loop_blocks, block_or_loop);
677 /* we should be 1 of the entry blocks */
678 loop_edge_t *edge = loop_info->entry_edges;
680 for ( ; edge != NULL; edge = edge->next) {
681 if (edge->block == entry)
690 /* check all loop successors */
691 for (edge = loop_info->exit_edges; edge != NULL; edge = edge->next) {
692 ir_node *succ = edge->block;
693 ir_loop *succ_loop = get_irn_loop(succ);
/* only continue into blocks of the loop we are currently laying out;
 * exits into even outer loops are handled by that outer loop's pass */
695 if (succ_loop == current_loop) {
698 assert(get_loop_depth(succ_loop) < get_loop_depth(current_loop));
/* Depth-first discovery of all blocks of current_loop reachable from
 * @p block; appends each block to loop_blocks, descends into nested loops
 * via find_in_loop(), and ignores edges leaving into parent loops. */
703 static void find_blocks(ir_node *block)
705 const ir_edge_t *edge;
706 block_or_loop_t block_or_loop;
/* already collected */
708 if (Block_block_visited(block))
711 block_or_loop.v.block = block;
712 block_or_loop.is_loop = false;
714 ARR_APP1(block_or_loop_t, loop_blocks, block_or_loop);
715 mark_Block_block_visited(block);
717 foreach_block_succ(block, edge) {
718 ir_node *succ = get_edge_src_irn(edge);
720 /* is it part of the current loop? */
721 ir_loop *loop = get_irn_loop(succ);
722 if (loop != current_loop) {
/* deeper loop: treat it as a single unit in the ordering */
724 if (get_loop_depth(loop) > get_loop_depth(current_loop)) {
725 find_in_loop(loop, succ);
727 /* parent loop: we're not interested in the block */
736 * append an entry to a worklist. WARNING: The entry must not already be in the
/* Allocates a new entry for @p value with the given reload point and
 * unused-livethrough loop and appends it at the back of @p worklist.
 * Note: the inner `struct list_head *entry` below deliberately shadows the
 * outer entry inside the EXPENSIVE_CHECKS duplicate scan. */
739 static void worklist_append(worklist_t *worklist, ir_node *value,
740 ir_node *reload_point,
741 ir_loop *unused_livethrough_loop)
743 worklist_entry_t *entry = obstack_alloc(&obst, sizeof(entry[0]));
744 memset(entry, 0, sizeof(entry[0]));
746 #ifdef EXPENSIVE_CHECKS
/* debug-only: assert the value is not already present in the list */
748 struct list_head *entry;
749 list_for_each(entry, &worklist->live_values) {
750 worklist_entry_t *wl_entry
751 = list_entry(entry, worklist_entry_t, head);
752 assert(wl_entry->value != value);
757 entry->value = value;
758 entry->reload_point = reload_point;
759 entry->unused_livethrough_loop = unused_livethrough_loop;
760 list_add_tail(&entry->head, &worklist->live_values);
761 ++worklist->n_live_values;
762 assert(worklist->n_live_values <= n_regs);
/* Make an unused live-through @p value explicitly live around the loop:
 * append it to the end worklists of all blocks preceding loop exits and to
 * the start worklists of all loop entry blocks, then account for the extra
 * register it now occupies inside the loop. */
765 static void push_unused_livethrough(loop_info_t *loop_info, ir_node *value)
770 /* add the value to all loop exit and entry blocks */
771 for (edge = loop_info->exit_edges; edge != NULL; edge = edge->next) {
/* the block INSIDE the loop from which the exit edge originates */
773 = get_Block_cfgpred_block(edge->block, edge->pos);
774 const block_info_t *info = get_block_info(block);
775 worklist_t *worklist = info->end_worklist;
776 ir_node *reload_point = NULL;
/* each worklist is touched only once per push (generation counter) */
778 if (worklist->visited >= worklist_visited)
780 worklist->visited = worklist_visited;
782 /* TODO: we need a smarter mechanism here, that makes the reloader place
783 * reload nodes on all loop exits... */
785 worklist_append(worklist, value, reload_point, loop_info->loop);
787 edge = loop_info->entry_edges;
788 for ( ; edge != NULL; edge = edge->next) {
789 ir_node *entry_block = edge->block;
790 const block_info_t *info = get_block_info(entry_block);
791 worklist_t *worklist = info->start_worklist;
793 ir_node *reload_point;
795 if (worklist->visited >= worklist_visited)
797 worklist->visited = worklist_visited;
/* reload (if ever needed) goes to the end of the block before the loop */
799 pred_block = get_Block_cfgpred_block(entry_block, edge->pos);
800 reload_point = be_get_end_of_block_insertion_point(pred_block);
802 worklist_append(worklist, value, reload_point, loop_info->loop);
805 set_irn_link(value, NULL);
/* the value now permanently occupies a register inside the loop */
806 ++loop_info->max_register_pressure;
/* Find values that are live at a loop's start but not at its exits (unused
 * live-throughs) and keep them in registers across the loop, as long as the
 * loop's register pressure permits. */
809 static void push_unused_livethroughs(loop_info_t *loop_info)
813 /* we can only push unused livethroughs if register pressure inside the loop
/* no free registers inside the loop: nothing we can do */
815 if (loop_info->max_register_pressure >= n_regs)
818 /* find unused livethroughs: register pressure in the loop was low enough
819 * which means that we had no spills which implies that at every point in
821 for (edge = loop_info->exit_edges; edge != NULL; edge = edge->next) {
822 ir_node *block = edge->block;
823 const block_info_t *info = get_block_info(block);
824 worklist_t *start_worklist = info->start_worklist;
826 const block_info_t *exit_info;
827 worklist_t *end_worklist;
828 struct list_head *entry;
830 if (start_worklist == NULL)
/* the in-loop predecessor of the exit block */
833 exit_block = get_Block_cfgpred_block(edge->block, edge->pos);
834 exit_info = get_block_info(exit_block);
835 end_worklist = exit_info->end_worklist;
837 activate_worklist(end_worklist);
838 /* all values contained in the start_worklist, which are not available
839 * in the end_worklist, must be unused livethroughs */
841 list_for_each(entry, &start_worklist->live_values) {
842 worklist_entry_t *wl_entry
843 = list_entry(entry, worklist_entry_t, head);
844 ir_node *value = wl_entry->value;
/* pressure may have risen while pushing values: stop when full */
846 if (loop_info->max_register_pressure >= n_regs)
850 if (!worklist_contains(value)) {
851 /* add it to all loop exits */
852 DB((dbg, LEVEL_2, "Found unused livethrough: %+F (regpressure in loop %d)\n", value, loop_info->max_register_pressure));
853 push_unused_livethrough(loop_info, value);
854 /* the value should now be part of end_worklist, so mark it */
855 mark_irn_visited(value);
859 deactivate_worklist(end_worklist);
/* Dispatch one element of the loop_blocks ordering: nested loops contribute
 * their (precomputed) max pressure and may get unused live-throughs pushed;
 * plain blocks are processed with process_block(). */
863 static void process_block_or_loop(const block_or_loop_t *block_or_loop)
865 if (block_or_loop->is_loop) {
866 loop_info_t *loop_info = get_loop_info(block_or_loop->v.loop);
868 if (do_push_unused_livethroughs)
869 push_unused_livethroughs(loop_info);
/* propagate the nested loop's pressure into the current accounting */
871 if (loop_info->max_register_pressure > max_register_pressure)
872 max_register_pressure = loop_info->max_register_pressure;
876 process_block(block_or_loop->v.block, NULL);
/* Process one loop, innermost-first: recurse into sub-loops, build a
 * post-order over the loop's blocks/sub-loops, then run four passes over
 * that order in reverse: two tentative rounds to converge, one round that
 * asserts the fixpoint was reached, and one final round that actually
 * emits spills/reloads and pushes unused live-throughs. */
879 static void process_loop(ir_loop *loop)
881 int n_elements = get_loop_n_elements(loop);
883 loop_info_t *loop_info;
887 /* first handle all sub-loops */
888 for (i = 0; i < n_elements; ++i) {
889 loop_element element = get_loop_element(loop, i);
890 if (*element.kind != k_ir_loop)
893 process_loop(element.son);
896 /* create a postorder of the blocks */
897 loop_info = get_loop_info(loop);
898 edge = loop_info->entry_edges;
/* start discovery at one entry block; the outermost "loop" (the whole
 * graph) has no entry edges and starts at the graph's start block */
900 some_block = edge->block;
902 assert(loop == get_irg_loop(current_ir_graph));
903 some_block = get_irg_start_block(current_ir_graph);
906 loop_blocks = NEW_ARR_F(block_or_loop_t,0);
/* NOTE(review): assignment of current_loop is elided in this excerpt */
909 ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
910 inc_irg_block_visited(current_ir_graph);
911 find_blocks(some_block);
912 /* for endless loops the end-block might be unreachable */
913 if (loop == get_irg_loop(current_ir_graph)) {
914 find_blocks(get_irg_end_block(current_ir_graph));
916 ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
918 DB((dbg, LEVEL_3, "Block List for loop %p\n", loop));
919 len = ARR_LEN(loop_blocks);
920 for (i = 0; i < len; ++i) {
921 block_or_loop_t *block_or_loop = &loop_blocks[i];
922 if (block_or_loop->is_loop) {
923 DB((dbg, LEVEL_3, " L-%p", block_or_loop->v.loop));
925 DB((dbg, LEVEL_3, " B-%+F", block_or_loop->v.block));
928 DB((dbg, LEVEL_3, "\n"));
930 max_register_pressure = 0;
932 /* run1: tentative phase */
933 tentative_mode = true;
934 for (i = len-1; i >= 0; --i) {
935 process_block_or_loop(&loop_blocks[i]);
938 /* run2: tentative phase - should reach fixpoint */
939 tentative_mode = true;
940 for (i = len-1; i >= 0; --i) {
941 process_block_or_loop(&loop_blocks[i]);
945 /* run3: tentative phase - check fixpoint */
946 tentative_mode = true;
947 should_have_reached_fixpoint = true;
948 for (i = len-1; i >= 0; --i) {
949 process_block_or_loop(&loop_blocks[i]);
951 should_have_reached_fixpoint = false;
954 /* run4: add spills/reloads */
955 tentative_mode = false;
956 do_push_unused_livethroughs = true;
957 for (i = len-1; i >= 0; --i) {
958 process_block_or_loop(&loop_blocks[i]);
960 do_push_unused_livethroughs = false;
/* remember this loop's pressure for the enclosing loop's accounting */
962 loop_info->max_register_pressure = max_register_pressure;
963 DB((dbg, LEVEL_2, "Regpressure in loop %p: %u\n", loop,
964 (unsigned) max_register_pressure));
966 DEL_ARR_F(loop_blocks);
/* Block walker run after the main passes: for every incoming control-flow
 * edge, compare the block's start worklist with the predecessor's end
 * worklist and insert reloads on the edge for values that are expected live
 * here but were not live at the predecessor's end. */
969 static void fix_block_borders(ir_node *block, void *data)
971 block_info_t *block_info = get_block_info(block);
972 worklist_t *start_worklist = block_info->start_worklist;
973 int n_cfgpreds = get_Block_n_cfgpreds(block);
974 ir_loop *loop = get_irn_loop(block);
978 assert(start_worklist != NULL);
980 for (i = 0; i < n_cfgpreds; ++i) {
981 ir_node *pred_block = get_Block_cfgpred_block(block, i);
982 block_info_t *pred_block_info = get_block_info(pred_block);
983 worklist_t *end_worklist = pred_block_info->end_worklist;
984 ir_loop *pred_loop = get_irn_loop(pred_block);
985 bool is_loop_entry = false;
986 struct list_head *entry;
988 assert(end_worklist != NULL);
/* an edge from a shallower loop into this one enters the loop */
990 if (get_loop_depth(pred_loop) < get_loop_depth(loop)) {
991 is_loop_entry = true;
994 /* reload missing values */
995 activate_worklist(end_worklist);
997 list_for_each(entry, &start_worklist->live_values) {
998 worklist_entry_t *wl_entry
999 = list_entry(entry, worklist_entry_t, head);
1000 ir_node *value = wl_entry->value;
/* a phi of this block stands for its operand on this incoming edge */
1002 if (is_Phi(value) && get_nodes_block(value) == block) {
1003 value = get_irn_n(value, i);
1005 /* we might have unknowns as argument for the phi */
1006 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, value))
/* value already live at the predecessor's end: no reload needed */
1010 if (worklist_contains(value))
/* unused live-throughs only need reloads on loop-entry edges */
1012 if (wl_entry->unused_livethrough_loop != NULL && !is_loop_entry)
1015 be_add_reload_on_edge(senv, value, block, i, cls, 1);
1018 deactivate_worklist(end_worklist);
/* Spiller entry point: run the belady3 algorithm for register class @p ncls
 * on the given backend graph.  Sets up global state, constructs loop edge
 * info, processes the outermost loop (and thus all blocks), fixes the block
 * borders and materializes the collected spills/reloads. */
1022 static void be_spill_belady3(be_irg_t *birg, const arch_register_class_t *ncls)
1024 ir_graph *irg = be_get_birg_irg(birg);
/* usable registers = class size minus ignore registers
 * (assignment of cls from ncls is elided in this excerpt) */
1027 n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
1029 /* shortcut for register classes with ignore regs only */
1033 worklist_visited = 0;
1034 arch_env = be_get_birg_arch_env(birg);
1035 exec_freq = be_get_birg_exec_freq(birg);
1037 be_clear_links(irg);
1038 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK
1039 | IR_RESOURCE_LOOP_LINK);
1040 inc_irg_visited(irg);
1042 obstack_init(&obst);
1043 senv = be_new_spill_env(birg);
/* loop tree must be up to date before edges are constructed */
1045 assure_cf_loop(irg);
1046 clear_loop_info(get_irg_loop(irg));
1047 irg_block_walk_graph(irg, construct_loop_edges, NULL, NULL);
1049 process_loop(get_irg_loop(current_ir_graph));
1051 irg_block_walk_graph(irg, fix_block_borders, NULL, NULL);
1053 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK
1054 | IR_RESOURCE_LOOP_LINK);
/* actually create the spill/reload nodes collected in senv */
1056 be_insert_spills_reloads(senv);
1058 obstack_free(&obst, NULL);
1061 be_delete_spill_env(senv);
/* Module constructor: register this spiller under the name "belady3" and set
 * up its debug module. */
1064 void be_init_spillbelady3(void)
1066 static be_spiller_t belady3_spiller = {
1070 be_register_spiller("belady3", &belady3_spiller);
1071 FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady3");
1074 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady3);