From: Matthias Braun
Date: Mon, 21 Jan 2008 21:39:57 +0000 (+0000)
Subject: first code for yet another belady spiller (doesn't work with loops yet), removed...
X-Git-Url: http://nsz.repo.hu/git/?a=commitdiff_plain;h=6a172145427beca7a9e54d4edd0c23394525d9a6;p=libfirm

first code for yet another belady spiller (doesn't work with loops yet), removed obsolete linearscan code

[r17485]
---
diff --git a/ir/be/bemodule.c b/ir/be/bemodule.c
index 3a87eb6bd..6c3d82923 100644
--- a/ir/be/bemodule.c
+++ b/ir/be/bemodule.c
@@ -60,7 +60,7 @@ void be_init_peephole(void);
 void be_init_ra(void);
 void be_init_spillbelady(void);
 void be_init_spillbelady2(void);
-void be_init_spilllinearscan(void);
+void be_init_spillbelady3(void);
 void be_init_ssaconstr(void);
 void be_init_ifg(void);
 void be_init_irgmod(void);
@@ -104,7 +104,7 @@ void be_init_modules(void)
 	be_init_ra();
 	be_init_spillbelady();
 	be_init_spillbelady2();
-	be_init_spilllinearscan();
+	be_init_spillbelady3();
 	be_init_daemelspill();
 	be_init_ssaconstr();
 	be_init_state();
diff --git a/ir/be/bespillbelady3.c b/ir/be/bespillbelady3.c
new file mode 100644
index 000000000..29a196a59
--- /dev/null
+++ b/ir/be/bespillbelady3.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief       MIN-algorithm with some twists (aka belady spiller v3)
+ * @author      Matthias Braun
+ * @date        15.01.2008
+ * @version     $Id$
+ *
+ * TODO:
+ *  - handle phis correctly, decide whether we should spill them
+ *  - merge multiple start worksets of blocks
+ *  - how to and when to do the tentative phase...
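+ *
+ * Sketch of the approach as implemented below: every block keeps a worklist
+ * of the currently live values, ordered by the timestep of the use seen last
+ * while walking the block backwards. A use moves its value to the back of
+ * the worklist, so the front always holds the value whose next use is
+ * furthest away (the MIN/belady criterion). Whenever more than n_regs values
+ * would be live, values are evicted from the front and a reload is recorded
+ * at their reload_point. The start worklist of a block is copied from its
+ * most frequently executed successor.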
+ */ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "debug.h" +#include "list.h" +#include "pdeq.h" + +#include "irnode_t.h" +#include "irprintf.h" +#include "iredges_t.h" +#include "execfreq.h" + +#include "bemodule.h" +#include "bespill.h" +#include "beutil.h" +#include "bespilloptions.h" +#include "besched_t.h" +#include "be_t.h" + +DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) + +typedef struct worklist_entry_t worklist_entry_t; +struct worklist_entry_t { + struct list_head head; + ir_node *value; + unsigned timestep; + ir_node *reload_point; +}; + +typedef struct worklist_t worklist_t; +struct worklist_t { + struct list_head live_values; + size_t n_live_values; + unsigned current_timestep; +}; + +static const arch_env_t *arch_env; +static const arch_register_class_t *cls; +static struct obstack obst; +static spill_env_t *senv; +static size_t n_regs; +static int tentative_mode; +static ir_exec_freq *exec_freq; + +static void init_worklist(worklist_t *worklist, unsigned timestep) +{ + INIT_LIST_HEAD(&worklist->live_values); + worklist->n_live_values = 0; + worklist->current_timestep = timestep; +} + +static void mark_irn_not_visited(ir_node *node) +{ + set_irn_visited(node, get_irg_visited(current_ir_graph) - 1); +} + +static void deactivate_worklist(const worklist_t *worklist) +{ + struct list_head *entry; + + list_for_each(entry, &worklist->live_values) { + worklist_entry_t *wl_entry + = list_entry(entry, worklist_entry_t, head); + assert(irn_visited(wl_entry->value)); + mark_irn_not_visited(wl_entry->value); + set_irn_link(wl_entry->value, NULL); + } +} + +static void activate_worklist(const worklist_t *worklist) +{ + struct list_head *entry; + + list_for_each(entry, &worklist->live_values) { + worklist_entry_t *wl_entry + = list_entry(entry, worklist_entry_t, head); + ir_node *value = wl_entry->value; + + assert(irn_not_visited(value)); + mark_irn_visited(value); + set_irn_link(value, wl_entry); + } +} + +static worklist_t *duplicate_worklist(const worklist_t *worklist, + ir_node *block, + ir_node *succ_block, int succ_pos) +{ + ir_node *reload_point = NULL; + struct list_head *entry; + + if(succ_block != NULL && get_Block_n_cfgpreds(succ_block) > 1) { + reload_point = be_get_end_of_block_insertion_point(block); + } + + worklist_t *new_worklist = obstack_alloc(&obst, sizeof(new_worklist[0])); + INIT_LIST_HEAD(&new_worklist->live_values); + + new_worklist->current_timestep = worklist->current_timestep; + new_worklist->n_live_values = worklist->n_live_values; + + list_for_each(entry, &worklist->live_values) { + worklist_entry_t *wl_entry + = list_entry(entry, worklist_entry_t, head); + worklist_entry_t *new_entry + = obstack_alloc(&obst, sizeof(new_entry[0])); + ir_node *value = wl_entry->value; + + if(is_Phi(value) && get_nodes_block(value) == succ_block) { + value = get_Phi_pred(value, succ_pos); + } + + new_entry->value = value; + new_entry->timestep = wl_entry->timestep; + if(reload_point != NULL) { + new_entry->reload_point = reload_point; + } else { + new_entry->reload_point = wl_entry->reload_point; + } + + list_add_tail(&new_entry->head, &new_worklist->live_values); + } + + return new_worklist; +} + +#ifdef DEBUG_libfirm +static void print_worklist(const worklist_t *worklist, int level) +{ + struct list_head *entry; + + DB((dbg, level, "%d values (TS %u): ", worklist->n_live_values, + worklist->current_timestep)); + list_for_each(entry, &worklist->live_values) { + worklist_entry_t *wl_entry + = list_entry(entry, worklist_entry_t, head); + + //DB((dbg, 
+		//        level, "%+F(%+F) ", wl_entry->value,
+		//        wl_entry->reload_point));
+		DB((dbg, level, "%+F ", wl_entry->value));
+	}
+}
+#endif
+
+static void place_reload(worklist_entry_t *entry)
+{
+	if(tentative_mode)
+		return;
+	DB((dbg, LEVEL_1, "reload of %+F before %+F", entry->value,
+	    entry->reload_point));
+	be_add_reload(senv, entry->value, entry->reload_point, cls, 1);
+}
+
+static void spill_non_live_nodes(const worklist_t *worklist)
+{
+	struct list_head *entry;
+
+	list_for_each(entry, &worklist->live_values) {
+		worklist_entry_t *wl_entry
+			= list_entry(entry, worklist_entry_t, head);
+		ir_node *value = wl_entry->value;
+
+		if(irn_visited(value))
+			continue;
+
+		place_reload(wl_entry);
+	}
+}
+
+/**
+ * makes sure the worklist contains not more than n_regs - room_needed entries
+ */
+static void make_room(worklist_t *worklist, size_t room_needed)
+{
+	int spills_needed = worklist->n_live_values + room_needed - n_regs;
+	if(spills_needed > 0) {
+		int i = spills_needed;
+		struct list_head *entry = worklist->live_values.next;
+		for(i = spills_needed; i > 0; --i) {
+			struct list_head *next = entry->next;
+			worklist_entry_t *wl_entry
+				= list_entry(entry, worklist_entry_t, head);
+			assert(irn_visited(wl_entry->value));
+			mark_irn_not_visited(wl_entry->value);
+			place_reload(wl_entry);
+			list_del(entry);
+
+			entry = next;
+		}
+		worklist->n_live_values -= spills_needed;
+	}
+}
+
+/**
+ * a value was used, so bring it to the back of the worklist (which might
+ * result in a spill of another value).
+ */
+static void val_used(worklist_t *worklist, ir_node *value, ir_node *sched_point)
+{
+	/* is the node in the worklist already? */
+	worklist_entry_t *entry = get_irn_link(value);
+	if(irn_visited(value)) {
+		assert(entry != NULL);
+
+		assert(irn_visited(value));
+		list_del(&entry->head);
+	} else {
+		if(entry == NULL) {
+			entry = obstack_alloc(&obst, sizeof(entry[0]));
+			entry->value = value;
+			set_irn_link(value, entry);
+		}
+
+		++worklist->n_live_values;
+		mark_irn_visited(value);
+	}
+
+	entry->timestep = worklist->current_timestep;
+	entry->reload_point = sched_point;
+	list_add_tail(&entry->head, &worklist->live_values);
+}
+
+static void worklist_remove(worklist_t *worklist, ir_node *value)
+{
+	worklist_entry_t *entry = get_irn_link(value);
+	assert(entry != NULL);
+	list_del(&entry->head);
+	--worklist->n_live_values;
+
+	assert(irn_visited(value));
+	mark_irn_not_visited(value);
+}
+
+static void do_spilling(ir_node *block, worklist_t *worklist)
+{
+	ir_node *node;
+
+	assert(worklist != NULL);
+
+#ifdef DEBUG_libfirm
+	DB((dbg, LEVEL_1, "worklist at end of %+F:", block));
+	print_worklist(worklist, LEVEL_1);
+	DB((dbg, LEVEL_1, "\n"));
+#endif
+
+	sched_foreach_reverse(block, node) {
+		int i, arity;
+		size_t n_defs = 0;
+
+		DB((dbg, LEVEL_2, "\t%+F... ", node));
+
+		if(is_Phi(node)) {
+			ir_node *node2;
+			/* TODO: if we have some free registers, then we could decide to
+			 * not spill some phis (but not for phis where at least 1 input is
+			 * the phi itself) */
+
+			/* we have to spill all phis that are not live */
+			sched_foreach_reverse_from(node, node2) {
+				assert(is_Phi(node2));
+
+				if(irn_visited(node2))
+					continue;
+				if(!arch_irn_consider_in_reg_alloc(arch_env, cls, node2))
+					continue;
+
+				be_spill_phi(senv, node2);
+			}
+			break;
+		}
+
+		/* remove values defined by this instruction from the workset. Values
+		 * defined but not in the workset need free registers */
+		if(get_irn_mode(node) == mode_T) {
+			const ir_edge_t *edge;
+
+			foreach_out_edge(node, edge) {
+				ir_node *proj = get_edge_src_irn(edge);
+				if(!arch_irn_consider_in_reg_alloc(arch_env, cls, proj))
+					continue;
+				if(irn_visited(proj)) {
+					worklist_remove(worklist, proj);
+				} else {
+					++n_defs;
+				}
+			}
+		} else if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+			if(irn_visited(node)) {
+				worklist_remove(worklist, node);
+			} else {
+				n_defs = 1;
+			}
+		}
+
+		/* make sure we have enough free registers for the spills */
+		make_room(worklist, n_defs);
+
+		/* put all values used by the instruction into the workset */
+		arity = get_irn_arity(node);
+		for(i = 0; i < arity; ++i) {
+			ir_node *use = get_irn_n(node, i);
+
+			if(!arch_irn_consider_in_reg_alloc(arch_env, cls, use))
+				continue;
+
+			val_used(worklist, use, node);
+		}
+
+		/* we might have too many values in the worklist now and need to spill
+		 * some */
+		make_room(worklist, 0);
+
+		++worklist->current_timestep;
+
+#ifdef DEBUG_libfirm
+		print_worklist(worklist, LEVEL_2);
+		DB((dbg, LEVEL_2, "\n"));
+#endif
+	}
+
+#ifdef DEBUG_libfirm
+	DB((dbg, LEVEL_1, "worklist at begin of %+F:", block));
+	print_worklist(worklist, LEVEL_1);
+	DB((dbg, LEVEL_1, "\n"));
+#endif
+}
+
+static void process_block(ir_node *block, void *env)
+{
+	int n_preds;
+	const ir_edge_t *edge;
+	(void) env;
+
+	DB((dbg, LEVEL_1, "Processing %+F\n", block));
+
+	/* construct worklist */
+	worklist_t *worklist = NULL;
+	double best_execfreq = -1;
+	ir_node *best_succ_block = NULL;
+	int best_pos = -1;
+	foreach_block_succ(block, edge) {
+		ir_node *succ_block = get_edge_src_irn(edge);
+		double execfreq = get_block_execfreq(exec_freq, succ_block);
+
+		if(execfreq > best_execfreq) {
+			worklist_t *succ_worklist = get_irn_link(succ_block);
+			if(succ_worklist != NULL) {
+				best_execfreq = execfreq;
+				worklist = succ_worklist;
+				best_succ_block = succ_block;
+				best_pos = get_edge_src_pos(edge);
+			}
+		}
+	}
+	if(worklist == NULL) {
+		/* only the end-block has 0 successors */
+		assert(block == get_irg_end_block(get_irn_irg(block)));
+
+		worklist = obstack_alloc(&obst, sizeof(worklist[0]));
+		init_worklist(worklist, 0);
+	} else {
+		worklist = duplicate_worklist(worklist, block, best_succ_block,
+		                              best_pos);
+		activate_worklist(worklist);
+
+		/* now we could have live values in the succ worklists that are not
+		 * live anymore in the worklist we picked. We need reloads for them.
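+		 * (For example, a value that is only needed along a less frequently
+		 * executed successor edge is not in the worklist we copied, so a
+		 * reload has to be placed for it.)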
+ */ + if(!tentative_mode) { + foreach_block_succ(block, edge) { + ir_node *succ_block = get_edge_src_irn(edge); + worklist_t *succ_worklist = get_irn_link(succ_block); + + spill_non_live_nodes(succ_worklist); + } + } + } + + do_spilling(block, worklist); + deactivate_worklist(worklist); + + set_irn_link(block, worklist); + + /* we shouldn't have any live values left at the start block */ + n_preds = get_Block_n_cfgpreds(block); + assert(n_preds != 0 || worklist->n_live_values == 0); +} + +static void be_spill_belady3(be_irg_t *birg, const arch_register_class_t *ncls) +{ + ir_graph *irg = be_get_birg_irg(birg); + + cls = ncls; + n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL); + + if(n_regs == 0) + return; + + arch_env = be_get_birg_arch_env(birg); + exec_freq = be_get_birg_exec_freq(birg); + tentative_mode = 0; + + be_clear_links(irg); + set_using_irn_link(irg); + set_using_irn_visited(irg); + inc_irg_visited(irg); + + obstack_init(&obst); + senv = be_new_spill_env(birg); + + /* do a post-order walk over the CFG to make sure we have a maximum number + * of preds processed before entering a block */ + irg_block_edges_walk(get_irg_start_block(irg), NULL, process_block, NULL); + + clear_using_irn_link(irg); + clear_using_irn_visited(irg); + + be_insert_spills_reloads(senv); + + obstack_free(&obst, NULL); + + /* clean up */ + be_delete_spill_env(senv); +} + +void be_init_spillbelady3(void) +{ + static be_spiller_t belady3_spiller = { + be_spill_belady3 + }; + + be_register_spiller("belady3", &belady3_spiller); + FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady3"); +} + +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady3); diff --git a/ir/be/bespilllinearscan.c b/ir/be/bespilllinearscan.c deleted file mode 100644 index d8025e6ff..000000000 --- a/ir/be/bespilllinearscan.c +++ /dev/null @@ -1,433 +0,0 @@ -/* - * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. - * - * This file is part of libFirm. - * - * This file may be distributed and/or modified under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation and appearing in the file LICENSE.GPL included in the - * packaging of this file. - * - * Licensees holding valid libFirm Professional Edition licenses may use - * this file in accordance with the libFirm Commercial License. - * Agreement provided with the Software. - * - * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE - * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE. 
- */ - -/** - * @file - * @brief Linear Scan Spill algorithm - * @author Matthias Braun - * @date 20.09.2005 - * @version $Id$ - */ -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "irnode_t.h" -#include "irprintf.h" -#include "iredges_t.h" - -#include "bemodule.h" -#include "besched_t.h" -#include "bespilloptions.h" -#include "bespill.h" -#include "benode_t.h" -#include "be_t.h" -#include "belive_t.h" - -/* a place in the program */ -typedef struct place_t { - unsigned block_nr; - int timestep; -} place_t; - -typedef struct interval_t { - ir_node *value; - double spill_costs; - place_t begin; - place_t end; -} interval_t; - -static interval_t **intervals; - -DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) - -static struct obstack obst; -static const arch_env_t *arch_env; -static const arch_register_class_t *cls; -static spill_env_t *spill_env; -static unsigned n_regs; -static const be_lv_t *lv; - -static double get_spill_costs(ir_node *node) -{ - const ir_edge_t *edge; - ir_node *spill_place = skip_Proj(node); - double costs = be_get_spill_costs(spill_env, node, - spill_place); - foreach_out_edge(node, edge) { - ir_node *use = get_edge_src_irn(edge); - - /* keeps should be directly below the node */ - if(be_is_Keep(use)) { - continue; - } - - if(is_Phi(use)) { - int in = get_edge_src_pos(edge); - ir_node *block = get_nodes_block(use); - - costs += be_get_reload_costs_on_edge(spill_env, node, block, in); - } else { - costs += be_get_reload_costs(spill_env, node, use); - } - } - - return costs; -} - -/** - * spills a node by placing a reload before each usage - */ -static void spill_node(ir_node *node) -{ - const ir_edge_t *edge; - - DBG((dbg, LEVEL_3, "\tspilling %+F\n", node)); - - foreach_out_edge(node, edge) { - ir_node *use = get_edge_src_irn(edge); - if(is_Anchor(use)) - continue; - if(be_is_Keep(use)) - continue; - - if(is_Phi(use)) { - int in = get_edge_src_pos(edge); - ir_node *block = get_nodes_block(use); - - be_add_reload_on_edge(spill_env, node, block, in, cls, 1); - } else { - be_add_reload(spill_env, node, use, cls, 1); - } - } -} - - -static int place_less(const place_t *place1, const place_t *place2) -{ - if(place1->block_nr < place2->block_nr) - return 1; - if(place1->block_nr > place2->block_nr) - return 0; - - return place1->timestep < place2->timestep; -} - -static int place_equal(const place_t *place1, const place_t *place2) -{ - return place1->block_nr == place2->block_nr - && place1->timestep == place2->timestep; -} - -static void extend_interval(ir_node *value, const place_t *place) -{ - interval_t *interval; - - if(!irn_visited(value)) { - interval = obstack_alloc(&obst, sizeof(interval[0])); - interval->begin = *place; - interval->end = *place; - interval->value = value; - interval->spill_costs = 0; - - ARR_APP1(interval_t*, intervals, interval); - - set_irn_link(value, interval); - mark_irn_visited(value); - } else { - interval = get_irn_link(value); - - if(place_less(place, &interval->begin)) { - interval->begin = *place; - } - if(place_less(&interval->end, place)) { - interval->end = *place; - } - } -} - -/** - * link live intervals to values, put all intervals into a list, - * sort the list. We process the blocks in a toplogical order (by ignoring - * backedges). 
- */ -static void calculate_liveness_intervals(ir_node *block, unsigned block_nr) -{ - ir_node *node; - int i, arity; - place_t place; - - set_irn_link(block, INT_TO_PTR(block_nr)); - - place.block_nr = block_nr; - place.timestep = 0; - - be_lv_foreach(lv, block, be_lv_state_in, i) { - ir_node *node = be_lv_get_irn(lv, block, i); - if(!arch_irn_consider_in_reg_alloc(arch_env, cls, node)) - continue; - - extend_interval(node, &place); - } - - sched_foreach_reverse(block, node) { - - if(is_Phi(node)) - break; - - place.timestep = sched_get_time_step(node); - - if(get_irn_mode(node) == mode_T) { - const ir_edge_t *edge; - - foreach_out_edge(node, edge) { - ir_node *proj = get_edge_src_irn(edge); - if(arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) { - extend_interval(proj, &place); - } - } - } else if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) { - extend_interval(node, &place); - } - - arity = get_irn_arity(node); - for(i = 0; i < arity; ++i) { - ir_node *op = get_irn_n(node, i); - - if(arch_irn_consider_in_reg_alloc(arch_env, cls, op)) { - extend_interval(op, &place); - } - } - } - - place.timestep++; - be_lv_foreach(lv, block, be_lv_state_end, i) { - ir_node *node = be_lv_get_irn(lv, block, i); - if(!arch_irn_consider_in_reg_alloc(arch_env, cls, node)) - continue; - - extend_interval(node, &place); - } - - ir_printf("processing block %+F(%u)\n", block, block_nr); -} - -static unsigned next_block_nr; - -/** - * process blocks in a toplogical order (we ignore backedges and create a - * topological order from the remaining edges) - */ -static void process_block(ir_node *block) -{ - unsigned block_nr; - int n_preds; - int i; - - if(irn_visited(block)) - return; - mark_irn_visited(block); - - n_preds = get_Block_n_cfgpreds(block); - for(i = 0; i < n_preds; ++i) { - ir_node *pred_block; - - if(is_backedge(block, i)) - continue; - - pred_block = get_Block_cfgpred_block(block, i); - process_block(pred_block); - } - - block_nr = next_block_nr; - next_block_nr++; - calculate_liveness_intervals(block, block_nr); -} - -static void print_interval(const interval_t *interval) -{ - ir_fprintf(stderr, "%+F [%u,%d] -> [%u,%d]\n", interval->value, - interval->begin.block_nr, interval->begin.timestep, - interval->end.block_nr, interval->end.timestep); -} - -static int compare_spill_costs(const void *d1, const void *d2) -{ - const interval_t *interval1 = *((const interval_t**)d1); - const interval_t *interval2 = *((const interval_t**)d2); - if (interval2->spill_costs < interval1->spill_costs) - return -1; - return 1; -} - -static void do_spilling(void) -{ - interval_t **live_intervals; - unsigned n_live_intervals; - interval_t **intervals_to_allocate; - unsigned n_intervals_to_allocate; - int i, len; - unsigned a; - - live_intervals = alloca(n_regs * sizeof(live_intervals[0])); - n_live_intervals = 0; - intervals_to_allocate = alloca(n_regs * sizeof(intervals_to_allocate[0])); - - len = ARR_LEN(intervals); - for (i = 0; i < len; ) { - const place_t place = intervals[i]->begin; - int spills_needed; - - n_intervals_to_allocate = 0; - do { - interval_t *interval = intervals[i]; - - print_interval(interval); - - intervals_to_allocate[n_intervals_to_allocate] = intervals[i]; - ++n_intervals_to_allocate; - ++i; - } while (i < len && place_equal(&intervals[i]->begin, &place)); - - spills_needed = n_live_intervals + n_intervals_to_allocate - n_regs; - - /* first expire intervals whose endpoint is above our current place */ - if (spills_needed > 0) { - unsigned a; - - for (a = 0; a < n_live_intervals; ) { 
- interval_t *live_interval = live_intervals[a]; - if(place_less(&place, &live_interval->end)) { - ++a; - } else { - fprintf(stderr, "expired: "); - print_interval(live_interval); - live_intervals[a] = live_intervals[n_live_intervals-1]; - --n_live_intervals; - } - } - - spills_needed = n_live_intervals + n_intervals_to_allocate - n_regs; - } - /* spill intervals */ - if (spills_needed > 0) { - ir_fprintf(stderr, "need to spill %d values at %u,%d\n", - spills_needed, place.block_nr, place.timestep); - - for(a = 0; a < n_live_intervals; ++a) { - interval_t *live_interval = live_intervals[a]; - if(live_interval->spill_costs == 0) { - ir_node *value = live_interval->value; - live_interval->spill_costs = get_spill_costs(value); - ir_fprintf(stderr, "spillcosts for %+F: %f\n", value, - live_interval->spill_costs); - } - } - - qsort(live_intervals, n_live_intervals, sizeof(live_intervals[0]), - compare_spill_costs); - - a = n_live_intervals - spills_needed; - for ( ; a < n_live_intervals; ++a) { - const interval_t *live_interval = live_intervals[a]; - ir_node *value = live_interval->value; - - ir_fprintf(stderr, "spilling %+F (%f)\n", value, live_interval->spill_costs); - spill_node(value); - } - n_live_intervals -= spills_needed; - } - - assert(n_regs - n_live_intervals >= n_intervals_to_allocate); - - for (a = 0; a < n_intervals_to_allocate; ++a) { - live_intervals[n_live_intervals] = intervals_to_allocate[a]; - ++n_live_intervals; - } - assert(n_live_intervals <= n_regs); - } -} - -static int cmp_interval(const void *d1, const void *d2) -{ - const interval_t *interval1 = *((const interval_t**) d1); - const interval_t *interval2 = *((const interval_t**) d2); - - return !place_less(&interval1->begin, &interval2->begin); -} - -static void be_spill_linearscan(be_irg_t *birg, - const arch_register_class_t *new_cls) -{ - size_t n_intervals; - ir_node *end_block; - ir_graph *irg = be_get_birg_irg(birg); - - be_liveness_assure_sets(be_assure_liveness(birg)); - - arch_env = be_get_birg_arch_env(birg); - cls = new_cls; - intervals = NEW_ARR_F(interval_t*, 0); - spill_env = be_new_spill_env(birg); - lv = be_get_birg_liveness(birg); - n_regs = cls->n_regs - be_put_ignore_regs(birg, new_cls, NULL); - - obstack_init(&obst); - - set_using_irn_visited(irg); - set_using_irn_link(irg); - inc_irg_visited(irg); - - next_block_nr = 0; - - /* use toposort for liveness analysis */ - end_block = get_irg_end_block(irg); - process_block(end_block); - - assert(irn_visited(get_irg_start_block(irg))); - - n_intervals = ARR_LEN(intervals); - qsort(intervals, n_intervals, sizeof(intervals[0]), cmp_interval); - - do_spilling(); - - clear_using_irn_visited(irg); - clear_using_irn_link(irg); - - DEL_ARR_F(intervals); - obstack_free(&obst, NULL); - - /* Insert spill/reload nodes into the graph and fix usages */ - be_insert_spills_reloads(spill_env); - - be_delete_spill_env(spill_env); - spill_env = NULL; -} - -void be_init_spilllinearscan(void) -{ - static be_spiller_t spiller = { - be_spill_linearscan - }; - - be_register_spiller("linearscan", &spiller); - FIRM_DBG_REGISTER(dbg, "firm.be.spill.linearscan"); -} - -BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spilllinearscan);
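
For illustration only, not part of the patch above: a minimal, self-contained sketch of the eviction policy the new belady3 spiller applies while walking a block backwards. A plain array stands in for the intrusive worklist, small integers stand in for ir_node values, and "spilling" just prints the evicted value; every name in this sketch is invented for the example and is not libFirm API.

/* Toy model of the belady3 worklist policy; names invented for the example. */
#include <stdio.h>

#define N_REGS 3

static int worklist[N_REGS]; /* live values, ordered by last (backward) use */
static int n_live;           /* current number of live values */

/* evict the front entry: the value whose next use is furthest away */
static void evict_front(void)
{
	int i;
	printf("spill value %d\n", worklist[0]);
	for (i = 1; i < n_live; ++i)
		worklist[i - 1] = worklist[i];
	--n_live;
}

/* a value was used: move it to the back, making room first if necessary */
static void value_used(int value)
{
	int i;
	for (i = 0; i < n_live; ++i) {
		if (worklist[i] == value)
			break;
	}
	if (i < n_live) {
		/* already live: unlink it from its old position */
		for (; i + 1 < n_live; ++i)
			worklist[i] = worklist[i + 1];
		--n_live;
	} else if (n_live == N_REGS) {
		evict_front();
	}
	worklist[n_live++] = value;
}

int main(void)
{
	/* uses of values 1..5 as seen while walking one block backwards */
	int uses[] = { 1, 2, 3, 1, 4, 2, 5 };
	int i;
	for (i = 0; i < (int)(sizeof(uses) / sizeof(uses[0])); ++i)
		value_used(uses[i]);
	return 0;
}

Feeding the uses in reverse program order mirrors the backwards walk in do_spilling above: the front of the list is then always the value whose next use lies furthest in the future, which is what the MIN/belady heuristic evicts first.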