From: Matthias Braun Date: Sat, 30 Sep 2006 10:16:53 +0000 (+0000) Subject: - Added 2 new blockschedulers, a greedy algorithm and an "optimal" ILP that X-Git-Url: http://nsz.repo.hu/git/?a=commitdiff_plain;h=97927c8e372f337e8342b698072facf4ffa453ad;p=libfirm - Added 2 new blockschedulers, a greedy algorithm and an "optimal" ILP that both try to transform as many jumps as possible to fallthroughs (weighted by execution frequency). The results are most of the time better than the extbb blockscheduler. Though it seems like x86 branch prediction sees conditional backward jumps as always taken, so we have to make sure that for 50/50 jumps we don't create backward jumps. (nothing implemented for this yet) --- diff --git a/ir/be/beblocksched.c b/ir/be/beblocksched.c new file mode 100644 index 000000000..5e009e2cd --- /dev/null +++ b/ir/be/beblocksched.c @@ -0,0 +1,706 @@ +/* + * Author: Matthias Braun, Christoph Mallon + * Date: 27.09.2006 + * Copyright: (c) Universitaet Karlsruhe + * License: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE. 
+ * + */ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "beblocksched.h" + +#include + +#include "array.h" +#include "pdeq.h" + +#include "iredges.h" +#include "irgwalk.h" +#include "irgraph_t.h" +#include "irloop.h" +#include "irprintf.h" +#include "irdump_t.h" +#include "beirgmod.h" + +#ifdef WITH_LIBCORE +#include +#include +#include +#endif + +#ifdef WITH_ILP +#include +#include +#endif + +typedef enum _blocksched_algos_t { + BLOCKSCHED_NAIV, BLOCKSCHED_EXTBB, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP +} blocksched_algos_t; + +static int algo = BLOCKSCHED_GREEDY; + +static const lc_opt_enum_int_items_t blockschedalgo_items[] = { + { "naiv", BLOCKSCHED_NAIV }, + { "extbb", BLOCKSCHED_EXTBB }, + { "greedy", BLOCKSCHED_GREEDY }, +#ifdef WITH_ILP + { "ilp", BLOCKSCHED_ILP }, +#endif + { NULL, 0 } +}; + +static lc_opt_enum_int_var_t algo_var = { + &algo, blockschedalgo_items +}; + +static const lc_opt_table_entry_t be_blocksched_options[] = { + LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var), + { NULL } +}; + +/* + * ____ _ + * / ___|_ __ ___ ___ __| |_ _ + * | | _| '__/ _ \/ _ \/ _` | | | | + * | |_| | | | __/ __/ (_| | |_| | + * \____|_| \___|\___|\__,_|\__, | + * |___/ + */ + +typedef struct _blocksched_entry_t { + ir_node *block; + struct _blocksched_entry_t *next; + struct _blocksched_entry_t *prev; +} blocksched_entry_t; + +typedef struct _edge_t { + ir_node *block; + int pos; + double execfreq; + int highest_execfreq; /**< flag that indicates wether this edge is the edge with the highest + execfreq pointing away from this block */ +} edge_t; + +typedef struct _blocksched_env_t { + ir_graph *irg; + struct obstack *obst; + ir_exec_freq *execfreqs; + edge_t *edges; + pdeq *worklist; + int blockcount; +} blocksched_env_t; + +static void collect_egde_frequency(ir_node *block, void *data) +{ + blocksched_env_t *env = data; + ir_graph *irg = env->irg; + ir_node *startblock = get_irg_start_block(irg); + int arity; + edge_t edge; + 
blocksched_entry_t *entry; + + entry = obstack_alloc(env->obst, sizeof(entry[0])); + entry->block = block; + entry->next = NULL; + entry->prev = NULL; + set_irn_link(block, entry); + + if(block == startblock) + return; + + arity = get_irn_arity(block); + if(arity == 1) { + edge.block = block; + edge.pos = 0; + edge.execfreq = get_block_execfreq(env->execfreqs, block); + edge.highest_execfreq = 1; + ARR_APP1(edge_t, env->edges, edge); + } else { + int i; + double highest_execfreq = -1; + int highest_edge_num; + + edge.block = block; + for(i = 0; i < arity; ++i) { + double execfreq; + + ir_node *pred_block = get_Block_cfgpred_block(block, i); + execfreq = get_block_execfreq(env->execfreqs, pred_block); + + edge.pos = i; + edge.execfreq = execfreq; + edge.highest_execfreq = 0; + ARR_APP1(edge_t, env->edges, edge); + if(execfreq > highest_execfreq) { + highest_execfreq = execfreq; + highest_edge_num = ARR_LEN(env->edges) - 1; + } + } + + env->edges[highest_edge_num].highest_execfreq = 1; + } +} + +static int cmp_edges(const void *d1, const void *d2) +{ + const edge_t *e1 = d1; + const edge_t *e2 = d2; + return e2->execfreq - e1->execfreq; +} + +static void coalesce_blocks(blocksched_env_t *env) +{ + int i; + int edge_count = ARR_LEN(env->edges); + + // run1: only look at jumps + for(i = 0; i < edge_count; ++i) { + const edge_t *edge = & env->edges[i]; + ir_node *block = edge->block; + ir_node *pred_block; + blocksched_entry_t *entry, *pred_entry; + + // the block might have been removed already... 
+ if(is_Bad(get_Block_cfgpred(block, 0))) + continue; + + if(!edge->highest_execfreq) + continue; + + pred_block = get_Block_cfgpred_block(block, edge->pos); + entry = get_irn_link(block); + pred_entry = get_irn_link(pred_block); + + if(pred_entry->next != NULL || entry->prev != NULL) + continue; + // only coalesce jumps + if(get_block_succ_next(pred_block, get_block_succ_first(pred_block)) != NULL) + continue; + + // schedule the 2 blocks behind each other + ir_fprintf(stderr, "Coalesce (Jump) %+F -> %+F (%.3g)\n", + pred_entry->block, entry->block, edge->execfreq); + pred_entry->next = entry; + entry->prev = pred_entry; + } + + // run2: remaining edges + for(i = 0; i < edge_count; ++i) { + const edge_t *edge = & env->edges[i]; + ir_node *block = edge->block; + ir_node *pred_block; + blocksched_entry_t *entry, *pred_entry; + + // the block might have been removed already... + if(is_Bad(get_Block_cfgpred(block, 0))) + continue; + + pred_block = get_Block_cfgpred_block(block, edge->pos); + entry = get_irn_link(block); + pred_entry = get_irn_link(pred_block); + + if(pred_entry->next != NULL || entry->prev != NULL) + continue; + + // schedule the 2 blocks behind each other + ir_fprintf(stderr, "Coalesce (CondJump) %+F -> %+F (%.3g)\n", + pred_entry->block, entry->block, edge->execfreq); + pred_entry->next = entry; + entry->prev = pred_entry; + } +} + +static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *env) +{ + ir_node *block = entry->block; + blocksched_entry_t *succ_entry; + const ir_edge_t *edge; + double best_succ_execfreq; + ir_node *succ = NULL; + + if(irn_visited(block)) + return; + env->blockcount++; + mark_irn_visited(block); + + ir_fprintf(stderr, "Pick succ of %+F\n", block); + + // put all successors into the worklist + foreach_block_succ(block, edge) { + ir_node *succ_block = get_edge_src_irn(edge); + + if(irn_visited(succ_block)) + continue; + + // we only need to put the first of a series of already connected + // blocks into 
the worklist + succ_entry = get_irn_link(succ_block); + while(succ_entry->prev != NULL) { + // break cycles... + if(succ_entry->prev->block == succ_block) { + succ_entry->prev->next = NULL; + succ_entry->prev = NULL; + break; + } + succ_entry = succ_entry->prev; + }; + + if(irn_visited(succ_entry->block)) + continue; + + ir_fprintf(stderr, "Put %+F into worklist\n", succ_entry->block); + pdeq_putr(env->worklist, succ_entry->block); + } + + if(entry->next != NULL) { + pick_block_successor(entry->next, env); + return; + } + + fprintf(stderr, "deciding...\n"); + best_succ_execfreq = -1; + /* no successor yet: pick the successor block with the highest execution + * frequency which has no predecessor yet + */ + foreach_block_succ(block, edge) { + ir_node *succ_block = get_edge_src_irn(edge); + + if(irn_visited(succ_block)) + continue; + + succ_entry = get_irn_link(succ_block); + if(succ_entry->prev != NULL) + continue; + + double execfreq = get_block_execfreq(env->execfreqs, succ_block); + if(execfreq > best_succ_execfreq) { + best_succ_execfreq = execfreq; + succ = succ_block; + } + } + + if(succ == NULL) { + fprintf(stderr, "pick from worklist\n"); + + do { + if(pdeq_empty(env->worklist)) { + fprintf(stderr, "worklist empty\n"); + return; + } + succ = pdeq_getl(env->worklist); + } while(irn_visited(succ)); + } + + succ_entry = get_irn_link(succ); + entry->next = succ_entry; + succ_entry->prev = entry; + + pick_block_successor(succ_entry, env); +} + +static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env) +{ + ir_graph *irg = env->irg; + ir_node *startblock = get_irg_start_block(irg); + blocksched_entry_t *entry = get_irn_link(startblock); + + inc_irg_visited(irg); + + env->worklist = new_pdeq(); + pick_block_successor(entry, env); + assert(pdeq_empty(env->worklist)); + del_pdeq(env->worklist); + + return entry; +} + +static ir_node **create_blocksched_array(blocksched_entry_t *first, int count, + struct obstack* obst) { + int i = 0; + ir_node 
**block_list; + blocksched_entry_t *entry; + + block_list = NEW_ARR_D(ir_node *, obst, count); + fprintf(stderr, "Blockschedule:\n"); + for(entry = first; entry != NULL; entry = entry->next) { + assert(i < count); + block_list[i++] = entry->block; + ir_fprintf(stderr, "\t%+F\n", entry->block); + } + assert(i == count); + + return block_list; +} + +static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execfreqs) +{ + blocksched_env_t env; + struct obstack obst; + blocksched_entry_t *start_entry; + ir_node **block_list; + + obstack_init(&obst); + + env.irg = irg; + env.obst = &obst; + env.execfreqs = execfreqs; + env.edges = NEW_ARR_F(edge_t, 0); + env.worklist = NULL; + env.blockcount = 0; + + // collect edge execution frequencies + irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env); + + // sort interblock edges by execution frequency + qsort(env.edges, ARR_LEN(env.edges), sizeof(env.edges[0]), cmp_edges); + + be_remove_empty_blocks(irg); + + if(algo != BLOCKSCHED_NAIV) + coalesce_blocks(&env); + + start_entry = finish_block_schedule(&env); + + block_list = create_blocksched_array(start_entry, env.blockcount, get_irg_obstack(irg)); + + DEL_ARR_F(env.edges); + obstack_free(&obst, NULL); + + return block_list; +} + +/* + * ___ _ ____ + * |_ _| | | _ \ + * | || | | |_) | + * | || |___| __/ + * |___|_____|_| + * + */ + +#ifdef WITH_ILP +typedef struct _ilp_edge_t { + ir_node *block; + int pos; + int ilpvar; +} ilp_edge_t; + +typedef struct _blocksched_ilp_env_t { + blocksched_env_t env; + ilp_edge_t *ilpedges; + lpp_t *lpp; +} blocksched_ilp_env_t; + +typedef struct _blocksched_ilp_entry_t { + ir_node *block; + struct _blocksched_entry_t *next; + struct _blocksched_entry_t *prev; + + int out_cst; +} blocksched_ilp_entry_t; + +static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp_env_t *env) +{ + char name[64]; + ilp_edge_t edge; + int edgeidx = ARR_LEN(env->ilpedges); + + snprintf(name, sizeof(name), "edge%d", 
edgeidx); + + edge.block = block; + edge.pos = pos; + edge.ilpvar = lpp_add_var_default(env->lpp, name, lpp_binary, execfreq, 1.0); + + ARR_APP1(ilp_edge_t, env->ilpedges, edge); + return edgeidx; +} + +static void collect_egde_frequency_ilp(ir_node *block, void *data) +{ + blocksched_ilp_env_t *env = data; + ir_graph *irg = env->env.irg; + ir_node *startblock = get_irg_start_block(irg); + int arity; + blocksched_ilp_entry_t *entry; + lpp_cst_t cst; + char name[64]; + int out_count; + + snprintf(name, sizeof(name), "block_out_constr_%ld", get_irn_node_nr(block)); + out_count = get_irn_n_edges_kind(block, EDGE_KIND_BLOCK); + + entry = obstack_alloc(env->env.obst, sizeof(entry[0])); + entry->block = block; + entry->next = NULL; + entry->prev = NULL; + entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1); + set_irn_link(block, entry); + + if(block == startblock) + return; + + arity = get_irn_arity(block); + if(arity == 1) { + double execfreq = get_block_execfreq(env->env.execfreqs, block); + add_ilp_edge(block, 0, execfreq, env); + } else { + int i; + int *edgenums = alloca(sizeof(edgenums[0]) * arity); + + snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block)); + cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1); + + for(i = 0; i < arity; ++i) { + double execfreq; + int edgenum; + ilp_edge_t *edge; + + ir_node *pred_block = get_Block_cfgpred_block(block, i); + execfreq = get_block_execfreq(env->env.execfreqs, pred_block); + + edgenum = add_ilp_edge(block, i, execfreq, env); + edge = & env->ilpedges[edgenum]; + lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0); + } + } +} + + +static void coalesce_blocks_ilp(blocksched_ilp_env_t *env) +{ + int i; + int edge_count = ARR_LEN(env->ilpedges); + FILE *f; + char fname[256]; + + /* complete out constraints */ + for(i = 0; i < edge_count; ++i) { + const ilp_edge_t *edge = & env->ilpedges[i]; + ir_node *block = edge->block; + ir_node *pred; + blocksched_ilp_entry_t 
*entry; + + // the block might have been removed already... + if(is_Bad(get_Block_cfgpred(block, 0))) + continue; + + pred = get_Block_cfgpred_block(block, edge->pos); + entry = get_irn_link(pred); + + ir_printf("Adding out cst to %+F from %+F,%d\n", + pred, block, edge->pos); + lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0); + } + + lpp_dump(env->lpp, "lpp.out"); + snprintf(fname, sizeof(fname), "lpp_%s.plain", get_irg_dump_name(env->env.irg)); + f = fopen(fname, "w"); + lpp_dump_plain(env->lpp, f); + fclose(f); + //lpp_solve_net(env->lpp, main_env->options->ilp_server, main_env->options->ilp_solver); + lpp_solve_net(env->lpp, "i44pc52", "cplex"); + assert(lpp_is_sol_valid(env->lpp)); + + /* Apply results to edges */ + for(i = 0; i < edge_count; ++i) { + const ilp_edge_t *edge = & env->ilpedges[i]; + ir_node *block = edge->block; + ir_node *pred; + int is_jump; + blocksched_entry_t *entry; + blocksched_entry_t *pred_entry; + + // the block might have been removed already... 
+ if(is_Bad(get_Block_cfgpred(block, 0))) + continue; + + is_jump = lpp_get_var_sol(env->lpp, edge->ilpvar); + if(is_jump) + continue; + + pred = get_Block_cfgpred_block(block, edge->pos); + entry = get_irn_link(block); + pred_entry = get_irn_link(pred); + + assert(entry->prev == NULL && pred_entry->next == NULL); + entry->prev = pred_entry; + pred_entry->next = entry; + } +} + +static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreqs) +{ + blocksched_ilp_env_t env; + struct obstack obst; + blocksched_entry_t *start_entry; + ir_node **block_list; + + obstack_init(&obst); + + env.env.irg = irg; + env.env.obst = &obst; + env.env.execfreqs = execfreqs; + env.env.worklist = NULL; + env.env.blockcount = 0; + env.ilpedges = NEW_ARR_F(ilp_edge_t, 0); + + env.lpp = new_lpp("blockschedule", lpp_minimize); + lpp_set_time_limit(env.lpp, 20); + lpp_set_log(env.lpp, stdout); + + irg_block_walk_graph(irg, collect_egde_frequency_ilp, NULL, &env); + + be_remove_empty_blocks(irg); + + coalesce_blocks_ilp(&env); + + start_entry = finish_block_schedule(&env.env); + + block_list = create_blocksched_array(start_entry, env.env.blockcount, get_irg_obstack(irg)); + + DEL_ARR_F(env.ilpedges); + free_lpp(env.lpp); + obstack_free(&obst, NULL); + + return block_list; +} +#endif + +/* + * _____ _ ____ ____ + * | ____|_ _| |_| __ )| __ ) + * | _| \ \/ / __| _ \| _ \ + * | |___ > <| |_| |_) | |_) | + * |_____/_/\_\\__|____/|____/ + * + */ + +/** A simple forward single linked list. 
*/ +typedef struct { + ir_node *start; /**< start of the list */ + ir_node *end; /**< last block in the list */ + unsigned n_blks; /**< number of blocks in the list */ +} anchor; + +static void add_block(anchor *list, ir_node *block) { + if(list->start == NULL) { + list->start = block; + list->end = block; + } else { + set_irn_link(list->end, block); + list->end = block; + } + + list->n_blks++; +} + +static void create_block_list(ir_node *leader_block, anchor *list) { + int i; + ir_node *block = NULL; + const ir_edge_t *edge; + + ir_extblk *extbb = get_Block_extbb(leader_block); + if(extbb_visited(extbb)) + return; + mark_extbb_visited(extbb); + + for(i = 0; i < get_extbb_n_blocks(extbb); ++i) { + block = get_extbb_block(extbb, i); + add_block(list, block); + } + + assert(block != NULL); + + // pick successor extbbs + foreach_block_succ(block, edge) { + ir_node *succ = get_edge_src_irn(edge); + + create_block_list(succ, list); + } + + for(i = 0; i < get_extbb_n_blocks(extbb) - 1; ++i) { + block = get_extbb_block(extbb, i); + foreach_block_succ(block, edge) { + ir_node *succ = get_edge_src_irn(edge); + + create_block_list(succ, list); + } + } +} + +void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs); + +/* + * Calculates a block schedule. The schedule is stored as a linked + * list starting at the start_block of the irg. 
+ */ +static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs) +{ + anchor list; + ir_node **blk_list, *b, *n; + unsigned i; + + /* schedule extended basic blocks */ + compute_extbb_execfreqs(irg, execfreqs); + //compute_extbb(irg); + + list.start = NULL; + list.end = NULL; + list.n_blks = 0; + inc_irg_block_visited(irg); + create_block_list(get_irg_start_block(irg), &list); + + /** create an array, so we can go forward and backward */ + blk_list = NEW_ARR_D(ir_node *, irg->obst,list.n_blks); + + for (i = 0, b = list.start; b; b = n, ++i) { + n = get_irn_link(b); + blk_list[i] = b; + } + + return blk_list; +} + +/* + * __ __ _ + * | \/ | __ _(_)_ __ + * | |\/| |/ _` | | '_ \ + * | | | | (_| | | | | | + * |_| |_|\__,_|_|_| |_| + * + */ + +#ifdef WITH_LIBCORE +void be_block_schedule_register_options(lc_opt_entry_t *grp) +{ + static int run_once = 0; + lc_opt_entry_t *blocksched_grp; + + if(run_once) + return; + run_once = 1; + blocksched_grp = lc_opt_get_grp(grp, "blocksched"); + + lc_opt_add_table(blocksched_grp, be_blocksched_options); +} +#endif + +ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs) +{ + switch(algo) { + case BLOCKSCHED_GREEDY: + case BLOCKSCHED_NAIV: + return create_block_schedule_greedy(irg, execfreqs); + case BLOCKSCHED_EXTBB: + return create_extbb_block_schedule(irg, execfreqs); +#ifdef WITH_ILP + case BLOCKSCHED_ILP: + return create_block_schedule_ilp(irg, execfreqs); +#endif + } + + assert(0 && "unknown blocksched algo"); + return NULL; +} diff --git a/ir/be/beblocksched.h b/ir/be/beblocksched.h new file mode 100644 index 000000000..3c8a34531 --- /dev/null +++ b/ir/be/beblocksched.h @@ -0,0 +1,21 @@ +/* + * Block schedule calculator + * + * $Id$ + */ +#ifndef _BEBLOCKSCHED_H +#define _BEBLOCKSCHED_H + +#include "obst.h" +#include "execfreq.h" +#include "irnode.h" +#include "irgraph.h" + +ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs); + +#ifdef WITH_LIBCORE 
+#include +void be_block_schedule_register_options(lc_opt_entry_t *grp); +#endif + +#endif diff --git a/ir/be/bemain.c b/ir/be/bemain.c index c3edadab2..a9e2b242f 100644 --- a/ir/be/bemain.c +++ b/ir/be/bemain.c @@ -65,6 +65,7 @@ #include "bestat.h" #include "beverify.h" #include "beprofile.h" +#include "beblocksched.h" #include "be_dbgout.h" /* options visible for anyone */ @@ -196,6 +197,8 @@ void be_opt_register(void) /* scheduler register options */ list_sched_register_options(be_grp_root); + + be_block_schedule_register_options(be_grp_root); } #endif /* WITH_LIBCORE */ } @@ -475,6 +478,9 @@ static void be_main_loop(FILE *file_handle, const char *cup_name) optimization_state_t state; const arch_code_generator_if_t *cg_if; + /* set the current graph (this is important for several firm functions) */ + current_ir_graph = irg; + /* stop and reset timers */ BE_TIMER_ONLY( LC_STOP_AND_RESET_TIMER(t_abi); @@ -509,9 +515,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name) /* some transformations need to be done before abi introduce */ arch_code_generator_before_abi(birg->cg); - /* set the current graph (this is important for several firm functions) */ - current_ir_graph = irg; - /* reset the phi handler. */ be_phi_handler_reset(env.phi_handler); diff --git a/ir/be/besched.c b/ir/be/besched.c index 220733279..406542b19 100644 --- a/ir/be/besched.c +++ b/ir/be/besched.c @@ -205,91 +205,7 @@ ir_node *sched_skip(ir_node *from, int forward, sched_predicator_t *predicator, return curr; } -/** A simple forward single linked list. 
*/ -typedef struct { - ir_node *start; /**< start of the list */ - ir_node *end; /**< last block in the list */ - unsigned n_blks; /**< number of blocks in the list */ -} anchor; - -static void add_block(anchor *list, ir_node *block) { - if(list->start == NULL) { - list->start = block; - list->end = block; - } else { - set_irn_link(list->end, block); - list->end = block; - } - - list->n_blks++; -} - -static void create_block_list(ir_node *leader_block, anchor *list) { - int i; - ir_node *block = NULL; - const ir_edge_t *edge; - - ir_extblk *extbb = get_Block_extbb(leader_block); - if(extbb_visited(extbb)) - return; - mark_extbb_visited(extbb); - - for(i = 0; i < get_extbb_n_blocks(extbb); ++i) { - block = get_extbb_block(extbb, i); - add_block(list, block); - } - - assert(block != NULL); - - // pick successor extbbs - foreach_block_succ(block, edge) { - ir_node *succ = get_edge_src_irn(edge); - - create_block_list(succ, list); - } - - for(i = 0; i < get_extbb_n_blocks(extbb) - 1; ++i) { - block = get_extbb_block(extbb, i); - foreach_block_succ(block, edge) { - ir_node *succ = get_edge_src_irn(edge); - - create_block_list(succ, list); - } - } -} - -void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs); - -/* - * Calculates a block schedule. The schedule is stored as a linked - * list starting at the start_block of the irg. 
- */ -ir_node **sched_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs) -{ - anchor list; - ir_node **blk_list, *b, *n; - unsigned i; - - /* schedule extended basic blocks */ - compute_extbb_execfreqs(irg, execfreqs); - //compute_extbb(irg); - - list.start = NULL; - list.end = NULL; - list.n_blks = 0; - inc_irg_block_visited(irg); - create_block_list(get_irg_start_block(irg), &list); - - /** create an array, so we can go forward and backward */ - blk_list = NEW_ARR_D(ir_node *, irg->obst,list.n_blks); - - for (i = 0, b = list.start; b; b = n, ++i) { - n = get_irn_link(b); - blk_list[i] = b; - } - - return blk_list; -} +//--------------------------------------------------------------------------- typedef struct remove_dead_nodes_env_t_ { ir_graph *irg; diff --git a/ir/be/bespill.c b/ir/be/bespill.c index 4d6c9a5cf..778481788 100644 --- a/ir/be/bespill.c +++ b/ir/be/bespill.c @@ -144,7 +144,7 @@ void be_delete_spill_env(spill_env_t *env) { free(env); } -/** +/* * ____ _ ____ _ _ * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___ * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __| diff --git a/ir/be/bespillmorgan.c b/ir/be/bespillmorgan.c index 4704aca27..d63469726 100644 --- a/ir/be/bespillmorgan.c +++ b/ir/be/bespillmorgan.c @@ -463,8 +463,6 @@ static int reduce_register_pressure_in_block(morgan_env_t *env, const ir_node* b } del_pset(live_nodes); - DBG((dbg, DBG_PRESSURE, "\tMax Pressure in %+F: %d\n", block, max_pressure)); - loop_unused_spills_needed = max_pressure - env->registers_available; if(loop_unused_spills_needed < 0) { @@ -473,7 +471,8 @@ static int reduce_register_pressure_in_block(morgan_env_t *env, const ir_node* b loop_unused_spills_needed = loop_unused_spills_possible; } - DBG((dbg, DBG_PRESSURE, "Unused spills for Block %+F needed: %d\n", block, loop_unused_spills_needed)); + DBG((dbg, DBG_PRESSURE, "Block %+F: max-pressure %d spills possible: %d spills used: %d\n", + block, max_pressure, loop_unused_spills_possible, 
loop_unused_spills_needed)); return loop_unused_spills_needed; }