X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeblocksched.c;h=7922129b1da6bb376cc1c5373dfbf514718a77b0;hb=929bd10ba046d7482e572d97ec592a4055c65970;hp=f7cefaadc8bf19d8a5aa73231fbabdcfcacef3d7;hpb=2fafc006101c3845f9fbec5783e44267355549c1;p=libfirm

diff --git a/ir/be/beblocksched.c b/ir/be/beblocksched.c
index f7cefaadc..7922129b1 100644
--- a/ir/be/beblocksched.c
+++ b/ir/be/beblocksched.c
@@ -22,7 +22,6 @@
  * @brief       Block-scheduling strategies.
  * @author      Matthias Braun, Christoph Mallon
  * @date        27.09.2006
- * @version     $Id$
  *
  * The goals of the greedy (and ILP) algorithm here works by assuming that
  * we want to change as many jumps to fallthroughs as possible (executed jumps
@@ -42,7 +41,7 @@
 
 #include "array.h"
 #include "pdeq.h"
-
+#include "beirg.h"
 #include "iredges.h"
 #include "irgwalk.h"
 #include "irnode_t.h"
@@ -52,6 +51,7 @@
 #include "execfreq.h"
 #include "irdump_t.h"
 #include "irtools.h"
+#include "util.h"
 #include "debug.h"
 #include "beirgmod.h"
 #include "bemodule.h"
@@ -61,10 +61,8 @@
 #include "lc_opts.h"
 #include "lc_opts_enum.h"
 
-#ifdef WITH_ILP
-#include <lpp/lpp.h>
-#include <lpp/lpp_net.h>
-#endif /* WITH_ILP */
+#include "lpp.h"
+#include "lpp_net.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 
@@ -77,9 +75,7 @@ static int algo = BLOCKSCHED_GREEDY;
 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
 	{ "naiv",   BLOCKSCHED_NAIV },
 	{ "greedy", BLOCKSCHED_GREEDY },
-#ifdef WITH_ILP
 	{ "ilp",    BLOCKSCHED_ILP },
-#endif /* WITH_ILP */
 	{ NULL,     0 }
 };
 
@@ -125,12 +121,16 @@ typedef struct blocksched_env_t blocksched_env_t;
 struct blocksched_env_t {
 	ir_graph       *irg;
 	struct obstack *obst;
-	ir_exec_freq   *execfreqs;
 	edge_t         *edges;
 	pdeq           *worklist;
 	int            blockcount;
 };
 
+static blocksched_entry_t* get_blocksched_entry(const ir_node *block)
+{
+	return (blocksched_entry_t*)get_irn_link(block);
+}
+
 /**
 * Collect cfg frequencies of all edges between blocks.
 * Also determines edge with highest frequency. 
@@ -163,11 +163,11 @@ static void collect_egde_frequency(ir_node *block, void *data) } else if (arity == 1) { ir_node *pred_block = get_Block_cfgpred_block(block, 0); ir_loop *pred_loop = get_irn_loop(pred_block); - float freq = (float)get_block_execfreq(env->execfreqs, block); + float freq = (float)get_block_execfreq(block); /* is it an edge leaving a loop */ if (get_loop_depth(pred_loop) > get_loop_depth(loop)) { - float pred_freq = (float)get_block_execfreq(env->execfreqs, pred_block); + float pred_freq = (float)get_block_execfreq(pred_block); edge.outedge_penalty_freq = -(pred_freq - freq); } @@ -186,7 +186,7 @@ static void collect_egde_frequency(ir_node *block, void *data) double execfreq; ir_node *pred_block = get_Block_cfgpred_block(block, i); - execfreq = get_block_execfreq(env->execfreqs, pred_block); + execfreq = get_block_execfreq(pred_block); edge.pos = i; edge.execfreq = execfreq; @@ -204,20 +204,53 @@ static void collect_egde_frequency(ir_node *block, void *data) } } +static int cmp_edges_base(const edge_t *e1, const edge_t *e2) +{ + long nr1 = get_irn_node_nr(e1->block); + long nr2 = get_irn_node_nr(e2->block); + if (nr1 < nr2) { + return 1; + } else if (nr1 > nr2) { + return -1; + } else { + if (e1->pos < e2->pos) { + return 1; + } else if (e1->pos > e2->pos) { + return -1; + } else { + return 0; + } + } +} + static int cmp_edges(const void *d1, const void *d2) { const edge_t *e1 = (const edge_t*)d1; const edge_t *e2 = (const edge_t*)d2; - - return QSORT_CMP(e2->execfreq, e1->execfreq); + double freq1 = e1->execfreq; + double freq2 = e2->execfreq; + if (freq1 < freq2) { + return 1; + } else if (freq1 > freq2) { + return -1; + } else { + return cmp_edges_base(e1, e2); + } } static int cmp_edges_outedge_penalty(const void *d1, const void *d2) { - const edge_t *e1 = (const edge_t*)d1; - const edge_t *e2 = (const edge_t*)d2; - /* reverse sorting as penalties are negative */ - return QSORT_CMP(e1->outedge_penalty_freq, e2->outedge_penalty_freq); + const edge_t *e1 = (const edge_t*)d1; + const edge_t *e2 = (const edge_t*)d2; + double pen1 = e1->outedge_penalty_freq; + double pen2 = e2->outedge_penalty_freq; + if (pen1 > pen2) { + return 1; + } else if (pen1 < pen2) { + return -1; + } else { + return cmp_edges_base(e1, e2); + } } static void clear_loop_links(ir_loop *loop) @@ -260,8 +293,8 @@ static void coalesce_blocks(blocksched_env_t *env) continue; pred_block = get_Block_cfgpred_block(block, pos); - entry = (blocksched_entry_t*)get_irn_link(block); - pred_entry = (blocksched_entry_t*)get_irn_link(pred_block); + entry = get_blocksched_entry(block); + pred_entry = get_blocksched_entry(pred_block); if (pred_entry->next != NULL || entry->prev != NULL) continue; @@ -299,8 +332,8 @@ static void coalesce_blocks(blocksched_env_t *env) continue; pred_block = get_Block_cfgpred_block(block, pos); - entry = (blocksched_entry_t*)get_irn_link(block); - pred_entry = (blocksched_entry_t*)get_irn_link(pred_block); + entry = get_blocksched_entry(block); + pred_entry = get_blocksched_entry(pred_block); if (pred_entry->next != NULL || entry->prev != NULL) continue; @@ -341,8 +374,8 @@ static void coalesce_blocks(blocksched_env_t *env) continue; pred_block = get_Block_cfgpred_block(block, pos); - entry = (blocksched_entry_t*)get_irn_link(block); - pred_entry = (blocksched_entry_t*)get_irn_link(pred_block); + entry = get_blocksched_entry(block); + pred_entry = get_blocksched_entry(pred_block); /* is 1 of the blocks already attached to another block? 
*/ if (pred_entry->next != NULL || entry->prev != NULL) @@ -361,8 +394,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en ir_node *block = entry->block; ir_node *succ = NULL; blocksched_entry_t *succ_entry; - const ir_edge_t *edge; - double best_succ_execfreq; + double best_succ_execfreq; if (irn_visited_else_mark(block)) return; @@ -380,7 +412,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en /* we only need to put the first of a series of already connected * blocks into the worklist */ - succ_entry = (blocksched_entry_t*)get_irn_link(succ_block); + succ_entry = get_blocksched_entry(succ_block); while (succ_entry->prev != NULL) { /* break cycles... */ if (succ_entry->prev->block == succ_block) { @@ -389,7 +421,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en break; } succ_entry = succ_entry->prev; - }; + } if (irn_visited(succ_entry->block)) continue; @@ -411,16 +443,15 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en foreach_block_succ(block, edge) { ir_node *succ_block = get_edge_src_irn(edge); - double execfreq; if (irn_visited(succ_block)) continue; - succ_entry = (blocksched_entry_t*)get_irn_link(succ_block); + succ_entry = get_blocksched_entry(succ_block); if (succ_entry->prev != NULL) continue; - execfreq = get_block_execfreq(env->execfreqs, succ_block); + double execfreq = get_block_execfreq(succ_block); if (execfreq > best_succ_execfreq) { best_succ_execfreq = execfreq; succ = succ_block; @@ -439,7 +470,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en } while (irn_visited(succ)); } - succ_entry = (blocksched_entry_t*)get_irn_link(succ); + succ_entry = get_blocksched_entry(succ); entry->next = succ_entry; succ_entry->prev = entry; @@ -450,7 +481,7 @@ static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env) { ir_graph *irg = env->irg; ir_node *startblock = get_irg_start_block(irg); - blocksched_entry_t *entry = (blocksched_entry_t*)get_irn_link(startblock); + blocksched_entry_t *entry = get_blocksched_entry(startblock); ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED); inc_irg_visited(irg); @@ -486,7 +517,7 @@ static ir_node **create_blocksched_array(blocksched_env_t *env, blocksched_entry return block_list; } -static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execfreqs) +static ir_node **create_block_schedule_greedy(ir_graph *irg) { blocksched_env_t env; struct obstack obst; @@ -497,15 +528,11 @@ static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execf env.irg = irg; env.obst = &obst; - env.execfreqs = execfreqs; env.edges = NEW_ARR_F(edge_t, 0); env.worklist = NULL; env.blockcount = 0; - /* make sure loopinfo is up-to-date */ - if (! 
(get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) { - construct_cf_backedges(irg); - } + assure_loopinfo(irg); // collect edge execution frequencies irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env); @@ -534,7 +561,6 @@ static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execf * */ -#ifdef WITH_ILP typedef struct ilp_edge_t { ir_node *block; /**< source block */ int pos; /**< number of cfg predecessor (target) */ @@ -573,11 +599,10 @@ static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp static void collect_egde_frequency_ilp(ir_node *block, void *data) { - blocksched_ilp_env_t *env = data; + blocksched_ilp_env_t *env = (blocksched_ilp_env_t*)data; ir_graph *irg = env->env.irg; ir_node *startblock = get_irg_start_block(irg); int arity; - lpp_cst_t cst; char name[64]; int out_count; blocksched_ilp_entry_t *entry; @@ -589,7 +614,7 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data) entry->block = block; entry->next = NULL; entry->prev = NULL; - entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1); + entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, out_count - 1); set_irn_link(block, entry); if (block == startblock) @@ -597,14 +622,15 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data) arity = get_irn_arity(block); if (arity == 1) { - double execfreq = get_block_execfreq(env->env.execfreqs, block); + double execfreq = get_block_execfreq(block); add_ilp_edge(block, 0, execfreq, env); } else { int i; + int cst_idx; snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block)); - cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1); + cst_idx = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1); for (i = 0; i < arity; ++i) { double execfreq; @@ -612,19 +638,22 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data) ilp_edge_t *edge; ir_node *pred_block = get_Block_cfgpred_block(block, i); - execfreq = get_block_execfreq(env->env.execfreqs, pred_block); + execfreq = get_block_execfreq(pred_block); edgenum = add_ilp_edge(block, i, execfreq, env); edge = &env->ilpedges[edgenum]; - lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0); + lpp_set_factor_fast(env->lpp, cst_idx, edge->ilpvar, 1.0); } } } +static blocksched_ilp_entry_t *get_blocksched_ilp_entry(const ir_node *block) +{ + return (blocksched_ilp_entry_t*)get_irn_link(block); +} static void coalesce_blocks_ilp(blocksched_ilp_env_t *env) { int edge_count = ARR_LEN(env->ilpedges); - be_options_t *options = be_get_irg_options(env->env.irg); int i; /* complete out constraints */ @@ -639,14 +668,14 @@ static void coalesce_blocks_ilp(blocksched_ilp_env_t *env) continue; pred = get_Block_cfgpred_block(block, edge->pos); - entry = get_irn_link(pred); + entry = get_blocksched_ilp_entry(pred); DB((dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n", pred, block, edge->pos)); lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0); } - lpp_solve_net(env->lpp, options->ilp_server, options->ilp_solver); + lpp_solve_net(env->lpp, be_options.ilp_server, be_options.ilp_solver); assert(lpp_is_sol_valid(env->lpp)); /* Apply results to edges */ @@ -667,8 +696,8 @@ static void coalesce_blocks_ilp(blocksched_ilp_env_t *env) continue; pred = get_Block_cfgpred_block(block, edge->pos); - entry = get_irn_link(block); - pred_entry = get_irn_link(pred); + entry = get_blocksched_entry(block); + pred_entry = get_blocksched_entry(pred); assert(entry->prev == NULL 
&& pred_entry->next == NULL); entry->prev = pred_entry; @@ -676,7 +705,7 @@ static void coalesce_blocks_ilp(blocksched_ilp_env_t *env) } } -static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreqs) +static ir_node **create_block_schedule_ilp(ir_graph *irg) { blocksched_ilp_env_t env; struct obstack obst; @@ -687,12 +716,11 @@ static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreq env.env.irg = irg; env.env.obst = &obst; - env.env.execfreqs = execfreqs; env.env.worklist = NULL; env.env.blockcount = 0; env.ilpedges = NEW_ARR_F(ilp_edge_t, 0); - env.lpp = new_lpp("blockschedule", lpp_minimize); + env.lpp = lpp_new("blockschedule", lpp_minimize); lpp_set_time_limit(env.lpp, 20); lpp_set_log(env.lpp, stdout); @@ -707,12 +735,11 @@ static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreq be_get_be_obst(irg)); DEL_ARR_F(env.ilpedges); - free_lpp(env.lpp); + lpp_free(env.lpp); obstack_free(&obst, NULL); return block_list; } -#endif /* WITH_ILP */ /* * __ __ _ @@ -722,7 +749,7 @@ static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreq * |_| |_|\__,_|_|_| |_| * */ -BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched); +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched) void be_init_blocksched(void) { lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be"); @@ -734,16 +761,12 @@ void be_init_blocksched(void) ir_node **be_create_block_schedule(ir_graph *irg) { - ir_exec_freq *execfreqs = be_get_irg_exec_freq(irg); - switch (algo) { case BLOCKSCHED_GREEDY: case BLOCKSCHED_NAIV: - return create_block_schedule_greedy(irg, execfreqs); -#ifdef WITH_ILP + return create_block_schedule_greedy(irg); case BLOCKSCHED_ILP: - return create_block_schedule_ilp(irg, execfreqs); -#endif /* WITH_ILP */ + return create_block_schedule_ilp(irg); } panic("unknown blocksched algo");