X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeblocksched.c;h=d783a8922080fe64cfac9b157bed263db104bae5;hb=b2008e0b7c77ead1540a4793d235fafb6fcff873;hp=7c0ce9f5493f3d69eb837e61170e54c2cd38efa4;hpb=5d6cfd12693c34b702aab148038ebb562f2695f9;p=libfirm

diff --git a/ir/be/beblocksched.c b/ir/be/beblocksched.c
index 7c0ce9f54..d783a8922 100644
--- a/ir/be/beblocksched.c
+++ b/ir/be/beblocksched.c
@@ -22,7 +22,6 @@
  * @brief       Block-scheduling strategies.
  * @author      Matthias Braun, Christoph Mallon
  * @date        27.09.2006
- * @version     $Id$
  *
  * The goals of the greedy (and ILP) algorithm here works by assuming that
  * we want to change as many jumps to fallthroughs as possible (executed jumps
@@ -52,6 +51,7 @@
 #include "execfreq.h"
 #include "irdump_t.h"
 #include "irtools.h"
+#include "util.h"
 #include "debug.h"
 #include "beirgmod.h"
 #include "bemodule.h"
@@ -61,25 +61,21 @@
 #include "lc_opts.h"
 #include "lc_opts_enum.h"
 
-#ifdef WITH_ILP
-#include <lpp/lpp.h>
-#include <lpp/lpp_net.h>
-#endif /* WITH_ILP */
+#include "lpp.h"
+#include "lpp_net.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 
-typedef enum _blocksched_algos_t {
+typedef enum blocksched_algos_t {
     BLOCKSCHED_NAIV, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP
 } blocksched_algos_t;
 
 static int algo = BLOCKSCHED_GREEDY;
 
 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
-    { "naiv",   BLOCKSCHED_NAIV },
+    { "naiv",   BLOCKSCHED_NAIV },
     { "greedy", BLOCKSCHED_GREEDY },
-#ifdef WITH_ILP
     { "ilp",    BLOCKSCHED_ILP },
-#endif /* WITH_ILP */
     { NULL,     0 }
 };
 
@@ -88,7 +84,7 @@ static lc_opt_enum_int_var_t algo_var = {
 };
 
 static const lc_opt_table_entry_t be_blocksched_options[] = {
-    LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
+    LC_OPT_ENT_ENUM_INT ("blockscheduler", "the block scheduling algorithm", &algo_var),
     LC_OPT_LAST
 };
 
@@ -137,7 +133,7 @@ struct blocksched_env_t {
  */
 static void collect_egde_frequency(ir_node *block, void *data)
 {
-    blocksched_env_t *env = data;
+    blocksched_env_t *env = (blocksched_env_t*)data;
     int arity;
     edge_t edge;
     blocksched_entry_t *entry;
@@ -199,23 +195,23 @@ static void collect_egde_frequency(ir_node *block, void *data)
             }
         }
 
-        if(highest_edge_num >= 0)
+        if (highest_edge_num >= 0)
             env->edges[highest_edge_num].highest_execfreq = 1;
     }
 }
 
 static int cmp_edges(const void *d1, const void *d2)
 {
-    const edge_t *e1 = d1;
-    const edge_t *e2 = d2;
+    const edge_t *e1 = (const edge_t*)d1;
+    const edge_t *e2 = (const edge_t*)d2;
 
     return QSORT_CMP(e2->execfreq, e1->execfreq);
 }
 
 static int cmp_edges_outedge_penalty(const void *d1, const void *d2)
 {
-    const edge_t *e1 = d1;
-    const edge_t *e2 = d2;
+    const edge_t *e1 = (const edge_t*)d1;
+    const edge_t *e2 = (const edge_t*)d2;
     /* reverse sorting as penalties are negative */
     return QSORT_CMP(e1->outedge_penalty_freq, e2->outedge_penalty_freq);
 }
@@ -260,8 +256,8 @@ static void coalesce_blocks(blocksched_env_t *env)
             continue;
 
         pred_block = get_Block_cfgpred_block(block, pos);
-        entry      = get_irn_link(block);
-        pred_entry = get_irn_link(pred_block);
+        entry      = (blocksched_entry_t*)get_irn_link(block);
+        pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
 
         if (pred_entry->next != NULL || entry->prev != NULL)
             continue;
@@ -299,8 +295,8 @@ static void coalesce_blocks(blocksched_env_t *env)
             continue;
 
         pred_block = get_Block_cfgpred_block(block, pos);
-        entry      = get_irn_link(block);
-        pred_entry = get_irn_link(pred_block);
+        entry      = (blocksched_entry_t*)get_irn_link(block);
+        pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
 
         if (pred_entry->next != NULL || entry->prev != NULL)
             continue;
@@ -341,8 +337,8 @@ static void coalesce_blocks(blocksched_env_t *env)
             continue;
 
         pred_block = get_Block_cfgpred_block(block, pos);
-        entry      = get_irn_link(block);
-        pred_entry = get_irn_link(pred_block);
+        entry      = (blocksched_entry_t*)get_irn_link(block);
+        pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
 
         /* is 1 of the blocks already attached to another block? */
         if (pred_entry->next != NULL || entry->prev != NULL)
@@ -380,7 +376,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
 
         /* we only need to put the first of a series of already connected
          * blocks into the worklist */
-        succ_entry = get_irn_link(succ_block);
+        succ_entry = (blocksched_entry_t*)get_irn_link(succ_block);
         while (succ_entry->prev != NULL) {
             /* break cycles... */
             if (succ_entry->prev->block == succ_block) {
@@ -389,7 +385,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
                 break;
             }
             succ_entry = succ_entry->prev;
-        };
+        }
 
         if (irn_visited(succ_entry->block))
             continue;
@@ -416,7 +412,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
             if (irn_visited(succ_block))
                 continue;
 
-            succ_entry = get_irn_link(succ_block);
+            succ_entry = (blocksched_entry_t*)get_irn_link(succ_block);
             if (succ_entry->prev != NULL)
                 continue;
 
@@ -435,11 +431,11 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
                 DB((dbg, LEVEL_1, "worklist empty\n"));
                 return;
             }
-            succ = pdeq_getl(env->worklist);
+            succ = (ir_node*)pdeq_getl(env->worklist);
         } while (irn_visited(succ));
     }
 
-    succ_entry = get_irn_link(succ);
+    succ_entry = (blocksched_entry_t*)get_irn_link(succ);
     entry->next = succ_entry;
     succ_entry->prev = entry;
 
@@ -450,7 +446,7 @@ static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
 {
     ir_graph *irg = env->irg;
     ir_node *startblock = get_irg_start_block(irg);
-    blocksched_entry_t *entry = get_irn_link(startblock);
+    blocksched_entry_t *entry = (blocksched_entry_t*)get_irn_link(startblock);
 
     ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
     inc_irg_visited(irg);
@@ -502,10 +498,7 @@ static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execf
     env.worklist   = NULL;
     env.blockcount = 0;
 
-    /* make sure loopinfo is up-to-date */
-    if (! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
-        construct_cf_backedges(irg);
-    }
+    assure_loopinfo(irg);
 
     // collect edge execution frequencies
     irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
@@ -517,7 +510,7 @@ static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execf
 
     start_entry = finish_block_schedule(&env);
     block_list  = create_blocksched_array(&env, start_entry, env.blockcount,
-                                          be_get_birg_obst(irg));
+                                          be_get_be_obst(irg));
 
     DEL_ARR_F(env.edges);
     obstack_free(&obst, NULL);
@@ -534,23 +527,22 @@ static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execf
  *
  */
 
-#ifdef WITH_ILP
-typedef struct _ilp_edge_t {
+typedef struct ilp_edge_t {
     ir_node *block;  /**< source block */
     int      pos;    /**< number of cfg predecessor (target) */
     int      ilpvar;
 } ilp_edge_t;
 
-typedef struct _blocksched_ilp_env_t {
+typedef struct blocksched_ilp_env_t {
     blocksched_env_t env;
     ilp_edge_t      *ilpedges;
     lpp_t           *lpp;
 } blocksched_ilp_env_t;
 
-typedef struct _blocksched_ilp_entry_t {
+typedef struct blocksched_ilp_entry_t {
     ir_node *block;
-    struct _blocksched_entry_t *next;
-    struct _blocksched_entry_t *prev;
+    struct blocksched_entry_t *next;
+    struct blocksched_entry_t *prev;
     int out_cst;
 } blocksched_ilp_entry_t;
 
@@ -589,7 +581,7 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data)
     entry->block   = block;
     entry->next    = NULL;
     entry->prev    = NULL;
-    entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
+    entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, out_count - 1);
     set_irn_link(block, entry);
 
     if (block == startblock)
@@ -604,7 +596,7 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data)
         int i;
 
         snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
-        cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
+        cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1);
 
         for (i = 0; i < arity; ++i) {
             double execfreq;
@@ -623,11 +615,11 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data)
 
 static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
 {
-    int i;
-    int edge_count = ARR_LEN(env->ilpedges);
+    int edge_count = ARR_LEN(env->ilpedges);
+    int i;
 
     /* complete out constraints */
-    for(i = 0; i < edge_count; ++i) {
+    for (i = 0; i < edge_count; ++i) {
         const ilp_edge_t *edge = &env->ilpedges[i];
         ir_node *block = edge->block;
         ir_node *pred;
@@ -645,20 +637,7 @@ static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
         lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
     }
 
-#if 0
-    {
-        FILE *f;
-        char fname[256];
-        lpp_dump(env->lpp, "lpp.out");
-        snprintf(fname, sizeof(fname), "lpp_%s.plain", get_irg_dump_name(env->env.irg));
-        f = fopen(fname, "w");
-        lpp_dump_plain(env->lpp, f);
-        fclose(f);
-    }
-#endif
-
-    //lpp_solve_net(env->lpp, main_env->options->ilp_server, main_env->options->ilp_solver);
-    lpp_solve_net(env->lpp, "i44pc52", "cplex");
+    lpp_solve_net(env->lpp, be_options.ilp_server, be_options.ilp_solver);
     assert(lpp_is_sol_valid(env->lpp));
 
     /* Apply results to edges */
@@ -704,7 +683,7 @@ static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreq
     env.env.blockcount = 0;
     env.ilpedges       = NEW_ARR_F(ilp_edge_t, 0);
 
-    env.lpp = new_lpp("blockschedule", lpp_minimize);
+    env.lpp = lpp_new("blockschedule", lpp_minimize);
     lpp_set_time_limit(env.lpp, 20);
     lpp_set_log(env.lpp, stdout);
 
@@ -716,15 +695,14 @@ static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreq
 
     start_entry = finish_block_schedule(&env.env);
     block_list  = create_blocksched_array(&env.env, start_entry, env.env.blockcount,
-                                          be_get_birg_obst(irg));
+                                          be_get_be_obst(irg));
 
     DEL_ARR_F(env.ilpedges);
-    free_lpp(env.lpp);
+    lpp_free(env.lpp);
     obstack_free(&obst, NULL);
 
     return block_list;
 }
-#endif /* WITH_ILP */
 
 /*
  *  __  __       _
@@ -734,30 +712,27 @@ static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreq
  * |_|  |_|\__,_|_|_| |_|
  *
  */
 
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched)
 void be_init_blocksched(void)
 {
     lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
-    lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
 
-    lc_opt_add_table(blocksched_grp, be_blocksched_options);
+    lc_opt_add_table(be_grp, be_blocksched_options);
 
     FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
 }
 
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
-
-ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
+ir_node **be_create_block_schedule(ir_graph *irg)
 {
-    switch(algo) {
+    ir_exec_freq *execfreqs = be_get_irg_exec_freq(irg);
+
+    switch (algo) {
     case BLOCKSCHED_GREEDY:
     case BLOCKSCHED_NAIV:
         return create_block_schedule_greedy(irg, execfreqs);
-#ifdef WITH_ILP
     case BLOCKSCHED_ILP:
         return create_block_schedule_ilp(irg, execfreqs);
-#endif /* WITH_ILP */
     }
     panic("unknown blocksched algo");
-    return NULL;
 }