* @brief Block-scheduling strategies.
* @author Matthias Braun, Christoph Mallon
* @date 27.09.2006
- * @version $Id$
*
* The greedy (and ILP) algorithm here works by assuming that
* we want to change as many jumps to fallthroughs as possible (executed jumps
#include "execfreq.h"
#include "irdump_t.h"
#include "irtools.h"
+#include "util.h"
#include "debug.h"
#include "beirgmod.h"
#include "bemodule.h"
#include "lc_opts.h"
#include "lc_opts_enum.h"
-#ifdef WITH_ILP
-#include <lpp/lpp.h>
-#include <lpp/lpp_net.h>
-#endif /* WITH_ILP */
+#include "lpp.h"
+#include "lpp_net.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-typedef enum _blocksched_algos_t {
+/** Selectable block-scheduling strategies (chosen via backend option). */
+typedef enum blocksched_algos_t {
BLOCKSCHED_NAIV, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP
} blocksched_algos_t;
static int algo = BLOCKSCHED_GREEDY;
/* Maps option strings to algorithm ids; the "ilp" entry is no longer
 * guarded by WITH_ILP, so it is always selectable. */
static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
- { "naiv", BLOCKSCHED_NAIV },
+ { "naiv", BLOCKSCHED_NAIV },
{ "greedy", BLOCKSCHED_GREEDY },
-#ifdef WITH_ILP
{ "ilp", BLOCKSCHED_ILP },
-#endif /* WITH_ILP */
{ NULL, 0 }
};
};
/* Command-line option table; the option was renamed from "algo" to the
 * more descriptive "blockscheduler". */
static const lc_opt_table_entry_t be_blocksched_options[] = {
-	LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
+	LC_OPT_ENT_ENUM_INT ("blockscheduler", "the block scheduling algorithm", &algo_var),
	LC_OPT_LAST
};
int blockcount;
};
+/** Retrieve the blocksched_entry_t previously stashed in a block's link
+ *  field (typed wrapper around get_irn_link()). */
+static blocksched_entry_t* get_blocksched_entry(const ir_node *block)
+{
+ return (blocksched_entry_t*)get_irn_link(block);
+}
+
/**
* Collect cfg frequencies of all edges between blocks.
* Also determines edge with highest frequency.
*/
static void collect_egde_frequency(ir_node *block, void *data)
{
- blocksched_env_t *env = data;
+ blocksched_env_t *env = (blocksched_env_t*)data;
int arity;
edge_t edge;
blocksched_entry_t *entry;
}
}
-static int cmp_edges(const void *d1, const void *d2)
+/** Deterministic tie-breaker for edge sorting: orders by the target
+ *  block's node number (higher numbers first), then by predecessor
+ *  position (higher positions first), so qsort results do not depend on
+ *  the initial array order. */
+static int cmp_edges_base(const edge_t *e1, const edge_t *e2)
{
- const edge_t *e1 = d1;
- const edge_t *e2 = d2;
+ long nr1 = get_irn_node_nr(e1->block);
+ long nr2 = get_irn_node_nr(e2->block);
+ if (nr1 < nr2) {
+ return 1;
+ } else if (nr1 > nr2) {
+ return -1;
+ } else {
+ if (e1->pos < e2->pos) {
+ return 1;
+ } else if (e1->pos > e2->pos) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+}
- return QSORT_CMP(e2->execfreq, e1->execfreq);
+/** qsort callback: sort edges by execution frequency, highest first;
+ *  equal frequencies fall back to cmp_edges_base() for a deterministic
+ *  total order. */
+static int cmp_edges(const void *d1, const void *d2)
+{
+ const edge_t *e1 = (const edge_t*)d1;
+ const edge_t *e2 = (const edge_t*)d2;
+ double freq1 = e1->execfreq;
+ double freq2 = e2->execfreq;
+ if (freq1 < freq2) {
+ return 1;
+ } else if (freq1 > freq2) {
+ return -1;
+ } else {
+ return cmp_edges_base(e1, e2);
+ }
}
/* qsort callback: ascending by outedge_penalty_freq — penalties are
 * negative, so the strongest (most negative) penalty sorts first; ties
 * are broken deterministically via cmp_edges_base(). */
static int cmp_edges_outedge_penalty(const void *d1, const void *d2)
{
- const edge_t *e1 = d1;
- const edge_t *e2 = d2;
- /* reverse sorting as penalties are negative */
- return QSORT_CMP(e1->outedge_penalty_freq, e2->outedge_penalty_freq);
+ const edge_t *e1 = (const edge_t*)d1;
+ const edge_t *e2 = (const edge_t*)d2;
+ double pen1 = e1->outedge_penalty_freq;
+ double pen2 = e2->outedge_penalty_freq;
+ if (pen1 > pen2) {
+ return 1;
+ } else if (pen1 < pen2) {
+ return -1;
+ } else {
+ return cmp_edges_base(e1, e2);
+ }
}
static void clear_loop_links(ir_loop *loop)
continue;
pred_block = get_Block_cfgpred_block(block, pos);
- entry = get_irn_link(block);
- pred_entry = get_irn_link(pred_block);
+ entry = get_blocksched_entry(block);
+ pred_entry = get_blocksched_entry(pred_block);
if (pred_entry->next != NULL || entry->prev != NULL)
continue;
continue;
pred_block = get_Block_cfgpred_block(block, pos);
- entry = get_irn_link(block);
- pred_entry = get_irn_link(pred_block);
+ entry = get_blocksched_entry(block);
+ pred_entry = get_blocksched_entry(pred_block);
if (pred_entry->next != NULL || entry->prev != NULL)
continue;
continue;
pred_block = get_Block_cfgpred_block(block, pos);
- entry = get_irn_link(block);
- pred_entry = get_irn_link(pred_block);
+ entry = get_blocksched_entry(block);
+ pred_entry = get_blocksched_entry(pred_block);
/* is 1 of the blocks already attached to another block? */
if (pred_entry->next != NULL || entry->prev != NULL)
ir_node *block = entry->block;
ir_node *succ = NULL;
blocksched_entry_t *succ_entry;
- const ir_edge_t *edge;
- double best_succ_execfreq;
+ double best_succ_execfreq;
if (irn_visited_else_mark(block))
return;
/* we only need to put the first of a series of already connected
* blocks into the worklist */
- succ_entry = get_irn_link(succ_block);
+ succ_entry = get_blocksched_entry(succ_block);
while (succ_entry->prev != NULL) {
/* break cycles... */
if (succ_entry->prev->block == succ_block) {
break;
}
succ_entry = succ_entry->prev;
- };
+ }
if (irn_visited(succ_entry->block))
continue;
if (irn_visited(succ_block))
continue;
- succ_entry = get_irn_link(succ_block);
+ succ_entry = get_blocksched_entry(succ_block);
if (succ_entry->prev != NULL)
continue;
DB((dbg, LEVEL_1, "worklist empty\n"));
return;
}
- succ = pdeq_getl(env->worklist);
+ succ = (ir_node*)pdeq_getl(env->worklist);
} while (irn_visited(succ));
}
- succ_entry = get_irn_link(succ);
+ succ_entry = get_blocksched_entry(succ);
entry->next = succ_entry;
succ_entry->prev = entry;
{
ir_graph *irg = env->irg;
ir_node *startblock = get_irg_start_block(irg);
- blocksched_entry_t *entry = get_irn_link(startblock);
+ blocksched_entry_t *entry = get_blocksched_entry(startblock);
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
env.worklist = NULL;
env.blockcount = 0;
- /* make sure loopinfo is up-to-date */
- if (! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
- construct_cf_backedges(irg);
- }
+ assure_loopinfo(irg);
// collect edge execution frequencies
irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
start_entry = finish_block_schedule(&env);
block_list = create_blocksched_array(&env, start_entry, env.blockcount,
- be_get_birg_obst(irg));
+ be_get_be_obst(irg));
DEL_ARR_F(env.edges);
obstack_free(&obst, NULL);
*
*/
-#ifdef WITH_ILP
-typedef struct _ilp_edge_t {
+/** One cfg edge as seen by the ILP formulation. */
+typedef struct ilp_edge_t {
ir_node *block; /**< source block */
int pos; /**< number of cfg predecessor (target) */
int ilpvar; /**< ILP variable for this edge — presumably 1 iff the edge becomes a fallthrough; TODO confirm */
} ilp_edge_t;
-typedef struct _blocksched_ilp_env_t {
+/** Environment for the ILP-based block scheduler. */
+typedef struct blocksched_ilp_env_t {
blocksched_env_t env;
ilp_edge_t *ilpedges;
lpp_t *lpp;
} blocksched_ilp_env_t;
-typedef struct _blocksched_ilp_entry_t {
+/** Per-block bookkeeping for the ILP scheduler.
+ *  NOTE(review): next/prev are typed struct blocksched_entry_t*, not the
+ *  ILP entry type — presumably so the leading fields mirror
+ *  blocksched_entry_t's layout; confirm against that struct. */
+typedef struct blocksched_ilp_entry_t {
ir_node *block;
- struct _blocksched_entry_t *next;
- struct _blocksched_entry_t *prev;
+ struct blocksched_entry_t *next;
+ struct blocksched_entry_t *prev;
int out_cst; /**< ILP constraint index for this block's outgoing edges */
} blocksched_ilp_entry_t;
static void collect_egde_frequency_ilp(ir_node *block, void *data)
{
- blocksched_ilp_env_t *env = data;
+ blocksched_ilp_env_t *env = (blocksched_ilp_env_t*)data;
ir_graph *irg = env->env.irg;
ir_node *startblock = get_irg_start_block(irg);
int arity;
- lpp_cst_t cst;
char name[64];
int out_count;
blocksched_ilp_entry_t *entry;
entry->block = block;
entry->next = NULL;
entry->prev = NULL;
- entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
+ entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, out_count - 1);
set_irn_link(block, entry);
if (block == startblock)
}
else {
int i;
+ int cst_idx;
snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
- cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
+ cst_idx = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1);
for (i = 0; i < arity; ++i) {
double execfreq;
execfreq = get_block_execfreq(env->env.execfreqs, pred_block);
edgenum = add_ilp_edge(block, i, execfreq, env);
edge = &env->ilpedges[edgenum];
- lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0);
+ lpp_set_factor_fast(env->lpp, cst_idx, edge->ilpvar, 1.0);
}
}
}
+/** Retrieve the blocksched_ilp_entry_t stashed in a block's link field
+ *  (typed wrapper around get_irn_link()). */
+static blocksched_ilp_entry_t *get_blocksched_ilp_entry(const ir_node *block)
+{
+ return (blocksched_ilp_entry_t*)get_irn_link(block);
+}
static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
{
- int i;
- int edge_count = ARR_LEN(env->ilpedges);
+ int edge_count = ARR_LEN(env->ilpedges);
+ int i;
/* complete out constraints */
for (i = 0; i < edge_count; ++i) {
continue;
pred = get_Block_cfgpred_block(block, edge->pos);
- entry = get_irn_link(pred);
+ entry = get_blocksched_ilp_entry(pred);
DB((dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n",
pred, block, edge->pos));
lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
}
-#if 0
- {
- FILE *f;
- char fname[256];
- lpp_dump(env->lpp, "lpp.out");
- snprintf(fname, sizeof(fname), "lpp_%s.plain", get_irg_dump_name(env->env.irg));
- f = fopen(fname, "w");
- lpp_dump_plain(env->lpp, f);
- fclose(f);
- }
-#endif
-
- //lpp_solve_net(env->lpp, main_env->options->ilp_server, main_env->options->ilp_solver);
- lpp_solve_net(env->lpp, "i44pc52", "cplex");
+ lpp_solve_net(env->lpp, be_options.ilp_server, be_options.ilp_solver);
assert(lpp_is_sol_valid(env->lpp));
/* Apply results to edges */
continue;
pred = get_Block_cfgpred_block(block, edge->pos);
- entry = get_irn_link(block);
- pred_entry = get_irn_link(pred);
+ entry = get_blocksched_entry(block);
+ pred_entry = get_blocksched_entry(pred);
assert(entry->prev == NULL && pred_entry->next == NULL);
entry->prev = pred_entry;
env.env.blockcount = 0;
env.ilpedges = NEW_ARR_F(ilp_edge_t, 0);
- env.lpp = new_lpp("blockschedule", lpp_minimize);
+ env.lpp = lpp_new("blockschedule", lpp_minimize);
lpp_set_time_limit(env.lpp, 20);
lpp_set_log(env.lpp, stdout);
start_entry = finish_block_schedule(&env.env);
block_list = create_blocksched_array(&env.env, start_entry,
env.env.blockcount,
- be_get_birg_obst(irg));
+ be_get_be_obst(irg));
DEL_ARR_F(env.ilpedges);
- free_lpp(env.lpp);
+ lpp_free(env.lpp);
obstack_free(&obst, NULL);
return block_list;
}
-#endif /* WITH_ILP */
/*
* __ __ _
* |_| |_|\__,_|_|_| |_|
*
*/
+/* Module constructor: registers the block-scheduling option and debug
+ * module. The registration macro now precedes the function (and carries
+ * no trailing semicolon); the option is registered directly in the "be"
+ * group instead of a "be.blocksched" subgroup. */
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched)
void be_init_blocksched(void)
{
	lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
-	lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
-	lc_opt_add_table(blocksched_grp, be_blocksched_options);
+	lc_opt_add_table(be_grp, be_blocksched_options);
	FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
-
-ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
+/** Compute a block schedule for @p irg using the algorithm selected by
+ *  the blockscheduler option. Execution frequencies are now obtained
+ *  from the irg itself instead of being passed by the caller. */
+ir_node **be_create_block_schedule(ir_graph *irg)
{
+ ir_exec_freq *execfreqs = be_get_irg_exec_freq(irg);
+
	switch (algo) {
	case BLOCKSCHED_GREEDY:
	case BLOCKSCHED_NAIV:
		return create_block_schedule_greedy(irg, execfreqs);
-#ifdef WITH_ILP
	case BLOCKSCHED_ILP:
		return create_block_schedule_ilp(irg, execfreqs);
-#endif /* WITH_ILP */
	}
	panic("unknown blocksched algo");
+	/* the trailing return was dropped — presumably panic() is noreturn; confirm */
- return NULL;
}