* @brief Block-scheduling strategies.
* @author Matthias Braun, Christoph Mallon
* @date 27.09.2006
- * @version $Id$
*
 * The greedy (and ILP) algorithm here works by assuming that
* we want to change as many jumps to fallthroughs as possible (executed jumps
#include "execfreq.h"
#include "irdump_t.h"
#include "irtools.h"
+#include "util.h"
#include "debug.h"
#include "beirgmod.h"
#include "bemodule.h"
#include "lc_opts.h"
#include "lc_opts_enum.h"
-#ifdef WITH_ILP
-#include <lpp/lpp.h>
-#include <lpp/lpp_net.h>
-#endif /* WITH_ILP */
+#include "lpp.h"
+#include "lpp_net.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
{ "naiv", BLOCKSCHED_NAIV },
{ "greedy", BLOCKSCHED_GREEDY },
-#ifdef WITH_ILP
{ "ilp", BLOCKSCHED_ILP },
-#endif /* WITH_ILP */
{ NULL, 0 }
};
};
/** Command-line option table: selects which block scheduler to run. */
static const lc_opt_table_entry_t be_blocksched_options[] = {
	LC_OPT_ENT_ENUM_INT("blockscheduler", "the block scheduling algorithm",
	                    &algo_var),
	LC_OPT_LAST
};
}
}
+static int cmp_edges_base(const edge_t *e1, const edge_t *e2)
+{
+ long nr1 = get_irn_node_nr(e1->block);
+ long nr2 = get_irn_node_nr(e2->block);
+ if (nr1 < nr2) {
+ return 1;
+ } else if (nr1 > nr2) {
+ return -1;
+ } else {
+ if (e1->pos < e2->pos) {
+ return 1;
+ } else if (e1->pos > e2->pos) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+}
+
static int cmp_edges(const void *d1, const void *d2)
{
const edge_t *e1 = (const edge_t*)d1;
const edge_t *e2 = (const edge_t*)d2;
-
- return QSORT_CMP(e2->execfreq, e1->execfreq);
+ double freq1 = e1->execfreq;
+ double freq2 = e2->execfreq;
+ if (freq1 < freq2) {
+ return 1;
+ } else if (freq1 > freq2) {
+ return -1;
+ } else {
+ return cmp_edges_base(e1, e2);
+ }
}
static int cmp_edges_outedge_penalty(const void *d1, const void *d2)
{
- const edge_t *e1 = (const edge_t*)d1;
- const edge_t *e2 = (const edge_t*)d2;
- /* reverse sorting as penalties are negative */
- return QSORT_CMP(e1->outedge_penalty_freq, e2->outedge_penalty_freq);
+ const edge_t *e1 = (const edge_t*)d1;
+ const edge_t *e2 = (const edge_t*)d2;
+ double pen1 = e1->outedge_penalty_freq;
+ double pen2 = e2->outedge_penalty_freq;
+ if (pen1 > pen2) {
+ return 1;
+ } else if (pen1 < pen2) {
+ return -1;
+ } else {
+ return cmp_edges_base(e1, e2);
+ }
}
static void clear_loop_links(ir_loop *loop)
break;
}
succ_entry = succ_entry->prev;
- };
+ }
if (irn_visited(succ_entry->block))
continue;
env.worklist = NULL;
env.blockcount = 0;
- /* make sure loopinfo is up-to-date */
- if (! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
- construct_cf_backedges(irg);
- }
+ assure_loopinfo(irg);
// collect edge execution frequencies
irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
*
*/
-#ifdef WITH_ILP
typedef struct ilp_edge_t {
ir_node *block; /**< source block */
int pos; /**< number of cfg predecessor (target) */
entry->block = block;
entry->next = NULL;
entry->prev = NULL;
- entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
+ entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, out_count - 1);
set_irn_link(block, entry);
if (block == startblock)
int i;
snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
- cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
+ cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1);
for (i = 0; i < arity; ++i) {
double execfreq;
static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
{
int edge_count = ARR_LEN(env->ilpedges);
- be_options_t *options = be_get_irg_options(env->env.irg);
int i;
/* complete out constraints */
lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
}
- lpp_solve_net(env->lpp, options->ilp_server, options->ilp_solver);
+ lpp_solve_net(env->lpp, be_options.ilp_server, be_options.ilp_solver);
assert(lpp_is_sol_valid(env->lpp));
/* Apply results to edges */
env.env.blockcount = 0;
env.ilpedges = NEW_ARR_F(ilp_edge_t, 0);
- env.lpp = new_lpp("blockschedule", lpp_minimize);
+ env.lpp = lpp_new("blockschedule", lpp_minimize);
lpp_set_time_limit(env.lpp, 20);
lpp_set_log(env.lpp, stdout);
be_get_be_obst(irg));
DEL_ARR_F(env.ilpedges);
- free_lpp(env.lpp);
+ lpp_free(env.lpp);
obstack_free(&obst, NULL);
return block_list;
}
-#endif /* WITH_ILP */
/*
* __ __ _
* |_| |_|\__,_|_|_| |_|
*
*/
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched)
/**
 * Module constructor: registers the "blockscheduler" option in the backend
 * option group and sets up the debug module for block scheduling.
 */
void be_init_blocksched(void)
{
	lc_opt_entry_t *const be_group = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_add_table(be_group, be_blocksched_options);

	FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
}
case BLOCKSCHED_GREEDY:
case BLOCKSCHED_NAIV:
return create_block_schedule_greedy(irg, execfreqs);
-#ifdef WITH_ILP
case BLOCKSCHED_ILP:
return create_block_schedule_ilp(irg, execfreqs);
-#endif /* WITH_ILP */
}
panic("unknown blocksched algo");