* @brief Block-scheduling strategies.
* @author Matthias Braun, Christoph Mallon
* @date 27.09.2006
- * @version $Id$
*
* The greedy (and ILP) algorithm here works by assuming that
* we want to change as many jumps to fallthroughs as possible (executed jumps
#include "execfreq.h"
#include "irdump_t.h"
#include "irtools.h"
+#include "util.h"
#include "debug.h"
#include "beirgmod.h"
#include "bemodule.h"
#include "lc_opts.h"
#include "lc_opts_enum.h"
-#ifdef WITH_ILP
-#include <lpp/lpp.h>
-#include <lpp/lpp_net.h>
-#endif /* WITH_ILP */
+#include "lpp.h"
+#include "lpp_net.h"
/** Debug module handle, registered as "firm.be.blocksched". */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/**
 * Selectable block scheduling algorithms for the command line option.
 * The ILP variant is always listed now that lpp is linked unconditionally
 * (the old WITH_ILP conditional compilation was removed).
 */
static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
	{ "naiv",   BLOCKSCHED_NAIV   },
	{ "greedy", BLOCKSCHED_GREEDY },
	{ "ilp",    BLOCKSCHED_ILP    },
	{ NULL,     0 }
};
};
/**
 * Command line option table: exposes the algorithm choice as
 * "blockscheduler" in the backend option group.
 */
static const lc_opt_table_entry_t be_blocksched_options[] = {
	LC_OPT_ENT_ENUM_INT ("blockscheduler", "the block scheduling algorithm", &algo_var),
	LC_OPT_LAST
};
*/
static void collect_egde_frequency(ir_node *block, void *data)
{
- blocksched_env_t *env = data;
+ blocksched_env_t *env = (blocksched_env_t*)data;
int arity;
edge_t edge;
blocksched_entry_t *entry;
static int cmp_edges(const void *d1, const void *d2)
{
- const edge_t *e1 = d1;
- const edge_t *e2 = d2;
+ const edge_t *e1 = (const edge_t*)d1;
+ const edge_t *e2 = (const edge_t*)d2;
return QSORT_CMP(e2->execfreq, e1->execfreq);
}
static int cmp_edges_outedge_penalty(const void *d1, const void *d2)
{
- const edge_t *e1 = d1;
- const edge_t *e2 = d2;
+ const edge_t *e1 = (const edge_t*)d1;
+ const edge_t *e2 = (const edge_t*)d2;
/* reverse sorting as penalties are negative */
return QSORT_CMP(e1->outedge_penalty_freq, e2->outedge_penalty_freq);
}
continue;
pred_block = get_Block_cfgpred_block(block, pos);
- entry = get_irn_link(block);
- pred_entry = get_irn_link(pred_block);
+ entry = (blocksched_entry_t*)get_irn_link(block);
+ pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
if (pred_entry->next != NULL || entry->prev != NULL)
continue;
continue;
pred_block = get_Block_cfgpred_block(block, pos);
- entry = get_irn_link(block);
- pred_entry = get_irn_link(pred_block);
+ entry = (blocksched_entry_t*)get_irn_link(block);
+ pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
if (pred_entry->next != NULL || entry->prev != NULL)
continue;
continue;
pred_block = get_Block_cfgpred_block(block, pos);
- entry = get_irn_link(block);
- pred_entry = get_irn_link(pred_block);
+ entry = (blocksched_entry_t*)get_irn_link(block);
+ pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
/* is 1 of the blocks already attached to another block? */
if (pred_entry->next != NULL || entry->prev != NULL)
/* we only need to put the first of a series of already connected
* blocks into the worklist */
- succ_entry = get_irn_link(succ_block);
+ succ_entry = (blocksched_entry_t*)get_irn_link(succ_block);
while (succ_entry->prev != NULL) {
/* break cycles... */
if (succ_entry->prev->block == succ_block) {
break;
}
succ_entry = succ_entry->prev;
- };
+ }
if (irn_visited(succ_entry->block))
continue;
if (irn_visited(succ_block))
continue;
- succ_entry = get_irn_link(succ_block);
+ succ_entry = (blocksched_entry_t*)get_irn_link(succ_block);
if (succ_entry->prev != NULL)
continue;
DB((dbg, LEVEL_1, "worklist empty\n"));
return;
}
- succ = pdeq_getl(env->worklist);
+ succ = (ir_node*)pdeq_getl(env->worklist);
} while (irn_visited(succ));
}
- succ_entry = get_irn_link(succ);
+ succ_entry = (blocksched_entry_t*)get_irn_link(succ);
entry->next = succ_entry;
succ_entry->prev = entry;
{
ir_graph *irg = env->irg;
ir_node *startblock = get_irg_start_block(irg);
- blocksched_entry_t *entry = get_irn_link(startblock);
+ blocksched_entry_t *entry = (blocksched_entry_t*)get_irn_link(startblock);
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
env.worklist = NULL;
env.blockcount = 0;
- /* make sure loopinfo is up-to-date */
- if (! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
- construct_cf_backedges(irg);
- }
+ assure_loopinfo(irg);
// collect edge execution frequencies
irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
*
*/
-#ifdef WITH_ILP
typedef struct ilp_edge_t {
ir_node *block; /**< source block */
int pos; /**< number of cfg predecessor (target) */
entry->block = block;
entry->next = NULL;
entry->prev = NULL;
- entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
+ entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, out_count - 1);
set_irn_link(block, entry);
if (block == startblock)
int i;
snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
- cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
+ cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1);
for (i = 0; i < arity; ++i) {
double execfreq;
env.env.blockcount = 0;
env.ilpedges = NEW_ARR_F(ilp_edge_t, 0);
- env.lpp = new_lpp("blockschedule", lpp_minimize);
+ env.lpp = lpp_new("blockschedule", lpp_minimize);
lpp_set_time_limit(env.lpp, 20);
lpp_set_log(env.lpp, stdout);
be_get_be_obst(irg));
DEL_ARR_F(env.ilpedges);
- free_lpp(env.lpp);
+ lpp_free(env.lpp);
obstack_free(&obst, NULL);
return block_list;
}
-#endif /* WITH_ILP */
/*
* __ __ _
* |_| |_|\__,_|_|_| |_|
*
*/
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched)
void be_init_blocksched(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
- lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
- lc_opt_add_table(blocksched_grp, be_blocksched_options);
+ lc_opt_add_table(be_grp, be_blocksched_options);
FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
}
case BLOCKSCHED_GREEDY:
case BLOCKSCHED_NAIV:
return create_block_schedule_greedy(irg, execfreqs);
-#ifdef WITH_ILP
case BLOCKSCHED_ILP:
return create_block_schedule_ilp(irg, execfreqs);
-#endif /* WITH_ILP */
}
panic("unknown blocksched algo");