/* set-compare callback for be_abi_call_arg_t entries: returns 0 (equal) iff
 * both entries match on is_res and pos; the hunk adds (void) n to silence
 * the unused-parameter warning for the required size argument. */
static int cmp_call_arg(const void *a, const void *b, size_t n)
{
const be_abi_call_arg_t *p = a, *q = b;
+ (void) n;
return !(p->is_res == q->is_res && p->pos == q->pos);
}
/* Option table for the block scheduler; the -/+ lines swap the bare { NULL }
 * terminator for the LC_OPT_ENT_NULL sentinel macro. */
static const lc_opt_table_entry_t be_blocksched_options[] = {
LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
- { NULL }
+ LC_OPT_ENT_NULL
};
/*
int i = 0;
ir_node **block_list;
blocksched_entry_t *entry;
+ (void) env;
block_list = NEW_ARR_D(ir_node *, obst, count);
DBG((dbg, LEVEL_1, "Blockschedule:\n"));
}
/* Default no-op destructor for plotter_t; (void) self suppresses the
 * unused-parameter warning. */
static void plotter_default_free(plotter_t *self) {
+ (void) self;
}
typedef struct {
{
int phi_arg = 0;
const ir_edge_t *edge;
+ (void) env;
+ (void) rel_bl;
foreach_out_edge(irn, edge)
phi_arg |= is_Phi(edge->src);
BE_CH_DUMP_NONE,
BE_CH_LOWER_PERM_SWAP,
BE_CH_VRFY_WARN,
+ "",
+ ""
};
typedef struct _post_spill_env_t {
LC_OPT_ENT_ENUM_PTR ("perm", "perm lowering options", &lower_perm_var),
LC_OPT_ENT_ENUM_MASK("dump", "select dump phases", &dump_var),
LC_OPT_ENT_ENUM_PTR ("vrfy", "verify options", &be_ch_vrfy_var),
- { NULL }
+ LC_OPT_ENT_NULL
};
static void dump(unsigned mask, ir_graph *irg,
LC_OPT_ENT_INT ("iter", "iterations for subtree nodes", &subtree_iter),
LC_OPT_ENT_DBL ("cf", "factor of constraint importance (between 0.0 and 1.0)", &constr_factor),
LC_OPT_ENT_INT ("max", "maximum recursion depth", &max_depth),
- { NULL }
+ LC_OPT_ENT_NULL
};
void be_init_copyheur2(void)
/* Option table for the Java coalescer; terminator replaced by the
 * LC_OPT_ENT_NULL sentinel macro in this hunk. */
static const lc_opt_table_entry_t options[] = {
LC_OPT_ENT_ENUM_MASK("dump", "dump ifg cloud", &dump_var),
LC_OPT_ENT_INT ("dbg", "debug level for the Java coalescer", &dbg_level),
- { NULL }
+ LC_OPT_ENT_NULL
};
void be_init_copyheur3(void)
* Write a chunk to stderr for debugging.
*/
static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c) {
- int idx;
+ bitset_pos_t idx;
if (c->weight_consistent)
ir_fprintf(stderr, " $%d ", c->weight);
ir_fprintf(stderr, "{");
* Dump all admissible colors to stderr.
*/
static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node) {
- int idx;
+ bitset_pos_t idx;
+ (void) env;
+
if (bitset_popcnt(node->adm_colors) < 1)
fprintf(stderr, "no admissible colors?!?");
else {
* Always returns true.
*/
/* Trivial color decider: accepts every (node, col) pair unconditionally;
 * both parameters are intentionally unused. */
static int decider_always_yes(const co_mst_irn_t *node, int col) {
+ (void) node;
+ (void) col;
return 1;
}
* Check if affinity chunk @p chunk interferes with node @p irn.
*/
/* Returns non-zero iff irn's node index is set in the chunk's interference
 * bitset; env is unused here (kept for the common callback signature). */
static INLINE int aff_chunk_interferes(co_mst_env_t *env, const aff_chunk_t *chunk, ir_node *irn) {
+ (void) env;
return bitset_is_set(chunk->interfere, get_irn_idx(irn));
}
be_ifg_foreach_node(ifg, iter, irn)
if (!sr_is_removed(ienv->sr, irn)) {
- int col, cst_idx;
+ bitset_pos_t col;
+ int cst_idx;
const arch_register_req_t *req;
int curr_node_color = get_irn_col(ienv->co, irn);
int node_nr = (int)get_irn_node_nr(irn);
bitset_foreach(colors, col) {
int var_idx = lpp_add_var(ienv->lp, name_cdd(buf, 'x', node_nr, col), lpp_binary, 0.0);
- lpp_set_start_value(ienv->lp, var_idx, (col == curr_node_color) ? 1.0 : 0.0);
+ lpp_set_start_value(ienv->lp, var_idx, (col == (unsigned) curr_node_color) ? 1.0 : 0.0);
lpp_set_factor_fast(ienv->lp, cst_idx, var_idx, 1);
lenv->last_x_var = var_idx;
/* set-compare callback for edge_t: equal (0) iff both endpoints n1 and n2
 * match; size is unused. */
static int compare_edge_t(const void *k1, const void *k2, size_t size) {
const edge_t *e1 = k1;
const edge_t *e2 = k2;
+ (void) size;
return ! (e1->n1 == e2->n1 && e1->n2 == e2->n2);
}
LC_OPT_ENT_ENUM_MASK ("style", "dump style for ifg dumping", &style_var),
LC_OPT_ENT_BOOL ("stats", "dump statistics after each optimization", &do_stats),
LC_OPT_ENT_BOOL ("improve", "run heur3 before if algo can exploit start solutions", &improve),
- { NULL }
+ LC_OPT_ENT_NULL
};
/* Insert additional options registration functions here. */
int cost = 0;
ir_loop *loop;
ir_node *root_block = get_nodes_block(root);
+ (void) co;
+ (void) arg;
if (is_Phi(root)) {
/* for phis the copies are placed in the corresponding pred-block */
int res;
ir_node *root_bl = get_nodes_block(root);
ir_node *copy_bl = is_Phi(root) ? get_Block_cfgpred_block(root_bl, pos) : root_bl;
+ (void) arg;
res = get_block_execfreq_ulong(co->cenv->birg->exec_freq, copy_bl);
/* don't allow values smaller than one. */
/* Copy-cost function that weighs every copy uniformly as 1; all parameters
 * are unused and voided to keep the common cost-callback signature. */
int co_get_costs_all_one(const copy_opt_t *co, ir_node *root, ir_node *arg, int pos) {
+ (void) co;
+ (void) root;
+ (void) arg;
+ (void) pos;
return 1;
}
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
bitset_t *curr;
- int max, pos, curr_weight, best_weight = 0;
+ bitset_pos_t pos;
+ int max, curr_weight, best_weight = 0;
/* assign the nodes into two groups.
* safe: node has no interference, hence it is in every max stable set.
/* set-compare callback for affinity_node_t: equal (0) iff both entries
 * refer to the same ir node; size is unused. */
static int compare_affinity_node_t(const void *k1, const void *k2, size_t size) {
const affinity_node_t *n1 = k1;
const affinity_node_t *n2 = k2;
+ (void) size;
return (n1->irn != n2->irn);
}
return res == 0 ? 1 : res;
#else
ir_loop *loop = get_irn_loop(bl);
+ (void) env;
if(loop) {
int d = get_loop_depth(loop);
return 1 + d * d;
static void *appel_clique_walker_irn_init(ir_phase *phase, ir_node *irn, void *old)
{
appel_block_info_t *res = NULL;
+ (void) old;
if(is_Block(irn)) {
appel_clique_walker_t *d = (void *) phase;
|_| |___/
*/
-static const char *get_dot_color_name(int col)
+static const char *get_dot_color_name(size_t col)
{
static const char *names[] = {
"blue",
/* Emits the graphviz graph attribute for IFG dumps; self is unused. */
static void ifg_dump_graph_attr(FILE *f, void *self)
{
+ (void) self;
fprintf(f, "overlap=scale");
}
/* Unimplemented Park/Moon copy-optimization entry point — intentionally a
 * no-op; the hunk adds (void) opt for the unused parameter. */
void co_solve_park_moon(copy_opt_t *opt)
{
-
+ (void) opt;
}
/* "Do nothing" coalescing algorithm: performs no optimization, reports 0. */
static int void_algo(copy_opt_t *co)
{
+ (void) co;
return 0;
}
co_algo_t *algo_func;
int was_optimal = 0;
- if (algo < 0 || algo >= CO_ALGO_LAST)
+ if (algo >= CO_ALGO_LAST)
return;
be_liveness_assure_chk(be_get_birg_liveness(cenv->birg));
/* Phase per-node data init: records the register currently assigned to irn
 * (the phase itself doubles as the coloring_t); data is unused. */
static void *regs_irn_data_init(ir_phase *ph, ir_node *irn, void *data)
{
coloring_t *coloring = (coloring_t *) ph;
+ (void) data;
+
return (void *) arch_get_irn_register(coloring->arch_env, irn);
}
/* Option table selecting the interference-graph implementation; terminator
 * replaced by the LC_OPT_ENT_NULL sentinel macro in this hunk. */
static const lc_opt_table_entry_t be_ifg_options[] = {
LC_OPT_ENT_ENUM_PTR ("ifg", "interference graph flavour", &ifg_flavor_var),
- { NULL }
+ LC_OPT_ENT_NULL
};
void be_init_ifg(void)
/* Iterator step: yields the next neighbour in the clique-based IFG;
 * self is unused (callback-signature requirement). */
static ir_node *ifg_clique_neighbours_next(const void *self, void *iter)
{
+ (void) self;
return get_next_neighbour(iter);
}
static void ifg_clique_neighbours_break(const void *self, void *iter)
{
cli_iter_t *it = iter;
+ (void) self;
bitset_free(it->visited_neighbours);
/* Iterator step: yields the next node of the clique-based IFG; self unused. */
static ir_node *ifg_clique_nodes_next(const void *self, void *iter)
{
+ (void) self;
return get_next_node(iter);
}
static void ifg_clique_nodes_break(const void *self, void *iter)
{
cli_iter_t *it = iter;
+ (void) self;
bitset_free(it->visited_nodes);
/* Iterator step: yields the next node of the list-based IFG; self unused. */
static ir_node *ifg_list_nodes_next(const void *self, void *iter)
{
+ (void) self;
return get_next_node(iter);
}
/* Aborts a list-IFG node iteration by resetting the iterator's state;
 * self is unused. */
static void ifg_list_nodes_break(const void *self, void *iter)
{
nodes_iter_t *it = iter;
+ (void) self;
it->curr_node_idx = 0;
it->ifg = NULL;
}
/* Iterator step: yields the next neighbour in the list-based IFG; self unused. */
static ir_node *ifg_list_neighbours_next(const void *self, void *iter)
{
+ (void) self;
return get_next_neighbour(iter);
}
/* Aborts a list-IFG neighbour iteration by clearing the iterator's state;
 * self is unused. */
static void ifg_list_neighbours_break(const void *self, void *iter)
{
adj_iter_t *it= iter;
+ (void) self;
it->curr_adj_element = NULL;
it->ifg = NULL;
}
/* Phase per-node data init: allocates a fresh ptr_head_t on the phase
 * obstack and initializes its list head; irn and data are unused. */
static void *ptr_irn_data_init(ir_phase *ph, ir_node *irn, void *data)
{
ptr_head_t *head = phase_alloc(ph, sizeof(*head));
+ (void) irn;
+ (void) data;
INIT_LIST_HEAD(&head->list);
return head;
}
static void write_pointers(bitset_t *live, ifg_pointer_t *ifg)
{
ir_node *live_irn;
- bitset_pos_t elm;
+ bitset_pos_t elm;
bitset_foreach_irn(ifg->env->irg, live, elm, live_irn)
{
/* Iterator step: yields the next node of the standard IFG; self unused. */
static ir_node *ifg_std_nodes_next(const void *self, void *iter)
{
+ (void) self;
return get_next_node(iter);
}
/* Aborts a standard-IFG node iteration (force flag set); self unused. */
static void ifg_std_nodes_break(const void *self, void *iter)
{
+ (void) self;
node_break(iter, 1);
}
static INLINE void neighbours_break(adj_iter_t *it, int force)
{
+ (void) force;
assert(it->valid == 1);
ir_nodeset_destroy(&it->neighbours);
it->valid = 0;
/* Iterator step: yields the next neighbour in the standard IFG; self unused. */
static ir_node *ifg_std_neighbours_next(const void *self, void *iter)
{
+ (void) self;
return get_next_neighbour(iter);
}
/* Aborts a standard-IFG neighbour iteration (force flag set); self unused. */
static void ifg_std_neighbours_break(const void *self, void *iter)
{
+ (void) self;
neighbours_break(iter, 1);
}
/* Iterator step: advances to the next clique of the standard IFG; self unused. */
static int ifg_std_cliques_next(const void *self, void *iter)
{
+ (void) self;
return get_next_clique(iter);
}
/* Aborts a standard-IFG clique iteration, freeing the iterator; self unused. */
static void ifg_std_cliques_break(const void *self, void *iter)
{
+ (void) self;
free_clique_iter(iter);
}
LC_OPT_ENT_BOOL("regpress", "Use register pressure constraints", &ilp_opts.regpress),
LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
LC_OPT_ENT_STR("lpp_log", "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
- { NULL }
+ LC_OPT_ENT_NULL
};
/*
unsigned n_consumer = 0;
ir_edge_kind_t ekind[2] = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };
ir_node **consumer;
- int idx;
+ unsigned idx;
if (! consider_for_sched(env->arch_env->isa, irn))
return;
static void *lv_phase_data_init(ir_phase *phase, ir_node *irn, void *old)
{
struct _be_lv_info_t *info = phase_alloc(phase, LV_STD_SIZE * sizeof(info[0]));
+ (void) irn;
+
memset(info, 0, LV_STD_SIZE * sizeof(info[0]));
info[0].u.head.n_size = LV_STD_SIZE - 1;
return info;
LC_OPT_ENT_STR ("ilp.server", "the ilp server name", be_options.ilp_server, sizeof(be_options.ilp_server)),
LC_OPT_ENT_STR ("ilp.solver", "the ilp solver name", be_options.ilp_solver, sizeof(be_options.ilp_solver)),
#endif /* WITH_ILP */
- { NULL }
+ LC_OPT_ENT_NULL
};
static be_module_list_entry_t *isa_ifs = NULL;
}
/** The be parameters returned by default, all off. */
-const static backend_params be_params = {
+static const backend_params be_params = {
0, /* need dword lowering */
0, /* don't support inlien assembler yet */
0, /* no different calling conventions */
/* Returns the backend opcode of irn (opcode rebased by beo_base), or
 * beo_NoBeOp for non-backend nodes; the hunk adds an explicit cast to
 * be_opcode_t to silence the implicit signed/unsigned conversion warning. */
be_opcode_t be_get_irn_opcode(const ir_node *irn)
{
- return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
+ return is_be_node(irn) ? (be_opcode_t) get_irn_opcode(irn) - beo_base : beo_NoBeOp;
}
/**
}
/* arch-irn-ops callback: stores reg into irn's register data; the hunk
 * renames the reserved-style parameter _self to self and voids it. */
static void
-be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
+be_node_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
{
be_reg_data_t *r = retrieve_reg_data(irn);
+ (void) self;
r->reg = reg;
}
{
int out_pos = pos;
+ (void) self;
if (pos < 0) {
if (get_irn_mode(irn) == mode_T)
return arch_no_register_req;
}
/* arch-irn-ops callback: returns the register assigned to irn, or NULL for
 * mode_T tuple nodes which carry no register themselves; the hunk renames
 * _self to self and voids it. */
const arch_register_t *
-be_node_get_irn_reg(const void *_self, const ir_node *irn)
+be_node_get_irn_reg(const void *self, const ir_node *irn)
{
be_reg_data_t *r;
+ (void) self;
if (get_irn_mode(irn) == mode_T)
return NULL;
r = retrieve_reg_data(irn);
return r->reg;
}
-static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
+static arch_irn_class_t be_node_classify(const void *self, const ir_node *irn)
{
redir_proj((const ir_node **) &irn);
+ (void) self;
switch(be_get_irn_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
XXX(Spill, spill);
return 0;
}
-static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *node)
+static arch_irn_flags_t be_node_get_flags(const void *self, const ir_node *node)
{
be_req_t *bereq;
int pos = -1;
+ (void) self;
if(is_Proj(node)) {
pos = OUT_POS(get_Proj_proj(node));
/* arch-irn-ops callback: thin wrapper around be_get_frame_entity; self unused. */
static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
{
+ (void) self;
return be_get_frame_entity(irn);
}
static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
{
be_frame_attr_t *a;
+ (void) self;
assert(be_has_frame_entity(irn));
static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
{
+ (void) self;
if(be_has_frame_entity(irn)) {
be_frame_attr_t *a = get_irn_attr(irn);
a->offset = offset;
/* arch-irn-ops callback: stack-pointer bias of irn — the IncSP offset for
 * IncSP nodes, 0 for everything else; self unused. */
static int be_node_get_sp_bias(const void *self, const ir_node *irn)
{
+ (void) self;
return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
}
/* irn-handler callback: follows Projs to the producing node and returns the
 * backend irn-ops for backend nodes, NULL otherwise; self unused. */
const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
{
redir_proj((const ir_node **) &irn);
+ (void) self;
return is_be_node(irn) ? &be_node_irn_ops : NULL;
}
{
phi_handler_t *phi_handler = get_phi_handler_from_ops(self);
phi_attr_t *attr;
+ (void) self;
+ (void) pos;
if(!mode_is_datab(get_irn_mode(irn)))
return arch_no_register_req;
/* Phi handler: every Phi is classified as a normal node; both parameters
 * are unused and voided. */
static
arch_irn_class_t phi_classify(const void *self, const ir_node *irn)
{
+ (void) self;
+ (void) irn;
return arch_irn_class_normal;
}
{
phi_handler_t *h = get_phi_handler_from_ops(self);
phi_attr_t *attr = get_Phi_attr(h, irn);
+ (void) self;
return attr->flags;
}
/* Phi handler: Phis never have a frame entity, so always NULL; the hunk
 * renames _self to self and voids the unused parameters. */
static
-ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn)
+ir_entity *phi_get_frame_entity(const void *self, const ir_node *irn)
{
+ (void) self;
+ (void) irn;
return NULL;
}
/* Phi handler: setting a frame entity on a Phi is invalid — calling this is
 * a programming error, hence the unconditional assert(0). */
static
-void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
+void phi_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
{
+ (void) self;
+ (void) irn;
+ (void) ent;
assert(0);
}
/* Phi handler: setting a frame offset on a Phi is invalid — calling this is
 * a programming error, hence the unconditional assert(0). */
static
-void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
+void phi_set_frame_offset(const void *self, ir_node *irn, int bias)
{
+ (void) self;
+ (void) irn;
+ (void) bias;
assert(0);
}
/* Phi handler: Phis never modify the stack pointer, so the bias is 0. */
static
int phi_get_sp_bias(const void* self, const ir_node *irn)
{
+ (void) self;
+ (void) irn;
return 0;
}
/* set-compare callback for execcount_t: equal (0) iff both entries refer to
 * the same block; size is unused. */
static int
cmp_execcount(const void * a, const void * b, size_t size)
{
+ (void) size;
return ((execcount_t*)a)->block != ((execcount_t*)b)->block;
}
block_counter(ir_node * bb, void * data)
{
unsigned int *count = data;
+ (void) bb;
*count = *count + 1;
}
static void
profile_node_info(void *ctx, FILE *f, const ir_node *irn)
{
+ (void) ctx;
if(is_Block(irn)) {
fprintf(f, "profiled execution count: %u\n", be_profile_get_block_execcount(irn));
}
static void *mris_irn_data_init(ir_phase *ph, ir_node *irn, void *data)
{
mris_irn_t *mi = data ? data : phase_alloc(ph, sizeof(mi[0]));
+ (void) irn;
memset(mi, 0, sizeof(mi[0]));
INIT_LIST_HEAD(&mi->lineage_list);
return mi;
const arch_env_t *arch_env = block_env;
ir_node *irn = NULL;
int only_branches_left = 1;
+ (void) live_set;
/* assure that branches and constants are executed last */
ir_nodeset_iterator_init(&iter, ready_set);
reg_pressure_selector_env_t *env = block_env;
ir_node *irn, *res = NULL;
int curr_cost = INT_MAX;
+ (void) live_set;
assert(ir_nodeset_size(ready_set) > 0);
/* Option table for the RSS scheduler; terminator replaced by the
 * LC_OPT_ENT_NULL sentinel macro in this hunk. */
static const lc_opt_table_entry_t rss_option_table[] = {
LC_OPT_ENT_ENUM_MASK("dump", "dump phases", &dump_var),
- { NULL }
+ LC_OPT_ENT_NULL
};
/******************************************************************************
plist_t *list;
ir_node **arr;
plist_element_t *el;
+ (void) rss;
assert(is_Sink(v->irn) || ((plist_count(v->descendant_list) > 0 && v->descendants) || 1));
assert(is_Sink(u->irn) || ((plist_count(u->consumer_list) > 0 && u->consumer) || 1));
ir_nodeset_iterator_t iter;
sched_timestep_t max_delay = 0;
ir_node *irn;
+ (void) live_set;
/* calculate the max delay of all candidates */
foreach_ir_nodeset(ready_set, irn, iter) {
{
const spill_info_t *xx = x;
const spill_info_t *yy = y;
+ (void) size;
+
return xx->to_spill != yy->to_spill;
}
{
ir_node *block = get_nodes_block(after);
double freq = get_block_execfreq(env->exec_freq, block);
+ (void) to_spill;
return env->spill_cost * freq;
}
loc_t loc;
loc.time = USES_INFINITY;
loc.irn = node;
+ (void) block;
if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, node)) {
loc.time = USES_INFINITY;
//---------------------------------------------------------------------------
/* set-compare callback for loop_edge_t: equal (0) iff block and pos match;
 * the hunk renames the size parameter and voids it. */
-static int loop_edge_cmp(const void* p1, const void* p2, size_t s) {
+static int loop_edge_cmp(const void* p1, const void* p2, size_t size) {
loop_edge_t *e1 = (loop_edge_t*) p1;
loop_edge_t *e2 = (loop_edge_t*) p2;
+ (void) size;
return e1->block != e2->block || e1->pos != e2->pos;
}
/* set-compare callback for loop_attr_t: equal (0) iff both entries refer to
 * the same loop; the hunk renames the size parameter and voids it. */
-static int loop_attr_cmp(const void *e1, const void *e2, size_t s) {
+static int loop_attr_cmp(const void *e1, const void *e2, size_t size) {
loop_attr_t *la1 = (loop_attr_t*) e1;
loop_attr_t *la2 = (loop_attr_t*) e2;
+ (void) size;
return la1->loop != la2->loop;
}
/* set-compare callback for block_attr_t: equal (0) iff both entries refer
 * to the same block; the hunk renames the size parameter and voids it. */
-static int block_attr_cmp(const void *e1, const void *e2, size_t s) {
+static int block_attr_cmp(const void *e1, const void *e2, size_t size) {
block_attr_t *b1 = (block_attr_t*) e1;
block_attr_t *b2 = (block_attr_t*) e2;
+ (void) size;
return b1->block != b2->block;
}
const bitset_t *cand_bitset = loop_attr->livethrough_unused;
int candidatecount = bitset_popcnt(cand_bitset);
spillcandidate_t *candidates;
+ bitset_pos_t idx;
int i, c;
loop_edge_t *edge;
DBG((dbg, DBG_CHOOSE, "Candidates for loop %d\n", get_loop_loop_nr(loop_attr->loop)));
// build candidiatelist
c = 0;
- bitset_foreach(cand_bitset, i) {
- ir_node *node = get_idx_irn(env->irg, i);
+ bitset_foreach(cand_bitset, idx) {
+ ir_node *node = get_idx_irn(env->irg, idx);
candidates[c].node = node;
candidates[c].cost = 0;
/* Option table for the spiller; terminator replaced by the LC_OPT_ENT_NULL
 * sentinel macro in this hunk. */
static const lc_opt_table_entry_t be_spill_options[] = {
LC_OPT_ENT_BOOL ("coalesce_slots", "coalesce the spill slots", &be_coalesce_spill_slots),
LC_OPT_ENT_BOOL ("remat", "try to rematerialize values instead of reloading", &be_do_remats),
- { NULL }
+ LC_OPT_ENT_NULL
};
static be_module_list_entry_t *spillers = NULL;
{
const spill_t* s1 = d1;
const spill_t* s2 = d2;
+ (void) size;
+
return s1->spill != s2->spill;
}
{
const memperm_t* e1 = d1;
const memperm_t* e2 = d2;
+ (void) size;
+
return e1->block != e2->block;
}
#define is_curr_reg_class(irn) (get_reg_cls(p) == chordal_env->cls)
/* irg-walker callback: clears the link field of every visited node;
 * data is unused. */
static void clear_link(ir_node *irn, void *data) {
+ (void) data;
set_irn_link(irn, NULL);
}
/* set-compare callback for perm_proj_t: equal (0) iff both entries have the
 * same arg node; n is unused. */
static int cmp_perm_proj(const void *a, const void *b, size_t n) {
const perm_proj_t *p = a;
const perm_proj_t *q = b;
+ (void) n;
+
return !(p->arg == q->arg);
}
/* set-compare callback for be_stat_irg_t: equal (0) iff both entries refer
 * to the same irg; len is unused. */
static int cmp_stat_data(const void *a, const void *b, size_t len) {
const be_stat_irg_t *p1 = a;
const be_stat_irg_t *p2 = b;
+ (void) len;
return p1->irg != p2->irg;
}
{
const be_use_t *p = a;
const be_use_t *q = b;
+ (void) n;
+
return !(p->block == q->block && p->node == q->node);
}
return result;
}
-static int be_is_phi_argument(const be_lv_t *lv, const ir_node *block, const ir_node *def)
+static int be_is_phi_argument(const ir_node *block, const ir_node *def)
{
ir_node *node;
ir_node *succ_block = NULL;
}
#endif
- if(be_is_phi_argument(env->lv, block, def)) {
+ if(be_is_phi_argument(block, def)) {
// TODO we really should continue searching the uses of the phi,
// as a phi isn't a real use that implies a reload (because we could
// easily spill the whole phi)
{
ir_node *node;
unsigned step = 0;
+ (void) data;
sched_foreach(block, node) {
set_irn_link(node, INT_TO_PTR(step));
/* irg-walker callback: threads each Phi into a singly linked list hanging
 * off its block's link field; the hunk only adds (void) data and re-indents
 * the body — behavior is unchanged. */
static void collect_phis(ir_node *irn, void *data)
{
- if(is_Phi(irn)) {
- ir_node *bl = get_nodes_block(irn);
- set_irn_link(irn, get_irn_link(bl));
- set_irn_link(bl, irn);
- }
+ (void) data;
+ if(is_Phi(irn)) {
+ ir_node *bl = get_nodes_block(irn);
+ set_irn_link(irn, get_irn_link(bl));
+ set_irn_link(bl, irn);
+ }
}
void be_clear_links(ir_graph *irg)
/* set-compare callback for spill_t: equal (0) iff both entries refer to the
 * same spill node; size is unused. */
static int cmp_spill(const void* d1, const void* d2, size_t size) {
const spill_t* s1 = d1;
const spill_t* s2 = d2;
+ (void) size;
+
return s1->spill != s2->spill;
}