be_add_module_to_list(©opts, name, copyopt);
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt)
void be_init_copyopt(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
return 0;
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone)
void be_init_copynone(void)
{
static co_algo_info copyheur = {
{
if (env->ifg)
return be_ifg_connected(env->ifg, a, b);
- else
- return be_values_interfere(env->birg->lv, a, b);
+ else {
+ be_lv_t *lv = be_get_irg_liveness(env->irg);
+ return be_values_interfere(lv, a, b);
+ }
}
copy_opt_t *new_copy_opt(be_chordal_env_t *chordal_env, cost_fct_t get_costs)
{
const char *s1, *s2, *s3;
- int len;
+ size_t len;
copy_opt_t *co;
FIRM_DBG_REGISTER(dbg, "ir.be.copyopt");
static int co_is_optimizable_root(ir_node *irn)
{
const arch_register_req_t *req;
- const arch_register_t *reg;
if (arch_irn_is_ignore(irn))
return 0;
- reg = arch_get_irn_register(irn);
- if (arch_register_type_is(reg, ignore))
- return 0;
-
if (is_Reg_Phi(irn) || is_Perm_Proj(irn))
return 1;
int res;
ir_node *root_bl = get_nodes_block(root);
ir_node *copy_bl = is_Phi(root) ? get_Block_cfgpred_block(root_bl, pos) : root_bl;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(co->cenv->irg);
(void) arg;
- res = get_block_execfreq_ulong(co->cenv->birg->exec_freq, copy_bl);
+ res = get_block_execfreq_ulong(exec_freq, copy_bl);
/* don't allow values smaller than one. */
return res < 1 ? 1 : res;
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
bitset_t *curr;
- unsigned pos;
+ size_t pos;
int curr_weight, best_weight = 0;
/* assign the nodes into two groups.
/* now compute the best set out of the unsafe nodes*/
if (unsafe_count > MIS_HEUR_TRIGGER) {
bitset_t *best = bitset_alloca(unsafe_count);
- /* Heuristik: Greedy trial and error form index 0 to unsafe_count-1 */
+		/* Heuristic: Greedy trial and error from index 0 to unsafe_count-1 */
for (i=0; i<unsafe_count; ++i) {
bitset_set(best, i);
/* check if it is a stable set */
goto no_stable_set;
/* if we arrive here, we have a stable set */
- /* compute the weigth of the stable set*/
+	/* compute the weight of the stable set */
curr_weight = 0;
bitset_foreach(curr, pos)
curr_weight += unsafe_costs[pos];
static void co_collect_units(ir_node *irn, void *env)
{
const arch_register_req_t *req;
- copy_opt_t *co = env;
+ copy_opt_t *co = (copy_opt_t*)env;
unit_t *unit;
if (get_irn_mode(irn) == mode_T)
static int compare_affinity_node_t(const void *k1, const void *k2, size_t size)
{
- const affinity_node_t *n1 = k1;
- const affinity_node_t *n2 = k2;
+ const affinity_node_t *n1 = (const affinity_node_t*)k1;
+ const affinity_node_t *n2 = (const affinity_node_t*)k2;
(void) size;
return (n1->irn != n2->irn);
new_node.irn = n1;
new_node.degree = 0;
new_node.neighbours = NULL;
- node = set_insert(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
+ node = (affinity_node_t*)set_insert(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
for (nbr = node->neighbours; nbr; nbr = nbr->next)
if (nbr->irn == n2) {
static void build_graph_walker(ir_node *irn, void *env)
{
const arch_register_req_t *req;
- copy_opt_t *co = env;
+ copy_opt_t *co = (copy_opt_t*)env;
int pos, max;
- const arch_register_t *reg;
if (get_irn_mode(irn) == mode_T)
return;
if (req->cls != co->cls || arch_irn_is_ignore(irn))
return;
- reg = arch_get_irn_register(irn);
- if (arch_register_type_is(reg, ignore))
- return;
-
if (is_Reg_Phi(irn)) { /* Phis */
for (pos=0, max=get_irn_arity(irn); pos<max; ++pos) {
ir_node *arg = get_irn_n(irn, pos);
co->nodes = NULL;
}
-/* co_solve_ilp1() co_solve_ilp2() are implemented in becopyilpX.c */
-
int co_gs_is_optimizable(copy_opt_t *co, ir_node *irn)
{
affinity_node_t new_node, *n;
ASSERT_GS_AVAIL(co);
new_node.irn = irn;
- n = set_find(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
+ n = (affinity_node_t*)set_find(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
if (n) {
return (n->degree > 0);
} else
be_ifg_t *ifg = co->cenv->ifg;
int *color_map = ALLOCAN(int, co->cls->n_regs);
int *node_map = XMALLOCN(int, get_irg_last_idx(co->irg) + 1);
+ ir_graph *irg = co->irg;
+ be_irg_t *birg = be_birg_from_irg(irg);
ir_node *irn;
nodes_iter_t it;
n_regs = 0;
for (i = 0; i < co->cls->n_regs; ++i) {
const arch_register_t *reg = &co->cls->regs[i];
- color_map[i] = arch_register_type_is(reg, ignore) ? -1 : n_regs++;
+ if (rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
+ color_map[i] = n_regs++;
+ } else {
+ color_map[i] = -1;
+ }
}
/*
return col < sizeof(names)/sizeof(names[0]) ? names[col] : "white";
}
-typedef struct _co_ifg_dump_t {
+typedef struct co_ifg_dump_t {
const copy_opt_t *co;
unsigned flags;
} co_ifg_dump_t;
static void ifg_dump_node_attr(FILE *f, void *self, ir_node *irn)
{
- co_ifg_dump_t *env = self;
+ co_ifg_dump_t *env = (co_ifg_dump_t*)self;
const arch_register_t *reg = arch_get_irn_register(irn);
const arch_register_req_t *req = arch_get_register_req_out(irn);
int limited = arch_register_req_is(req, limited);
static void ifg_dump_at_end(FILE *file, void *self)
{
- co_ifg_dump_t *env = self;
+ co_ifg_dump_t *env = (co_ifg_dump_t*)self;
affinity_node_t *a;
co_gs_foreach_aff_node(env->co, a) {
char buf[1024];
size_t i, n;
char *tu_name;
+ const char *cup_name = be_get_irg_main_env(env->irg)->cup_name;
- n = strlen(env->birg->main_env->cup_name);
+ n = strlen(cup_name);
tu_name = XMALLOCN(char, n + 1);
- strcpy(tu_name, env->birg->main_env->cup_name);
+ strcpy(tu_name, cup_name);
for (i = 0; i < n; ++i)
if (tu_name[i] == '.')
tu_name[i] = '_';
if (selected_copyopt->copyopt == void_algo)
return;
- be_liveness_assure_chk(be_get_birg_liveness(cenv->birg));
+ be_liveness_assure_chk(be_get_irg_liveness(cenv->irg));
co = new_copy_opt(cenv, cost_func);
co_build_ou_structure(co);