X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbecopyopt.c;h=f90ec70953ab5c151fec6e998e15eeb478e18192;hb=595a5d410e24d1ecb517de36a29282bb2ac64eff;hp=868dc9287ec565ec0bd9e44bf2a100bd4ccb8ecd;hpb=2922c6d1781518f7b564f28fad474d0e1c7aa7f2;p=libfirm

diff --git a/ir/be/becopyopt.c b/ir/be/becopyopt.c
index 868dc9287..f90ec7095 100644
--- a/ir/be/becopyopt.c
+++ b/ir/be/becopyopt.c
@@ -75,7 +75,7 @@
 static unsigned dump_flags = 0;
 static unsigned style_flags = 0;
-static unsigned do_stats = 0;
+static int do_stats = 0;
 static cost_fct_t cost_func = co_get_costs_exec_freq;
 static int improve = 1;
@@ -136,7 +136,7 @@ void be_register_copyopt(const char *name, co_algo_info *copyopt)
 	be_add_module_to_list(&copyopts, name, copyopt);
 }
 
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt)
 void be_init_copyopt(void)
 {
 	lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
@@ -155,7 +155,7 @@ static int void_algo(copy_opt_t *co)
 	return 0;
 }
 
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone)
 void be_init_copynone(void)
 {
 	static co_algo_info copyheur = {
@@ -194,7 +194,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 copy_opt_t *new_copy_opt(be_chordal_env_t *chordal_env, cost_fct_t get_costs)
 {
 	const char *s1, *s2, *s3;
-	int len;
+	size_t len;
 	copy_opt_t *co;
 
 	FIRM_DBG_REGISTER(dbg, "ir.be.copyopt");
@@ -228,19 +228,14 @@ void free_copy_opt(copy_opt_t *co)
 static int co_is_optimizable_root(ir_node *irn)
 {
 	const arch_register_req_t *req;
-	const arch_register_t *reg;
 
 	if (arch_irn_is_ignore(irn))
 		return 0;
 
-	reg = arch_get_irn_register(irn);
-	if (arch_register_type_is(reg, ignore))
-		return 0;
-
 	if (is_Reg_Phi(irn) || is_Perm_Proj(irn))
 		return 1;
 
-	req = arch_get_register_req_out(irn);
+	req = arch_get_irn_register_req(irn);
 	if (is_2addr_code(req))
 		return 1;
@@ -313,7 +308,7 @@ static int ou_max_ind_set_costs(unit_t *ou)
 	ir_node **safe, **unsafe;
 	int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
 	bitset_t *curr;
-	unsigned pos;
+	size_t pos;
 	int curr_weight, best_weight = 0;
 
 	/* assign the nodes into two groups.
@@ -349,7 +344,7 @@ static int ou_max_ind_set_costs(unit_t *ou)
 	/* now compute the best set out of the unsafe nodes*/
 	if (unsafe_count > MIS_HEUR_TRIGGER) {
 		bitset_t *best = bitset_alloca(unsafe_count);
-		/* Heuristik: Greedy trial and error form index 0 to unsafe_count-1 */
+		/* Heuristic: Greedy trial and error form index 0 to unsafe_count-1 */
 		for (i=0; i<unsafe_count; ++i) {
 	if (req->cls != co->cls)
 		return;
 	if (!co_is_optimizable_root(irn))
 		return;
@@ -427,7 +422,7 @@ static void co_collect_units(ir_node *irn, void *env)
 			int o, arg_pos;
 			ir_node *arg = get_irn_n(irn, i);
 
-			assert(arch_get_irn_reg_class_out(arg) == co->cls && "Argument not in same register class.");
+			assert(arch_get_irn_reg_class(arg) == co->cls && "Argument not in same register class.");
 			if (arg == irn)
 				continue;
 			if (nodes_interfere(co->cenv, irn, arg)) {
@@ -549,7 +544,7 @@ static int compare_ous(const void *k1, const void *k2)
 	/* Units with constraints come first */
 	u1_has_constr = 0;
 	for (i=0; i<u1->node_count; ++i) {
-		arch_get_register_req_out(&req, u1->nodes[i]);
+		arch_get_irn_register_req(&req, u1->nodes[i]);
 		if (arch_register_req_is(&req, limited)) {
 			u1_has_constr = 1;
 			break;
@@ -558,7 +553,7 @@ static int compare_ous(const void *k1, const void *k2)
 
 	u2_has_constr = 0;
 	for (i=0; i<u2->node_count; ++i) {
-		arch_get_register_req_out(&req, u2->nodes[i]);
+		arch_get_irn_register_req(&req, u2->nodes[i]);
 		if (arch_register_req_is(&req, limited)) {
 			u2_has_constr = 1;
 			break;
@@ -753,8 +748,8 @@ void co_complete_stats(const copy_opt_t *co, co_complete_stats_t *stat)
 
 static int compare_affinity_node_t(const void *k1, const void *k2, size_t size)
 {
-	const affinity_node_t *n1 = k1;
-	const affinity_node_t *n2 = k2;
+	const affinity_node_t *n1 = (const affinity_node_t*)k1;
+	const affinity_node_t *n2 = (const affinity_node_t*)k2;
 	(void) size;
 
 	return (n1->irn != n2->irn);
@@ -769,7 +764,7 @@ static void add_edge(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs)
 	new_node.irn = n1;
 	new_node.degree = 0;
 	new_node.neighbours = NULL;
-	node = set_insert(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
+	node = (affinity_node_t*)set_insert(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
 
 	for (nbr = node->neighbours; nbr; nbr = nbr->next)
 		if (nbr->irn == n2) {
@@ -803,20 +798,15 @@ static inline void add_edges(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs
 static void build_graph_walker(ir_node *irn, void *env)
 {
 	const arch_register_req_t *req;
-	copy_opt_t *co = env;
+	copy_opt_t *co = (copy_opt_t*)env;
 	int pos, max;
-	const arch_register_t *reg;
 
 	if (get_irn_mode(irn) == mode_T)
 		return;
 
-	req = arch_get_register_req_out(irn);
+	req = arch_get_irn_register_req(irn);
 	if (req->cls != co->cls || arch_irn_is_ignore(irn))
 		return;
 
-	reg = arch_get_irn_register(irn);
-	if (arch_register_type_is(reg, ignore))
-		return;
-
 	if (is_Reg_Phi(irn)) { /* Phis */
 		for (pos=0, max=get_irn_arity(irn); pos<max; ++pos) {
 	co->nodes = NULL;
 }
 
-/* co_solve_ilp1() co_solve_ilp2() are implemented in becopyilpX.c */
-
 int co_gs_is_optimizable(copy_opt_t *co, ir_node *irn)
 {
 	affinity_node_t new_node, *n;
@@ -867,7 +855,7 @@ int co_gs_is_optimizable(copy_opt_t *co, ir_node *irn)
 	ASSERT_GS_AVAIL(co);
 
 	new_node.irn = irn;
-	n = set_find(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
+	n = (affinity_node_t*)set_find(co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
 	if (n) {
 		return (n->degree > 0);
 	} else
@@ -884,7 +872,7 @@ static int co_dump_appel_disjoint_constraints(const copy_opt_t *co, ir_node *a,
 	constr[1] = bitset_alloca(co->cls->n_regs);
 
 	for (j = 0; j < 2; ++j) {
-		const arch_register_req_t *req = arch_get_register_req_out(nodes[j]);
+		const arch_register_req_t *req = arch_get_irn_register_req(nodes[j]);
 		if (arch_register_req_is(req, limited))
 			rbitset_copy_to_bitset(req->limited, constr[j]);
 		else
@@ -900,6 +888,8 @@ void co_dump_appel_graph(const copy_opt_t *co, FILE *f)
 	be_ifg_t *ifg = co->cenv->ifg;
 	int *color_map = ALLOCAN(int, co->cls->n_regs);
 	int *node_map = XMALLOCN(int, get_irg_last_idx(co->irg) + 1);
+	ir_graph *irg = co->irg;
+	be_irg_t *birg = be_birg_from_irg(irg);
 	ir_node *irn;
 	nodes_iter_t it;
@@ -910,7 +900,11 @@ void co_dump_appel_graph(const copy_opt_t *co, FILE *f)
 	n_regs = 0;
 	for (i = 0; i < co->cls->n_regs; ++i) {
 		const arch_register_t *reg = &co->cls->regs[i];
-		color_map[i] = arch_register_type_is(reg, ignore) ? -1 : n_regs++;
+		if (rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
+			color_map[i] = n_regs++;
+		} else {
+			color_map[i] = -1;
+		}
 	}
 
 	/*
@@ -931,7 +925,7 @@ void co_dump_appel_graph(const copy_opt_t *co, FILE *f)
 		if (!arch_irn_is_ignore(irn)) {
 			int idx = node_map[get_irn_idx(irn)];
 			affinity_node_t *a = get_affinity_info(co, irn);
-			const arch_register_req_t *req = arch_get_register_req_out(irn);
+			const arch_register_req_t *req = arch_get_irn_register_req(irn);
 			ir_node *adj;
 
 			if (arch_register_req_is(req, limited)) {
@@ -1015,7 +1009,7 @@ static const char *get_dot_color_name(size_t col)
 	return col < sizeof(names)/sizeof(names[0]) ? names[col] : "white";
 }
 
-typedef struct _co_ifg_dump_t {
+typedef struct co_ifg_dump_t {
 	const copy_opt_t *co;
 	unsigned flags;
 } co_ifg_dump_t;
@@ -1034,9 +1028,9 @@ static int ifg_is_dump_node(void *self, ir_node *irn)
 
 static void ifg_dump_node_attr(FILE *f, void *self, ir_node *irn)
 {
-	co_ifg_dump_t *env = self;
+	co_ifg_dump_t *env = (co_ifg_dump_t*)self;
 	const arch_register_t *reg = arch_get_irn_register(irn);
-	const arch_register_req_t *req = arch_get_register_req_out(irn);
+	const arch_register_req_t *req = arch_get_irn_register_req(irn);
 	int limited = arch_register_req_is(req, limited);
 
 	if (env->flags & CO_IFG_DUMP_LABELS) {
@@ -1061,7 +1055,7 @@ static void ifg_dump_node_attr(FILE *f, void *self, ir_node *irn)
 
 static void ifg_dump_at_end(FILE *file, void *self)
 {
-	co_ifg_dump_t *env = self;
+	co_ifg_dump_t *env = (co_ifg_dump_t*)self;
 	affinity_node_t *a;
 
 	co_gs_foreach_aff_node(env->co, a) {