static unsigned dump_flags = 0;
static unsigned style_flags = 0;
-static unsigned do_stats = 0;
+static int do_stats = 0;
static cost_fct_t cost_func = co_get_costs_exec_freq;
static int improve = 1;
be_add_module_to_list(©opts, name, copyopt);
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt)
void be_init_copyopt(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
return 0;
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone)
void be_init_copynone(void)
{
static co_algo_info copyheur = {
copy_opt_t *new_copy_opt(be_chordal_env_t *chordal_env, cost_fct_t get_costs)
{
const char *s1, *s2, *s3;
- int len;
+ size_t len;
copy_opt_t *co;
FIRM_DBG_REGISTER(dbg, "ir.be.copyopt");
if (is_Reg_Phi(irn) || is_Perm_Proj(irn))
return 1;
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
if (is_2addr_code(req))
return 1;
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
bitset_t *curr;
- unsigned pos;
+ size_t pos;
int curr_weight, best_weight = 0;
/* assign the nodes into two groups.
/* now compute the best set out of the unsafe nodes*/
if (unsafe_count > MIS_HEUR_TRIGGER) {
bitset_t *best = bitset_alloca(unsafe_count);
- /* Heuristik: Greedy trial and error form index 0 to unsafe_count-1 */
+ /* Heuristic: Greedy trial and error from index 0 to unsafe_count-1 */
for (i=0; i<unsafe_count; ++i) {
bitset_set(best, i);
/* check if it is a stable set */
goto no_stable_set;
/* if we arrive here, we have a stable set */
- /* compute the weigth of the stable set*/
+ /* compute the weight of the stable set*/
curr_weight = 0;
bitset_foreach(curr, pos)
curr_weight += unsafe_costs[pos];
if (get_irn_mode(irn) == mode_T)
return;
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
if (req->cls != co->cls)
return;
if (!co_is_optimizable_root(irn))
int o, arg_pos;
ir_node *arg = get_irn_n(irn, i);
- assert(arch_get_irn_reg_class_out(arg) == co->cls && "Argument not in same register class.");
+ assert(arch_get_irn_reg_class(arg) == co->cls && "Argument not in same register class.");
if (arg == irn)
continue;
if (nodes_interfere(co->cenv, irn, arg)) {
/* Units with constraints come first */
u1_has_constr = 0;
for (i=0; i<u1->node_count; ++i) {
- arch_get_register_req_out(&req, u1->nodes[i]);
+ arch_get_irn_register_req(&req, u1->nodes[i]);
if (arch_register_req_is(&req, limited)) {
u1_has_constr = 1;
break;
u2_has_constr = 0;
for (i=0; i<u2->node_count; ++i) {
- arch_get_register_req_out(&req, u2->nodes[i]);
+ arch_get_irn_register_req(&req, u2->nodes[i]);
if (arch_register_req_is(&req, limited)) {
u2_has_constr = 1;
break;
if (get_irn_mode(irn) == mode_T)
return;
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
if (req->cls != co->cls || arch_irn_is_ignore(irn))
return;
constr[1] = bitset_alloca(co->cls->n_regs);
for (j = 0; j < 2; ++j) {
- const arch_register_req_t *req = arch_get_register_req_out(nodes[j]);
+ const arch_register_req_t *req = arch_get_irn_register_req(nodes[j]);
if (arch_register_req_is(req, limited))
rbitset_copy_to_bitset(req->limited, constr[j]);
else
if (!arch_irn_is_ignore(irn)) {
int idx = node_map[get_irn_idx(irn)];
affinity_node_t *a = get_affinity_info(co, irn);
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
ir_node *adj;
if (arch_register_req_is(req, limited)) {
{
co_ifg_dump_t *env = (co_ifg_dump_t*)self;
const arch_register_t *reg = arch_get_irn_register(irn);
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
int limited = arch_register_req_is(req, limited);
if (env->flags & CO_IFG_DUMP_LABELS) {