DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static struct obstack obst;
-static be_irg_t *birg;
static ir_graph *irg;
static const arch_register_class_t *cls;
static const arch_register_req_t *default_cls_req;
* the information is per firm-node.
*/
struct allocation_info_t {
- unsigned last_uses; /**< bitset indicating last uses (input pos) */
+ unsigned last_uses[2]; /**< bitset indicating last uses (input pos) */
ir_node *current_value; /**< copy of the value that should be used */
ir_node *original_value; /**< for copies point to original value */
float prefs[0]; /**< register preferences */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
ir_node *node)
{
- const arch_register_req_t *req;
-
- if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
- foreach_out_edge(node, edge) {
- ir_node *proj = get_edge_src_irn(edge);
- check_defs(live_nodes, weight, proj);
- }
- return;
- }
-
- if (!arch_irn_consider_in_reg_alloc(cls, node))
- return;
-
- req = arch_get_register_req_out(node);
+ const arch_register_req_t *req = arch_get_register_req_out(node);
if (req->type & arch_register_req_type_limited) {
const unsigned *limited = req->limited;
float penalty = weight * DEF_FACTOR;
if (is_Phi(node))
break;
- if (create_preferences)
- check_defs(&live_nodes, weight, node);
+ if (create_preferences) {
+ ir_node *value;
+ be_foreach_definition(node, cls, value,
+ check_defs(&live_nodes, weight, value);
+ );
+ }
/* mark last uses */
arity = get_irn_arity(node);
-	/* the allocation info node currently only uses 1 unsigned value
-	   to mark last used inputs. So we will fail for a node with more than
-	   32 inputs. */
+	/* the allocation info stores last-use markers in the fixed-size
+	   bitset info->last_uses. So we will still fail for a node with
+	   more inputs than that bitset has bits. */
- if (arity >= (int) sizeof(unsigned) * 8) {
+ if (arity >= (int) sizeof(info->last_uses) * 8) {
panic("Node with more than %d inputs not supported yet",
- (int) sizeof(unsigned) * 8);
+ (int) sizeof(info->last_uses) * 8);
}
info = get_allocation_info(node);
/* last usage of a value? */
if (!ir_nodeset_contains(&live_nodes, op)) {
- rbitset_set(&info->last_uses, i);
+ rbitset_set(info->last_uses, i);
}
}
ir_nodeset_destroy(&live_nodes);
}
-static void congruence_def(ir_nodeset_t *live_nodes, ir_node *node)
+static void congruence_def(ir_nodeset_t *live_nodes, const ir_node *node)
{
- const arch_register_req_t *req;
-
- if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
- foreach_out_edge(node, edge) {
- ir_node *def = get_edge_src_irn(edge);
- congruence_def(live_nodes, def);
- }
- return;
- }
-
- if (!arch_irn_consider_in_reg_alloc(cls, node))
- return;
+ const arch_register_req_t *req = arch_get_register_req_out(node);
/* should be same constraint? */
- req = arch_get_register_req_out(node);
if (req->type & arch_register_req_type_should_be_same) {
- ir_node *insn = skip_Proj(node);
+ const ir_node *insn = skip_Proj_const(node);
int arity = get_irn_arity(insn);
int i;
unsigned node_idx = get_irn_idx(node);
/* check should be same constraints */
sched_foreach_reverse(block, node) {
+ ir_node *value;
if (is_Phi(node))
break;
- congruence_def(&live_nodes, node);
+ be_foreach_definition(node, cls, value,
+ congruence_def(&live_nodes, value);
+ );
be_liveness_transfer(cls, node, &live_nodes);
}
-
-
/**
* Assign register reg to the given node.
*
break;
}
if (i >= n_regs) {
+	/* the most common reason to hit this panic is that one of your nodes
+	 * is not register-pressure faithful */
panic("No register left for %+F\n", node);
}
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
allocation_info_t *info = get_allocation_info(node);
- const unsigned *last_uses = &info->last_uses;
+ const unsigned *last_uses = info->last_uses;
int arity = get_irn_arity(node);
int i;
ir_node *op;
const arch_register_t *reg;
- if (!rbitset_is_set(&info->last_uses, i))
+ if (!rbitset_is_set(info->last_uses, i))
continue;
op = get_irn_n(node, i);
hungarian_problem_t *bp;
unsigned l, r;
unsigned *assignment;
+ ir_node *value;
/* construct a list of register occupied by live-through values */
unsigned *live_through_regs = NULL;
}
/* is any of the live-throughs using a constrained output register? */
- if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
-
- foreach_out_edge(node, edge) {
- ir_node *proj = get_edge_src_irn(edge);
- const arch_register_req_t *req;
-
- if (!arch_irn_consider_in_reg_alloc(cls, proj))
- continue;
-
- req = arch_get_register_req_out(proj);
- if (!(req->type & arch_register_req_type_limited))
- continue;
-
- if (live_through_regs == NULL) {
- rbitset_alloca(live_through_regs, n_regs);
- determine_live_through_regs(live_through_regs, node);
- }
-
- rbitset_or(forbidden_regs, req->limited, n_regs);
- if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {
- good = false;
- }
- }
- } else {
- if (arch_irn_consider_in_reg_alloc(cls, node)) {
- const arch_register_req_t *req = arch_get_register_req_out(node);
- if (req->type & arch_register_req_type_limited) {
- rbitset_alloca(live_through_regs, n_regs);
- determine_live_through_regs(live_through_regs, node);
- if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {
- good = false;
- rbitset_or(forbidden_regs, req->limited, n_regs);
- }
- }
+ be_foreach_definition(node, cls, value,
+ if (! (req_->type & arch_register_req_type_limited))
+ continue;
+ if (live_through_regs == NULL) {
+ rbitset_alloca(live_through_regs, n_regs);
+ determine_live_through_regs(live_through_regs, node);
}
- }
+ rbitset_or(forbidden_regs, req_->limited, n_regs);
+ if (rbitsets_have_common(req_->limited, live_through_regs, n_regs))
+ good = false;
+ );
if (good)
return;
for (r = 0; r < n_regs; ++r) {
if (rbitset_is_set(limited, r))
continue;
- hungarian_remv(bp, r, current_reg);
+ hungarian_remove(bp, r, current_reg);
}
}
hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
assignment = ALLOCAN(unsigned, n_regs);
- res = hungarian_solve(bp, (int*) assignment, NULL, 0);
+ res = hungarian_solve(bp, assignment, NULL, 0);
assert(res == 0);
#if 0
int n_phis = 0;
int n;
int res;
- int *assignment;
+ unsigned *assignment;
ir_node *node;
hungarian_problem_t *bp;
//hungarian_print_cost_matrix(bp, 7);
hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
- assignment = ALLOCAN(int, n_regs);
+ assignment = ALLOCAN(unsigned, n_regs);
res = hungarian_solve(bp, assignment, NULL, 0);
assert(res == 0);
if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
- r = assignment[n++];
+ r = assignment[n++];
assert(rbitset_is_set(normal_regs, r));
reg = arch_register_for_index(cls, r);
DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
sched_foreach(block, node) {
int i;
int arity;
+ ir_node *value;
/* phis are already assigned */
if (is_Phi(node))
/* assign output registers */
/* TODO: 2 phases: first: pre-assigned ones, 2nd real regs */
- if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
- foreach_out_edge(node, edge) {
- ir_node *proj = get_edge_src_irn(edge);
- if (!arch_irn_consider_in_reg_alloc(cls, proj))
- continue;
- assign_reg(block, proj, forbidden_regs);
- }
- } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
- assign_reg(block, node, forbidden_regs);
- }
+ be_foreach_definition(node, cls, value,
+ assign_reg(block, value, forbidden_regs);
+ );
}
ir_nodeset_destroy(&live_nodes);
static void dump(int mask, ir_graph *irg, const char *suffix)
{
-	if (birg->main_env->options->dump_flags & mask)
+	/* only dump when the given dump phase (one of the DUMP_* flags, e.g.
+	 * DUMP_RA) is enabled in the backend options of this irg */
+	if (be_get_irg_options(irg)->dump_flags & mask)
		dump_ir_graph(irg, suffix);
}
{
/* make sure all nodes show their real register pressure */
be_timer_push(T_RA_CONSTR);
- be_pre_spill_prepare_constr(birg, cls);
+ be_pre_spill_prepare_constr(irg, cls);
be_timer_pop(T_RA_CONSTR);
dump(DUMP_RA, irg, "-spillprepare");
/* spill */
be_timer_push(T_RA_SPILL);
- be_do_spill(birg, cls);
+ be_do_spill(irg, cls);
be_timer_pop(T_RA_SPILL);
be_timer_push(T_RA_SPILL_APPLY);
/**
* The pref register allocator for a whole procedure.
*/
-static void be_pref_alloc(be_irg_t *new_birg)
+static void be_pref_alloc(ir_graph *new_irg)
{
- const arch_env_t *arch_env = new_birg->main_env->arch_env;
+ const arch_env_t *arch_env = be_get_irg_arch_env(new_irg);
int n_cls = arch_env_get_n_reg_class(arch_env);
int c;
obstack_init(&obst);
- birg = new_birg;
- irg = be_get_birg_irg(birg);
- execfreqs = birg->exec_freq;
+ irg = new_irg;
+ execfreqs = be_get_irg_exec_freq(irg);
/* determine a good coloring order */
determine_block_order();
n_regs = arch_register_class_n_regs(cls);
normal_regs = rbitset_malloc(n_regs);
- be_abi_set_non_ignore_regs(birg->abi, cls, normal_regs);
+ be_abi_set_non_ignore_regs(be_get_irg_abi(irg), cls, normal_regs);
spill();
/* verify schedule and register pressure */
be_timer_push(T_VERIFY);
- if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
- be_verify_schedule(birg);
- be_verify_register_pressure(birg, cls, irg);
- } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
- assert(be_verify_schedule(birg) && "Schedule verification failed");
- assert(be_verify_register_pressure(birg, cls, irg)
+ if (be_get_irg_options(irg)->verify_option == BE_VERIFY_WARN) {
+ be_verify_schedule(irg);
+ be_verify_register_pressure(irg, cls);
+ } else if (be_get_irg_options(irg)->verify_option == BE_VERIFY_ASSERT) {
+ assert(be_verify_schedule(irg) && "Schedule verification failed");
+ assert(be_verify_register_pressure(irg, cls)
&& "Register pressure verification failed");
}
be_timer_pop(T_VERIFY);
}
be_timer_push(T_RA_SPILL_APPLY);
- be_abi_fix_stack_nodes(birg->abi);
+ be_abi_fix_stack_nodes(irg);
be_timer_pop(T_RA_SPILL_APPLY);
be_timer_push(T_VERIFY);
- if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
- be_verify_register_allocation(birg);
- } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
- assert(be_verify_register_allocation(birg)
- && "Register allocation invalid");
+ if (be_get_irg_options(irg)->verify_option == BE_VERIFY_WARN) {
+ be_verify_register_allocation(irg);
+ } else if (be_get_irg_options(irg)->verify_option == BE_VERIFY_ASSERT) {
+ assert(be_verify_register_allocation(irg)
+ && "Register allocation invalid");
}
be_timer_pop(T_VERIFY);