#include "unionfind.h"
#include "pdeq.h"
#include "hungarian.h"
-
+#include "statev.h"
#include "beabi.h"
#include "bechordal_t.h"
#include "be.h"
static int *congruence_classes;
static ir_node **block_order;
static size_t n_block_order;
-static int create_preferences = true;
-static int create_congruence_classes = true;
-static int propagate_phi_registers = true;
-
-static const lc_opt_table_entry_t options[] = {
- LC_OPT_ENT_BOOL("prefs", "use preference based coloring", &create_preferences),
- LC_OPT_ENT_BOOL("congruences", "create congruence classes", &create_congruence_classes),
- LC_OPT_ENT_BOOL("prop_phi", "propagate phi registers", &propagate_phi_registers),
- LC_OPT_LAST
-};
/** currently active assignments (while processing a basic block)
* maps registers to values(their current copies) */
ir_node *node)
{
const arch_register_req_t *req = arch_get_irn_register_req(node);
- if (req->type & arch_register_req_type_limited) {
+ if (arch_register_req_is(req, limited)) {
const unsigned *limited = req->limited;
float penalty = weight * DEF_FACTOR;
give_penalties_for_limits(live_nodes, penalty, limited, node);
}
- if (req->type & arch_register_req_type_should_be_same) {
+ if (arch_register_req_is(req, should_be_same)) {
ir_node *insn = skip_Proj(node);
allocation_info_t *info = get_allocation_info(node);
int arity = get_irn_arity(insn);
if (is_Phi(node))
break;
- if (create_preferences) {
- ir_node *value;
- be_foreach_definition(node, cls, value,
- check_defs(&live_nodes, weight, value);
- );
- }
+ be_foreach_definition(node, cls, value,
+ check_defs(&live_nodes, weight, value);
+ );
/* mark last uses */
int arity = get_irn_arity(node);
be_liveness_transfer(cls, node, &live_nodes);
- if (create_preferences) {
- /* update weights based on usage constraints */
- for (int i = 0; i < arity; ++i) {
- ir_node *op = get_irn_n(node, i);
- if (!arch_irn_consider_in_reg_alloc(cls, op))
- continue;
+ /* update weights based on usage constraints */
+ for (int i = 0; i < arity; ++i) {
+ ir_node *op = get_irn_n(node, i);
+ if (!arch_irn_consider_in_reg_alloc(cls, op))
+ continue;
- const arch_register_req_t *req
- = arch_get_irn_register_req_in(node, i);
- if (!(req->type & arch_register_req_type_limited))
- continue;
+ const arch_register_req_t *req
+ = arch_get_irn_register_req_in(node, i);
+ if (!arch_register_req_is(req, limited))
+ continue;
- const unsigned *limited = req->limited;
- give_penalties_for_limits(&live_nodes, weight * USE_FACTOR,
- limited, op);
- }
+ const unsigned *limited = req->limited;
+ give_penalties_for_limits(&live_nodes, weight * USE_FACTOR,
+ limited, op);
}
}
const arch_register_req_t *req = arch_get_irn_register_req(node);
/* should be same constraint? */
- if (req->type & arch_register_req_type_should_be_same) {
+ if (arch_register_req_is(req, should_be_same)) {
const ir_node *insn = skip_Proj_const(node);
int arity = get_irn_arity(insn);
unsigned node_idx = get_irn_idx(node);
/* check should be same constraints */
ir_node *last_phi = NULL;
sched_foreach_reverse(block, node) {
- ir_node *value;
if (is_Phi(node)) {
last_phi = node;
break;
*/
static void use_reg(ir_node *node, const arch_register_t *reg, unsigned width)
{
- unsigned r = arch_register_get_index(reg);
+ unsigned r = reg->index;
for (unsigned r0 = r; r0 < r + width; ++r0)
assignments[r0] = node;
arch_set_irn_register(node, reg);
const arch_register_t *reg = arch_get_irn_register(node);
const arch_register_req_t *req = arch_get_irn_register_req(node);
- unsigned r = arch_register_get_index(reg);
+ unsigned r = reg->index;
/* assignment->value may be NULL if a value is used at 2 inputs
* so it gets freed twice. */
for (unsigned r0 = r; r0 < r + req->width; ++r0) {
allocation_info_t *info = get_allocation_info(to_split);
float delta = 0;
- /* stupid hack: don't optimisticallt split don't spill nodes...
+ /* stupid hack: don't optimistically split don't spill nodes...
* (so we don't split away the values produced because of
* must_be_different constraints) */
ir_node *original_insn = skip_Proj(info->original_value);
return false;
const arch_register_t *from_reg = arch_get_irn_register(to_split);
- unsigned from_r = arch_register_get_index(from_reg);
+ unsigned from_r = from_reg->index;
ir_node *block = get_nodes_block(before);
float split_threshold = (float)get_block_execfreq(block) * SPLIT_DELTA;
unsigned width = 1;
mark_as_copy_of(copy, to_split);
/* hacky, but correct here */
- if (assignments[arch_register_get_index(from_reg)] == to_split)
+ if (assignments[from_reg->index] == to_split)
free_reg_of_value(to_split);
use_reg(copy, reg, width);
sched_add_before(before, copy);
}
/* ignore reqs must be preassigned */
- assert (! (req->type & arch_register_req_type_ignore));
+ assert(!arch_register_req_is(req, ignore));
/* give should_be_same boni */
allocation_info_t *info = get_allocation_info(node);
ir_node *in_node = skip_Proj(node);
- if (req->type & arch_register_req_type_should_be_same) {
+ if (arch_register_req_is(req, should_be_same)) {
float weight = (float)get_block_execfreq(block);
int arity = get_irn_arity(in_node);
ir_node *in = get_irn_n(in_node, i);
const arch_register_t *reg = arch_get_irn_register(in);
- unsigned reg_index = arch_register_get_index(reg);
+ unsigned reg_index = reg->index;
/* if the value didn't die here then we should not propagate the
* should_be_same info */
DB((dbg, LEVEL_2, "\n"));
const unsigned *allowed_regs = normal_regs;
- if (req->type & arch_register_req_type_limited) {
+ if (arch_register_req_is(req, limited)) {
allowed_regs = req->limited;
}
continue;
/* alignment constraint? */
if (width > 1) {
- if ((req->type & arch_register_req_type_aligned)
- && (final_reg_index % width) != 0)
+ if (arch_register_req_is(req, aligned) && (final_reg_index % width) != 0)
continue;
bool fine = true;
for (unsigned r0 = r+1; r0 < r+width; ++r0) {
}
/* old register has 1 user less, permutation is resolved */
- assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
+ assert(arch_get_irn_register(src)->index == old_r);
permutation[r] = r;
assert(n_used[old_r] > 0);
ir_node *op = get_irn_n(node, i);
const arch_register_t *reg = arch_get_irn_register(op);
- rbitset_clear(bitset, arch_register_get_index(reg));
+ rbitset_clear(bitset, reg->index);
}
}
continue;
const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
- if (!(req->type & arch_register_req_type_limited))
+ if (!arch_register_req_is(req, limited))
continue;
const unsigned *limited = req->limited;
const arch_register_t *reg = arch_get_irn_register(op);
- unsigned current_reg = arch_register_get_index(reg);
+ unsigned current_reg = reg->index;
for (unsigned r = 0; r < n_regs; ++r) {
if (rbitset_is_set(limited, r))
continue;
if (req->width > 1)
double_width = true;
const arch_register_t *reg = arch_get_irn_register(op);
- unsigned reg_index = arch_register_get_index(reg);
- if (req->type & arch_register_req_type_aligned) {
+ unsigned reg_index = reg->index;
+ if (arch_register_req_is(req, aligned)) {
if (!is_aligned(reg_index, req->width)) {
good = false;
continue;
}
}
- if (!(req->type & arch_register_req_type_limited))
+ if (!arch_register_req_is(req, limited))
continue;
const unsigned *limited = req->limited;
}
/* is any of the live-throughs using a constrained output register? */
- ir_node *value;
unsigned *live_through_regs = NULL;
be_foreach_definition(node, cls, value,
+ (void)value;
if (req_->width > 1)
double_width = true;
- if (! (req_->type & arch_register_req_type_limited))
+ if (!arch_register_req_is(req_, limited))
continue;
if (live_through_regs == NULL) {
- rbitset_alloca(live_through_regs, n_regs);
+ live_through_regs = rbitset_alloca(n_regs);
determine_live_through_regs(live_through_regs, node);
}
rbitset_or(forbidden_regs, req_->limited, n_regs);
/* create these arrays if we haven't yet */
if (live_through_regs == NULL) {
- rbitset_alloca(live_through_regs, n_regs);
+ live_through_regs = rbitset_alloca(n_regs);
}
if (double_width) {
continue;
const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
- if (!(req->type & arch_register_req_type_limited))
+ if (!arch_register_req_is(req, limited))
continue;
const unsigned *limited = req->limited;
const arch_register_t *reg = arch_get_irn_register(op);
- unsigned current_reg = arch_register_get_index(reg);
+ unsigned current_reg = reg->index;
for (unsigned r = 0; r < n_regs; ++r) {
if (rbitset_is_set(limited, r))
continue;
assert(a >= 0);
const arch_register_t *reg = arch_get_irn_register(phi);
- int regn = arch_register_get_index(reg);
+ int regn = reg->index;
/* same register? nothing to do */
if (regn == a)
continue;
ir_node *op = pred_info->assignments[a];
const arch_register_t *op_reg = arch_get_irn_register(op);
- /* virtual or joker registers are ok too */
- if ((op_reg->type & arch_register_type_joker)
- || (op_reg->type & arch_register_type_virtual))
+ /* Virtual registers are ok, too. */
+ if (op_reg->type & arch_register_type_virtual)
continue;
permutation[regn] = a;
/* we have permuted all values into the correct registers so we can
simply query which value occupies the phis register in the
predecessor */
- int a = arch_register_get_index(arch_get_irn_register(phi));
+ int a = arch_get_irn_register(phi)->index;
ir_node *op = pred_info->assignments[a];
set_Phi_pred(phi, p, op);
}
continue;
/* give bonus for already assigned register */
- float weight = (float)get_block_execfreq(pred_block);
- unsigned r = arch_register_get_index(reg);
- info->prefs[r] += weight * AFF_PHI;
+ float weight = (float)get_block_execfreq(pred_block);
+ info->prefs[reg->index] += weight * AFF_PHI;
}
}
use_reg(node, reg, req->width);
/* adapt preferences for phi inputs */
- if (propagate_phi_registers)
- propagate_phi_register(node, r);
+ propagate_phi_register(node, r);
}
}
if (req->cls != cls)
continue;
-	if (req->type & arch_register_req_type_ignore) {
+	if (arch_register_req_is(req, ignore)) {
allocation_info_t *info = get_allocation_info(node);
info->current_value = node;
ir_nodeset_insert(&live_nodes, node);
}
- unsigned *forbidden_regs; /**< collects registers which must
- not be used for optimistic splits */
- rbitset_alloca(forbidden_regs, n_regs);
+ /** Collects registers which must not be used for optimistic splits. */
+ unsigned *const forbidden_regs = rbitset_alloca(n_regs);
/* handle phis... */
assign_phi_registers(block);
continue;
const arch_register_t *reg = arch_get_irn_register(op);
- rbitset_set(forbidden_regs, arch_register_get_index(reg));
+ rbitset_set(forbidden_regs, reg->index);
}
/* free registers of values last used at this instruction */
free_last_uses(&live_nodes, node);
/* assign output registers */
- ir_node *value;
be_foreach_definition_(node, cls, value,
assign_reg(block, value, forbidden_regs);
);
be_clear_links(irg);
irg_block_walk_graph(irg, NULL, analyze_block, NULL);
- if (create_congruence_classes)
- combine_congruence_classes();
+ combine_congruence_classes();
for (size_t i = 0; i < n_block_order; ++i) {
ir_node *block = block_order[i];
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_pref_alloc)
void be_init_pref_alloc(void)
{
- static be_ra_t be_ra_pref = {
- be_pref_alloc,
- };
- lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
- lc_opt_entry_t *prefalloc_group = lc_opt_get_grp(be_grp, "prefalloc");
- lc_opt_add_table(prefalloc_group, options);
-
+ static be_ra_t be_ra_pref = { be_pref_alloc };
be_register_allocator("pref", &be_ra_pref);
FIRM_DBG_REGISTER(dbg, "firm.be.prefalloc");
}