void be_init_default_asm_constraint_flags(void);
-/**
- * Put the registers to be ignored in this IRG into a bitset.
- * @param irg The graph
- * @param cls The register class.
- * @param bs The bitset (may be NULL).
- * @return The number of registers to be ignored.
- */
-unsigned be_put_ignore_regs(const ir_graph *irg,
- const arch_register_class_t *cls, bitset_t *bs);
+void be_put_allocatable_regs(const ir_graph *irg,
+ const arch_register_class_t *cls, bitset_t *bs);
+
+void be_set_allocatable_regs(const ir_graph *irg,
+ const arch_register_class_t *cls,
+ unsigned *raw_bitset);
+unsigned be_get_n_allocatable_regs(const ir_graph *irg,
+ const arch_register_class_t *cls);
/**
* Initialize the backend. Must be run first in init_firm();
be_set_irg_abi(irg, NULL);
}
-void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
+void be_put_allocatable_regs(const ir_graph *irg,
+ const arch_register_class_t *cls, bitset_t *bs)
{
- arch_register_t *reg;
+ be_abi_irg_t *abi = be_get_irg_abi(irg);
+ const arch_register_t *reg;
+ unsigned i;
- for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
- if (reg->reg_class == cls)
+ assert(bitset_size(bs) == cls->n_regs);
+ bitset_clear_all(bs);
+
+ for (i = 0; i < cls->n_regs; ++i) {
+ reg = &cls->regs[i];
+ if (! (reg->type & arch_register_type_ignore))
bitset_set(bs, reg->index);
+ }
+
+ for (reg = pset_first(abi->ignore_regs); reg != NULL;
+ reg = pset_next(abi->ignore_regs)) {
+ if (reg->reg_class == cls)
+ bitset_clear(bs, reg->index);
+ }
+}
+
+unsigned be_get_n_allocatable_regs(const ir_graph *irg,
+ const arch_register_class_t *cls)
+{
+ bitset_t *bs = bitset_alloca(cls->n_regs);
+ be_put_allocatable_regs(irg, cls, bs);
+ return bitset_popcount(bs);
}
-void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
+void be_set_allocatable_regs(const ir_graph *irg,
+ const arch_register_class_t *cls,
+ unsigned *raw_bitset)
{
+ be_abi_irg_t *abi = be_get_irg_abi(irg);
unsigned i;
arch_register_t *reg;
void be_abi_free(ir_graph *irg);
-/**
- * Put the registers which are forbidden specifically for this IRG in a bitset.
- */
-void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs);
-
-void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset);
-
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg);
ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg);
}
}
-void arch_put_non_ignore_regs(const arch_register_class_t *cls, bitset_t *bs)
-{
- unsigned i;
-
- for (i = 0; i < cls->n_regs; ++i) {
- if (!arch_register_type_is(&cls->regs[i], ignore))
- bitset_set(bs, i);
- }
-}
-
int arch_reg_is_allocatable(const ir_node *irn, int pos,
const arch_register_t *reg)
{
*/
const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos);
-/**
- * Put all registers which shall not be ignored by the register
- * allocator in a bit set.
- * @param cls The register class to consider.
- * @param bs The bit set to put the registers to.
- */
-extern void arch_put_non_ignore_regs(const arch_register_class_t *cls,
- bitset_t *bs);
-
/**
* Check, if a register is assignable to an operand of a node.
* @param irn The node.
{
bitset_t *tmp = alloc_env->tmp_colors;
bitset_copy(tmp, colors);
- bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
- return bitset_next_clear(tmp, 0);
+ bitset_flip_all(tmp);
+ bitset_and(tmp, alloc_env->chordal_env->allocatable_regs);
+ return bitset_next_set(tmp, 0);
}
static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
alloc_nodes[n_alloc] = proj;
pmap_insert(partners, proj, NULL);
- bitset_clear_all(bs);
- arch_put_non_ignore_regs(env->cls, bs);
- bitset_andnot(bs, env->ignore_colors);
- bitset_foreach(bs, col) {
+ bitset_foreach(env->allocatable_regs, col) {
//hungarian_add(bp, n_alloc, col, 1);
bipartite_add(bp, n_alloc, col);
}
{
be_insn_env_t ie;
- ie.ignore_colors = env->ignore_colors;
- ie.obst = env->obst;
- ie.cls = env->cls;
+ ie.allocatable_regs = env->allocatable_regs;
+ ie.obst = env->obst;
+ ie.cls = env->cls;
return be_scan_insn(&ie, irn);
}
ir_graph *irg = pse->irg;
ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
- pse->cls = cls;
- chordal_env->cls = cls;
- chordal_env->border_heads = pmap_create();
- chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);
+ pse->cls = cls;
+ chordal_env->cls = cls;
+ chordal_env->border_heads = pmap_create();
+ chordal_env->allocatable_regs = bitset_malloc(chordal_env->cls->n_regs);
be_assure_liveness(irg);
be_liveness_assure_chk(be_get_irg_liveness(irg));
stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq));
/* put all ignore registers into the ignore register set. */
- be_put_ignore_regs(irg, pse->cls, chordal_env->ignore_colors);
+ be_put_allocatable_regs(irg, pse->cls, chordal_env->allocatable_regs);
be_timer_push(T_RA_CONSTR);
be_pre_spill_prepare_constr(irg, chordal_env->cls);
be_chordal_env_t *chordal_env = &pse->cenv;
ir_graph *irg = pse->irg;
ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
- int colors_n = arch_register_class_n_regs(chordal_env->cls);
- int allocatable_regs
- = colors_n - be_put_ignore_regs(irg, chordal_env->cls, NULL);
+ int allocatable_regs = be_get_n_allocatable_regs(irg, chordal_env->cls);
/* some special classes contain only ignore regs, no work to be done */
if (allocatable_regs > 0) {
/* free some always allocated data structures */
pmap_destroy(chordal_env->border_heads);
- bitset_free(chordal_env->ignore_colors);
+ bitset_free(chordal_env->allocatable_regs);
}
/**
be_assure_liveness(irg);
- chordal_env.obst = &obst;
- chordal_env.opts = &options;
- chordal_env.irg = irg;
- chordal_env.border_heads = NULL;
- chordal_env.ifg = NULL;
- chordal_env.ignore_colors = NULL;
+ chordal_env.obst = &obst;
+ chordal_env.opts = &options;
+ chordal_env.irg = irg;
+ chordal_env.border_heads = NULL;
+ chordal_env.ifg = NULL;
+ chordal_env.allocatable_regs = NULL;
obstack_init(&obst);
const arch_register_class_t *cls; /**< The current register class. */
pmap *border_heads; /**< Maps blocks to border heads. */
be_ifg_t *ifg; /**< The interference graph. */
- bitset_t *ignore_colors;/**< A set of colors which shall be ignored in register allocation. */
+ bitset_t *allocatable_regs; /**< set of allocatable registers */
};
static inline struct list_head *_get_block_border_head(const be_chordal_env_t *inf, ir_node *bl) {
int free_col;
/* Get all possible colors */
- bitset_copy(free_cols, co->cenv->ignore_colors);
- bitset_flip_all(free_cols);
+ bitset_copy(free_cols, co->cenv->allocatable_regs);
/* Exclude colors not assignable to the irn */
req = arch_get_register_req_out(irn);
qnode_t *curr = NULL;
qnode_t *tmp;
const arch_register_req_t *req;
- bitset_t const* ignore;
+ bitset_t const* allocatable_regs;
unsigned n_regs;
unsigned idx;
int i;
/* init queue */
INIT_LIST_HEAD(&ou->queue);
- req = arch_get_register_req_out(ou->nodes[0]);
- ignore = ou->co->cenv->ignore_colors;
- n_regs = req->cls->n_regs;
+ req = arch_get_register_req_out(ou->nodes[0]);
+ allocatable_regs = ou->co->cenv->allocatable_regs;
+ n_regs = req->cls->n_regs;
if (arch_register_req_is(req, limited)) {
unsigned const* limited = req->limited;
for (idx = 0; idx != n_regs; ++idx) {
- if (bitset_is_set(ignore, idx))
+ if (!bitset_is_set(allocatable_regs, idx))
continue;
if (!rbitset_is_set(limited, idx))
continue;
}
} else {
for (idx = 0; idx != n_regs; ++idx) {
- if (bitset_is_set(ignore, idx))
+ if (!bitset_is_set(allocatable_regs, idx))
continue;
ou_insert_qnode(ou, new_qnode(ou, idx));
typedef struct {
ir_phase ph;
copy_opt_t *co;
- bitset_t *ignore_regs;
+ bitset_t *allocatable_regs;
co2_irn_t *touched;
int visited;
int n_regs;
}
ci->is_constrained = 1;
} else {
- bitset_copy(ci->adm_cache, env->ignore_regs);
- bitset_flip_all(ci->adm_cache);
+ bitset_copy(ci->adm_cache, env->allocatable_regs);
}
}
env.visited = 0;
env.co = co;
env.n_regs = co->cls->n_regs;
- env.ignore_regs = bitset_alloca(co->cls->n_regs);
- be_put_ignore_regs(co->cenv->irg, co->cls, env.ignore_regs);
+ env.allocatable_regs = bitset_alloca(co->cls->n_regs);
+ be_put_allocatable_regs(co->cenv->irg, co->cls, env.allocatable_regs);
FIRM_DBG_REGISTER(env.dbg, "firm.be.co2");
INIT_LIST_HEAD(&env.cloud_head);
typedef struct co_mst_env_t {
int n_regs; /**< number of regs in class */
int k; /**< number of non-ignore registers in class */
- bitset_t *ignore_regs; /**< set containing all global ignore registers */
+ bitset_t *allocatable_regs; /**< set containing all allocatable registers */
ir_phase ph; /**< phase object holding data for nodes */
pqueue_t *chunks; /**< priority queue for chunks */
list_head chunklist; /**< list holding all chunks */
bitset_set_all(res->adm_colors);
/* exclude global ignore registers as well */
- bitset_andnot(res->adm_colors, env->ignore_regs);
+ bitset_and(res->adm_colors, env->allocatable_regs);
/* compute the constraint factor */
res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
int n_succeeded;
/* skip ignore colors */
- if (bitset_is_set(env->ignore_regs, col))
+ if (!bitset_is_set(env->allocatable_regs, col))
continue;
DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
*/
static int co_solve_heuristic_mst(copy_opt_t *co)
{
- unsigned n_regs = co->cls->n_regs;
- bitset_t *ignore_regs = bitset_alloca(n_regs);
+ unsigned n_regs = co->cls->n_regs;
+ bitset_t *allocatable_regs = bitset_alloca(n_regs);
unsigned i, j, k;
ir_node *irn;
co_mst_env_t mst_env;
phase_init(&mst_env.ph, co->irg, co_mst_irn_init);
phase_set_private(&mst_env.ph, &mst_env);
- k = be_put_ignore_regs(co->cenv->irg, co->cls, ignore_regs);
- k = n_regs - k;
+ be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
+ k = bitset_popcount(allocatable_regs);
- mst_env.n_regs = n_regs;
- mst_env.k = k;
- mst_env.chunks = new_pqueue();
- mst_env.co = co;
- mst_env.ignore_regs = ignore_regs;
- mst_env.ifg = co->cenv->ifg;
+ mst_env.n_regs = n_regs;
+ mst_env.k = k;
+ mst_env.chunks = new_pqueue();
+ mst_env.co = co;
+ mst_env.allocatable_regs = allocatable_regs;
+ mst_env.ifg = co->cenv->ifg;
INIT_LIST_HEAD(&mst_env.chunklist);
- mst_env.chunk_visited = 0;
- mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
+ mst_env.chunk_visited = 0;
+ mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
for (i = 0; i < n_regs; ++i) {
col_cost_t *vec = phase_alloc(&mst_env.ph, sizeof(*vec) * n_regs);
my.normal_colors = bitset_alloca(arch_register_class_n_regs(co->cls));
bitset_clear_all(my.normal_colors);
- arch_put_non_ignore_regs(co->cls, my.normal_colors);
+ be_put_allocatable_regs(co->irg, co->cls, my.normal_colors);
my.n_colors = bitset_popcount(my.normal_colors);
ienv = new_ilp_env(co, ilp2_build, ilp2_apply, &my);
assert(cls == env->cls);
- op->regs = bitset_obstack_alloc(obst, env->cls->n_regs);
-
if (type & arch_register_req_type_limited) {
- rbitset_copy_to_bitset(req->limited, op->regs);
+ bitset_t *regs = bitset_obstack_alloc(obst, env->cls->n_regs);
+ rbitset_copy_to_bitset(req->limited, regs);
+ op->regs = regs;
} else {
- arch_put_non_ignore_regs(env->cls, op->regs);
- if (env->ignore_colors)
- bitset_andnot(op->regs, env->ignore_colors);
+ op->regs = env->allocatable_regs;
}
}
return insn;
}
-
-be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, ir_graph *irg,
- const arch_register_class_t *cls,
- struct obstack *obst)
-{
- ie->cls = cls;
- ie->obst = obst;
- ie->ignore_colors = bitset_obstack_alloc(obst, cls->n_regs);
- be_abi_put_ignore_regs(be_get_irg_abi(irg), cls, ie->ignore_colors);
-
- return ie;
-}
ir_node *irn; /**< Firm node of the insn this operand belongs to */
ir_node *carrier; /**< node representing the operand value (Proj or the node itself for defs, the used value for uses) */
be_operand_t *partner; /**< used in bechordal later... (TODO what does it do?) */
- bitset_t *regs; /**< admissible register bitset */
+ const bitset_t *regs; /**< admissible register bitset */
int pos; /**< pos of the operand (0 to n are inputs, -1 to -n are outputs) */
const arch_register_req_t *req; /**< register constraints for the carrier node */
unsigned has_constraints : 1; /**< the carrier node has register constraints (the constraint type is limited) */
struct be_insn_env_t {
struct obstack *obst;
const arch_register_class_t *cls;
- bitset_t *ignore_colors;
+ bitset_t *allocatable_regs;
};
#define be_insn_n_defs(insn) ((insn)->use_start)
be_insn_t *be_scan_insn(const be_insn_env_t *env, ir_node *irn);
-be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, ir_graph *irg, const arch_register_class_t *cls, struct obstack *obst);
-
#endif /* FIRM_BE_BEINSN_T_H */
name ? name : "lower_for_target",
do_lower_for_target);
}
-
-unsigned be_put_ignore_regs(const ir_graph *irg,
- const arch_register_class_t *cls, bitset_t *bs)
-{
- if (bs == NULL)
- bs = bitset_alloca(cls->n_regs);
- else
- bitset_clear_all(bs);
-
- assert(bitset_size(bs) == cls->n_regs);
- arch_put_non_ignore_regs(cls, bs);
- bitset_flip_all(bs);
- be_abi_put_ignore_regs(be_get_irg_abi(irg), cls, bs);
-
- return bitset_popcount(bs);
-}
ir_graph *irg; /**< The graph under examination. */
const arch_register_class_t *cls; /**< Current processed register class */
be_lv_t *lv;
- bitset_t *ignored_regs;
+ bitset_t *allocatable_regs;
pbqp_matrix *ife_matrix_template;
pbqp_matrix *aff_matrix_template;
plist_t *rpeo;
{
const arch_register_class_t *cls = pbqp_alloc_env->cls;
pbqp *pbqp_inst = pbqp_alloc_env->pbqp_inst;
- bitset_t *ignored_regs = pbqp_alloc_env->ignored_regs;
+ bitset_t *allocatable_regs = pbqp_alloc_env->allocatable_regs;
unsigned colors_n = arch_register_class_n_regs(cls);
unsigned cntConstrains = 0;
/* set costs depending on register constrains */
unsigned idx;
for (idx = 0; idx < colors_n; idx++) {
- if (bitset_is_set(ignored_regs, idx) || !arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, idx))) {
+ if (!bitset_is_set(allocatable_regs, idx) || !arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, idx))) {
/* constrained */
vector_set(costs_vector, idx, INF_COSTS);
cntConstrains++;
/* initialize pbqp allocation data structure */
- pbqp_alloc_env.pbqp_inst = alloc_pbqp(get_irg_last_idx(irg)); /* initialize pbqp instance */
- pbqp_alloc_env.cls = cls;
- pbqp_alloc_env.irg = irg;
- pbqp_alloc_env.lv = lv;
- pbqp_alloc_env.ignored_regs = bitset_malloc(colors_n);
- pbqp_alloc_env.rpeo = plist_new();
- pbqp_alloc_env.restr_nodes = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
- pbqp_alloc_env.ife_edge_num = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
- pbqp_alloc_env.env = env;
- be_put_ignore_regs(irg, cls, pbqp_alloc_env.ignored_regs); /* get ignored registers */
+ pbqp_alloc_env.pbqp_inst = alloc_pbqp(get_irg_last_idx(irg)); /* initialize pbqp instance */
+ pbqp_alloc_env.cls = cls;
+ pbqp_alloc_env.irg = irg;
+ pbqp_alloc_env.lv = lv;
+ pbqp_alloc_env.allocatable_regs = bitset_malloc(colors_n);
+ pbqp_alloc_env.rpeo = plist_new();
+ pbqp_alloc_env.restr_nodes = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
+ pbqp_alloc_env.ife_edge_num = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
+ pbqp_alloc_env.env = env;
+ be_put_allocatable_regs(irg, cls, pbqp_alloc_env.allocatable_regs);
/* create costs matrix template for interference edges */
#if KAPS_DUMP
fclose(file_before);
#endif
- bitset_free(pbqp_alloc_env.ignored_regs);
+ bitset_free(pbqp_alloc_env.allocatable_regs);
free_pbqp(pbqp_alloc_env.pbqp_inst);
plist_free(pbqp_alloc_env.rpeo);
xfree(pbqp_alloc_env.restr_nodes);
n_regs = arch_register_class_n_regs(cls);
normal_regs = rbitset_malloc(n_regs);
- be_abi_set_non_ignore_regs(be_get_irg_abi(irg), cls, normal_regs);
+ be_set_allocatable_regs(irg, cls, normal_regs);
spill();
*/
static void perform_value_serialization_heuristic(rss_t *rss)
{
- bitset_t *arch_nonign_bs = bitset_alloca(arch_register_class_n_regs(rss->cls));
- bitset_t *abi_ign_bs = bitset_alloca(arch_register_class_n_regs(rss->cls));
unsigned available_regs, iteration;
dvg_t dvg;
ir_nodeset_t *sat_vals;
pset *ser_set = new_pset(cmp_rss_edges, 20);
- /* available_regs = R = |arch_non_ignore_regs cut ~abi_ignore_regs| */
- arch_put_non_ignore_regs(rss->cls, arch_nonign_bs);
- be_abi_put_ignore_regs(rss->abi, rss->cls, abi_ign_bs);
- bitset_andnot(arch_nonign_bs, abi_ign_bs);
- available_regs = bitset_popcount(arch_nonign_bs);
- //num_live = pset_count(rss->live_block);
- //available_regs -= num_live < available_regs ? num_live : 0;
+ available_regs = be_get_n_allocatable_regs(rss->irg, rss->cls);
DBG((rss->dbg, LEVEL_1, "\n\t#available regs: %d\n\n", available_regs));
obstack_init(&obst);
cls = rcls;
lv = be_get_irg_liveness(irg);
- n_regs = cls->n_regs - be_put_ignore_regs(irg, cls, NULL);
+ n_regs = be_get_n_allocatable_regs(irg, cls);
ws = new_workset();
uses = be_begin_uses(irg, lv);
loop_ana = be_new_loop_pressure(irg, cls);
int i, n_regs;
/* some special classes contain only ignore regs, nothing to do then */
- n_regs = cls->n_regs - be_put_ignore_regs(irg, cls, NULL);
+ n_regs = be_get_n_allocatable_regs(irg, cls);
if (n_regs == 0)
return;
static void be_spill_daemel(ir_graph *irg, const arch_register_class_t *new_cls)
{
- n_regs = new_cls->n_regs - be_put_ignore_regs(irg, new_cls, NULL);
+ n_regs = be_get_n_allocatable_regs(irg, new_cls);
if (n_regs == 0)
return;
env.lv = be_liveness(irg);
env.irg = irg;
env.cls = cls;
- env.registers_available
- = env.cls->n_regs - be_put_ignore_regs(irg, env.cls, NULL);
+ env.registers_available = be_get_n_allocatable_regs(irg, cls);
env.problem_found = 0;
be_liveness_assure_sets(env.lv);
if (r_clobber_bits != 0) {
if (parsed_constraint.all_registers_allowed) {
parsed_constraint.all_registers_allowed = 0;
- be_abi_set_non_ignore_regs(be_get_irg_abi(current_ir_graph),
+ be_set_allocatable_regs(current_ir_graph,
parsed_constraint.cls,
&parsed_constraint.allowed_registers);
}