code \
)
+#define be_foreach_use(node, ccls, in_req, value, value_req, code) /* run `code` for every input of `node` required in class `ccls`; binds `in_req` (input requirement), `value` (the used node), `value_req` (its requirement); loop index `i_` is visible inside `code` */ \
+ do { \
+ for (int i_ = 0, n_ = get_irn_arity(node); i_ < n_; ++i_) { /* walk all inputs of node */ \
+ const arch_register_req_t *in_req = arch_get_irn_register_req_in(node, i_); \
+ if (in_req->cls != ccls) /* only inputs whose requirement is in class ccls */ \
+ continue; \
+ ir_node *value = get_irn_n(node, i_); \
+ const arch_register_req_t *value_req = arch_get_irn_register_req(value); \
+ if (value_req->type & arch_register_req_type_ignore) /* skip values marked "ignore" — not considered by register allocation */ \
+ continue; \
+ code \
+ } \
+ } while (0)
+
static inline const arch_register_class_t *arch_get_irn_reg_class(
const ir_node *node)
{
* If the node is no phi node we can examine the uses.
*/
if (!is_Phi(irn)) {
- for (int i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *op = get_irn_n(irn, i);
-
- if (arch_irn_consider_in_reg_alloc(env->cls, op)) {
- int nr = get_irn_idx(op);
- const char *msg = "-";
-
- if (!bitset_is_set(live, nr)) {
- border_use(op, step, 1);
- bitset_set(live, nr);
- msg = "X";
- }
-
- DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));
+ be_foreach_use(irn, env->cls, in_req_, op, op_req_,
+ unsigned idx = get_irn_idx(op);
+ const char *msg = "-";
+
+ if (!bitset_is_set(live, idx)) {
+ border_use(op, step, 1);
+ bitset_set(live, idx);
+ msg = "X";
}
- }
+
+ DB((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i_, op));
+ );
}
++step;
}
{
struct obstack *const obst = &env->obst;
be_operand_t o;
- int i, n;
be_insn_t *insn = OALLOCZ(obst, be_insn_t);
insn->use_start = insn->n_ops;
/* now collect the uses for this node */
- for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *op = get_irn_n(irn, i);
-
- if (arch_irn_consider_in_reg_alloc(env->cls, op)) {
- /* found a register use, create an operand */
- arch_register_req_t const *const req = arch_get_irn_register_req_in(irn, i);
- if (arch_register_req_is(req, limited)) {
- o.regs = req->limited;
- has_constraints = true;
- } else {
- o.regs = env->allocatable_regs->data;
- }
- o.carrier = op;
- o.partner = NULL;
- obstack_grow(obst, &o, sizeof(o));
- insn->n_ops++;
+ be_foreach_use(irn, cls, in_req, op, op_req,
+ /* found a register use, create an operand */
+ if (arch_register_req_is(in_req, limited)) {
+ o.regs = in_req->limited;
+ has_constraints = true;
+ } else {
+ o.regs = env->allocatable_regs->data;
}
- }
+ o.carrier = op;
+ o.partner = NULL;
+ obstack_grow(obst, &o, sizeof(o));
+ insn->n_ops++;
+ );
if (!has_constraints)
return NULL;
int new_size;
ir_node *frontier = bl;
int i, n;
+ be_lv_t *lv = be_get_irg_liveness(irg);
/* get some Proj and find out the register class of that Proj. */
ir_node *one_proj = get_edge_src_irn(get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL));
* the Perm, increasing the register pressure by one.
*/
sched_foreach_reverse_from(sched_prev(perm), irn) {
- for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
- ir_node *op = get_irn_n(irn, i);
- be_lv_t *lv = be_get_irg_liveness(irg);
- if (arch_irn_consider_in_reg_alloc(cls, op) &&
- !be_values_interfere(lv, op, one_proj)) {
+ be_foreach_use(irn, cls, in_req_, op, op_req_,
+ if (!be_values_interfere(lv, op, one_proj)) {
frontier = irn;
goto found_front;
}
- }
+ );
}
found_front:
unsigned *def_constr = NULL;
int arity = get_irn_arity(node);
- int i, i2;
-
/* Insert a copy for constraint inputs attached to a value which can't
* fulfill the constraint
* (typical example: stack pointer as input to copyb)
* TODO: This really just checks precolored registers at the moment and
* ignores the general case of not matching in/out constraints
*/
- for (i = 0; i < arity; ++i) {
+ for (int i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
const arch_register_t *reg;
}
/* insert copies for nodes that occur constrained more than once. */
- for (i = 0; i < arity; ++i) {
- ir_node *in;
- ir_node *copy;
- const arch_register_req_t *req;
-
- req = arch_get_irn_register_req_in(node, i);
- if (req->cls != cls)
- continue;
-
+ be_foreach_use(node, cls, req, in, in_req_,
if (!arch_register_req_is(req, limited))
continue;
- in = get_irn_n(node, i);
- if (!arch_irn_consider_in_reg_alloc(cls, in))
- continue;
-
- for (i2 = i + 1; i2 < arity; ++i2) {
+ for (int i2 = i_ + 1; i2 < arity; ++i2) {
ir_node *in2;
const arch_register_req_t *req2;
if (rbitsets_equal(req->limited, req2->limited, cls->n_regs))
continue;
- copy = be_new_Copy(block, in);
+ ir_node *copy = be_new_Copy(block, in);
stat_ev_int("constr_copy", 1);
sched_add_before(node, copy);
"inserting multiple constr copy %+F for %+F pos %d\n",
copy, node, i2));
}
- }
+ );
/* collect all registers occurring in out constraints. */
be_foreach_definition(node, cls, def,
* and being constrained to a register which also occurs in out constraints.
*/
unsigned *const tmp = rbitset_alloca(cls->n_regs);
- for (i = 0; i < arity; ++i) {
- const arch_register_req_t *req;
- ir_node *in;
- ir_node *copy;
-
- /*
- * Check, if
+ be_foreach_use(node, cls, req, in, in_req_,
+ /* Check, if
* 1) the operand is constrained.
* 2) lives through the node.
* 3) is constrained to a register occurring in out constraints.
*/
- req = arch_get_irn_register_req_in(node, i);
- if (req->cls != cls)
- continue;
if (!arch_register_req_is(req, limited))
continue;
-
- in = get_irn_n(node, i);
- if (!arch_irn_consider_in_reg_alloc(cls, in))
- continue;
if (!be_values_interfere(lv, node, in))
continue;
if (be_is_Copy(in))
continue;
- copy = be_new_Copy(block, in);
+ ir_node *copy = be_new_Copy(block, in);
sched_add_before(node, copy);
- set_irn_n(node, i, copy);
+ set_irn_n(node, i_, copy);
DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
- copy, node, i));
+ copy, node, i_));
be_liveness_update(lv, in);
- }
+ );
}
static void pre_spill_prepare_constr_walker(ir_node *block, void *data)
new_vals = new_workset();
sched_foreach(block, irn) {
- int i, arity;
assert(workset_get_length(ws) <= n_regs);
/* Phis are no real instr (see insert_starters()) */
/* allocate all values _used_ by this instruction */
workset_clear(new_vals);
- for (i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
- ir_node *in = get_irn_n(irn, i);
- if (!arch_irn_consider_in_reg_alloc(cls, in))
- continue;
-
+ be_foreach_use(irn, cls, in_req_, in, in_req,
/* (note that "spilled" is irrelevant here) */
workset_insert(new_vals, in, false);
- }
+ );
displace(new_vals, 1);
/* allocate all values _defined_ by this instruction */
/* we need registers for the non-live argument values */
size_t free_regs_needed = 0;
- int arity = get_irn_arity(node);
- for (int i = 0; i < arity; ++i) {
- ir_node *pred = get_irn_n(node, i);
- if (arch_irn_consider_in_reg_alloc(cls, pred)
- && !ir_nodeset_contains(live_nodes, pred)) {
- free_regs_needed += get_value_width(pred);
+ be_foreach_use(node, cls, in_req_, use, pred_req_,
+ if (!ir_nodeset_contains(live_nodes, use)) {
+ free_regs_needed += get_value_width(use);
}
- }
+ );
/* we can reuse all reloaded values for the defined values, but we might
* need even more registers */
/* make sure the node is not an argument of the instruction */
bool is_use = false;
+ int arity = get_irn_arity(node);
for (int i = 0; i < arity; ++i) {
ir_node *in = get_irn_n(node, i);
if (in == cand_node) {
*/
static fp_liveness fp_liveness_transfer(ir_node *irn, fp_liveness live)
{
- int i, n;
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_fp];
be_foreach_definition(irn, cls, def,
const arch_register_t *reg = x87_get_irn_register(def);
live &= ~(1 << reg->index);
);
-
- for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *op = get_irn_n(irn, i);
-
- if (mode_is_float(get_irn_mode(op)) &&
- arch_irn_consider_in_reg_alloc(cls, op)) {
- const arch_register_t *reg = x87_get_irn_register(op);
- live |= 1 << reg->index;
- }
- }
+ be_foreach_use(irn, cls, in_req_, op, op_req_,
+ const arch_register_t *reg = x87_get_irn_register(op);
+ live |= 1 << reg->index;
+ );
return live;
}