static void init_arm_SwitchJmp_attributes(ir_node *res,
const ir_switch_table *table)
{
- unsigned n_outs = arch_get_irn_n_outs(res);
- unsigned o;
-
arm_SwitchJmp_attr_t *attr = get_arm_SwitchJmp_attr(res);
attr->table = table;
- for (o = 0; o < n_outs; ++o) {
+ /* be_foreach_out declares the index 'o' and caches the out count itself */
+ be_foreach_out(res, o) {
arch_set_irn_register_req_out(res, o, arch_no_register_req);
}
}
*/
static int find_out_for_reg(ir_node *node, const arch_register_t *reg)
{
- int n_outs = arch_get_irn_n_outs(node);
- int o;
-
- for (o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
if (req == reg->single_req)
return o;
arch_dump_register_req(F, req, node);
fputs("\n", F);
}
- unsigned n_outs = arch_get_irn_n_outs(node);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
fprintf(F, "outreq #%u = ", o);
arch_dump_register_req(F, req, node);
static inline unsigned arch_get_irn_n_outs(const ir_node *node)
{
- backend_info_t *info = be_get_info(node);
- if (info->out_infos == NULL)
- return 0;
-
+ /* NOTE(review): the NULL check on out_infos was dropped — this now assumes
+  * out_infos is always allocated; ARR_LEN on a NULL array would crash.
+  * Confirm every node carries initialized backend out_infos. */
+ backend_info_t *const info = be_get_info(node);
return (unsigned)ARR_LEN(info->out_infos);
}
+/**
+ * Iterate over all output slots of @p node.
+ * Declares the unsigned index variable @p i and evaluates the slot count
+ * once (cached in i##__n) before the loop starts.
+ */
+#define be_foreach_out(node, i) \
+ for (unsigned i = 0, i##__n = arch_get_irn_n_outs(node); i != i##__n; ++i)
+
/**
* Start codegeneration
*/
static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
{
ir_node *start = get_irg_start(irg);
- unsigned n_outs = arch_get_irn_n_outs(start);
- int i;
/* do a naive linear search... */
- for (i = 0; i < (int)n_outs; ++i) {
+ be_foreach_out(start, i) {
arch_register_req_t const *const out_req = arch_get_irn_register_req_out(start, i);
if (!arch_register_req_is(out_req, limited))
continue;
return false;
/* schedpoint must not overwrite registers of our inputs */
- unsigned n_outs = arch_get_irn_n_outs(schedpoint);
for (int i = 0; i < node_arity; ++i) {
ir_node *in = get_irn_n(node, i);
const arch_register_t *reg = arch_get_irn_register(in);
continue;
const arch_register_req_t *in_req
= arch_get_irn_register_req_in(node, i);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(schedpoint, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(schedpoint, o);
const arch_register_req_t *outreq
bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
const ir_node *after)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
const ir_node *node_block = get_nodes_block(node);
const ir_node *after_block = get_block_const(after);
const ir_node *schedpoint;
be_lv_foreach(lv, succ, be_lv_state_in, live_node) {
const arch_register_t *reg = arch_get_irn_register(live_node);
const arch_register_req_t *req = arch_get_irn_register_req(live_node);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(node, o);
const arch_register_req_t *outreq
break;
const arch_register_t *reg = arch_get_irn_register(phi);
const arch_register_req_t *req = arch_get_irn_register_req(phi);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(node, o);
const arch_register_req_t *outreq
continue;
const arch_register_req_t *in_req
= arch_get_irn_register_req_in(schedpoint, i);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(node, o);
const arch_register_req_t *outreq
static void assure_should_be_same_requirements(ir_node *node)
{
const arch_register_t *out_reg, *in_reg;
- int n_res, i;
ir_node *in_node, *block;
- n_res = arch_get_irn_n_outs(node);
block = get_nodes_block(node);
/* check all OUT requirements, if there is a should_be_same */
- for (i = 0; i < n_res; i++) {
+ be_foreach_out(node, i) {
int i2, arity;
int same_pos;
ir_node *uses_out_reg;
*/
static void fix_am_source(ir_node *irn)
{
- int n_res, i;
-
/* check only ia32 nodes with source address mode */
if (!is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
return;
if (get_ia32_am_support(irn) != ia32_am_binary)
return;
- n_res = arch_get_irn_n_outs(irn);
-
- for (i = 0; i < n_res; i++) {
+ be_foreach_out(irn, i) {
const arch_register_req_t *req = arch_get_irn_register_req_out(irn, i);
const arch_register_t *out_reg;
int same_pos;
static void init_ia32_switch_attributes(ir_node *node,
const ir_switch_table *table)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
- unsigned o;
-
ia32_switch_attr_t *attr = (ia32_switch_attr_t*) get_irn_generic_attr(node);
#ifndef NDEBUG
attr->attr.attr_type |= IA32_ATTR_ia32_switch_attr_t;
#endif
attr->table = table;
- for (o = 0; o < n_outs; ++o) {
+ /* every output slot gets the dummy 'no register' requirement */
+ be_foreach_out(node, o) {
arch_set_irn_register_req_out(node, o, arch_no_register_req);
}
}
proj = pn_ia32_Call_X_regular;
} else {
arch_register_req_t const *const req = arch_get_irn_register_req(node);
- int const n_outs = arch_get_irn_n_outs(new_call);
- int i;
assert(proj >= pn_be_Call_first_res);
assert(arch_register_req_is(req, limited));
- for (i = 0; i < n_outs; ++i) {
+ be_foreach_out(new_call, i) {
arch_register_req_t const *const new_req = arch_get_irn_register_req_out(new_call, i);
if (!arch_register_req_is(new_req, limited) ||
new_req->cls != req->cls ||
continue;
proj = i;
- break;
+ goto found;
}
- assert(i < n_outs);
+ panic("no matching out requirement found");
+found:;
}
res = new_rd_Proj(dbgi, new_call, mode, proj);
panic("cannot handle %+F with x87 constraints", n);
}
- for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
+ be_foreach_out(n, i) {
arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
if (req->cls == &ia32_reg_classes[CLASS_ia32_fp])
panic("cannot handle %+F with x87 constraints", n);
static bool sparc_modifies_flags(const ir_node *node)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
if (req->cls == &sparc_reg_classes[CLASS_sparc_flags_class])
return true;
static bool sparc_modifies_fp_flags(const ir_node *node)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
if (req->cls == &sparc_reg_classes[CLASS_sparc_fpflags_class])
return true;
static bool writes_reg(const ir_node *node, unsigned reg_index, unsigned width)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_t *out_reg = arch_get_irn_register_out(node, o);
if (out_reg == NULL)
continue;
}
/* node must not write to one of the call outputs */
- unsigned n_call_outs = arch_get_irn_n_outs(to);
- for (unsigned o = 0; o < n_call_outs; ++o) {
+ be_foreach_out(to, o) {
const arch_register_t *reg = arch_get_irn_register_out(to, o);
if (reg == NULL)
continue;
ir_node *constant = create_constant_from_immediate(node, offset);
ir_node *new_load = new_bd_sparc_Ld_reg(dbgi, block, ptr, constant, mem, load_store_mode);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
- unsigned n_outs = arch_get_irn_n_outs(node);
- unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
- for (i = 0; i < n_outs; i++) {
+ be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
ir_node *new_load = new_bd_sparc_Ldf_s(dbgi, block, new_ptr, mem, load_store_mode, NULL, 0, true);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
- unsigned n_outs = arch_get_irn_n_outs(node);
- unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
- for (i = 0; i < n_outs; i++) {
+ be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
ir_node *constant = create_constant_from_immediate(node, offset);
ir_node *new_load = new_bd_sparc_St_reg(dbgi, block, value, ptr, constant, mem, load_store_mode);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
- unsigned n_outs = arch_get_irn_n_outs(node);
- unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
- for (i = 0; i < n_outs; i++) {
+ be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
ir_node *new_load = new_bd_sparc_Stf_s(dbgi, block, value, new_ptr, mem, load_store_mode, NULL, 0, true);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
- unsigned n_outs = arch_get_irn_n_outs(node);
- unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
- for (i = 0; i < n_outs; i++) {
+ be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
const ir_switch_table *table,
ir_entity *table_entity)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
- unsigned o;
-
sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr(node);
attr->table = table;
attr->table_entity = table_entity;
- for (o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
arch_set_irn_register_req_out(node, o, arch_no_register_req);
}
}