struct list_head *head = get_block_border_head(env, block);
be_lv_t *lv = be_get_irg_liveness(env->irg);
- const ir_node *irn;
border_t *b;
- int idx;
bitset_clear_all(colors);
bitset_clear_all(live);
* Since their colors have already been assigned (The dominators were
* allocated before), we have to mark their colors as used also.
*/
- be_lv_foreach(lv, block, be_lv_state_in, idx) {
- irn = be_lv_get_irn(lv, block, idx);
+ be_lv_foreach(lv, block, be_lv_state_in, irn) {
if (has_reg_class(env, irn)) {
const arch_register_t *reg = arch_get_irn_register(irn);
int col;
bitset_t *live = bitset_malloc(get_irg_last_idx(env->irg));
be_lv_t *lv = be_get_irg_liveness(env->irg);
- int i, n;
unsigned step = 0;
unsigned pressure = 0;
struct list_head *head;
* Make final uses of all values live out of the block.
* They are necessary to build up real intervals.
*/
- be_lv_foreach(lv, block, be_lv_state_end, i) {
- ir_node *irn = be_lv_get_irn(lv, block, i);
+ be_lv_foreach(lv, block, be_lv_state_end, irn) {
if (has_reg_class(env, irn)) {
DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
bitset_set(live, get_irn_idx(irn));
* If the node is no phi node we can examine the uses.
*/
if (!is_Phi(irn)) {
- for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
+ for (int i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
if (has_reg_class(env, op)) {
struct block_dims *dims = pmap_get(struct block_dims, env->block_dims, bl);
char buf[64];
border_t *b;
- int idx;
ir_snprintf(buf, sizeof(buf), "%F", bl);
if (dom) {
struct block_dims *dom_dims = pmap_get(struct block_dims, env->block_dims, dom);
- be_lv_foreach(lv, bl, be_lv_state_in, idx) {
- ir_node *irn = be_lv_get_irn(lv, bl, idx);
+ be_lv_foreach(lv, bl, be_lv_state_in, irn) {
if (arch_irn_consider_in_reg_alloc(env->cls, irn)) {
const arch_register_t *reg = arch_get_irn_register(irn);
int col = arch_register_get_index(reg);
const arch_register_class_t *cls,
const ir_node *block, ir_nodeset_t *live)
{
- int i;
-
assert(lv->sets_valid && "live sets must be computed");
- be_lv_foreach(lv, block, be_lv_state_end, i) {
- ir_node *node = be_lv_get_irn(lv, block, i);
+ be_lv_foreach(lv, block, be_lv_state_end, node) {
if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
struct be_lv_info_node_t node;
};
-static inline int _be_lv_next_irn(const be_lv_t *lv, const ir_node *block,
- unsigned flags, int i)
-{
- be_lv_info_t *arr = ir_nodehashmap_get(be_lv_info_t, &lv->map, block);
- if (arr != NULL) {
- int n_members = (int) arr[0].head.n_members;
- while(i < n_members) {
- if(arr[i + 1].node.flags & flags) {
- return i;
- }
- ++i;
- }
- }
-
- return -1;
-}
-
-static inline ir_node *_be_lv_get_irn(const be_lv_t *lv, const ir_node *block,
- int i)
-{
- be_lv_info_t *arr = ir_nodehashmap_get(be_lv_info_t, &lv->map, block);
- return get_idx_irn(lv->irg, arr[i + 1].node.idx);
-}
-
be_lv_info_node_t *be_lv_get(const be_lv_t *li, const ir_node *block,
const ir_node *irn);
return res;
}
-#define be_lv_foreach(lv, bl, flags, i) \
- for (i = 0; (i = _be_lv_next_irn(lv, bl, flags, i)) >= 0; ++i)
+/**
+ * Iteration state for enumerating the values live at a block border.
+ * Replaces the old index-based _be_lv_next_irn/_be_lv_get_irn pair.
+ */
+typedef struct lv_iterator_t
+{
+	be_lv_info_t *info;  /* per-block liveness array; info[0] is the head record */
+	ir_graph *irg;       /* graph used to map stored node indices back to nodes */
+	be_lv_state_t flags; /* liveness states (in/end/out) the caller asked for */
+	size_t i;            /* entries left to inspect; counts down n_members..1, 0 = done */
+} lv_iterator_t;
+
+/**
+ * Set up iteration over the nodes live at @p block in any of the states
+ * given by @p flags. If the block has no liveness info the iterator is
+ * empty (i == 0).
+ */
+static inline lv_iterator_t be_lv_iteration_begin(const be_lv_t *lv,
+	const ir_node *block, be_lv_state_t flags)
+{
+	lv_iterator_t res;
+	res.info = ir_nodehashmap_get(be_lv_info_t, &lv->map, block);
+	res.irg = get_Block_irg(block);
+	res.flags = flags;
+	/* entries live at info[1..n_members]; walk them from the back */
+	res.i = res.info != NULL ? res.info[0].head.n_members : 0;
+	return res;
+}
+
+/**
+ * Return the next live node matching the iterator's flags, or NULL when
+ * the set is exhausted. Note: nodes come out in reverse storage order,
+ * which differs from the old ascending-index iteration.
+ */
+static inline ir_node *be_lv_iteration_next(lv_iterator_t *iterator)
+{
+	while (iterator->i != 0) {
+		/* info[i] is read first, then i is decremented for the next call */
+		const be_lv_info_t *info = iterator->info + iterator->i--;
+		if (info->node.flags & iterator->flags)
+			return get_idx_irn(iterator->irg, info->node.idx);
+	}
+	return NULL;
+}
+/**
+ * Iterate over all nodes live at @p block in any of the states @p flags,
+ * binding each one to the freshly declared variable @p node.  The two
+ * outer for-loops exist only to scope 'once' and 'iter' inside a single
+ * statement so the macro behaves like a normal loop header.
+ * NOTE(review): nesting two be_lv_foreach loops shadows 'once'/'iter';
+ * presumably no caller does that — verify if -Wshadow is enabled.
+ */
+#define be_lv_foreach(lv, block, flags, node) \
+	for (bool once = true; once;) \
+		for (lv_iterator_t iter = be_lv_iteration_begin((lv), (block), (flags)); once; once = false) \
+			for (ir_node *node; (node = be_lv_iteration_next(&iter)) != NULL;)
+/**
+ * Insert every node live at @p block in liveness state @p state into the
+ * pset @p s and return @p s.
+ */
static inline pset *_be_lv_pset_put(const be_lv_t *lv, const ir_node *block,
                                    int state, pset *s)
{
-	int i;
-	be_lv_foreach(lv, block, state, i)
-		pset_insert_ptr(s, _be_lv_get_irn(lv, block, i));
+	be_lv_foreach(lv, block, state, node)
+		pset_insert_ptr(s, node);
	return s;
}
*/
static void process_block(ir_node *block, void *data)
{
- int l;
(void) data;
/* construct initial register assignment */
assert(lv->sets_valid && "live sets must be computed");
DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
- be_lv_foreach(lv, block, be_lv_state_end, l) {
- ir_node *node = be_lv_get_irn(lv, block, l);
+ be_lv_foreach(lv, block, be_lv_state_end, node) {
set_reg_value(node);
}
DB((dbg, LEVEL_1, "\nstart processing\n"));
*/
static void allocate_coalesce_block(ir_node *block, void *data)
{
- int i;
ir_nodeset_t live_nodes;
int n_preds;
block_info_t *block_info;
/* gather regalloc infos of predecessor blocks */
n_preds = get_Block_n_cfgpreds(block);
pred_block_infos = ALLOCAN(block_info_t*, n_preds);
- for (i = 0; i < n_preds; ++i) {
+ for (int i = 0; i < n_preds; ++i) {
ir_node *pred = get_Block_cfgpred_block(block, i);
block_info_t *pred_info = get_block_info(pred);
pred_block_infos[i] = pred_info;
phi_ins = ALLOCAN(ir_node*, n_preds);
/* collect live-in nodes and preassigned values */
- be_lv_foreach(lv, block, be_lv_state_in, i) {
+ be_lv_foreach(lv, block, be_lv_state_in, node) {
bool need_phi = false;
const arch_register_req_t *req;
const arch_register_t *reg;
int p;
- ir_node *node = be_lv_get_irn(lv, block, i);
req = arch_get_irn_register_req(node);
if (req->cls != cls)
continue;
/* we may not use registers used for inputs for optimistic splits */
arity = get_irn_arity(node);
- for (i = 0; i < arity; ++i) {
+ for (int i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
const arch_register_t *reg;
if (!arch_irn_consider_in_reg_alloc(cls, op))
loc_t *delayed;
unsigned len;
unsigned i;
- int in;
unsigned ws_count;
int free_slots, free_pressure_slots;
unsigned pressure;
arity = get_irn_arity(block);
pred_worksets = ALLOCAN(workset_t*, arity);
all_preds_known = true;
- for (in = 0; in < arity; ++in) {
+ for (int in = 0; in < arity; ++in) {
ir_node *pred_block = get_Block_cfgpred_block(block, in);
block_info_t *pred_info = get_block_info(pred_block);
}
/* check all Live-Ins */
- be_lv_foreach(lv, block, be_lv_state_in, in) {
- ir_node *node = be_lv_get_irn(lv, block, in);
+ be_lv_foreach(lv, block, be_lv_state_in, node) {
unsigned available;
if (all_preds_known) {
int n_cfgpreds;
unsigned best_time;
int outer_loop_allowed;
- int i;
/* Create the block info for this block. */
block_info = new_block_info(&env->obst, block);
}
/* check all Live-Ins */
- be_lv_foreach(env->lv, block, be_lv_state_in, i) {
- ir_node *const node = be_lv_get_irn(env->lv, block, i);
-
+ be_lv_foreach(env->lv, block, be_lv_state_in, node) {
if (!mode_is_data(get_irn_mode(node)))
continue;
{
unsigned i;
unsigned n_regs;
- int idx;
(void) data;
n_regs = arch_env->n_registers;
registers = ALLOCANZ(const ir_node*, n_regs);
- be_lv_foreach(lv, block, be_lv_state_end, idx) {
- ir_node *lv_node = be_lv_get_irn(lv, block, idx);
+ be_lv_foreach(lv, block, be_lv_state_end, lv_node) {
value_used(block, lv_node);
}
}
}
- be_lv_foreach(lv, block, be_lv_state_in, idx) {
- ir_node *lv_node = be_lv_get_irn(lv, block, idx);
+ be_lv_foreach(lv, block, be_lv_state_in, lv_node) {
value_def(lv_node);
}
*/
static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
{
- int i;
vfp_liveness live = 0;
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
const be_lv_t *lv = sim->lv;
- be_lv_foreach(lv, block, be_lv_state_end, i) {
+ be_lv_foreach(lv, block, be_lv_state_end, node) {
const arch_register_t *reg;
- const ir_node *node = be_lv_get_irn(lv, block, i);
if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;