*/
/** Returns the number of successors of the node: */
-FIRM_API int get_irn_n_outs(const ir_node *node);
+FIRM_API unsigned get_irn_n_outs(const ir_node *node);
/** Returns the User of a node from the Def-Use edge at position pos. */
-FIRM_API ir_node *get_irn_out(const ir_node *def, int pos);
+FIRM_API ir_node *get_irn_out(const ir_node *def, unsigned pos);
/**
* Returns the User and its input position from the Def-Use edge of def
* at position pos.
*/
-FIRM_API ir_node *get_irn_out_ex(const ir_node *def, int pos, int *in_pos);
+FIRM_API ir_node *get_irn_out_ex(const ir_node *def, unsigned pos, int *in_pos);
/**
* Sets the User at position pos.
* @param use the Use node
* @param in_pos the number of the corresponding Use-Def edge in the use node in array
*/
-FIRM_API void set_irn_out(ir_node *def, int pos, ir_node *use, int in_pos);
+FIRM_API void set_irn_out(ir_node *def, unsigned pos, ir_node *use, int in_pos);
/** Returns the number of control flow successors, ignore keep-alives. */
-FIRM_API int get_Block_n_cfg_outs(const ir_node *node);
+FIRM_API unsigned get_Block_n_cfg_outs(const ir_node *node);
/** Returns the number of control flow successors, honor keep-alives. */
-FIRM_API int get_Block_n_cfg_outs_ka(const ir_node *node);
+FIRM_API unsigned get_Block_n_cfg_outs_ka(const ir_node *node);
-/** Access predecessor n, ignore keep-alives. */
+/** Accesses control flow successor pos, ignoring keep-alives. */
-FIRM_API ir_node *get_Block_cfg_out(const ir_node *node, int pos);
+FIRM_API ir_node *get_Block_cfg_out(const ir_node *node, unsigned pos);
-/** Access predecessor n, honor keep-alives. */
+/** Accesses control flow successor pos, honoring keep-alives. */
-FIRM_API ir_node *get_Block_cfg_out_ka(const ir_node *node, int pos);
+FIRM_API ir_node *get_Block_cfg_out_ka(const ir_node *node, unsigned pos);
/**
* Walks over the graph starting at node. Walks also if graph is in state
#include "error.h"
#include "ircons.h"
-int get_irn_n_outs(const ir_node *node)
+unsigned get_irn_n_outs(const ir_node *node)
{
- assert(node->kind == k_ir_node);
- assert(node->out_valid);
- /* we misuse the first for the size info of the out array */
- return node->out[0].pos;
+ return node->o.out->n_edges;
}
-ir_node *get_irn_out(const ir_node *def, int pos)
+ir_node *get_irn_out(const ir_node *def, unsigned pos)
{
- assert(pos >= 0 && pos < get_irn_n_outs(def));
- assert(def->out_valid);
- return def->out[pos+1].use;
+ assert(pos < get_irn_n_outs(def));
+ return def->o.out->edges[pos].use;
}
-ir_node *get_irn_out_ex(const ir_node *def, int pos, int *in_pos)
+ir_node *get_irn_out_ex(const ir_node *def, unsigned pos, int *in_pos)
{
- assert(pos >= 0 && pos < get_irn_n_outs(def));
- assert(def->out_valid);
- *in_pos = def->out[pos+1].pos;
- return def->out[pos+1].use;
+ assert(pos < get_irn_n_outs(def));
+ *in_pos = def->o.out->edges[pos].pos;
+ return def->o.out->edges[pos].use;
}
-void set_irn_out(ir_node *def, int pos, ir_node *use, int in_pos)
+void set_irn_out(ir_node *def, unsigned pos, ir_node *use, int in_pos)
{
- assert(use);
- assert(pos >= 0 && pos < get_irn_n_outs(def));
- assert(def->out_valid);
- def->out[pos+1].use = use;
- def->out[pos+1].pos = in_pos;
+ assert(use != NULL);
+ assert(pos < get_irn_n_outs(def));
+ def->o.out->edges[pos].use = use;
+ def->o.out->edges[pos].pos = in_pos;
}
-int get_Block_n_cfg_outs(const ir_node *bl)
+unsigned get_Block_n_cfg_outs(const ir_node *bl)
{
assert(is_Block(bl));
- assert(bl->out_valid);
- int n_cfg_outs = 0;
- for (int i = 1; i <= bl->out[0].pos; ++i) {
- const ir_node *succ = bl->out[i].use;
- if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ))
- n_cfg_outs += succ->out[0].pos;
+ unsigned n_cfg_outs = 0;
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_End(succ) || is_Bad(succ))
+ continue;
+ n_cfg_outs += get_irn_n_outs(succ);
}
return n_cfg_outs;
}
-int get_Block_n_cfg_outs_ka(const ir_node *bl)
+unsigned get_Block_n_cfg_outs_ka(const ir_node *bl)
{
assert(is_Block(bl));
- assert(bl->out_valid);
- int n_cfg_outs = 0;
- for (int i = 1; i <= bl->out[0].pos; ++i) {
- const ir_node *succ = bl->out[i].use;
- if (get_irn_mode(succ) == mode_X) {
- if (is_Bad(succ))
+ unsigned n_cfg_outs = 0;
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_Bad(succ))
+ continue;
+ if (is_End(succ)) {
+ ir_node *end_bl = get_nodes_block(succ);
+ if (end_bl == bl)
continue;
- if (is_End(succ)) {
- /* ignore End if we are in the Endblock */
- if (get_nodes_block(succ) == bl)
- continue;
- else /* count Keep-alive as one */
- n_cfg_outs += 1;
- } else
- n_cfg_outs += succ->out[0].pos;
+ ++n_cfg_outs;
+ continue;
}
+ n_cfg_outs += get_irn_n_outs(succ);
}
return n_cfg_outs;
}
-ir_node *get_Block_cfg_out(const ir_node *bl, int pos)
+ir_node *get_Block_cfg_out(const ir_node *bl, unsigned pos)
{
assert(is_Block(bl));
- assert(bl->out_valid);
- for (int i = 1; i <= bl->out[0].pos; ++i) {
- const ir_node *succ = bl->out[i].use;
- if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ)) {
- int n_outs = succ->out[0].pos;
- if (pos < n_outs)
- return succ->out[pos + 1].use;
- else
- pos -= n_outs;
- }
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_End(succ) || is_Bad(succ))
+ continue;
+
+ unsigned n_outs = get_irn_n_outs(succ);
+ if (pos < n_outs)
+ return get_irn_out(succ, pos);
+ else
+ pos -= n_outs;
}
return NULL;
}
-ir_node *get_Block_cfg_out_ka(const ir_node *bl, int pos)
+ir_node *get_Block_cfg_out_ka(const ir_node *bl, unsigned pos)
{
assert(is_Block(bl));
- assert(bl->out_valid);
- for (int i = 1; i <= bl->out[0].pos; ++i) {
- const ir_node *succ = bl->out[i].use;
- if (get_irn_mode(succ) == mode_X) {
- if (is_Bad(succ))
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_Bad(succ))
+ continue;
+
+ if (is_End(succ)) {
+ ir_node *end_bl = get_nodes_block(succ);
+ if (end_bl == bl) {
+ /* ignore End if we are in the Endblock */
continue;
- if (is_End(succ)) {
- ir_node *end_bl = get_nodes_block(succ);
- if (end_bl == bl) {
- /* ignore End if we are in the Endblock */
- continue;
- }
- if (pos == 0) {
- /* handle keep-alive here: return the Endblock instead of the End node */
- return end_bl;
- } else
- --pos;
+ }
+ if (pos == 0) {
+ /* handle keep-alive here: return the Endblock instead of the End node */
+ return end_bl;
} else {
- int n_outs = succ->out[0].pos;
- if (pos < n_outs)
- return succ->out[pos + 1].use;
- else
- pos -= n_outs;
+ --pos;
+ continue;
}
}
+ unsigned n_outs = get_irn_n_outs(succ);
+ if (pos < n_outs)
+ return get_irn_out(succ, pos);
+ else
+ pos -= n_outs;
}
return NULL;
}
-/** Returns the amount of out edges for not yet visited successors. */
+/** Counts the def-use edges of all not yet visited nodes reachable from n
+ *  (accumulated into each node's n_outs counter). */
-static int _count_outs(ir_node *n)
+static void count_outs_node(ir_node *n)
{
- mark_irn_visited(n);
- n->out = (ir_def_use_edge*) INT_TO_PTR(1); /* Space for array size. */
+ if (irn_visited_else_mark(n))
+ return;
+
+ /* initialize our counter */
+ n->o.n_outs = 0;
int start = is_Block(n) ? 0 : -1;
int irn_arity = get_irn_arity(n);
- int res = irn_arity - start + 1; /* --1 or --0; 1 for array size. */
-
for (int i = start; i < irn_arity; ++i) {
- /* Optimize Tuples. They annoy if walking the cfg. */
- ir_node *pred = get_irn_n(n, i);
- ir_node *skipped_pred = skip_Tuple(pred);
-
- if (skipped_pred != pred) {
- set_irn_n(n, i, skipped_pred);
- }
-
- /* count Def-Use edges for predecessors */
- if (!irn_visited(skipped_pred))
- res += _count_outs(skipped_pred);
-
- /*count my Def-Use edges */
- skipped_pred->out = (ir_def_use_edge*) INT_TO_PTR(PTR_TO_INT(skipped_pred->out) + 1);
+ ir_node *def = get_irn_n(n, i);
+ /* optimize Tuples */
+ ir_node *skipped = skip_Tuple(def);
+ if (skipped != def)
+ set_irn_n(n, i, skipped);
+
+ count_outs_node(skipped);
+ ++skipped->o.n_outs;
}
- return res;
}
-/** Returns the amount of out edges for not yet visited successors.
+/** Counts the def-use edges of every node in the graph.
- * This version handles some special nodes like irg_frame, irg_args etc.
- */
-static int count_outs(ir_graph *irg)
+ * This version handles some special nodes like irg_frame, irg_args etc. */
+static void count_outs(ir_graph *irg)
{
inc_irg_visited(irg);
- int res = _count_outs(get_irg_end(irg));
-
- /* Now handle anchored nodes. We need the out count of those
- even if they are not visible. */
- for (int i = anchor_last; i >= anchor_first; --i) {
+ count_outs_node(get_irg_end(irg));
+ for (int i = anchor_first; i <= anchor_last; ++i) {
ir_node *n = get_irg_anchor(irg, i);
- if (!irn_visited_else_mark(n)) {
- n->out = (ir_def_use_edge*) INT_TO_PTR(1);
- ++res;
- }
+ if (irn_visited_else_mark(n))
+ continue;
+ n->o.n_outs = 0;
}
- return res;
}
-/**
- * Enter memory for the outs to a node.
- *
- * @param use current node
- * @param free current free address in the chunk allocated for the outs
- *
- * @return The next free address
- */
-static ir_def_use_edge *_set_out_edges(ir_node *use, ir_def_use_edge *free)
+static void set_out_edges_node(ir_node *node, struct obstack *obst)
{
- mark_irn_visited(use);
+ if (irn_visited_else_mark(node))
+ return;
/* Allocate my array */
- size_t n_outs = PTR_TO_INT(use->out);
- use->out = free;
-#ifdef DEBUG_libfirm
- use->out_valid = 1;
-#endif /* defined DEBUG_libfirm */
- free += n_outs;
- /* We count the successors again, the space will be sufficient.
- We use this counter to remember the position for the next back
- edge. */
- use->out[0].pos = 0;
-
- int start = is_Block(use) ? 0 : -1;
- int irn_arity = get_irn_arity(use);
+ unsigned n_outs = node->o.n_outs;
+ node->o.out = OALLOCF(obst, ir_def_use_edges, edges, n_outs);
+ node->o.out->n_edges = 0;
+ /* add def->use edges from my predecessors to me */
+ int start = is_Block(node) ? 0 : -1;
+ int irn_arity = get_irn_arity(node);
for (int i = start; i < irn_arity; ++i) {
- ir_node *def = get_irn_n(use, i);
+ ir_node *def = get_irn_n(node, i);
- /* Recursion */
- if (!irn_visited(def))
- free = _set_out_edges(def, free);
+ /* recurse, ensures that out array of pred is already allocated */
+ set_out_edges_node(def, obst);
/* Remember this Def-Use edge */
- int pos = def->out[0].pos + 1;
- def->out[pos].use = use;
- def->out[pos].pos = i;
-
- /* increase the number of Def-Use edges so far */
- def->out[0].pos = pos;
+ unsigned pos = def->o.out->n_edges++;
+ def->o.out->edges[pos].use = node;
+ def->o.out->edges[pos].pos = i;
}
- return free;
}
-/**
- * Enter memory for the outs to a node. Handles special nodes
- *
- * @param irg the graph
- * @param free current free address in the chunk allocated for the outs
- *
- * @return The next free address
- */
-static ir_def_use_edge *set_out_edges(ir_graph *irg, ir_def_use_edge *free)
+static void set_out_edges(ir_graph *irg)
{
- inc_irg_visited(irg);
- free = _set_out_edges(get_irg_end(irg), free);
+ struct obstack *obst = &irg->out_obst;
+
+ obstack_init(obst);
+ irg->out_obst_allocated = true;
- /* handle anchored nodes */
- for (int i = anchor_last; i >= anchor_first; --i) {
+ inc_irg_visited(irg);
+ set_out_edges_node(get_irg_end(irg), obst);
+ for (int i = anchor_first; i <= anchor_last; ++i) {
ir_node *n = get_irg_anchor(irg, i);
- if (!irn_visited_else_mark(n)) {
- size_t n_outs = PTR_TO_INT(n->out);
- n->out = free;
-#ifdef DEBUG_libfirm
- n->out_valid = 1;
-#endif
- free += n_outs;
- }
+ if (irn_visited_else_mark(n))
+ continue;
+ n->o.out = OALLOCF(obst, ir_def_use_edges, edges, 0);
+ n->o.out->n_edges = 0;
}
-
- return free;
}
void compute_irg_outs(ir_graph *irg)
{
- int n_out_edges = 0;
- ir_def_use_edge *end = NULL; /* Only for debugging */
-
- /* Update graph state */
- assert(get_irg_phase_state(irg) != phase_building);
-
free_irg_outs(irg);
-	/* This first iteration counts the overall number of out edges and the
-	   number of out edges for each node. */
+	/* This first iteration counts the number of out edges for each node. */
- n_out_edges = count_outs(irg);
-
- /* allocate memory for all out edges. */
- irg->outs = XMALLOCNZ(ir_def_use_edge, n_out_edges);
-#ifdef DEBUG_libfirm
- irg->n_outs = n_out_edges;
-#endif
+ count_outs(irg);
-	/* The second iteration splits the irg->outs array into smaller arrays
-	   for each node and writes the back edges into this array. */
+	/* The second iteration allocates an out array for each node and fills
+	   it with the def-use edges. */
- end = set_out_edges(irg, irg->outs);
-
- /* Check how much memory we have used */
- assert (end == (irg->outs + n_out_edges));
+ set_out_edges(irg);
- add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
+ add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS
+ | IR_GRAPH_PROPERTY_NO_TUPLES);
}
void assure_irg_outs(ir_graph *irg)
{
- if (! irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS))
+ if (!irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS))
compute_irg_outs(irg);
}
static void reset_outs(ir_node *node, void *unused)
{
(void) unused;
- node->out = NULL;
- node->out_valid = 0;
+ node->o.out = NULL;
}
#endif
void free_irg_outs(ir_graph *irg)
{
- if (irg->outs != NULL) {
-#ifdef DEBUG_libfirm
- memset(irg->outs, 0, irg->n_outs);
- irg->n_outs = 0;
-#endif
- free(irg->outs);
- irg->outs = NULL;
+ if (irg->out_obst_allocated) {
+ obstack_free(&irg->out_obst, NULL);
+ irg->out_obst_allocated = false;
}
#ifdef DEBUG_libfirm
res->last_node_idx = 0;
new_identities(res);
- res->outs = NULL;
res->inline_property = irg_inline_any;
res->additional_properties = mtp_property_inherited; /* inherited from type */
res->in[0] = block;
set_irn_dbg_info(res, db);
- res->out = NULL;
res->node_nr = get_irp_new_node_nr();
for (i = 0; i < EDGE_KIND_LAST; ++i) {
int pos; /** The position of this edge in use's input array. */
} ir_def_use_edge;
+typedef struct ir_def_use_edges {
+ unsigned n_edges;
+ ir_def_use_edge edges[];
+} ir_def_use_edges;
+
/**
* The common structure of an irnode.
* If the node has some attributes, they are stored in the attr field.
shall replace a node. */
long node_nr; /**< A globally unique node number for each node. */
/* ------- Fields for optimizations / analysis information ------- */
- ir_def_use_edge *out; /**< array of def-use edges. */
- struct dbg_info *dbi; /**< A pointer to information for debug support. */
- /* ------- For debugging ------- */
-#ifdef DEBUG_libfirm
- unsigned out_valid : 1;
- unsigned flags : 31;
-#endif
+ union {
+ ir_def_use_edges *out; /**< array of def-use edges. */
+		unsigned          n_outs; /**< number of def-use edges (temporarily used
+		                               during construction of the data structure) */
+ } o;
+ struct dbg_info *dbi; /**< A pointer to information for debug support. */
/* ------- For analyses -------- */
ir_loop *loop; /**< the loop the node is in. Access routines in irloop.h */
struct ir_node **deps; /**< Additional dependencies induced by state. */
/* -- Fields for optimizations / analysis information -- */
pset *value_table; /**< Hash table for global value numbering (cse)
for optimizing use in iropt.c */
- ir_def_use_edge *outs; /**< Space for the Def-Use arrays. */
+ struct obstack out_obst; /**< Space for the Def-Use arrays. */
+ bool out_obst_allocated;
ir_vrp_info vrp; /**< vrp info */
ir_loop *loop; /**< The outermost loop for this graph. */
unsigned dump_nr; /**< number of graph dumps */
#ifdef DEBUG_libfirm
- int n_outs; /**< Size wasted for outs */
long graph_nr; /**< a unique graph number for each
graph to make output readable. */
#endif
*/
static ir_node *get_deepest_common_dom_ancestor(ir_node *node, ir_node *dca)
{
- int i;
-
- for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(node); i-- > 0; ) {
ir_node *succ = get_irn_out(node, i);
/* keepalive edges are special and don't respect the dominance */
*/
static void set_projs_block(ir_node *node, ir_node *block)
{
- int i;
-
- for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(node); i-- > 0; ) {
ir_node *succ = get_irn_out(node, i);
assert(is_Proj(succ));
*/
static void place_floats_late(ir_node *n, pdeq *worklist)
{
- int n_outs;
- int i;
ir_node *block;
ir_node *dca;
if (irn_visited_else_mark(n))
return;
- n_outs = get_irn_n_outs(n);
+ unsigned n_outs = get_irn_n_outs(n);
/* break cycles at pinned nodes (see place place_floats_early) as to why */
if (get_irn_pinned(n) != op_pin_state_floats) {
- for (i = 0; i < n_outs; ++i) {
+ for (unsigned i = 0; i < n_outs; ++i) {
ir_node *succ = get_irn_out(n, i);
pdeq_putr(worklist, succ);
}
}
/* place our users */
- for (i = 0; i < n_outs; ++i) {
+ for (unsigned i = 0; i < n_outs; ++i) {
ir_node *succ = get_irn_out(n, i);
place_floats_late(succ, worklist);
}
node_t *race_next; /**< Next node on race list. */
lattice_elem_t type; /**< The associated lattice element "type". */
int max_user_input; /**< Maximum input number of Def-Use edges. */
- int next_edge; /**< Index of the next Def-Use edge to use. */
- int n_followers; /**< Number of Follower in the outs set. */
+ unsigned next_edge; /**< Index of the next Def-Use edge to use. */
+ unsigned n_followers; /**< Number of Follower in the outs set. */
unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
static void sort_irn_outs(node_t *node)
{
	ir_node *irn = node->node;
-	int n_outs = get_irn_n_outs(irn);
-
-	if (n_outs > 1) {
-		qsort(&irn->out[1], n_outs, sizeof(irn->out[0]), cmp_def_use_edge);
-	}
-	node->max_user_input = irn->out[n_outs].pos;
+	unsigned n_outs = get_irn_n_outs(irn);
+	qsort(irn->o.out->edges, n_outs, sizeof(irn->o.out->edges[0]),
+	      cmp_def_use_edge);
+	/* a node without users has no edges: avoid reading edges[-1]; the old
+	   code yielded 0 in that case (out[0].pos held the size, i.e. 0) */
+	node->max_user_input = n_outs > 0 ? irn->o.out->edges[n_outs-1].pos : 0;
}  /* sort_irn_outs */
/**
if (get_irn_mode(irn) == mode_T) {
/* mode_T nodes always produce tarval_bottom, so we must explicitly
* add its Projs to get constant evaluation to work */
- int i;
-
- for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
node_t *proj = get_irn_node(get_irn_out(irn, i));
add_to_cprop(proj, env);
*/
static void move_edges_to_leader(node_t *x)
{
- ir_node *irn = x->node;
- int i, j, k;
-
- for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
+ ir_node *irn = x->node;
+ for (int i = get_irn_arity(irn) - 1; i >= 0; --i) {
node_t *pred = get_irn_node(get_irn_n(irn, i));
- ir_node *p;
- int n;
-
- p = pred->node;
- n = get_irn_n_outs(p);
- for (j = 1; j <= pred->n_followers; ++j) {
- if (p->out[j].pos == i && p->out[j].use == irn) {
+ ir_node *p = pred->node;
+ unsigned n = get_irn_n_outs(p);
+ for (unsigned j = 0; j < pred->n_followers; ++j) {
+ ir_def_use_edge edge = p->o.out->edges[j];
+ if (edge.pos == i && edge.use == irn) {
/* found a follower edge to x, move it to the Leader */
- ir_def_use_edge edge = p->out[j];
-
/* remove this edge from the Follower set */
- p->out[j] = p->out[pred->n_followers];
--pred->n_followers;
+ p->o.out->edges[j] = p->o.out->edges[pred->n_followers];
/* sort it into the leader set */
- for (k = pred->n_followers + 2; k <= n; ++k) {
- if (p->out[k].pos >= edge.pos)
+ unsigned k;
+ for (k = pred->n_followers+1; k < n; ++k) {
+ if (p->o.out->edges[k].pos >= edge.pos)
break;
- p->out[k - 1] = p->out[k];
+ p->o.out->edges[k-1] = p->o.out->edges[k];
}
/* place the new edge here */
- p->out[k - 1] = edge;
+ p->o.out->edges[k-1] = edge;
/* edge found and moved */
break;
node_t *initial; /**< The initial node list. */
node_t *unwalked; /**< The unwalked node list. */
node_t *walked; /**< The walked node list. */
- int index; /**< Next index of Follower use_def edge. */
+ unsigned index; /**< Next index of Follower use_def edge. */
unsigned side; /**< side number. */
} step_env;
/* let n be the first node in unwalked */
n = env->unwalked;
while (env->index < n->n_followers) {
- const ir_def_use_edge *edge = &n->node->out[1 + env->index];
+ const ir_def_use_edge *edge = &n->node->o.out->edges[env->index];
/* let m be n.F.def_use[index] */
node_t *m = get_irn_node(edge->use);
int end_idx = env->end_idx;
list_for_each_entry(node_t, x, list, node_list) {
- int num_edges;
-
if (idx == -1) {
/* leader edges start AFTER follower edges */
- x->next_edge = x->n_followers + 1;
+ x->next_edge = x->n_followers;
}
- num_edges = get_irn_n_outs(x->node);
+ unsigned num_edges = get_irn_n_outs(x->node);
/* for all edges in x.L.def_use_{idx} */
- while (x->next_edge <= num_edges) {
- const ir_def_use_edge *edge = &x->node->out[x->next_edge];
+ while (x->next_edge < num_edges) {
+ const ir_def_use_edge *edge = &x->node->o.out->edges[x->next_edge];
ir_node *succ;
/* check if we have necessary edges */
node_t *y;
list_for_each_entry(node_t, x, list, node_list) {
- int num_edges;
-
- num_edges = get_irn_n_outs(x->node);
+ unsigned num_edges = get_irn_n_outs(x->node);
- x->next_edge = x->n_followers + 1;
+ x->next_edge = x->n_followers;
/* for all edges in x.L.def_use_{idx} */
- while (x->next_edge <= num_edges) {
- const ir_def_use_edge *edge = &x->node->out[x->next_edge];
+ while (x->next_edge < num_edges) {
+ const ir_def_use_edge *edge = &x->node->o.out->edges[x->next_edge];
ir_node *succ;
/* check if we have necessary edges */
*/
static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader)
{
- ir_node *l = leader->node;
- int j, i, n = get_irn_n_outs(l);
-
DB((dbg, LEVEL_2, "%+F is a follower of %+F\n", follower, leader->node));
/* The leader edges must remain sorted, but follower edges can
be unsorted. */
- for (i = leader->n_followers + 1; i <= n; ++i) {
- if (l->out[i].use == follower) {
- ir_def_use_edge t = l->out[i];
-
- for (j = i - 1; j >= leader->n_followers + 1; --j)
- l->out[j + 1] = l->out[j];
+ ir_node *l = leader->node;
+ unsigned n = get_irn_n_outs(l);
+ for (unsigned i = leader->n_followers; i < n; ++i) {
+ if (l->o.out->edges[i].use == follower) {
+ ir_def_use_edge t = l->o.out->edges[i];
+
+ for (unsigned j = i; j-- > leader->n_followers; )
+ l->o.out->edges[j+1] = l->o.out->edges[j];
+ l->o.out->edges[leader->n_followers] = t;
++leader->n_followers;
- l->out[leader->n_followers] = t;
break;
}
}
lattice_elem_t old_type;
node_t *fallen;
unsigned n_fallen, old_type_was_T_or_C;
- int i;
while (env->cprop != NULL) {
void *oldopcode = NULL;
++n_fallen;
DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
}
- for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(x->node); i-- > 0; ) {
ir_node *succ = get_irn_out(x->node, i);
node_t *y = get_irn_node(succ);
*/
static int only_one_reachable_proj(ir_node *n)
{
- int i, k = 0;
+ int k = 0;
- for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(n); i-- > 0; ) {
ir_node *proj = get_irn_out(n, i);
node_t *node;
*/
static int all_users_are_dead(const ir_node *irn)
{
- int i, n = get_irn_n_outs(irn);
-
- for (i = 1; i <= n; ++i) {
- const ir_node *succ = irn->out[i].use;
+ unsigned n = get_irn_n_outs(irn);
+ for (unsigned i = 0; i < n; ++i) {
+ const ir_node *succ = get_irn_out(irn, i);
const node_t *block = get_irn_node(get_nodes_block(succ));
const node_t *node;
*/
static unsigned calc_method_local_weight(ir_node *arg)
{
- int i, j, k;
+ int j;
unsigned v, weight = 0;
- for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(arg); i-- > 0; ) {
ir_node *succ = get_irn_out(arg, i);
switch (get_irn_opcode(succ)) {
ir_node *pred = get_Tuple_pred(succ, j);
if (pred == arg) {
/* look for Proj(j) */
- for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
+ for (unsigned k = get_irn_n_outs(succ); k-- > 0; ) {
ir_node *succ_succ = get_irn_out(succ, k);
if (is_Proj(succ_succ)) {
if (get_Proj_proj(succ_succ) == j) {
ir_entity *ent = get_irg_entity(irg);
ir_type *mtp;
size_t nparams;
- int i;
long proj_nr;
ir_node *irg_args, *arg;
assure_irg_outs(irg);
irg_args = get_irg_args(irg);
- for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irg_args); i-- > 0; ) {
arg = get_irn_out(irg_args, i);
proj_nr = get_Proj_proj(arg);
env->local_weights[proj_nr] = calc_method_local_weight(arg);
*/
static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
{
- int i;
ir_mode *mode;
mark_irn_visited(irn);
mode = get_irn_mode(irn);
if (mode == mode_M) {
/* every successor uses memory */
- for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
ir_node *succ = get_irn_out(irn, i);
if (! irn_visited(succ))
}
} else if (mode == mode_T) {
/* only some Proj's uses memory */
- for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
ir_node *proj = get_irn_out(irn, i);
if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
*/
static void update_Load_memop(memop_t *m)
{
- int i;
ir_node *load = m->node;
ir_node *ptr;
ir_entity *ent;
m->value.address = ptr;
- for (i = get_irn_n_outs(load) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(load); i-- > 0; ) {
ir_node *proj = get_irn_out(load, i);
long pn;
*/
static void update_Store_memop(memop_t *m)
{
- int i;
ir_node *store = m->node;
ir_node *adr = get_Store_ptr(store);
m->value.address = adr;
- for (i = get_irn_n_outs(store) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(store); i-- > 0; ) {
ir_node *proj = get_irn_out(store, i);
long pn;
{
ir_node *call = m->node;
unsigned prop = get_Call_memory_properties(call);
- int i;
if (prop & mtp_property_const) {
/* A constant call did NOT use memory at all, we
} else
m->flags = FLAG_KILL_ALL;
- for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(call); i-- > 0; ) {
ir_node *proj = get_irn_out(call, i);
/* beware of keep edges */
static void update_Div_memop(memop_t *m)
{
ir_node *div = m->node;
- int i;
- for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(div); i-- > 0; ) {
ir_node *proj = get_irn_out(div, i);
/* beware of keep edges */
static void update_Mod_memop(memop_t *m)
{
ir_node *div = m->node;
- int i;
- for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(div); i-- > 0; ) {
ir_node *proj = get_irn_out(div, i);
/* beware of keep edges */
*/
static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
{
- int i;
-
- for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(omem); i-- > 0; ) {
int n_pos;
ir_node *user = get_irn_out_ex(omem, i, &n_pos);
}
/* all edges previously point to omem now point to nmem */
- nmem->out = omem->out;
+ nmem->o.out = omem->o.out;
} /* reroute_all_mem_users */
/**
*/
static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
{
- int i, j, n = get_irn_n_outs(omem);
- ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
+ unsigned n = get_irn_n_outs(omem);
+ ir_def_use_edges *new_out = OALLOCF(&env.obst, ir_def_use_edges, edges, n);
- for (i = j = 0; i < n; ++i) {
+ unsigned j = 0;
+ for (unsigned i = 0; i < n; ++i) {
int n_pos;
ir_node *user = get_irn_out_ex(omem, i, &n_pos);
ir_node *use_bl = get_nodes_block(user);
}
if (block_dominates(pass_bl, use_bl)) {
/* found an user that is dominated */
+ new_out->edges[j].pos = n_pos;
+ new_out->edges[j].use = user;
++j;
- edges[j].pos = n_pos;
- edges[j].use = user;
set_irn_n(user, n_pos, nmem);
}
}
+ new_out->n_edges = j;
/* Modify the out structure: we create a new out edge array on our
- temporary obstack here. This should be no problem, as we invalidate the edges
- at the end either. */
+	   temporary obstack here. This should be no problem, as we invalidate the
+	   edges at the end anyway. */
-	/* first entry is used for the length */
- edges[0].pos = j;
- nmem->out = edges;
+ nmem->o.out = new_out;
} /* reroute_mem_through */
/**
static ir_node *get_irg_arg(ir_graph *irg, size_t pos)
{
ir_node *irg_args = get_irg_args(irg), *arg = NULL;
- int i;
/* Call algorithm that computes the out edges */
assure_irg_outs(irg);
/* Search the argument with the number pos.*/
- for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irg_args); i-- > 0; ) {
ir_node *proj = get_irn_out(irg_args, i);
if ((int)pos == get_Proj_proj(proj)) {
if (arg) {
*/
bool is_address_taken(ir_node *sel)
{
- int i, input_nr, k;
+ int input_nr;
ir_mode *emode, *mode;
ir_node *value;
ir_entity *ent;
if (! is_const_sel(sel))
return true;
- for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(sel); i-- > 0; ) {
ir_node *succ = get_irn_out(sel, i);
switch (get_irn_opcode(succ)) {
if (pred == sel) {
/* we found one input */
- for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
+ for (unsigned k = get_irn_n_outs(succ); k-- > 0; ) {
ir_node *proj = get_irn_out(succ, k);
if (is_Proj(proj) && get_Proj_proj(proj) == input_nr) {
*/
static bool link_all_leave_sels(ir_entity *ent, ir_node *sel)
{
- int i;
bool is_leave = true;
- for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(sel); i-- > 0; ) {
ir_node *succ = get_irn_out(sel, i);
if (is_Sel(succ)) {
ir_node *irg_frame;
ir_type *frame_tp;
size_t mem_idx;
- int i;
long static_link_arg;
int res = 0;
if (is_method_entity(ent)) {
ir_graph *inner_irg = get_entity_irg(ent);
ir_node *args;
- int j;
assure_irg_properties(inner_irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
args = get_irg_args(inner_irg);
- for (j = get_irn_n_outs(args) - 1; j >= 0; --j) {
+ for (unsigned j = get_irn_n_outs(args); j-- > 0; ) {
ir_node *arg = get_irn_out(args, j);
if (get_Proj_proj(arg) == static_link_arg) {
- int k;
- for (k = get_irn_n_outs(arg) - 1; k >= 0; --k) {
+ for (unsigned k = get_irn_n_outs(arg); k-- > 0; ) {
ir_node *succ = get_irn_out(arg, k);
if (is_Sel(succ)) {
* equal ADDRESS_TAKEN.
*/
irg_frame = get_irg_frame(irg);
- for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irg_frame); i-- > 0; ) {
ir_node *succ = get_irn_out(irg_frame, i);
if (is_Sel(succ)) {
void scalar_replacement_opt(ir_graph *irg)
{
unsigned nvals;
- int i;
scalars_t key;
ir_node *irg_frame;
ir_mode **modes;
sels = pset_new_ptr(8);
frame_tp = get_irg_frame_type(irg);
- for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irg_frame); i-- > 0; ) {
ir_node *succ = get_irn_out(irg_frame, i);
if (is_Sel(succ)) {