/** Side effect of the walk below: Tuple nodes are removed from the graph
    view — Tuple predecessors are replaced by their skipped values
    (see the skip_Tuple call in _count_outs). **/
/*--------------------------------------------------------------------*/
+
+
+/** Returns the amount of out edges for not yet visited successors. */
+static int _count_outs(ir_node *n) {
+ int start, i, res, irn_arity;
+
+ mark_irn_visited(n);
+ n->out = INT_TO_PTR(1); /* Space for array size. */
+
+ start = is_Block(n) ? 0 : -1;
+ irn_arity = get_irn_arity(n);
+ res = irn_arity - start + 1; /* --1 or --0; 1 for array size. */
+
+ for (i = start; i < irn_arity; ++i) {
+ /* Optimize Tuples. They annoy if walking the cfg. */
+ ir_node *pred = get_irn_n(n, i);
+ ir_node *skipped_pred = skip_Tuple(pred);
+
+ if (skipped_pred != pred) {
+ set_irn_n(n, i, skipped_pred);
+ }
+
+ /* count Def-Use edges for predecessors */
+ if (!irn_visited(skipped_pred))
+ res += _count_outs(skipped_pred);
+
+ /*count my Def-Use edges */
+ skipped_pred->out = INT_TO_PTR(PTR_TO_INT(skipped_pred->out) + 1);
+ }
+ return res;
+}
+
+
+/** Returns the amount of out edges for not yet visited successors.
+ * This version handles some special nodes like irg_frame, irg_args etc.
+ */
+static int count_outs(ir_graph *irg) {
+ ir_node *n;
+ int i, res;
+
+ inc_irg_visited(irg);
+ res = _count_outs(get_irg_end(irg));
+
+ /* Now handle anchored nodes. We need the out count of those
+ even if they are not visible. */
+ for (i = anchor_last - 1; i >= 0; --i) {
+ n = get_irg_anchor(irg, i);
+ if (!irn_visited(n)) {
+ mark_irn_visited(n);
+
+ n->out = INT_TO_PTR(1);
+ ++res;
+ }
+ }
+ return res;
+}
+
/**
 * Enter memory for the outs to a node.
 *
 * Recursively walks the def chain of @p use: every reached node gets its
 * slice of the pre-allocated edge chunk, and each Def-Use edge is recorded
 * in the def's array.  Slot 0 of each array serves as a fill counter while
 * building and holds the final edge count afterwards.
 *
 * @param use current node
 * @param free current free address in the chunk allocated for the outs
 *
 * @return The next free address
 */
static ir_def_use_edge *_set_out_edges(ir_node *use, ir_def_use_edge *free) {
 int n_outs, start, i, irn_arity, pos;

 mark_irn_visited(use);

 /* Allocate my array */
 n_outs = PTR_TO_INT(use->out); /* edge count stashed here by _count_outs() */
 use->out = free;
#ifdef DEBUG_libfirm
 use->out_valid = 1;
#endif /* defined DEBUG_libfirm */
 free += n_outs;
 /* We count the successors again, the space will be sufficient.
 We use this counter to remember the position for the next back
 edge. */
 use->out[0].pos = 0;

 start = is_Block(use) ? 0 : -1; /* non-blocks start at the extra -1 input */
 irn_arity = get_irn_arity(use);

 for (i = start; i < irn_arity; ++i) {
 ir_node *def = get_irn_n(use, i);

 /* Recursion */
 if (!irn_visited(def))
 free = _set_out_edges(def, free);

 /* Remember this Def-Use edge */
 pos = def->out[0].pos + 1;
 def->out[pos].use = use;
 def->out[pos].pos = i;

 /* increase the number of Def-Use edges so far */
 def->out[0].pos = pos;
 }
 return free;
}
+
+/**
+ * Enter memory for the outs to a node. Handles special nodes
+ *
+ * @param irg the graph
+ * @param free current free address in the chunk allocated for the outs
+ *
+ * @return The next free address
+ */
+static ir_def_use_edge *set_out_edges(ir_graph *irg, ir_def_use_edge *free) {
+ ir_node *n;
+ int i, n_outs;
+
+ inc_irg_visited(irg);
+ free = _set_out_edges(get_irg_end(irg), free);
+
+ /* handle anchored nodes */
+ for (i = anchor_last - 1; i >= 0; --i) {
+ n = get_irg_anchor(irg, i);
+ if (!irn_visited(n)) {
+ mark_irn_visited(n);
+
+ n_outs = PTR_TO_INT(n->out);
+ n->out = free;
+#ifdef DEBUG_libfirm
+ n->out_valid = 1;
+#endif /* defined DEBUG_libfirm */
+ free += n_outs;
+ }
+ }
+
+ return free;
+}
+
+
+/**
+ * We want that the out of ProjX from Start contains the next block at
+ * position 0, the Start block at position 1. This is necessary for
+ * the out block walker.
+ */
+static INLINE void fix_start_proj(ir_graph *irg) {
+ ir_node *startbl = get_irg_start_block(irg);
+
+ if (get_Block_n_cfg_outs(startbl)) {
+ ir_node *proj = get_irg_initial_exec(irg);
+ ir_node *irn;
+ int block_pos, other_pos;
+
+ if (get_irn_n_outs(proj) == 2) {
+ if (get_irn_out_ex(proj, 0, &block_pos) == startbl) {
+ irn = get_irn_out_ex(proj, 1, &other_pos);
+ set_irn_out(proj, 0, irn, other_pos);
+ set_irn_out(proj, 1, startbl, block_pos);
+ }
+ } else {
+ assert(get_irg_phase_state(irg) == phase_backend);
+ }
+ }
+}
+
+/* compute the outs for a given graph */
+void compute_irg_outs(ir_graph *irg) {
+ ir_graph *rem = current_ir_graph;
+ int n_out_edges = 0;
+ ir_def_use_edge *end = NULL; /* Only for debugging */
+
+ current_ir_graph = irg;
+
+ /* Update graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+
+ if (current_ir_graph->outs_state != outs_none)
+ free_irg_outs(current_ir_graph);
+
+ /* This first iteration counts the overall number of out edges and the
+ number of out edges for each node. */
+ n_out_edges = count_outs(irg);
+
+ /* allocate memory for all out edges. */
+ irg->outs = XMALLOCNZ(ir_def_use_edge, n_out_edges);
+#ifdef DEBUG_libfirm
+ irg->n_outs = n_out_edges;
+#endif /* defined DEBUG_libfirm */
+
+ /* The second iteration splits the irg->outs array into smaller arrays
+ for each node and writes the back edges into this array. */
+ end = set_out_edges(irg, irg->outs);
+
+ /* Check how much memory we have used */
+ assert (end == (irg->outs + n_out_edges));
+
+ /* We want that the out of ProjX from Start contains the next block at
+ position 0, the Start block at position 1. This is necessary for
+ code placement (place_early() ONLY if started GCSE on graphs with dead blocks) */
+ fix_start_proj(irg);
+
+ current_ir_graph->outs_state = outs_consistent;
+ current_ir_graph = rem;
+}
+
+void assure_irg_outs(ir_graph *irg) {
+ if (get_irg_outs_state(irg) != outs_consistent)
+ compute_irg_outs(irg);
+}