- /* Copyright (C) 2002 by Universitaet Karlsruhe
-* All rights reserved.
-*
-* Authors: Goetz Lindenmaier
-*
-* irouts.c --- Compute out edges for ir nodes (also called def-use
-* edges).
-*
-*/
-
-/* $Id$ */
-
+/*
+ * This file is part of libFirm.
+ * Copyright (C) 2012 University of Karlsruhe.
+ */
+
+/**
+ * @file
+ * @brief Compute and access out edges (also called def-use edges).
+ * @author Goetz Lindenmaier, Michael Beck
+ * @date 1.2002
+ */
+#include "config.h"
+
+#include <string.h>
+
+#include "xmalloc.h"
#include "irouts.h"
#include "irnode_t.h"
-#include "irgraph_t.h" /* To access irg->outs field (which is private to this module)
- without public access routine */
-
-/**********************************************************************/
-/** Accessing the out datastructures **/
-/**********************************************************************/
-
-/* returns the number of successors of the node: */
-INLINE int get_irn_n_outs (ir_node *node) {
- return (int)(node->out[0]);
+#include "irgraph_t.h"
+#include "irprog_t.h"
+#include "irgwalk.h"
+#include "util.h"
+#include "irprintf.h"
+#include "error.h"
+#include "ircons.h"
+
+/** Returns the number of def-use (out) edges recorded for @p node.
+ * Valid only after compute_irg_outs() has run for the node's graph. */
+unsigned get_irn_n_outs(const ir_node *node)
+{
+ return node->o.out->n_edges;
 }
-/* Access successor n */
-INLINE ir_node *get_irn_out (ir_node *node, int pos) {
- assert(node);
- assert(pos >= 0 && pos < get_irn_n_outs(node));
- return node->out[pos+1];
+/** Returns the @p pos'th user (out edge) of @p def. */
+ir_node *get_irn_out(const ir_node *def, unsigned pos)
+{
+ assert(pos < get_irn_n_outs(def));
+ return def->o.out->edges[pos].use;
 }
-INLINE void set_irn_out (ir_node *node, int pos, ir_node *out) {
- assert(node && out);
- assert(pos >= 0 && pos < get_irn_n_outs(node));
- node->out[pos+1] = out;
+/** Like get_irn_out(), but additionally stores in *@p in_pos the input
+ * position at which the returned user references @p def. */
+ir_node *get_irn_out_ex(const ir_node *def, unsigned pos, int *in_pos)
+{
+ assert(pos < get_irn_n_outs(def));
+ *in_pos = def->o.out->edges[pos].pos;
+ return def->o.out->edges[pos].use;
 }
-
-INLINE int get_Block_n_cfg_outs (ir_node *bl) {
- int i, n_cfg_outs = 0;
- assert(bl && (get_irn_op(bl) == op_Block));
- for (i = 0; i < (int)bl->out[0]; i++)
- if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
- (get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++;
- return n_cfg_outs;
+/** Overwrites out edge @p pos of @p def: the user becomes @p use, which
+ * references @p def at input position @p in_pos. */
+void set_irn_out(ir_node *def, unsigned pos, ir_node *use, int in_pos)
+{
+ assert(use != NULL);
+ assert(pos < get_irn_n_outs(def));
+ def->o.out->edges[pos].use = use;
+ def->o.out->edges[pos].pos = in_pos;
 }
+/** Counts the control-flow successors of block @p bl.
+ * Only mode_X users of the block are considered; End and Bad users are
+ * skipped. */
+unsigned get_Block_n_cfg_outs(const ir_node *bl)
+{
+ assert(is_Block(bl));
+ unsigned n_cfg_outs = 0;
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_End(succ) || is_Bad(succ))
+ continue;
+ /* every out of the mode_X user is one control-flow edge */
+ n_cfg_outs += get_irn_n_outs(succ);
+ }
+ return n_cfg_outs;
+}
-INLINE ir_node *get_Block_cfg_out (ir_node *bl, int pos) {
- int i, out_pos = 0;
- assert(bl && (get_irn_op(bl) == op_Block));
- for (i = 0; i < (int)bl->out[0]; i++)
- if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
- (get_irn_op(bl->out[i+1]) != op_End)) {
- if (out_pos == pos) {
- ir_node *cfop = bl->out[i+1];
- return cfop->out[0+1];
- } else {
- out_pos++;
- }
- }
- return NULL;
+/** Like get_Block_n_cfg_outs(), but also counts keep-alive edges:
+ * an End user in a different block contributes one edge; an End user in
+ * @p bl itself (i.e. bl is the end block) is ignored. */
+unsigned get_Block_n_cfg_outs_ka(const ir_node *bl)
+{
+ assert(is_Block(bl));
+ unsigned n_cfg_outs = 0;
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_Bad(succ))
+ continue;
+ if (is_End(succ)) {
+ ir_node *end_bl = get_nodes_block(succ);
+ if (end_bl == bl)
+ continue;
+ ++n_cfg_outs;
+ continue;
+ }
+ n_cfg_outs += get_irn_n_outs(succ);
+ }
+ return n_cfg_outs;
 }
-void irg_out_walk_2(ir_node *node, irg_walk_func *pre,
- irg_walk_func *post, void *env) {
- int i;
- ir_node *succ;
+/** Returns the @p pos'th control-flow successor block of @p bl.
+ * @p pos indexes across the outs of all mode_X users of the block
+ * (End and Bad users skipped); returns NULL if @p pos is out of range. */
+ir_node *get_Block_cfg_out(const ir_node *bl, unsigned pos)
+{
+ assert(is_Block(bl));
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_End(succ) || is_Bad(succ))
+ continue;
+
+ unsigned n_outs = get_irn_n_outs(succ);
+ if (pos < n_outs)
+ return get_irn_out(succ, pos);
+ else
+ pos -= n_outs;
+ }
+ return NULL;
+}
- assert(node);
- assert(get_irn_visited(node) < get_irg_visited(current_ir_graph));
+/** Like get_Block_cfg_out(), but also follows keep-alive edges: for an
+ * End user (outside of @p bl) the end block is returned instead of the
+ * End node. Returns NULL if @p pos is out of range. */
+ir_node *get_Block_cfg_out_ka(const ir_node *bl, unsigned pos)
+{
+ assert(is_Block(bl));
+ for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
+ const ir_node *succ = get_irn_out(bl, i);
+ if (get_irn_mode(succ) != mode_X)
+ continue;
+ if (is_Bad(succ))
+ continue;
+
+ if (is_End(succ)) {
+ ir_node *end_bl = get_nodes_block(succ);
+ if (end_bl == bl) {
+ /* ignore End if we are in the Endblock */
+ continue;
+ }
+ if (pos == 0) {
+ /* handle keep-alive here: return the Endblock instead of the End node */
+ return end_bl;
+ } else {
+ --pos;
+ continue;
+ }
+ }
+ unsigned n_outs = get_irn_n_outs(succ);
+ if (pos < n_outs)
+ return get_irn_out(succ, pos);
+ else
+ pos -= n_outs;
+ }
+ return NULL;
+}
- set_irn_visited(node, get_irg_visited(current_ir_graph));
+/** Pre-/post-order depth-first walk along out edges starting at @p node.
+ * Uses the graph's visited flag; callers must inc_irg_visited() first. */
+static void irg_out_walk_2(ir_node *node, irg_walk_func *pre,
+ irg_walk_func *post, void *env)
+{
+ assert(get_irn_visited(node) < get_irg_visited(current_ir_graph));
- if (pre) pre(node, env);
+ set_irn_visited(node, get_irg_visited(current_ir_graph));
- for (i = 0; i < get_irn_n_outs(node); i++) {
- succ = get_irn_out(node, i);
- if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
- irg_out_walk_2(succ, pre, post, env);
- }
+ if (pre) pre(node, env);
- if (post) post(node, env);
+ /* recurse into all not-yet-visited users */
+ int n = get_irn_n_outs(node);
+ for (int i = 0; i < n; ++i) {
+ ir_node *succ = get_irn_out(node, i);
+ if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
+ irg_out_walk_2(succ, pre, post, env);
+ }
- return;
+ if (post) post(node, env);
 }
-void irg_out_walk(ir_node *node,
- irg_walk_func *pre, irg_walk_func *post,
- void *env) {
- assert(node);
- if (get_irg_outs_state(current_ir_graph) != no_outs) {
- inc_irg_visited (current_ir_graph);
- irg_out_walk_2(node, pre, post, env);
- }
- return;
+/** Walks over the graph along out (def-use) edges starting at @p node,
+ * calling @p pre before and @p post after visiting each node's users.
+ * Does nothing if the graph's out information is not consistent. */
+void irg_out_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
+ void *env)
+{
+ assert(node);
+ ir_graph *irg = get_irn_irg(node);
+ if (irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS)) {
+ inc_irg_visited(irg);
+ irg_out_walk_2(node, pre, post, env);
+ }
 }
-void irg_out_block_walk2(ir_node *bl,
- irg_walk_func *pre, irg_walk_func *post,
- void *env) {
- int i;
+/** Recursive worker for irg_out_block_walk(): pre-/post-order walk over
+ * control-flow successor blocks, guarded by the block-visited flag. */
+static void irg_out_block_walk2(ir_node *bl, irg_walk_func *pre,
+ irg_walk_func *post, void *env)
+{
+ if (Block_block_visited(bl))
+ return;
- assert(get_irn_opcode(bl) == iro_Block);
+ mark_Block_block_visited(bl);
- if(get_Block_block_visited(bl) < get_irg_block_visited(current_ir_graph)) {
- set_Block_block_visited(bl, get_irg_block_visited(current_ir_graph));
+ if (pre)
+ pre(bl, env);
- if(pre)
- pre(bl, env);
+ int n = get_Block_n_cfg_outs(bl);
+ for (int i = 0; i < n; ++i) {
+ /* find the corresponding predecessor block. */
+ ir_node *pred = get_Block_cfg_out(bl, i);
+ /* recursion */
+ irg_out_block_walk2(pred, pre, post, env);
+ }
- for(i = 0; i < get_Block_n_cfg_outs(bl); i++) {
- /* find the corresponding predecessor block. */
- ir_node *pred = get_Block_cfg_out(bl, i);
- assert(get_irn_opcode(pred) == iro_Block);
- /* recursion */
- irg_out_block_walk2(pred, pre, post, env);
- }
-
- if(post)
- post(bl, env);
- }
- return;
+ if (post)
+ post(bl, env);
 }
-/* Walks only over Block nodes in the graph. Has it's own visited
- flag, so that it can be interleaved with the other walker. */
-void irg_out_block_walk(ir_node *node,
- irg_walk_func *pre, irg_walk_func *post,
- void *env) {
-
- assert((get_irn_op(node) == op_Block) || (get_irn_mode(node) == mode_X));
+/** Walks only over Block nodes in the graph, along control-flow out
+ * edges. Uses the block-visited flag so it can be interleaved with the
+ * node walker. @p node may be a Block or a mode_X node; for the latter
+ * the walk starts at each of its out blocks. */
+void irg_out_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
+ void *env)
+{
+ ir_graph *irg = get_irn_irg(node);
+ assert(is_Block(node) || (get_irn_mode(node) == mode_X));
- inc_irg_block_visited(current_ir_graph);
+ /* the helpers use current_ir_graph; save and restore it */
+ ir_graph *rem = current_ir_graph;
+ current_ir_graph = irg;
- if (get_irn_mode(node) == mode_X) node = node->out[1];
- assert(get_irn_opcode(node) == iro_Block);
+ inc_irg_block_visited(irg);
- irg_out_block_walk2(node, pre, post, env);
-
- return;
+ if (get_irn_mode(node) == mode_X) {
+ int n = get_irn_n_outs(node);
+ for (int i = 0; i < n; ++i) {
+ ir_node *succ = get_irn_out(node, i);
+ irg_out_block_walk2(succ, pre, post, env);
+ }
+ } else {
+ irg_out_block_walk2(node, pre, post, env);
+ }
+ current_ir_graph = rem;
 }
-/**********************************************************************/
-/** Building and Removing the out datasturcture **/
+/*--------------------------------------------------------------------*/
+/** Building and Removing the out datastructure **/
/** **/
/** The outs of a graph are allocated in a single, large array. **/
/** This allows to allocate and deallocate the memory for the outs **/
/** Then the large array is allocated. The second iteration chops **/
/** the large array into smaller parts, sets the out edges and **/
/** recounts the out edges. **/
-/**********************************************************************/
-
-
-/* Returns the amount of out edges for not yet visited successors. */
-static int count_outs(ir_node *n) {
- int start, i, res;
- ir_node *succ;
-
- set_irn_visited(n, get_irg_visited(current_ir_graph));
- n->out = (ir_node **) 1; /* Space for array size. */
-
- if ((get_irn_op(n) == op_Block)) start = 0; else start = -1;
- res = get_irn_arity(n) - start +1; /* --1 or --0; 1 for array size. */
- for (i = start; i < get_irn_arity(n); i++) {
- /* Optimize Tuples. They annoy if walking the cfg. */
- succ = skip_Tuple(get_irn_n(n, i));
- set_irn_n(n, i, succ);
- /* count outs for successors */
- if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
- res += count_outs(succ);
- /* Count my outs */
- succ->out = (ir_node **)( (int)succ->out +1);
- }
- return res;
+/** Removes Tuple nodes! **/
+/*--------------------------------------------------------------------*/
+
+
+/** Recursively counts, into each node's o.n_outs field, the number of
+ * out edges the node will get. Also replaces Tuple inputs by their
+ * skipped value. Uses the visited flag; no return value. */
+static void count_outs_node(ir_node *n)
+{
+ if (irn_visited_else_mark(n))
+ return;
+
+ /* initialize our counter */
+ n->o.n_outs = 0;
+
+ /* Blocks also get out edges from their control-flow predecessors
+ (input -1 is the block input of non-Block nodes) */
+ int start = is_Block(n) ? 0 : -1;
+ int irn_arity = get_irn_arity(n);
+ for (int i = start; i < irn_arity; ++i) {
+ ir_node *def = get_irn_n(n, i);
+ /* optimize Tuples */
+ ir_node *skipped = skip_Tuple(def);
+ if (skipped != def)
+ set_irn_n(n, i, skipped);
+
+ count_outs_node(skipped);
+ ++skipped->o.n_outs;
+ }
 }
-static ir_node **set_out_edges(ir_node *n, ir_node **free) {
- int n_outs, start, i;
- ir_node *succ;
-
- set_irn_visited(n, get_irg_visited(current_ir_graph));
-
- /* Allocate my array */
- n_outs = (int) n->out;
- n->out = free;
- free = &free[n_outs];
- /* We count the successors again, the space will be sufficient.
- We use this counter to remember the position for the next back
- edge. */
- n->out[0] = (ir_node *)0;
-
- if (get_irn_op(n) == op_Block) start = 0; else start = -1;
- for (i = start; i < get_irn_arity(n); i++) {
- succ = get_irn_n(n, i);
- /* Recursion */
- if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
- free = set_out_edges(succ, free);
- /* Remember our back edge */
- succ->out[get_irn_n_outs(succ)+1] = n;
- succ->out[0] = (ir_node *) (get_irn_n_outs(succ) + 1);
- }
- return free;
-}
-static INLINE void fix_start_proj(ir_graph *irg) {
- ir_node *proj = NULL, *startbl;
- int i;
- if (get_Block_n_cfg_outs(get_irg_start_block(irg))) {
- startbl = get_irg_start_block(irg);
- for (i = 0; i < get_irn_n_outs(startbl); i++)
- if (get_irn_mode(get_irn_out(startbl, i)) == mode_X)
- proj = get_irn_out(startbl, i);
- if (get_irn_out(proj, 0) == startbl) {
- assert(get_irn_n_outs(proj) == 2);
- set_irn_out(proj, 0, get_irn_out(proj, 1));
- set_irn_out(proj, 1, startbl);
- }
- }
+/** Counts the out edges of every node reachable from the End node, and
+ * zero-initializes the counters of unreachable anchor nodes (irg_frame,
+ * irg_args, ...) so they get (empty) out arrays too. */
+static void count_outs(ir_graph *irg)
+{
+ inc_irg_visited(irg);
+ count_outs_node(get_irg_end(irg));
+ for (int i = anchor_first; i <= anchor_last; ++i) {
+ ir_node *n = get_irg_anchor(irg, i);
+ if (irn_visited_else_mark(n))
+ continue;
+ n->o.n_outs = 0;
+ }
 }
-void compute_outs(ir_graph *irg) {
- ir_graph *rem = current_ir_graph;
- int n_out_edges = 0;
-
- current_ir_graph = irg;
-
- /* Update graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
- current_ir_graph->outs_state = outs_consistent;
+/** Allocates @p node's out array (sized by the o.n_outs counter set up
+ * by count_outs_node()) on @p obst and registers this node as a user in
+ * each of its predecessors' out arrays. */
+static void set_out_edges_node(ir_node *node, struct obstack *obst)
+{
+ if (irn_visited_else_mark(node))
+ return;
+
+ /* Allocate my array */
+ unsigned n_outs = node->o.n_outs;
+ node->o.out = OALLOCF(obst, ir_def_use_edges, edges, n_outs);
+ node->o.out->n_edges = 0;
+
+ /* add def->use edges from my predecessors to me */
+ int start = is_Block(node) ? 0 : -1;
+ int irn_arity = get_irn_arity(node);
+ for (int i = start; i < irn_arity; ++i) {
+ ir_node *def = get_irn_n(node, i);
+
+ /* recurse, ensures that out array of pred is already allocated */
+ set_out_edges_node(def, obst);
+
+ /* Remember this Def-Use edge */
+ unsigned pos = def->o.out->n_edges++;
+ def->o.out->edges[pos].use = node;
+ def->o.out->edges[pos].pos = i;
+ }
+}
- /* This first iteration counts the overall number of out edges and the
- number of out edges for each node. */
- inc_irg_visited(irg);
- n_out_edges = count_outs(get_irg_end(irg));
+/** Second pass of compute_irg_outs(): initializes the graph's out
+ * obstack and fills in the out arrays of all reachable nodes; anchors
+ * not reached from End get empty out arrays. */
+static void set_out_edges(ir_graph *irg)
+{
+ struct obstack *obst = &irg->out_obst;
+
+ obstack_init(obst);
+ irg->out_obst_allocated = true;
+
+ inc_irg_visited(irg);
+ set_out_edges_node(get_irg_end(irg), obst);
+ for (int i = anchor_first; i <= anchor_last; ++i) {
+ ir_node *n = get_irg_anchor(irg, i);
+ if (irn_visited_else_mark(n))
+ continue;
+ n->o.out = OALLOCF(obst, ir_def_use_edges, edges, 0);
+ n->o.out->n_edges = 0;
+ }
+}
- /* allocate memory for all out edges. */
- irg->outs = (ir_node **) malloc (n_out_edges * sizeof(ir_node *));
+/** (Re)computes the out (def-use) edges of @p irg in two passes:
+ * count per-node edge numbers, then allocate and fill the edge arrays.
+ * Tuples are skipped along the way, so NO_TUPLES also holds afterwards. */
+void compute_irg_outs(ir_graph *irg)
+{
+ free_irg_outs(irg);
- /* The second iteration splits the irg->outs array into smaller arrays
- for each node and writes the back edges into this array. */
- inc_irg_visited(irg);
- set_out_edges(get_irg_end(irg), irg->outs);
+ /* This first iteration counts the overall number of out edges and the
+ number of out edges for each node. */
+ count_outs(irg);
- /* We want that the out of ProjX from Start contains the next block at
- position 1, the Start block at position 2. This is necessary for
- the out block walker. */
- fix_start_proj(irg);
+ /* The second iteration splits the irg->outs array into smaller arrays
+ for each node and writes the back edges into this array. */
+ set_out_edges(irg);
- current_ir_graph = rem;
+ add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS
+ | IR_GRAPH_PROPERTY_NO_TUPLES);
 }
-void free_outs(ir_graph *irg) {
-
- /* Update graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
- current_ir_graph->outs_state = no_outs;
+/** Ensures that @p irg has consistent out information, computing it on
+ * demand. */
+void assure_irg_outs(ir_graph *irg)
+{
+ if (!irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS))
+ compute_irg_outs(irg);
+}
- if (irg->outs) free(irg->outs);
- irg->outs = NULL;
+#ifdef DEBUG_libfirm
+/** Walker callback: clear the out pointer of a node (debug builds only,
+ * so stale out arrays are caught as NULL dereferences). */
+static void reset_outs(ir_node *node, void *unused)
+{
+ (void) unused;
+ node->o.out = NULL;
+}
+#endif
+
+/** Frees the out information of @p irg by releasing the out obstack.
+ * The per-node out pointers then dangle; debug builds NULL them out. */
+void free_irg_outs(ir_graph *irg)
+{
+ if (irg->out_obst_allocated) {
+ obstack_free(&irg->out_obst, NULL);
+ irg->out_obst_allocated = false;
+ }
+
+#ifdef DEBUG_libfirm
+ /* when debugging, *always* reset all nodes' outs! irg->outs might
+ have been lying to us */
+ irg_walk_graph (irg, reset_outs, NULL, NULL);
+#endif
 }