2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Compute and access out edges (also called def-use edges).
9 * @author Goetz Lindenmaier, Michael Beck
19 #include "irgraph_t.h"
/** Returns the number of recorded out (def-use) edges of @p node. */
unsigned get_irn_n_outs(const ir_node *node)
	return node->o.out->n_edges;
/** Returns the @p pos-th user of @p def (the target of an out edge). */
ir_node *get_irn_out(const ir_node *def, unsigned pos)
	assert(pos < get_irn_n_outs(def));
	return def->o.out->edges[pos].use;
/**
 * Like get_irn_out(), but additionally stores into *in_pos the input
 * position at which @p def occurs in the returned user node.
 */
ir_node *get_irn_out_ex(const ir_node *def, unsigned pos, int *in_pos)
	assert(pos < get_irn_n_outs(def));
	*in_pos = def->o.out->edges[pos].pos;
	return def->o.out->edges[pos].use;
/** Overwrites the @p pos-th out edge of @p def with (@p use, @p in_pos). */
void set_irn_out(ir_node *def, unsigned pos, ir_node *use, int in_pos)
	assert(pos < get_irn_n_outs(def));
	def->o.out->edges[pos].use = use;
	def->o.out->edges[pos].pos = in_pos;
/**
 * Counts the control-flow successors of block @p bl: only mode_X users
 * of the block carry control flow; End and Bad users are skipped, and
 * each remaining X-projection contributes its own number of users.
 */
unsigned get_Block_n_cfg_outs(const ir_node *bl)
	unsigned n_cfg_outs = 0;
	for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
		const ir_node *succ = get_irn_out(bl, i);
		if (get_irn_mode(succ) != mode_X) /* not a control-flow value */
		if (is_End(succ) || is_Bad(succ)) /* End/Bad do not start a successor block */
		n_cfg_outs += get_irn_n_outs(succ);
/**
 * Like get_Block_n_cfg_outs(), but additionally accounts for keep-alive
 * edges (see the sibling get_Block_cfg_out_ka()).
 */
unsigned get_Block_n_cfg_outs_ka(const ir_node *bl)
	unsigned n_cfg_outs = 0;
	for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
		const ir_node *succ = get_irn_out(bl, i);
		if (get_irn_mode(succ) != mode_X)
		/* the block of the successor is needed to recognize End keep-alives */
		ir_node *end_bl = get_nodes_block(succ);
		n_cfg_outs += get_irn_n_outs(succ);
/**
 * Returns the @p pos-th control-flow successor of block @p bl, counting
 * successors exactly as get_Block_n_cfg_outs() does.
 */
ir_node *get_Block_cfg_out(const ir_node *bl, unsigned pos)
	for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
		const ir_node *succ = get_irn_out(bl, i);
		if (get_irn_mode(succ) != mode_X) /* only X-projections carry control flow */
		if (is_End(succ) || is_Bad(succ))
		unsigned n_outs = get_irn_n_outs(succ);
		return get_irn_out(succ, pos);
/**
 * Returns the @p pos-th control-flow successor of block @p bl, also
 * counting keep-alive edges (see get_Block_n_cfg_outs_ka()).
 */
ir_node *get_Block_cfg_out_ka(const ir_node *bl, unsigned pos)
	assert(is_Block(bl));
	for (unsigned i = 0; i < get_irn_n_outs(bl); ++i) {
		const ir_node *succ = get_irn_out(bl, i);
		if (get_irn_mode(succ) != mode_X)
		ir_node *end_bl = get_nodes_block(succ);
		/* ignore End if we are in the Endblock */
		/* handle keep-alive here: return the Endblock instead of the End node */
		unsigned n_outs = get_irn_n_outs(succ);
		return get_irn_out(succ, pos);
/**
 * Recursively walks the out edges starting at @p node: calls @p pre
 * before and @p post after visiting a node's users.  Uses the graph's
 * visited counter so every node is visited at most once.
 */
static void irg_out_walk_2(ir_node *node, irg_walk_func *pre,
                           irg_walk_func *post, void *env)
	assert(get_irn_visited(node) < get_irg_visited(current_ir_graph));
	set_irn_visited(node, get_irg_visited(current_ir_graph));
	if (pre) pre(node, env);
	int n = get_irn_n_outs(node);
	for (int i = 0; i < n; ++i) {
		ir_node *succ = get_irn_out(node, i);
		if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
			irg_out_walk_2(succ, pre, post, env);
	if (post) post(node, env);
/**
 * Walks the out (def-use) edges of the graph containing @p node.
 * Only walks if the graph's out information is consistent
 * (IR_GRAPH_PROPERTY_CONSISTENT_OUTS); otherwise it is a no-op.
 */
void irg_out_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
	ir_graph *irg = get_irn_irg(node);
	if (irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS)) {
		inc_irg_visited(irg);
		irg_out_walk_2(node, pre, post, env);
/**
 * Recursively walks the block graph along control-flow successors,
 * using the block-visited flag so each block is visited only once.
 */
static void irg_out_block_walk2(ir_node *bl, irg_walk_func *pre,
                                irg_walk_func *post, void *env)
	if (Block_block_visited(bl))
	mark_Block_block_visited(bl);
	int n = get_Block_n_cfg_outs(bl);
	for (int i = 0; i < n; ++i) {
		/* visit the i-th control-flow successor block */
		ir_node *pred = get_Block_cfg_out(bl, i);
		irg_out_block_walk2(pred, pre, post, env);
/**
 * Walks the block graph starting at @p node, which must be a Block or a
 * mode_X (control-flow) node.  Temporarily switches current_ir_graph to
 * the graph of @p node and restores it afterwards.
 */
void irg_out_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
	ir_graph *irg = get_irn_irg(node);
	assert(is_Block(node) || (get_irn_mode(node) == mode_X));
	ir_graph *rem = current_ir_graph; /* save; restored at the end */
	current_ir_graph = irg;
	inc_irg_block_visited(irg);
	if (get_irn_mode(node) == mode_X) {
		/* a control-flow node: start the walk at each of its users */
		int n = get_irn_n_outs(node);
		for (int i = 0; i < n; ++i) {
			ir_node *succ = get_irn_out(node, i);
			irg_out_block_walk2(succ, pre, post, env);
		irg_out_block_walk2(node, pre, post, env);
	current_ir_graph = rem;
219 /*--------------------------------------------------------------------*/
/**          Building and removing the out data structure           **/
222 /** The outs of a graph are allocated in a single, large array. **/
223 /** This allows to allocate and deallocate the memory for the outs **/
224 /** on demand. The large array is separated into many small ones **/
225 /** for each node. Only a single field to reference the out array **/
226 /** is stored in each node and a field referencing the large out **/
227 /** array in irgraph. The 0 field of each out array contains the **/
228 /** size of this array. This saves memory in the irnodes themselves.**/
229 /** The construction does two passes over the graph. The first pass **/
230 /** counts the overall number of outs and the outs of each node. It **/
231 /** stores the outs of each node in the out reference of the node. **/
232 /** Then the large array is allocated. The second iteration chops **/
233 /** the large array into smaller parts, sets the out edges and **/
234 /** recounts the out edges. **/
235 /** Removes Tuple nodes! **/
236 /*--------------------------------------------------------------------*/
/**
 * First pass of out-edge construction: recursively visits @p n and its
 * predecessors, counting the prospective out edges of each node.
 * Tuple predecessors are skipped and the edge is rewired past them.
 */
static void count_outs_node(ir_node *n)
	if (irn_visited_else_mark(n))
	/* initialize our counter */
	int start = is_Block(n) ? 0 : -1; /* -1 includes the block predecessor of non-block nodes */
	int irn_arity = get_irn_arity(n);
	for (int i = start; i < irn_arity; ++i) {
		ir_node *def = get_irn_n(n, i);
		/* optimize Tuples: rewire this input past the Tuple */
		ir_node *skipped = skip_Tuple(def);
		set_irn_n(n, i, skipped);
		count_outs_node(skipped);
/**
 * Runs the counting pass over the whole graph: starts at the End node
 * and additionally covers all anchor nodes (irg_frame, irg_args etc.)
 * that were not reached from End.
 */
static void count_outs(ir_graph *irg)
	inc_irg_visited(irg);
	count_outs_node(get_irg_end(irg));
	for (int i = anchor_first; i <= anchor_last; ++i) {
		ir_node *n = get_irg_anchor(irg, i);
		if (irn_visited_else_mark(n))
/**
 * Second pass: allocates the out array of @p node on @p obst (sized by
 * the count from the first pass) and writes the def->use edge from each
 * predecessor into the predecessor's array.  Recurses first so that a
 * predecessor's array exists before an edge is stored into it.
 */
static void set_out_edges_node(ir_node *node, struct obstack *obst)
	if (irn_visited_else_mark(node))
	/* Allocate my array */
	unsigned n_outs = node->o.n_outs;
	node->o.out = OALLOCF(obst, ir_def_use_edges, edges, n_outs);
	node->o.out->n_edges = 0; /* refilled edge by edge below */
	/* add def->use edges from my predecessors to me */
	int start = is_Block(node) ? 0 : -1;
	int irn_arity = get_irn_arity(node);
	for (int i = start; i < irn_arity; ++i) {
		ir_node *def = get_irn_n(node, i);
		/* recurse, ensures that out array of pred is already allocated */
		set_out_edges_node(def, obst);
		/* Remember this Def-Use edge */
		unsigned pos = def->o.out->n_edges++;
		def->o.out->edges[pos].use = node;
		def->o.out->edges[pos].pos = i;
/**
 * Runs the second pass over the whole graph: fills the out arrays,
 * starting at End and covering all anchors.  Anchors not reached from
 * End receive an empty out array.
 */
static void set_out_edges(ir_graph *irg)
	struct obstack *obst = &irg->out_obst;
	irg->out_obst_allocated = true; /* checked by free_irg_outs() */
	inc_irg_visited(irg);
	set_out_edges_node(get_irg_end(irg), obst);
	for (int i = anchor_first; i <= anchor_last; ++i) {
		ir_node *n = get_irg_anchor(irg, i);
		if (irn_visited_else_mark(n))
		n->o.out = OALLOCF(obst, ir_def_use_edges, edges, 0);
		n->o.out->n_edges = 0;
/**
 * (Re)computes the out (def-use) edges of @p irg in two passes and
 * marks the graph as having consistent outs and no Tuple nodes (Tuples
 * are skipped/rewired during construction).
 */
void compute_irg_outs(ir_graph *irg)
	/* This first iteration counts the overall number of out edges and the
	   number of out edges for each node. */
	/* The second iteration splits the irg->outs array into smaller arrays
	   for each node and writes the back edges into this array. */
	add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS
	                   | IR_GRAPH_PROPERTY_NO_TUPLES);
/** Computes the out edges only if they are not already consistent. */
void assure_irg_outs(ir_graph *irg)
	if (!irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS))
		compute_irg_outs(irg);
/** Walker callback: clears the out information of @p node. */
static void reset_outs(ir_node *node, void *unused)
/**
 * Frees the out data of @p irg: releases the whole out obstack at once
 * and, for safety, resets every node's out pointer via a graph walk.
 */
void free_irg_outs(ir_graph *irg)
	if (irg->out_obst_allocated) {
		obstack_free(&irg->out_obst, NULL); /* NULL frees all objects on the obstack */
		irg->out_obst_allocated = false;
	/* when debugging, *always* reset all nodes' outs! irg->outs might
	   have been lying to us */
	irg_walk_graph (irg, reset_outs, NULL, NULL);