/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Compute and access out edges (also called def-use edges).
 * @author  Goetz Lindenmaier, Michael Beck
 */
33 #include "irgraph_t.h"
41 /* Note: ir_node.out_valid and ir_graph.n_outs are only present when DEBUG_libfirm is defined */
42 /* Accesses to out_valid and n_outs are fenced out to avoid breakage
43 when compiling with neither DEBUG_libfirm or NDEBUG defined */
44 #endif /* defined DEBUG_libfirm */
46 /*--------------------------------------------------------------------*/
47 /** Accessing the out datastructures **/
48 /*--------------------------------------------------------------------*/
51 /** Clear the outs of a node */
52 static void reset_outs(ir_node *node, void *unused)
60 int get_irn_outs_computed(const ir_node *node)
62 return node->out != NULL;
65 /* returns the number of successors of the node: */
66 int get_irn_n_outs(const ir_node *node)
68 assert(node && node->kind == k_ir_node);
70 assert(node->out_valid);
71 #endif /* defined DEBUG_libfirm */
72 /* we misuse the first for the size info of the out array */
73 return node->out[0].pos;
76 /* Access successor n */
77 ir_node *get_irn_out(const ir_node *def, int pos)
79 assert(pos >= 0 && pos < get_irn_n_outs(def));
81 assert(def->out_valid);
82 #endif /* defined DEBUG_libfirm */
83 return def->out[pos+1].use;
86 /* Access successor n */
87 ir_node *get_irn_out_ex(const ir_node *def, int pos, int *in_pos)
89 assert(pos >= 0 && pos < get_irn_n_outs(def));
91 assert(def->out_valid);
92 #endif /* defined DEBUG_libfirm */
93 *in_pos = def->out[pos+1].pos;
94 return def->out[pos+1].use;
97 void set_irn_out(ir_node *def, int pos, ir_node *use, int in_pos)
100 assert(pos >= 0 && pos < get_irn_n_outs(def));
102 assert(def->out_valid);
103 #endif /* defined DEBUG_libfirm */
104 def->out[pos+1].use = use;
105 def->out[pos+1].pos = in_pos;
108 /* Return the number of control flow successors, ignore keep-alives. */
109 int get_Block_n_cfg_outs(const ir_node *bl)
111 int i, n_cfg_outs = 0;
112 assert(bl && is_Block(bl));
114 assert(bl->out_valid);
115 #endif /* defined DEBUG_libfirm */
116 for (i = 1; i <= bl->out[0].pos; ++i) {
117 ir_node *succ = bl->out[i].use;
118 if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ))
119 n_cfg_outs += succ->out[0].pos;
124 /* Return the number of control flow successors, honor keep-alives. */
125 int get_Block_n_cfg_outs_ka(const ir_node *bl)
127 int i, n_cfg_outs = 0;
128 assert(bl && is_Block(bl));
130 assert(bl->out_valid);
131 #endif /* defined DEBUG_libfirm */
132 for (i = 1; i <= bl->out[0].pos; ++i) {
133 ir_node *succ = bl->out[i].use;
134 if (get_irn_mode(succ) == mode_X) {
138 /* ignore End if we are in the Endblock */
139 if (get_nodes_block(succ) == bl)
141 else /* count Keep-alive as one */
144 n_cfg_outs += succ->out[0].pos;
150 /* Access predecessor n, ignore keep-alives. */
151 ir_node *get_Block_cfg_out(const ir_node *bl, int pos)
154 assert(bl && is_Block(bl));
156 assert(bl->out_valid);
157 #endif /* defined DEBUG_libfirm */
158 for (i = 1; i <= bl->out[0].pos; ++i) {
159 ir_node *succ = bl->out[i].use;
160 if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ)) {
161 int n_outs = succ->out[0].pos;
163 return succ->out[pos + 1].use;
171 /* Access predecessor n, honor keep-alives. */
172 ir_node *get_Block_cfg_out_ka(const ir_node *bl, int pos)
175 assert(bl && is_Block(bl));
177 assert (bl->out_valid);
178 #endif /* defined DEBUG_libfirm */
179 for (i = 1; i <= bl->out[0].pos; ++i) {
180 ir_node *succ = bl->out[i].use;
181 if (get_irn_mode(succ) == mode_X) {
185 ir_node *end_bl = get_nodes_block(succ);
187 /* ignore End if we are in the Endblock */
191 /* handle keep-alive here: return the Endblock instead of the End node */
196 n_outs = succ->out[0].pos;
198 return succ->out[pos + 1].use;
207 static void irg_out_walk_2(ir_node *node, irg_walk_func *pre,
208 irg_walk_func *post, void *env)
214 assert(get_irn_visited(node) < get_irg_visited(current_ir_graph));
216 set_irn_visited(node, get_irg_visited(current_ir_graph));
218 if (pre) pre(node, env);
220 for (i = 0, n = get_irn_n_outs(node); i < n; ++i) {
221 succ = get_irn_out(node, i);
222 if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
223 irg_out_walk_2(succ, pre, post, env);
226 if (post) post(node, env);
229 void irg_out_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
233 ir_graph *irg = get_irn_irg(node);
234 if (is_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS)) {
235 inc_irg_visited (irg);
236 irg_out_walk_2(node, pre, post, env);
240 static void irg_out_block_walk2(ir_node *bl, irg_walk_func *pre,
241 irg_walk_func *post, void *env)
245 if (!Block_block_visited(bl)) {
246 mark_Block_block_visited(bl);
251 for (i = 0, n = get_Block_n_cfg_outs(bl); i < n; ++i) {
252 /* find the corresponding predecessor block. */
253 ir_node *pred = get_Block_cfg_out(bl, i);
255 irg_out_block_walk2(pred, pre, post, env);
263 /* Walks only over Block nodes in the graph. Has its own visited
264 flag, so that it can be interleaved with the other walker. */
265 void irg_out_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
269 assert(is_Block(node) || (get_irn_mode(node) == mode_X));
271 inc_irg_block_visited(current_ir_graph);
273 if (get_irn_mode(node) == mode_X) {
276 for (i = 0, n = get_irn_n_outs(node); i < n; ++i) {
277 ir_node *succ = get_irn_out(node, i);
278 irg_out_block_walk2(succ, pre, post, env);
282 irg_out_block_walk2(node, pre, post, env);
/*--------------------------------------------------------------------*/
/** Building and Removing the out datastructure                      **/
/**                                                                  **/
/** The outs of a graph are allocated in a single, large array.      **/
/** This allows to allocate and deallocate the memory for the outs   **/
/** on demand.  The large array is separated into many small ones    **/
/** for each node.  Only a single field to reference the out array   **/
/** is stored in each node and a field referencing the large out     **/
/** array in irgraph.  The 0 field of each out array contains the    **/
/** size of this array.  This saves memory in the irnodes themselves.**/
/** The construction does two passes over the graph.  The first pass **/
/** counts the overall number of outs and the outs of each node.  It **/
/** stores the outs of each node in the out reference of the node.   **/
/** Then the large array is allocated.  The second iteration chops   **/
/** the large array into smaller parts, sets the out edges and       **/
/** recounts the out edges.                                          **/
/** Removes Tuple nodes!                                             **/
/*--------------------------------------------------------------------*/
306 /** Returns the amount of out edges for not yet visited successors. */
307 static int _count_outs(ir_node *n)
309 int start, i, res, irn_arity;
312 n->out = (ir_def_use_edge*) INT_TO_PTR(1); /* Space for array size. */
314 start = is_Block(n) ? 0 : -1;
315 irn_arity = get_irn_arity(n);
316 res = irn_arity - start + 1; /* --1 or --0; 1 for array size. */
318 for (i = start; i < irn_arity; ++i) {
319 /* Optimize Tuples. They annoy if walking the cfg. */
320 ir_node *pred = get_irn_n(n, i);
321 ir_node *skipped_pred = skip_Tuple(pred);
323 if (skipped_pred != pred) {
324 set_irn_n(n, i, skipped_pred);
327 /* count Def-Use edges for predecessors */
328 if (!irn_visited(skipped_pred))
329 res += _count_outs(skipped_pred);
331 /*count my Def-Use edges */
332 skipped_pred->out = (ir_def_use_edge*) INT_TO_PTR(PTR_TO_INT(skipped_pred->out) + 1);
338 /** Returns the amount of out edges for not yet visited successors.
339 * This version handles some special nodes like irg_frame, irg_args etc.
341 static int count_outs(ir_graph *irg)
346 inc_irg_visited(irg);
347 res = _count_outs(get_irg_end(irg));
349 /* Now handle anchored nodes. We need the out count of those
350 even if they are not visible. */
351 for (i = anchor_last; i >= anchor_first; --i) {
352 n = get_irg_anchor(irg, i);
353 if (!irn_visited_else_mark(n)) {
354 n->out = (ir_def_use_edge*) INT_TO_PTR(1);
362 * Enter memory for the outs to a node.
364 * @param use current node
365 * @param free current free address in the chunk allocated for the outs
367 * @return The next free address
369 static ir_def_use_edge *_set_out_edges(ir_node *use, ir_def_use_edge *free)
371 int start, i, irn_arity, pos;
374 mark_irn_visited(use);
376 /* Allocate my array */
377 n_outs = PTR_TO_INT(use->out);
381 #endif /* defined DEBUG_libfirm */
383 /* We count the successors again, the space will be sufficient.
384 We use this counter to remember the position for the next back
388 start = is_Block(use) ? 0 : -1;
389 irn_arity = get_irn_arity(use);
391 for (i = start; i < irn_arity; ++i) {
392 ir_node *def = get_irn_n(use, i);
395 if (!irn_visited(def))
396 free = _set_out_edges(def, free);
398 /* Remember this Def-Use edge */
399 pos = def->out[0].pos + 1;
400 def->out[pos].use = use;
401 def->out[pos].pos = i;
403 /* increase the number of Def-Use edges so far */
404 def->out[0].pos = pos;
410 * Enter memory for the outs to a node. Handles special nodes
412 * @param irg the graph
413 * @param free current free address in the chunk allocated for the outs
415 * @return The next free address
417 static ir_def_use_edge *set_out_edges(ir_graph *irg, ir_def_use_edge *free)
422 inc_irg_visited(irg);
423 free = _set_out_edges(get_irg_end(irg), free);
425 /* handle anchored nodes */
426 for (i = anchor_last; i >= anchor_first; --i) {
427 n = get_irg_anchor(irg, i);
428 if (!irn_visited_else_mark(n)) {
429 size_t n_outs = PTR_TO_INT(n->out);
433 #endif /* defined DEBUG_libfirm */
441 /* compute the outs for a given graph */
442 void compute_irg_outs(ir_graph *irg)
444 ir_graph *rem = current_ir_graph;
446 ir_def_use_edge *end = NULL; /* Only for debugging */
448 current_ir_graph = irg;
450 /* Update graph state */
451 assert(get_irg_phase_state(current_ir_graph) != phase_building);
453 free_irg_outs(current_ir_graph);
455 /* This first iteration counts the overall number of out edges and the
456 number of out edges for each node. */
457 n_out_edges = count_outs(irg);
459 /* allocate memory for all out edges. */
460 irg->outs = XMALLOCNZ(ir_def_use_edge, n_out_edges);
462 irg->n_outs = n_out_edges;
463 #endif /* defined DEBUG_libfirm */
465 /* The second iteration splits the irg->outs array into smaller arrays
466 for each node and writes the back edges into this array. */
467 end = set_out_edges(irg, irg->outs);
469 /* Check how much memory we have used */
470 assert (end == (irg->outs + n_out_edges));
472 set_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
473 current_ir_graph = rem;
476 void assure_irg_outs(ir_graph *irg)
478 if (! is_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS))
479 compute_irg_outs(irg);
/* Compute the outs for every graph in the program. */
void compute_irp_outs(void)
{
	int i, n;
	for (i = 0, n = get_irp_n_irgs(); i < n; ++i)
		compute_irg_outs(get_irp_irg(i));
}
/* Free the outs of every graph in the program. */
void free_irp_outs(void)
{
	int i, n;
	for (i = 0, n = get_irp_n_irgs(); i < n; ++i)
		free_irg_outs(get_irp_irg(i));
}
496 void free_irg_outs(ir_graph *irg)
498 /* current_ir_graph->outs_state = outs_none; */
502 memset(irg->outs, 0, irg->n_outs);
503 #endif /* defined DEBUG_libfirm */
508 #endif /* defined DEBUG_libfirm */
512 /* when debugging, *always* reset all nodes' outs! irg->outs might
513 have been lying to us */
514 irg_walk_graph (irg, reset_outs, NULL, NULL);
515 #endif /* defined DEBUG_libfirm */