2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Compute and access out edges (also called def-use edges).
23 * @author Goetz Lindenmaier, Michael Beck
34 #include "irgraph_t.h"
42 /* Note: ir_node.out_valid and ir_graph.n_outs are only present when DEBUG_libfirm is defined */
43 /* Accesses to out_valid and n_outs are fenced out to avoid breakage
44    when compiling with neither DEBUG_libfirm nor NDEBUG defined */
45 #endif /* defined DEBUG_libfirm */
47 /*--------------------------------------------------------------------*/
48 /** Accessing the out datastructures **/
49 /*--------------------------------------------------------------------*/
52 /** Clear the outs of a node */
/* Walker callback (see free_irg_outs): signature matches irg_walk_func,
   so the second argument is required but deliberately unused. */
53 static void reset_outs(ir_node *node, void *unused) {
/* Returns non-zero iff the out edges of @p node have been computed.
   The out-array pointer itself doubles as the "computed" flag. */
60 int get_irn_outs_computed(const ir_node *node)
62 	return node->out != NULL;
65 /* returns the number of successors of the node: */
66 int get_irn_n_outs(const ir_node *node) {
67 	assert(node && node->kind == k_ir_node);
69 	/* assert(node->out_valid); */
70 #endif /* defined DEBUG_libfirm */
/* out[0] is a header entry: its pos field holds the array length. */
71 	/* we misuse the first for the size info of the out array */
72 	return node->out[0].pos;
75 /* Access successor n */
/* Returns the pos-th user of @p def.  Real entries start at index 1,
   because out[0] stores the count (see get_irn_n_outs). */
76 ir_node *get_irn_out(const ir_node *def, int pos) {
77 	assert(pos >= 0 && pos < get_irn_n_outs(def));
79 	/* assert(def->out_valid); */
80 #endif /* defined DEBUG_libfirm */
81 	return def->out[pos+1].use;
84 /* Access successor n */
/* Like get_irn_out(), but additionally reports via *in_pos which input
   of the user node points back to @p def. */
85 ir_node *get_irn_out_ex(const ir_node *def, int pos, int *in_pos) {
86 	assert(pos >= 0 && pos < get_irn_n_outs(def));
88 	/* assert(def->out_valid); */
89 #endif /* defined DEBUG_libfirm */
90 	*in_pos = def->out[pos+1].pos;
91 	return def->out[pos+1].use;
/* Overwrites out edge @p pos of @p def with (use, in_pos).
   Marks the outs as valid in debug builds -- the caller is trusted. */
94 void set_irn_out(ir_node *def, int pos, ir_node *use, int in_pos) {
96 	assert(pos >= 0 && pos < get_irn_n_outs(def));
98 	def->out_valid = 1; /* assume that this function is used correctly */
99 #endif /* defined DEBUG_libfirm */
100 	def->out[pos+1].use = use;
101 	def->out[pos+1].pos = in_pos;
104 /* Return the number of control flow successors, ignore keep-alives. */
105 int get_Block_n_cfg_outs(const ir_node *bl) {
106 	int i, n_cfg_outs = 0;
107 	assert(bl && is_Block(bl));
109 	assert(bl->out_valid);
110 #endif /* defined DEBUG_libfirm */
/* Walk the block's users (entries start at index 1, out[0] is the count). */
111 	for (i = 1; i <= bl->out[0].pos; ++i) {
112 		ir_node *succ = bl->out[i].use;
/* mode_X users are control-flow projections; each one's own out count is
   the number of blocks it targets.  End users are skipped (keep-alives). */
113 		if (get_irn_mode(succ) == mode_X && !is_End(succ))
114 			n_cfg_outs += succ->out[0].pos;
119 /* Return the number of control flow successors, honor keep-alives. */
120 int get_Block_n_cfg_outs_ka(const ir_node *bl) {
121 	int i, n_cfg_outs = 0;
122 	assert(bl && is_Block(bl));
124 	assert(bl->out_valid);
125 #endif /* defined DEBUG_libfirm */
/* Same counting scheme as get_Block_n_cfg_outs(), but End users are not
   dropped outright: a keep-alive edge is counted as one successor. */
126 	for (i = 1; i <= bl->out[0].pos; ++i) {
127 		ir_node *succ = bl->out[i].use;
128 		if (get_irn_mode(succ) == mode_X) {
131 			/* ignore End if we are in the Endblock */
132 			if (get_nodes_block(succ) == bl)
134 			else /* count Keep-alive as one */
137 			n_cfg_outs += succ->out[0].pos;
143 /* Access predecessor n, ignore keep-alives. */
/* Returns the pos-th control-flow successor block of @p bl by scanning
   the mode_X users (excluding End) and indexing into their out arrays. */
144 ir_node *get_Block_cfg_out(const ir_node *bl, int pos) {
146 	assert(bl && is_Block(bl));
148 	assert(bl->out_valid);
149 #endif /* defined DEBUG_libfirm */
150 	for (i = 1; i <= bl->out[0].pos; ++i) {
151 		ir_node *succ = bl->out[i].use;
152 		if (get_irn_mode(succ) == mode_X && !is_End(succ)) {
153 			int n_outs = succ->out[0].pos;
155 				return succ->out[pos + 1].use;
163 /* Access predecessor n, honor keep-alives. */
/* Keep-alive variant of get_Block_cfg_out(): a keep-alive edge resolves
   to the End block rather than being skipped. */
164 ir_node *get_Block_cfg_out_ka(const ir_node *bl, int pos) {
166 	assert(bl && is_Block(bl));
168 	assert (bl->out_valid);
169 #endif /* defined DEBUG_libfirm */
170 	for (i = 1; i <= bl->out[0].pos; ++i) {
171 		ir_node *succ = bl->out[i].use;
172 		if (get_irn_mode(succ) == mode_X) {
174 			ir_node *end_bl = get_nodes_block(succ);
176 			/* ignore End if we are in the Endblock */
180 			/* handle keep-alive here: return the Endblock instead of the End node */
185 			n_outs = succ->out[0].pos;
187 				return succ->out[pos + 1].use;
/* Recursive helper for irg_out_walk(): depth-first traversal along out
   edges.  @p pre runs before descending into successors, @p post after. */
196 static void irg_out_walk_2(ir_node *node, irg_walk_func *pre,
197                            irg_walk_func *post, void *env) {
202 	assert(get_irn_visited(node) < get_irg_visited(current_ir_graph));
204 	set_irn_visited(node, get_irg_visited(current_ir_graph));
206 	if (pre) pre(node, env);
208 	for (i = 0, n = get_irn_n_outs(node); i < n; ++i) {
209 		succ = get_irn_out(node, i);
/* The visited mark guards against walking a node twice. */
210 		if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
211 			irg_out_walk_2(succ, pre, post, env);
214 	if (post) post(node, env);
/* Walks the graph starting at @p node following out (def-use) edges.
   Silently does nothing if the outs have not been computed. */
217 void irg_out_walk(ir_node *node,
218                   irg_walk_func *pre, irg_walk_func *post,
221 	if (get_irg_outs_state(current_ir_graph) != outs_none) {
222 		inc_irg_visited (current_ir_graph);
223 		irg_out_walk_2(node, pre, post, env);
/* Recursive helper for irg_out_block_walk(): visits a block once (own
   block-visited flag) and recurses into its control-flow successors. */
227 static void irg_out_block_walk2(ir_node *bl,
228                                 irg_walk_func *pre, irg_walk_func *post,
232 	if (!Block_block_visited(bl)) {
233 		mark_Block_block_visited(bl);
238 		for (i = 0, n = get_Block_n_cfg_outs(bl); i < n; ++i) {
239 			/* find the corresponding predecessor block. */
240 			ir_node *pred = get_Block_cfg_out(bl, i);
242 			irg_out_block_walk2(pred, pre, post, env);
250 /* Walks only over Block nodes in the graph. Has its own visited
251    flag, so that it can be interleaved with the other walker. */
252 void irg_out_block_walk(ir_node *node,
253                         irg_walk_func *pre, irg_walk_func *post,
256 	assert(is_Block(node) || (get_irn_mode(node) == mode_X));
258 	inc_irg_block_visited(current_ir_graph);
/* A mode_X start node is a jump: walk the blocks it targets instead. */
260 	if (get_irn_mode(node) == mode_X) {
263 		for (i = 0, n = get_irn_n_outs(node); i < n; ++i) {
264 			ir_node *succ = get_irn_out(node, i);
265 			irg_out_block_walk2(succ, pre, post, env);
269 		irg_out_block_walk2(node, pre, post, env);
273 /*--------------------------------------------------------------------*/
274 /** Building and Removing the out datastructure **/
276 /** The outs of a graph are allocated in a single, large array. **/
277 /** This allows to allocate and deallocate the memory for the outs **/
278 /** on demand. The large array is separated into many small ones **/
279 /** for each node. Only a single field to reference the out array **/
280 /** is stored in each node and a field referencing the large out **/
281 /** array in irgraph. The 0 field of each out array contains the **/
282 /** size of this array. This saves memory in the irnodes themselves.**/
283 /** The construction does two passes over the graph. The first pass **/
284 /** counts the overall number of outs and the outs of each node. It **/
285 /** stores the outs of each node in the out reference of the node. **/
286 /** Then the large array is allocated. The second iteration chops **/
287 /** the large array into smaller parts, sets the out edges and **/
288 /** recounts the out edges. **/
289 /** Removes Tuple nodes! **/
290 /*--------------------------------------------------------------------*/
293 /** Returns the amount of out edges for not yet visited successors. */
/* First pass of out-edge construction.  During this pass node->out does
   NOT point to an array yet: it temporarily stores the node's edge count
   encoded as a pointer via INT_TO_PTR. */
294 static int _count_outs(ir_node *n) {
295 	int start, i, res, irn_arity;
298 	n->out = INT_TO_PTR(1); /* Space for array size. */
/* Blocks have no block input, hence start at 0; other nodes include the
   block predecessor at index -1. */
300 	start = is_Block(n) ? 0 : -1;
301 	irn_arity = get_irn_arity(n);
302 	res = irn_arity - start + 1; /* --1 or --0; 1 for array size. */
304 	for (i = start; i < irn_arity; ++i) {
305 		/* Optimize Tuples. They annoy if walking the cfg. */
306 		ir_node *pred = get_irn_n(n, i);
307 		ir_node *skipped_pred = skip_Tuple(pred);
309 		if (skipped_pred != pred) {
310 			set_irn_n(n, i, skipped_pred);
313 		/* count Def-Use edges for predecessors */
314 		if (!irn_visited(skipped_pred))
315 			res += _count_outs(skipped_pred);
317 		/*count my Def-Use edges */
318 		skipped_pred->out = INT_TO_PTR(PTR_TO_INT(skipped_pred->out) + 1);
324 /** Returns the amount of out edges for not yet visited successors.
325  *  This version handles some special nodes like irg_frame, irg_args etc.
327 static int count_outs(ir_graph *irg) {
331 	inc_irg_visited(irg);
332 	res = _count_outs(get_irg_end(irg));
334 	/* Now handle anchored nodes. We need the out count of those
335 	   even if they are not visible. */
336 	for (i = anchor_last - 1; i >= 0; --i) {
337 		n = get_irg_anchor(irg, i);
/* Anchors not reached from End still need a size slot of their own. */
338 		if (!irn_visited_else_mark(n)) {
339 			n->out = INT_TO_PTR(1);
347  * Enter memory for the outs to a node.
349  * @param use   current node
350  * @param free  current free address in the chunk allocated for the outs
352  * @return The next free address
354 static ir_def_use_edge *_set_out_edges(ir_node *use, ir_def_use_edge *free) {
355 	int n_outs, start, i, irn_arity, pos;
357 	mark_irn_visited(use);
359 	/* Allocate my array */
/* node->out still holds the edge count from the first pass (count_outs);
   decode it, then carve this node's slice out of the big chunk. */
360 	n_outs = PTR_TO_INT(use->out);
364 #endif /* defined DEBUG_libfirm */
366 	/* We count the successors again, the space will be sufficient.
367 	   We use this counter to remember the position for the next back
371 	start = is_Block(use) ? 0 : -1;
372 	irn_arity = get_irn_arity(use);
374 	for (i = start; i < irn_arity; ++i) {
375 		ir_node *def = get_irn_n(use, i);
378 		if (!irn_visited(def))
379 			free = _set_out_edges(def, free);
381 		/* Remember this Def-Use edge */
/* out[0].pos serves as a fill cursor: next free slot in def's array. */
382 		pos = def->out[0].pos + 1;
383 		def->out[pos].use = use;
384 		def->out[pos].pos = i;
386 		/* increase the number of Def-Use edges so far */
387 		def->out[0].pos = pos;
393  * Enter memory for the outs to a node. Handles special nodes
395  * @param irg   the graph
396  * @param free  current free address in the chunk allocated for the outs
398  * @return The next free address
400 static ir_def_use_edge *set_out_edges(ir_graph *irg, ir_def_use_edge *free) {
404 	inc_irg_visited(irg);
405 	free = _set_out_edges(get_irg_end(irg), free);
407 	/* handle anchored nodes */
408 	for (i = anchor_last - 1; i >= 0; --i) {
409 		n = get_irg_anchor(irg, i);
/* Anchors not reached from End get their own slice of the chunk, too. */
410 		if (!irn_visited_else_mark(n)) {
411 			n_outs = PTR_TO_INT(n->out);
415 #endif /* defined DEBUG_libfirm */
423 /* compute the outs for a given graph */
/* Two-pass construction: pass 1 (count_outs) tallies edges per node and
   overall, pass 2 (set_out_edges) splits one large array into per-node
   slices and fills them.  Frees any previously computed outs first. */
424 void compute_irg_outs(ir_graph *irg) {
425 	ir_graph *rem = current_ir_graph;
427 	ir_def_use_edge *end = NULL;         /* Only for debugging */
429 	current_ir_graph = irg;
431 	/* Update graph state */
432 	assert(get_irg_phase_state(current_ir_graph) != phase_building);
434 	if (current_ir_graph->outs_state != outs_none)
435 		free_irg_outs(current_ir_graph);
437 	/* This first iteration counts the overall number of out edges and the
438 	   number of out edges for each node. */
439 	n_out_edges = count_outs(irg);
441 	/* allocate memory for all out edges. */
442 	irg->outs = XMALLOCNZ(ir_def_use_edge, n_out_edges);
444 	irg->n_outs = n_out_edges;
445 #endif /* defined DEBUG_libfirm */
447 	/* The second iteration splits the irg->outs array into smaller arrays
448 	   for each node and writes the back edges into this array. */
449 	end = set_out_edges(irg, irg->outs);
451 	/* Check how much memory we have used */
452 	assert (end == (irg->outs + n_out_edges));
454 	current_ir_graph->outs_state = outs_consistent;
455 	current_ir_graph = rem;
/* Ensures consistent out edges for @p irg, recomputing only if needed. */
458 void assure_irg_outs(ir_graph *irg) {
459 	if (get_irg_outs_state(irg) != outs_consistent)
460 		compute_irg_outs(irg);
/* Computes out edges for every graph in the program (irp). */
463 void compute_irp_outs(void) {
465 	for (i = get_irp_n_irgs() -1; i >= 0; --i)
466 		compute_irg_outs(get_irp_irg(i));
/* Frees the out edges of every graph in the program (irp). */
469 void free_irp_outs(void) {
471 	for (i = get_irp_n_irgs() -1; i >= 0; --i)
472 		free_irg_outs(get_irp_irg(i));
476 /*------------------------------------------------------------*
477  * This computes the outedges for an interprocedural graph.  *
478 * There is one quirk: *
479 * The number of the outedges for each node is saved in *
480 * the first member of the ir_node** array. Maybe we should *
481 * change this to make it more portable... *
482 *------------------------------------------------------------*/
485 #ifdef INTERPROCEDURAL_VIEW
487  * Inits the number of outedges for each node
/* cg_walk pre-callback: abuse node->out as an integer edge counter,
   starting at 1 to reserve the size slot. */
490 static void init_count(ir_node * node, void *env) {
492 	node->out = (ir_node **) 1; /* 1 for the array size */
497  * Adjusts the out edge count for its predecessors
498  * and adds the current arity to the overall count,
499  * which is saved in "env"
501 static void node_arity_count(ir_node * node, void * env) {
502 	int *anz = (int *) env, arity, n_outs, i, start;
505 	arity = get_irn_arity(node);
506 	start = (is_Block(node)) ? 0 : -1;
/* NOTE(review): the leading 1 looks like the per-node size slot, which
   init_count already reserves -- confirm before changing anything here. */
508 	n_outs = 1 + arity + (-start);  // ((is_Block(node)) ? 0 : 1);   // Why + 1??
511 	for(i = start; i < arity; i++) {
512 		succ = get_irn_n(node, i);
/* Bump the predecessor's counter (still encoded in its out pointer). */
513 		succ->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(succ->out) + 1);
518  * Inits all nodes for setting the outedges
519  * Returns the overall count of edges
/* Interprocedural counting pass: walks the call graph once. */
521 int count_ip_outs(void) {
524 	cg_walk(init_count, node_arity_count, &res);
/* Running total vs. expected total of out edges; checked by the
   assertion in set_array_pointer(). */
529 static int dummy_count = 0, global_count; /* Only for debugging */
532  * For each node: Sets the pointer to array
533  * in which the outedges are written later.
534  * The current array start is transported in env
536 static void set_array_pointer(ir_node *node, void *env) {
538 	ir_node ***free = (ir_node ***) env;
540 	/* Allocate my array */
541 	n_outs = PTR_TO_INT(node->out);   /* We wrote the count here in count_ip_outs */
542 	dummy_count += n_outs;
543 	assert(dummy_count <= global_count && "More outedges than initially counted!");
/* Advance the shared cursor past this node's slice of the big array. */
545 	*free = &((*free)[n_outs]);
546 	/* We count the successors again, the space will be sufficient.
547 	   We use this counter to remember the position for the next back
/* Reset slot 0: it becomes the fill cursor used by set_out_pointer(). */
549 	node -> out[0] = (ir_node *) 0;
554  * Adds an outedge from the predecessor to the
557 static void set_out_pointer(ir_node * node, void *env) {
558 	int i, arity = get_irn_arity(node);
560 	int start = (!is_Block(node)) ? -1 : 0;
563 	for (i = start; i < arity; ++i) {
564 		succ = get_irn_n(node, i);
/* Append this use to succ's array; out[0] tracks the fill count. */
565 		succ->out[get_irn_n_outs(succ)+1] = node;
566 		succ->out[0] = INT_TO_PTR(get_irn_n_outs(succ) + 1);
572  * Sets the outedges for all nodes.
/* Second interprocedural pass: distribute array slices, then fill them. */
574 void set_ip_outs(void) {
575 	ir_node **outedge_array = get_irp_ip_outedges();
576 	cg_walk(set_array_pointer, set_out_pointer, (void *) &outedge_array);
582  * Counts the outedges, allocates memory to save the
583  * outedges and fills this outedge array in interprocedural
586 void compute_ip_outs(void) {
590 	assert(get_irp_ip_view_state() == ip_view_valid &&
591 	       "Cannot construct outs for invalid ip view.");
593 	if (irp->outs_state != outs_none) {
/* Two-pass scheme mirroring compute_irg_outs: count, allocate, fill. */
597 	global_count = n_out_edges = count_ip_outs();
598 	out_edges = XMALLOCNZ(ir_node*, n_out_edges);
599 	set_irp_ip_outedges(out_edges);
/* Releases the interprocedural out-edge array and resets the state. */
603 void free_ip_outs(void) {
604 	ir_node **out_edges = get_irp_ip_outedges();
605 	if (out_edges != NULL) {
607 		set_irp_ip_outedges(NULL);
609 	irp->outs_state = outs_none;
/* Frees the out-edge array of @p irg and marks the outs as invalid. */
614 void free_irg_outs(ir_graph *irg) {
615 	/*   current_ir_graph->outs_state = outs_none; */
616 	irg->outs_state = outs_none;
/* NOTE(review): irg->n_outs is an ir_def_use_edge COUNT (see
   compute_irg_outs), but memset takes a size in BYTES -- this scrubs only
   part of the array.  Likely should be n_outs * sizeof(*irg->outs);
   harmless (debug-only scrub before free) but verify. */
620 	memset(irg->outs, 0, irg->n_outs);
621 #endif /* defined DEBUG_libfirm */
626 #endif /* defined DEBUG_libfirm */
630 	/* when debugging, *always* reset all nodes' outs!  irg->outs might
631 	   have been lying to us */
632 	irg_walk_graph (irg, reset_outs, NULL, NULL);
633 #endif /* defined DEBUG_libfirm */
/* Walker callback for verify_outs(): checks that the in edges and the
   out edges of @p irn agree in both directions, reporting mismatches
   to stderr and counting errors in *env. */
636 static void check_out_edges(ir_node *irn, void *env) {
640 	int     last    = is_Block(irn) ? 0 : -1;
642 	/* check forward: every input must have an out edge */
643 	for (i = get_irn_arity(irn) - 1; i >= last; --i) {
644 		ir_node *pred = get_irn_n(irn, i);
646 		for (j = get_irn_n_outs(pred) - 1; j >= 0; --j) {
647 			ir_node *user = get_irn_out_ex(pred, j, &pos);
649 			if (user == irn && pos == i) {
654 			ir_fprintf(stderr, "Missing out edge from %+F input %d to %+F", irn, i, pred);
659 	/* checking backward */
660 	for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
661 		ir_node *user = get_irn_out_ex(irn, i, &pos);
/* The recorded input position must point back at this very node. */
663 		if (get_irn_n(user, pos) != irn) {
664 			ir_fprintf(stderr, "Spurious out edge from %+F output %d to %+F", irn, i, user);
671 /* verify outs edges. */
/* Runs check_out_edges over the whole graph; panics on any mismatch. */
672 void verify_outs(ir_graph *irg) {
674 	irg_walk_graph(irg, NULL, check_out_edges, &errors);
676 		panic("Out edges are corrupt");