2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Always available outs.
23 * @author Sebastian Hack, Michael Beck, Andreas Schoesser
27 * These are out-edges (also called def-use edges) that are dynamically
28 * updated as the graph changes.
34 #include "iredgekinds.h"
35 #include "iredges_t.h"
45 #include "iredgeset.h"
/* Instantiate the generic hashset template as ir_edgeset_t: entries are
 * ir_edge_t pointers keyed on their (src, pos) pair; the special value
 * (ir_edge_t*)-1 marks deleted slots. */
50 #define HashSet ir_edgeset_t
51 #define HashSetIterator ir_edgeset_iterator_t
52 #define ValueType ir_edge_t*
53 #define NullValue NULL
54 #define DeletedValue ((ir_edge_t*)-1)
55 #define Hash(this,key) (HASH_PTR(key->src) ^ (key->pos * 40013))
56 #define KeysEqual(this,key1,key2) ((key1->src) == (key2->src) && (key1->pos == key2->pos))
57 #define SetRangeEmpty(ptr,size) memset(ptr, 0, (size) * sizeof((ptr)[0]))
/* Map the generic hashset entry points onto the ir_edgeset_* API names. */
59 #define hashset_init ir_edgeset_init
60 #define hashset_init_size ir_edgeset_init_size
61 #define hashset_destroy ir_edgeset_destroy
62 #define hashset_insert ir_edgeset_insert
63 #define hashset_remove ir_edgeset_remove
64 #define hashset_find ir_edgeset_find
65 #define hashset_size ir_edgeset_size
66 #define hashset_iterator_init ir_edgeset_iterator_init
67 #define hashset_iterator_next ir_edgeset_iterator_next
68 #define hashset_remove_iterator ir_edgeset_remove_iterator
73 * A function that allows for setting an edge.
74 * This abstraction is necessary since different edge kinds have
75 * different methods of setting edges.
77 typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);
80 * A function that returns the "arity" of a given edge kind
83 typedef int (get_edge_src_arity_func_t)(const ir_node *src);
86 * A function that returns the pos'th edge of a given edge kind for a node.
88 typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);
91 * Additional data for an edge kind.
94 const char *name; /**< name of this edge kind */
95 set_edge_func_t *set_edge; /**< the set_edge function */
96 int first_idx; /**< index of the first possible edge */
/* NOTE(review): first_idx is -1 for normal edges (see edge_kind_info below),
 * which makes foreach_tgt include input position -1 as well. */
97 get_edge_src_arity_func_t *get_arity; /**< the get_arity function */
98 get_edge_src_n_func_t *get_n; /**< the get_n function */
99 } ir_edge_kind_info_t;
102 * Get the predecessor block.
104 static ir_node *get_block_n(const ir_node *block, int pos) {
/* Delegates to get_Block_cfgpred_block(). NOTE(review): assumes that helper
 * handles bad control-flow predecessors -- confirm callers tolerate its
 * possible return values. */
106 return get_Block_cfgpred_block(block, pos);
/* Dispatch table, one row per edge kind: name, setter, first input index,
 * arity getter, and per-position getter. */
111 static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
112 { "normal" , set_irn_n, -1, get_irn_arity, get_irn_n },
113 { "block succs", NULL, 0, get_irn_arity, get_block_n },
114 { "dependency", set_irn_dep, 0, get_irn_deps, get_irn_dep }
/* Iterate all target positions of irn for the given edge kind, starting at
 * the kind's first_idx (which is -1 for normal edges). */
117 #define foreach_tgt(irn, i, n, kind) for(i = edge_kind_info[kind].first_idx, n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
118 #define get_n(irn, pos, kind) (edge_kind_info[kind].get_n(irn, pos))
119 #define get_kind_str(kind) (edge_kind_info[kind].name)
121 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
124 * This flag is set to 1, if the edges get initialized for an irg.
125 * Then register additional data is forbidden.
127 static int edges_used = 0;
130 * Summed size of all users private data
133 static int edges_private_size = 0;
/* Total allocation size of one edge: the struct itself plus all registered
 * private data laid out directly behind it. */
134 #define EDGE_SIZE (sizeof(ir_edge_t) + edges_private_size)
137 * If set to 1, the list heads are checked every time an edge is changed.
139 static int edges_dbg = 0;
142 /* a static variable holding the last number assigned to a new edge */
143 static long last_edge_num = -1;
147 * Returns an ID for the given edge.
149 static inline long edge_get_id(const ir_edge_t *e) {
/* NOTE(review): the DEBUG_libfirm and non-debug branch bodies are not fully
 * visible here; the two variants presumably derive the ID differently. */
152 #else /* DEBUG_libfirm */
154 #endif /* DEBUG_libfirm */
158 * Announce to reserve extra space for each edge to be allocated.
160 * @param n: Size of the space to reserve
162 * @return Offset at which the private data will begin
164 * Several users can reserve extra space for private usage.
165 * Each user has to remember his given offset and the size of his private data.
166 * To be called before FIRM is initialized.
168 int edges_register_private_data(size_t n) {
/* remember the offset where this user's data will start, then grow */
169 int res = edges_private_size;
171 assert(!edges_used && "you cannot register private edge data, if edges have been initialized");
173 edges_private_size += n;
/* NOTE(review): the 'return res;' line is not visible in this excerpt. */
178 * Reset the user's private data at offset 'offset'
179 * The user has to remember his offset and the size of his data!
180 * Caution: Using wrong values here can destroy other users private data!
182 void edges_reset_private_data(ir_graph *irg, int offset, unsigned size) {
183 irg_edge_info_t *info = _get_irg_edge_info(irg, EDGE_KIND_NORMAL);
185 ir_edgeset_iterator_t iter;
187 foreach_ir_edgeset(&info->edges, edge, iter) {
188 memset(edge + sizeof(*edge) + offset, 0, size);
/* TIMES37(x): multiply by 37 using shifts and adds (32x + 4x + x). */
192 #define TIMES37(x) (((x) << 5) + ((x) << 2) + (x))
194 #define get_irn_out_list_head(irn) (&get_irn_out_info(irn)->outs)
/* Hash an edge by mixing its source position with its source pointer. */
196 #define edge_hash(edge) (TIMES37((edge)->pos) + HASH_PTR((edge)->src))
199 * Initialize the out information for a graph.
200 * @note Dead node elimination can call this on an already initialized graph.
202 void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind) {
203 if (edges_activated_kind(irg, kind)) {
204 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
/* initial capacity: roughly two out edges per estimated node */
205 size_t amount = irg->estimated_node_count * 2;
/* Re-initialization: reuse the previous set's size as the new estimate,
 * then release the old set and its backing obstack before rebuilding. */
208 if(info->allocated) {
209 amount = ir_edgeset_size(&info->edges);
210 ir_edgeset_destroy(&info->edges);
211 obstack_free(&info->edges_obst, NULL);
213 obstack_init(&info->edges_obst);
214 INIT_LIST_HEAD(&info->free_edges);
215 ir_edgeset_init_size(&info->edges, amount);
221 * Get the edge object of an outgoing edge at a node.
222 * @param irg The graph, the node is in.
223 * @param src The node at which the edge originates.
224 * @param pos The position of the edge.
225 * @param kind The kind of the edge.
226 * @return The corresponding edge object or NULL,
227 * if no such edge exists.
229 const ir_edge_t *get_irn_edge_kind(ir_graph *irg, const ir_node *src, int pos, ir_edge_kind_t kind)
231 if (edges_activated_kind(irg, kind)) {
232 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
/* build a key template (src, pos) and look it up in the edge set */
235 key.src = (ir_node *)src;
238 return ir_edgeset_find(&info->edges, &key);
245 * Get the edge object of an outgoing edge at a node.
246 * Looks for an edge for all kinds.
248 const ir_edge_t *get_irn_edge(ir_graph *irg, const ir_node *src, int pos) {
249 const ir_edge_t *edge;
/* try normal edges first, fall back to block-successor edges */
250 if((edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_NORMAL)) == NULL)
251 edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_BLOCK);
256 * Change the out count
258 * @param tgt the edge target
259 * @param kind the kind of the edge
261 static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs) {
262 irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
263 info->out_count += ofs;
266 assert(info->out_count >= 0);
267 if (info->out_count == 0 && kind == EDGE_KIND_NORMAL) {
268 /* tgt lost its last user: drop the edges from tgt to all of its
 * predecessors (including position -1) so they are released too. */
271 for (i = get_irn_arity(tgt) - 1; i >= -1; --i) {
272 ir_node *prev = get_irn_n(tgt, i);
274 edges_notify_edge(tgt, i, NULL, prev, current_ir_graph);
/* also release the dependency edges originating at tgt */
276 for (i = get_irn_deps(tgt) - 1; i >= 0; --i) {
277 ir_node *prev = get_irn_dep(tgt, i);
279 edges_notify_edge_kind(tgt, i, NULL, prev, EDGE_KIND_DEP, current_ir_graph);
287 * Verify the edge list of a node, ie. ensure it's a loop:
288 * head -> e_1 -> ... -> e_n -> head
290 static inline void vrfy_list_head(ir_node *irn, ir_edge_kind_t kind) {
/* collect every list_head visited; seeing one twice means the list loops
 * back to some interior element instead of to the head */
293 pset *lh_set = pset_new_ptr(16);
294 const struct list_head *head = _get_irn_outs_head(irn, kind);
295 const struct list_head *pos;
297 list_for_each(pos, head) {
298 if (pset_find_ptr(lh_set, pos)) {
299 const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
301 ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
302 fprintf(stderr, "- at list entry %d\n", num);
304 fprintf(stderr, "- edge(%ld) is invalid\n", edge_get_id(edge));
306 ir_fprintf(stderr, "- edge(%ld) %+F(%d)\n", edge_get_id(edge), edge->src, edge->pos);
311 pset_insert_ptr(lh_set, pos);
321 * Helper function to dump the edge set of a graph,
322 * unused in normal code.
324 void edges_dump_kind(ir_graph *irg, ir_edge_kind_t kind)
326 irg_edge_info_t *info;
328 ir_edgeset_iterator_t iter;
/* nothing to dump if this edge kind is not active for the graph */
331 if (!edges_activated_kind(irg, kind))
334 info = _get_irg_edge_info(irg, kind);
335 edges = &info->edges;
336 foreach_ir_edgeset(edges, e, iter) {
337 ir_printf("%+F %d %d\n", e->src, e->pos, e->invalid);
342 /* The edge from (src, pos) -> old_tgt is redirected to tgt */
343 void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
344 ir_node *old_tgt, ir_edge_kind_t kind,
347 const char *msg = "";
348 irg_edge_info_t *info;
353 assert(edges_activated_kind(irg, kind));
356 * Only do something, if the old and new target differ.
361 info = _get_irg_edge_info(irg, kind);
362 edges = &info->edges;
364 /* Initialize the edge template to search in the set. */
369 * If the target is NULL, the edge shall be deleted.
372 /* search the edge in the set. */
373 edge = ir_edgeset_find(edges, &templ);
375 /* mark the edge invalid if it was found */
/* unlink from its out list, remove from the set and recycle the edge
 * object on the free list */
378 list_del(&edge->list);
379 ir_edgeset_remove(edges, edge);
380 list_add(&edge->list, &info->free_edges);
386 #endif /* DEBUG_libfirm */
387 edge_change_cnt(old_tgt, kind, -1);
389 /* If the edge was not found issue a warning on the debug stream */
390 msg = "edge to delete not found!\n";
394 * The target is not NULL and the old target differs
395 * from the new target, the edge shall be moved (if the
396 * old target was != NULL) or added (if the old target was
399 struct list_head *head = _get_irn_outs_head(tgt, kind);
401 assert(head->next && head->prev &&
402 "target list head must have been initialized");
404 /* If the old target is not null, the edge is moved. */
406 edge = ir_edgeset_find(edges, &templ);
407 assert(edge && "edge to redirect not found!");
408 assert(! edge->invalid && "Invalid edge encountered");
/* re-home the existing edge object onto the new target's out list */
412 list_move(&edge->list, head);
413 edge_change_cnt(old_tgt, kind, -1);
415 /* The old target was NULL, thus, the edge is newly created. */
/* take a recycled edge from the free list if possible, otherwise
 * allocate a fresh one (struct + private data) on the obstack */
419 if (list_empty(&info->free_edges)) {
420 edge = obstack_alloc(&info->edges_obst, EDGE_SIZE);
422 edge = list_entry(info->free_edges.next, ir_edge_t, list);
423 list_del(&edge->list);
431 edge->list.next = NULL;
432 edge->list.prev = NULL;
/* zero the private-data area that lives directly behind the struct */
433 memset(edge + 1, 0, edges_private_size);
434 DEBUG_ONLY(edge->src_nr = get_irn_node_nr(src));
436 new_edge = ir_edgeset_insert(edges, edge);
437 if (new_edge != edge) {
438 panic("new edge exists already");
442 list_add(&edge->list, head);
443 DEBUG_ONLY(edge->edge_nr = ++last_edge_num);
446 edge_change_cnt(tgt, kind, +1);
/* NOTE(review): '#ifndef DEBUG_libfirm' looks inverted -- it enables the
 * list-head verification only in NON-debug builds. Confirm whether this
 * should be '#ifdef DEBUG_libfirm'. */
449 #ifndef DEBUG_libfirm
450 /* verify list heads */
453 vrfy_list_head(tgt, kind);
455 vrfy_list_head(old_tgt, kind);
459 DBG((dbg, LEVEL_5, "announce out edge: %+F %d-> %+F(%+F): %s\n", src, pos, tgt, old_tgt, msg));
/* Notify both the normal and (for Blocks) the block-successor edge sets
 * about the change (src, pos): old_tgt -> tgt. */
462 void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt, ir_graph *irg)
464 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
465 edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);
468 if (edges_activated_kind(irg, EDGE_KIND_BLOCK) && is_Block(src)) {
470 /* a MacroBlock edge: ignore it here */
/* translate node targets into their blocks (skipping Projs) before
 * notifying the block-successor edge set; Bad targets stay as-is */
472 ir_node *bl_old = old_tgt ? get_nodes_block(skip_Proj(old_tgt)) : NULL;
473 ir_node *bl_tgt = NULL;
476 bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(skip_Proj(tgt));
478 edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
484 * Delete all in edges of a given kind from the node old.
486 * @param old the node
487 * @param kind the kind of edges to remove
488 * @param irg the irg of the old node
490 static void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind, ir_graph *irg)
494 if (!edges_activated_kind(irg, kind))
497 DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));
/* redirect every outgoing edge of 'old' to NULL, i.e. delete it */
499 foreach_tgt(old, i, n, kind) {
500 ir_node *old_tgt = get_n(old, i, kind);
501 edges_notify_edge_kind(old, i, NULL, old_tgt, kind, irg);
506 * A node might be revived by CSE. Assure its edges.
508 * @param irn the node
509 * @param kind the kind of edges to remove
510 * @param irg the irg of the old node
512 static void edges_node_revival_kind(ir_node *irn, ir_edge_kind_t kind, ir_graph *irg)
514 irn_edge_info_t *info;
517 if (!edges_activated_kind(irg, kind))
/* nothing to do if this node's edges were already built */
520 info = _get_irn_edge_info(irn, kind);
521 if (info->edges_built)
524 DBG((dbg, LEVEL_5, "node revivaled (kind: %s): %+F\n", get_kind_str(kind), irn));
/* (re-)create an edge for every target of the node */
526 foreach_tgt(irn, i, n, kind) {
527 ir_node *tgt = get_n(irn, i, kind);
528 edges_notify_edge_kind(irn, i, tgt, NULL, kind, irg);
530 info->edges_built = 1;
/* Walker environment shared by the edge builders and verifiers below.
 * NOTE(review): further members (irg, kind, reachable) are referenced by
 * the walkers but their declarations are not visible in this excerpt. */
533 struct build_walker {
537 unsigned problem_found;
541 * Post-Walker: notify all edges
543 static void build_edges_walker(ir_node *irn, void *data) {
544 struct build_walker *w = data;
546 ir_edge_kind_t kind = w->kind;
547 ir_graph *irg = w->irg;
548 get_edge_src_n_func_t *get_n;
550 get_n = edge_kind_info[kind].get_n;
/* announce every (irn, i) -> pred edge as newly created (old target NULL) */
551 foreach_tgt(irn, i, n, kind) {
552 ir_node *pred = get_n(irn, i, kind);
553 edges_notify_edge_kind(irn, i, pred, NULL, kind, irg);
555 _get_irn_edge_info(irn, kind)->edges_built = 1;
559 * Pre-Walker: initializes the list-heads and set the out-count
562 static void init_lh_walker(ir_node *irn, void *data) {
563 struct build_walker *w = data;
564 ir_edge_kind_t kind = w->kind;
565 list_head *head = _get_irn_outs_head(irn, kind);
566 INIT_LIST_HEAD(head);
567 _get_irn_edge_info(irn, kind)->edges_built = 0;
568 _get_irn_edge_info(irn, kind)->out_count = 0;
572 * Pre-Walker: initializes the list-heads and set the out-count
575 * Additionally touches DEP nodes, as they might be DEAD.
576 * THIS IS UGLY, but I don't find a better way until we
578 * a) ensure that dead nodes are not used as input
579 * b) it might be sufficient to add those stupid NO_REG nodes
582 static void init_lh_walker_dep(ir_node *irn, void *data) {
583 struct build_walker *w = data;
584 ir_edge_kind_t kind = w->kind;
585 list_head *head = _get_irn_outs_head(irn, kind);
588 INIT_LIST_HEAD(head);
589 _get_irn_edge_info(irn, kind)->edges_built = 0;
590 _get_irn_edge_info(irn, kind)->out_count = 0;
/* also initialize the heads of all dependency targets, because a dead
 * dependency target will never be visited by the walker itself */
592 for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
593 ir_node *dep = get_irn_dep(irn, i);
595 head = _get_irn_outs_head(dep, kind);
597 INIT_LIST_HEAD(head);
598 _get_irn_edge_info(dep, kind)->edges_built = 0;
599 _get_irn_edge_info(dep, kind)->out_count = 0;
/* Wraps a walker function so it is applied at most once per node
 * (NOTE(review): the struct's 'data' member line is not visible here). */
603 typedef struct visitor_info_t {
604 irg_walk_func *visit;
609 * Visitor: initializes the list-heads and set the out-count
610 * of all nodes to 0 of nodes that are not seen so far.
612 static void visitor(ir_node *irn, void *data) {
613 visitor_info_t *info = data;
/* only visit nodes not marked yet; marking prevents double visits */
615 if (!irn_visited_else_mark(irn)) {
616 info->visit(irn, info->data);
621 * Build the initial edge set.
622 * Beware, this is not a simple task because it suffers from two
624 * - the anchor set allows access to Nodes that may not be reachable from
626 * - the identities add nodes to the "root set" that are not yet reachable
627 * from End. However, after some transformations, the CSE may revival these
630 * These problems can be fixed using different strategies:
631 * - Add an age flag to every node. Whenever the edge of a node is older
632 * then the current edge, invalidate the edges of this node.
633 * While this would help for revivaled nodes, it increases memory and runtime.
634 * - Delete the identities set.
635 * Solves the revival problem, but may increase the memory consumption, as
636 * nodes cannot be revivaled at all.
637 * - Manually iterate over the identities root set. This did not consume more memory
638 * but increase the computation time because the |identities| >= |V|
640 * Currently, we use the last option.
642 void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
644 struct build_walker w;
645 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
646 visitor_info_t visit;
653 assert(!info->activated);
656 edges_init_graph_kind(irg, kind);
657 if (kind == EDGE_KIND_DEP) {
/* DEP kind: initialize list heads first (anchors + identities),
 * then build the edges in a separate pass */
658 irg_walk_anchors(irg, init_lh_walker_dep, NULL, &w);
659 /* Argh: Dep nodes might be dead, so we MUST visit identities first */
660 visit.visit = init_lh_walker_dep;
661 visit_all_identities(irg, visitor, &visit);
662 irg_walk_anchors(irg, NULL, build_edges_walker, &w);
/* other kinds: init heads (pre) and build edges (post) in one walk */
664 irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
665 visit.visit = init_lh_walker;
666 visit_all_identities(irg, visitor, &visit);
/* Deactivate one edge kind for irg and release its edge storage. */
670 void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
672 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
675 if (info->allocated) {
676 obstack_free(&info->edges_obst, NULL);
677 ir_edgeset_destroy(&info->edges);
/* Out-of-line wrapper around the _edges_activated_kind() macro/inline. */
682 int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
684 return _edges_activated_kind(irg, kind);
689 * Reroute all use-edges from a node to another.
690 * @param from The node whose use-edges shall be withdrawn.
691 * @param to The node to which all the use-edges of @p from shall be
693 * @param irg The graph.
695 void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind, ir_graph *irg)
697 set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;
/* only possible for kinds with a setter (block succs have none) */
699 if(set_edge && edges_activated_kind(irg, kind)) {
700 struct list_head *head = _get_irn_outs_head(from, kind);
702 DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));
/* rewriting src's input moves the edge off this list via the edge
 * notification machinery, so the list shrinks until it is empty */
704 while (head != head->next) {
705 ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
706 assert(edge->pos >= -1);
707 set_edge(edge->src, edge->pos, to);
/* Verifier walker: check that every (irn, i) target relation in the graph
 * has a matching edge object in the edge set; report missing ones. */
712 static void verify_set_presence(ir_node *irn, void *data)
714 struct build_walker *w = data;
715 ir_edgeset_t *edges = &_get_irg_edge_info(w->irg, w->kind)->edges;
718 foreach_tgt(irn, i, n, w->kind) {
724 e = ir_edgeset_find(edges, &templ);
728 w->problem_found = 1;
730 ir_fprintf(stderr, "Edge Verifier: edge %+F,%d -> %+F (kind: \"%s\") is missing\n",
731 irn, i, get_n(irn, i, w->kind), get_kind_str(w->kind));
/* Verifier walker: check every recorded out edge of irn against the graph:
 * the source position must be within the source's arity, and following the
 * edge from its source must lead back to irn. */
737 static void verify_list_presence(ir_node *irn, void *data)
739 struct build_walker *w = data;
/* remember that this node is reachable, for the superfluous-edge check */
742 bitset_set(w->reachable, get_irn_idx(irn));
744 /* check list heads */
745 vrfy_list_head(irn, w->kind);
747 foreach_out_edge_kind(irn, e, w->kind) {
750 if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
751 w->problem_found = 1;
753 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F -> %+F recorded at src position %d, but src has arity %d\n",
754 edge_get_id(e), e->src, irn, e->pos, get_irn_arity(e->src));
759 tgt = get_n(e->src, e->pos, w->kind);
762 w->problem_found = 1;
764 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d (kind \"%s\") is no out edge of %+F but of %+F\n",
765 edge_get_id(e), e->src, e->pos, get_kind_str(w->kind), irn, tgt);
/* Verify the edge set of one kind for irg.
 * @return nonzero iff a problem was found. */
771 int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
773 struct build_walker w;
774 ir_edgeset_t *edges = &_get_irg_edge_info(irg, kind)->edges;
776 ir_edgeset_iterator_t iter;
780 w.reachable = bitset_alloca(get_irg_last_idx(irg));
783 /* Clear the present bit in all edges available. */
784 foreach_ir_edgeset(edges, e, iter) {
/* the walk marks each edge 'present' if the graph really contains it */
788 irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);
791 * Dump all edges which are not invalid and not present.
792 * These edges are superfluous and their presence in the
795 foreach_ir_edgeset(edges, e, iter) {
796 if (! e->invalid && ! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {
798 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d is superfluous\n", edge_get_id(e), e->src, e->pos);
802 return w.problem_found;
/* Nodes the counter-based verifier skips entirely. */
805 #define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))
808 * Clear link field of all nodes.
810 static void clear_links(ir_node *irn, void *env) {
811 struct build_walker *w = env;
/* ignored nodes get a NULL link; all others get a fresh user bitset
 * sized by the graph's highest node index */
814 if (IGNORE_NODE(irn)) {
815 set_irn_link(irn, NULL);
819 bs = bitset_malloc(get_irg_last_idx(w->irg));
820 set_irn_link(irn, bs);
824 * Increases count (stored in link field) for all operands of a node.
826 static void count_user(ir_node *irn, void *env) {
/* record irn as a user in each operand's bitset (set up by clear_links) */
832 for (i = get_irn_arity(irn) - 1; i >= first; --i) {
833 ir_node *op = get_irn_n(irn, i);
834 bitset_t *bs = get_irn_link(op);
837 bitset_set(bs, get_irn_idx(irn));
842 * Verifies if collected count, number of edges in list and stored edge count are in sync.
844 static void verify_edge_counter(ir_node *irn, void *env) {
845 struct build_walker *w = env;
851 const struct list_head *head;
852 const struct list_head *pos;
854 if (IGNORE_NODE(irn))
/* bitset of users collected by count_user() via the link field */
857 bs = get_irn_link(irn);
860 edge_cnt = _get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
861 head = _get_irn_outs_head(irn, EDGE_KIND_NORMAL);
863 /* We can iterate safely here, list heads have already been verified. */
864 list_for_each(pos, head) {
868 /* check all nodes that reference us and count edges that point number
869 * of ins that actually point to us */
871 bitset_foreach(bs, idx) {
873 ir_node *src = get_idx_irn(w->irg, idx);
875 arity = get_irn_arity(src);
876 for (i = 0; i < arity; ++i) {
877 ir_node *in = get_irn_n(src, i);
/* compare the three independent counts against each other */
883 if (edge_cnt != list_cnt) {
884 w->problem_found = 1;
885 ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
886 edge_cnt, list_cnt, irn);
889 if (ref_cnt != list_cnt) {
890 w->problem_found = 1;
891 ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but the list contains %d edge(s)\n",
892 irn, ref_cnt, list_cnt);
894 /* Matze: buggy if a node has multiple ins pointing at irn */
/* flip each listed source out of the bitset; whatever remains set is a
 * user missing from the list (or, flipped back in, a list-only entry) */
896 list_for_each(pos, head) {
897 ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
898 bitset_flip(bs, get_irn_idx(edge->src));
901 if (ref_cnt < list_cnt)
902 fprintf(stderr," following nodes are recorded in list, but not as user:\n");
904 fprintf(stderr," following nodes are user, but not recorded in list:\n");
907 bitset_foreach(bs, idx) {
908 ir_node *src = get_idx_irn(w->irg, idx);
909 ir_fprintf(stderr, " %+F", src);
911 fprintf(stderr, "\n");
919 * Verifies the out edges of an irg.
921 int edges_verify(ir_graph *irg) {
922 struct build_walker w;
923 int problem_found = 0;
925 /* verify normal edges only */
926 problem_found = edges_verify_kind(irg, EDGE_KIND_NORMAL);
929 w.kind = EDGE_KIND_NORMAL;
/* second pass: cross-check stored out counts, list lengths and actual
 * user counts collected via the link field */
933 irg_walk_anchors(irg, clear_links, count_user, &w);
934 irg_walk_anchors(irg, NULL, verify_edge_counter, &w);
936 return problem_found ? 1 : w.problem_found;
/* Pass descriptor: base pass data plus the abort-on-problem flag.
 * NOTE(review): the enclosing 'struct pass_t {' line is not visible here. */
940 ir_graph_pass_t pass;
941 unsigned assert_on_problem;
945 * Wrapper to edges_verify to be run as an ir_graph pass.
947 static int edges_verify_wrapper(ir_graph *irg, void *context) {
948 struct pass_t *pass = context;
949 int problems_found = edges_verify(irg);
950 /* do NOT rerun the pass if verify is ok :-) */
/* NOTE(review): this assert looks inverted -- as written it FAILS exactly
 * when verification succeeds (problems_found == 0). The intent appears to
 * be "abort if problems were found and the flag is set", i.e.
 * assert(!(problems_found && pass->assert_on_problem)); confirm and fix. */
951 assert(problems_found && pass->assert_on_problem);
955 /* Creates an ir_graph pass for edges_verify(). */
956 ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem) {
957 struct pass_t *pass = XMALLOCZ(struct pass_t);
959 def_graph_pass_constructor(
960 &pass->pass, name ? name : "edges_verify", edges_verify_wrapper);
962 /* neither dump nor verify */
963 pass->pass.dump_irg = (DUMP_ON_IRG_FUNC)ir_prog_no_dump;
964 pass->pass.verify_irg = (RUN_ON_IRG_FUNC)ir_prog_no_verify;
966 pass->assert_on_problem = assert_on_problem;
/* NOTE(review): the 'return &pass->pass;' line is not visible here. */
/* Register this module's debug channel. */
970 void init_edges(void) {
971 FIRM_DBG_REGISTER(dbg, DBG_EDGES);
972 /* firm_dbg_set_mask(dbg, -1); */
/* Enable/disable list-head checking on every edge change. */
975 void edges_init_dbg(int do_dbg) {
/* Activate all edge kinds for irg; DEP edges only in the backend phase. */
979 void edges_activate(ir_graph *irg) {
980 edges_activate_kind(irg, EDGE_KIND_NORMAL);
981 edges_activate_kind(irg, EDGE_KIND_BLOCK);
982 if (get_irg_phase_state(irg) == phase_backend)
983 edges_activate_kind(irg, EDGE_KIND_DEP);
/* Deactivate edges in the reverse order of edges_activate(). */
986 void edges_deactivate(ir_graph *irg) {
987 if (get_irg_phase_state(irg) == phase_backend)
988 edges_deactivate_kind(irg, EDGE_KIND_DEP);
989 edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
990 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
/* Ensure block and normal edges are available, activating lazily. */
993 int edges_assure(ir_graph *irg)
997 if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
1000 edges_activate_kind(irg, EDGE_KIND_BLOCK);
1002 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
1005 edges_activate_kind(irg, EDGE_KIND_NORMAL);
/* Ensure one edge kind is available; returns whether it already was. */
1011 int edges_assure_kind(ir_graph *irg, ir_edge_kind_t kind) {
1012 int activated = edges_activated_kind(irg, kind);
1015 edges_activate_kind(irg, kind);
/* Remove all edges of the deleted node, for both maintained kinds. */
1020 void edges_node_deleted(ir_node *irn, ir_graph *irg) {
1021 edges_node_deleted_kind(irn, EDGE_KIND_NORMAL, irg);
1022 edges_node_deleted_kind(irn, EDGE_KIND_BLOCK, irg);
/* Rebuild the edges of a node revived by CSE, for both maintained kinds. */
1025 void edges_node_revival(ir_node *irn, ir_graph *irg) {
1026 edges_node_revival_kind(irn, EDGE_KIND_NORMAL, irg);
1027 edges_node_revival_kind(irn, EDGE_KIND_BLOCK, irg);
/* Out-of-line wrappers around the underscore-prefixed inline accessors,
 * so the API is available as real symbols as well. */
1030 const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind) {
1031 return _get_irn_out_edge_first_kind(irn, kind);
1034 const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last) {
1035 return _get_irn_out_edge_next(irn, last);
1038 ir_node *(get_edge_src_irn)(const ir_edge_t *edge) {
1039 return _get_edge_src_irn(edge);
1042 int (get_edge_src_pos)(const ir_edge_t *edge) {
1043 return _get_edge_src_pos(edge);
1046 int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind) {
1047 return _get_irn_n_edges_kind(irn, kind);
/* Debug helper: print every out edge of irn for every edge kind. */
1050 void dump_all_out_edges(ir_node *irn) {
1052 for (i = 0; i < EDGE_KIND_LAST; ++i) {
1053 const ir_edge_t *edge;
1055 printf("kind \"%s\"\n", get_kind_str(i));
1056 foreach_out_edge_kind(irn, edge, i) {
1057 ir_printf("\t%+F(%d)\n", edge->src, edge->pos);
/* Recursive helper: depth-first walk over the block graph following
 * block-successor edges, applying pre/post at each unvisited block.
 * Uses the safe iteration variant since the callbacks may change edges. */
1062 static void irg_block_edges_walk2(ir_node *bl,
1063 irg_walk_func *pre, irg_walk_func *post,
1065 const ir_edge_t *edge, *next;
1067 if (!Block_block_visited(bl)) {
1068 mark_Block_block_visited(bl);
1073 foreach_out_edge_kind_safe(bl, edge, next, EDGE_KIND_BLOCK) {
1074 /* find the corresponding successor block. */
1075 ir_node *pred = get_edge_src_irn(edge);
1076 irg_block_edges_walk2(pred, pre, post, env);
/* Public entry: walk the block graph starting at 'node' along block edges.
 * Requires activated edges; manages the block-visited resource itself. */
1084 void irg_block_edges_walk(ir_node *node,
1085 irg_walk_func *pre, irg_walk_func *post,
1088 assert(edges_activated(current_ir_graph));
1089 assert(is_Block(node));
1091 ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
1093 inc_irg_block_visited(current_ir_graph);
1094 irg_block_edges_walk2(node, pre, post, env);
1096 ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);