2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Always available outs.
23 * @author Sebastian Hack, Michael Beck, Andreas Schoesser
27 * This are out-edges (also called def-use edges) that are dynamically
28 * updated as the graph changes.
34 #include "iredgekinds.h"
35 #include "iredges_t.h"
45 #include "iredgeset.h"
/* Instantiation of libFirm's generic hashset template for sets of ir_edge_t*.
 * The macros below configure the template (type names, empty/deleted markers,
 * hash and equality over the (src, pos) pair); the #defines that follow map
 * the generic hashset_* entry points onto the ir_edgeset_* names used in
 * this file.  NOTE(review): the template #include that consumes these macros
 * is not visible in this excerpt — source lines were elided. */
50 #define HashSet ir_edgeset_t
51 #define HashSetIterator ir_edgeset_iterator_t
52 #define ValueType ir_edge_t*
53 #define NullValue NULL
54 #define DeletedValue ((ir_edge_t*)-1)
/* An edge is identified by its (src, pos) pair: hash mixes the source
 * pointer with the position scaled by an arbitrary odd prime (40013). */
55 #define Hash(this,key) (HASH_PTR(key->src) ^ (key->pos * 40013))
56 #define KeysEqual(this,key1,key2) ((key1->src) == (key2->src) && (key1->pos == key2->pos))
57 #define SetRangeEmpty(ptr,size) memset(ptr, 0, (size) * sizeof((ptr)[0]))
59 #define hashset_init ir_edgeset_init
60 void ir_edgeset_init_size(ir_edgeset_t *self, size_t size);
61 #define hashset_init_size ir_edgeset_init_size
62 #define hashset_destroy ir_edgeset_destroy
63 #define hashset_insert ir_edgeset_insert
64 #define hashset_remove ir_edgeset_remove
65 ir_edge_t *ir_edgeset_find(const ir_edgeset_t *self, const ir_edge_t*);
66 #define hashset_find ir_edgeset_find
67 size_t ir_edgeset_size(const ir_edgeset_t *self);
68 #define hashset_size ir_edgeset_size
69 #define hashset_iterator_init ir_edgeset_iterator_init
70 #define hashset_iterator_next ir_edgeset_iterator_next
71 #define hashset_remove_iterator ir_edgeset_remove_iterator
/* Per-edge-kind virtual function table: each edge kind (normal, block-succ,
 * dependency) supplies its own set/arity/get accessors. */
76 * A function that allows for setting an edge.
77 * This abstraction is necessary since different edge kind have
78 * different methods of setting edges.
80 typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);
83 * A function that returns the "arity" of a given edge kind
86 typedef int (get_edge_src_arity_func_t)(const ir_node *src);
89 * A function that returns the pos'th edge of a given edge kind for a node.
91 typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);
94 * Additional data for an edge kind.
/* NOTE(review): the 'typedef struct {' opener of this aggregate is not
 * visible in this excerpt — source lines were elided. */
97 const char *name; /**< name of this edge kind */
98 set_edge_func_t *set_edge; /**< the set_edge function */
99 int first_idx; /**< index of the first possible edge */
100 get_edge_src_arity_func_t *get_arity; /**< the get_arity function */
101 get_edge_src_n_func_t *get_n; /**< the get_n function */
102 } ir_edge_kind_info_t;
105 * Get the predecessor block.
/* get_n callback for EDGE_KIND_BLOCK: resolves a block's pos'th CFG
 * predecessor to its containing block.  NOTE(review): braces elided in
 * this excerpt. */
107 static ir_node *get_block_n(const ir_node *block, int pos)
110 return get_Block_cfgpred_block(block, pos);
/* get_n callback for EDGE_KIND_NORMAL: like get_irn_n() but special-cases
 * n == -1 on Blocks (a Block has no -1 "block" input).  NOTE(review): the
 * body of the special case and the braces are elided in this excerpt —
 * presumably it returns NULL for that case; confirm against full source. */
115 static ir_node *get_irn_safe_n(const ir_node *node, int n)
117 if (n == -1 && is_Block(node))
119 return get_irn_n(node, n);
/* Dispatch table indexed by ir_edge_kind_t.  Normal edges start at index -1
 * (the block input of a node); block-succ and dependency edges start at 0.
 * Block-succ edges have no set_edge function, so they cannot be rerouted
 * via edges_reroute_kind(). */
122 static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
123 { "normal" , set_irn_n, -1, get_irn_arity, get_irn_safe_n },
124 { "block succs", NULL, 0, get_irn_arity, get_block_n },
125 { "dependency", set_irn_dep, 0, get_irn_deps, get_irn_dep }
/* Convenience accessors over the table: iterate a node's targets of a given
 * kind, fetch the pos'th target, fetch the kind's printable name. */
128 #define foreach_tgt(irn, i, n, kind) for (i = edge_kind_info[kind].first_idx, n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
129 #define get_n(irn, pos, kind) (edge_kind_info[kind].get_n(irn, pos))
130 #define get_kind_str(kind) (edge_kind_info[kind].name)
/* Module-wide debug handle and state flags. */
132 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
135 * This flag is set to 1, if the edges get initialized for an irg.
136 * Then register additional data is forbidden.
138 static int edges_used = 0;
141 * Summed size of all users private data
/* Total bytes of user-registered private data appended to every edge;
 * EDGE_SIZE is the full allocation size of one edge object. */
144 static size_t edges_private_size = 0;
145 #define EDGE_SIZE (sizeof(ir_edge_t) + edges_private_size)
148 * If set to 1, the list heads are checked every time an edge is changed.
150 static int edges_dbg = 0;
153 * Returns an ID for the given edge.
/* NOTE(review): the body of edge_get_id() is elided in this excerpt. */
155 static inline long edge_get_id(const ir_edge_t *e)
161 * Announce to reserve extra space for each edge to be allocated.
163 * @param n: Size of the space to reserve
165 * @return Offset at which the private data will begin
167 * Several users can reserve extra space for private usage.
168 * Each user has to remember his given offset and the size of his private data.
169 * To be called before FIRM is initialized.
171 size_t edges_register_private_data(size_t n)
/* The offset handed back is the size accumulated so far; callers get
 * consecutive, non-overlapping slices. */
173 size_t res = edges_private_size;
175 assert(!edges_used && "you cannot register private edge data, if edges have been initialized");
177 edges_private_size += n;
/* NOTE(review): the 'return res;' line is elided in this excerpt. */
182 * Reset the user's private data at offset 'offset'
183 * The user has to remember his offset and the size of his data!
184 * Caution: Using wrong values here can destroy other users private data!
186 void edges_reset_private_data(ir_graph *irg, int offset, unsigned size)
188 irg_edge_info_t *info = _get_irg_edge_info(irg, EDGE_KIND_NORMAL);
190 ir_edgeset_iterator_t iter;
192 foreach_ir_edgeset(&info->edges, edge, iter) {
193 memset(edge + sizeof(*edge) + offset, 0, size);
/* Multiply by 37 using shifts/adds (32x + 4x + x). */
197 #define TIMES37(x) (((x) << 5) + ((x) << 2) + (x))
199 #define get_irn_out_list_head(irn) (&get_irn_out_info(irn)->outs)
/* Alternative (src, pos) hash; NOTE(review): the hashset template above
 * uses its own Hash macro — can't tell from this excerpt where this one
 * is consumed. */
201 #define edge_hash(edge) (TIMES37((edge)->pos) + HASH_PTR((edge)->src))
204 * Initialize the out information for a graph.
205 * @note Dead node elimination can call this on an already initialized graph.
207 void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
209 if (edges_activated_kind(irg, kind)) {
210 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
/* Initial capacity guess: twice the estimated node count. */
211 size_t amount = irg->estimated_node_count * 2;
/* Re-initialization: reuse the previous set's size as the capacity hint
 * and throw away the old set and its backing obstack. */
214 if (info->allocated) {
215 amount = ir_edgeset_size(&info->edges);
216 ir_edgeset_destroy(&info->edges);
217 obstack_free(&info->edges_obst, NULL);
219 obstack_init(&info->edges_obst);
220 INIT_LIST_HEAD(&info->free_edges);
221 ir_edgeset_init_size(&info->edges, amount);
227 * Get the edge object of an outgoing edge at a node.
228 * @param irg The graph, the node is in.
229 * @param src The node at which the edge originates.
230 * @param pos The position of the edge.
231 * @param kind The kind of the edge.
232 * @return The corresponding edge object or NULL,
233 * if no such edge exists.
235 const ir_edge_t *get_irn_edge_kind(const ir_node *src, int pos, ir_edge_kind_t kind)
237 ir_graph *irg = get_irn_irg(src);
238 if (edges_activated_kind(irg, kind)) {
239 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
/* Lookup by (src, pos) template; NOTE(review): the declaration of 'key'
 * and the assignment of key.pos are elided in this excerpt. */
242 key.src = (ir_node *)src;
245 return ir_edgeset_find(&info->edges, &key);
252 * Change the out count
254 * @param tgt the edge target
255 * @param kind the kind of the edge
/* Adds 'ofs' (typically +1/-1) to tgt's cached out-edge count. */
257 static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs)
259 irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
260 info->out_count += ofs;
264 * Verify the edge list of a node, ie. ensure it's a loop:
265 * head -> e_1 -> ... -> e_n -> head
267 static inline void verify_list_head(ir_node *irn, ir_edge_kind_t kind)
/* Walk the intrusive list, remembering each visited node in a pset; a
 * revisit means the list loops back somewhere other than the head. */
271 pset *lh_set = pset_new_ptr(16);
272 const struct list_head *head = _get_irn_outs_head(irn, kind);
273 const struct list_head *pos;
275 list_for_each(pos, head) {
276 if (pset_find_ptr(lh_set, pos)) {
277 const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
279 ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
280 fprintf(stderr, "- at list entry %d\n", num);
282 fprintf(stderr, "- edge(%ld) is invalid\n", edge_get_id(edge));
284 ir_fprintf(stderr, "- edge(%ld) %+F(%d)\n", edge_get_id(edge), edge->src, edge->pos);
289 pset_insert_ptr(lh_set, pos);
/* NOTE(review): pset cleanup (del_pset) and the error/return plumbing are
 * elided in this excerpt. */
/* Debug dump: print src, pos and invalid flag of every edge of the given
 * kind in irg to stdout (via ir_printf). */
297 void edges_dump_kind(ir_graph *irg, ir_edge_kind_t kind)
299 irg_edge_info_t *info;
301 ir_edgeset_iterator_t iter;
/* Nothing to dump if this edge kind is not maintained for irg. */
304 if (!edges_activated_kind(irg, kind))
307 info = _get_irg_edge_info(irg, kind);
308 edges = &info->edges;
309 foreach_ir_edgeset(edges, e, iter) {
310 ir_printf("%+F %d %d\n", e->src, e->pos, e->invalid);
314 /* The edge from (src, pos) -> old_tgt is redirected to tgt */
/* Core update routine: keeps the edge hashset and the per-target intrusive
 * out-lists in sync when one input of a node changes.  Three cases:
 *   tgt == NULL                    -> delete the edge,
 *   old_tgt != NULL && != tgt      -> move the edge to tgt's list,
 *   old_tgt == NULL                -> create a new edge (recycling from the
 *                                     free list when possible).
 * NOTE(review): many lines (templ setup, branch structure, closing braces)
 * are elided in this excerpt. */
315 void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
316 ir_node *old_tgt, ir_edge_kind_t kind,
319 const char *msg = "";
320 irg_edge_info_t *info;
324 assert(edges_activated_kind(irg, kind));
327 * Only do something, if the old and new target differ.
332 info = _get_irg_edge_info(irg, kind);
333 edges = &info->edges;
335 /* Initialize the edge template to search in the set. */
340 * If the target is NULL, the edge shall be deleted.
343 /* search the edge in the set. */
344 ir_edge_t *edge = ir_edgeset_find(edges, &templ);
346 /* mark the edge invalid if it was found */
/* Unlink from the target's out-list, drop from the set, and park the
 * edge object on the free list for reuse. */
349 list_del(&edge->list);
350 ir_edgeset_remove(edges, edge);
351 list_add(&edge->list, &info->free_edges);
355 edge_change_cnt(old_tgt, kind, -1);
357 /* If the edge was not found issue a warning on the debug stream */
358 msg = "edge to delete not found!\n";
362 * The target is not NULL and the old target differs
363 * from the new target, the edge shall be moved (if the
364 * old target was != NULL) or added (if the old target was
367 struct list_head *head = _get_irn_outs_head(tgt, kind);
369 assert(head->next && head->prev &&
370 "target list head must have been initialized");
372 /* If the old target is not null, the edge is moved. */
374 ir_edge_t *edge = ir_edgeset_find(edges, &templ);
375 assert(edge && "edge to redirect not found!");
376 assert(! edge->invalid && "Invalid edge encountered");
/* The (src, pos) key is unchanged, only the target list differs, so the
 * hashset entry can stay put; just relink into tgt's out-list. */
380 list_move(&edge->list, head);
381 edge_change_cnt(old_tgt, kind, -1);
383 /* The old target was NULL, thus, the edge is newly created. */
387 if (list_empty(&info->free_edges)) {
388 edge = (ir_edge_t*)obstack_alloc(&info->edges_obst, EDGE_SIZE);
390 edge = list_entry(info->free_edges.next, ir_edge_t, list);
391 list_del(&edge->list);
399 edge->list.next = NULL;
400 edge->list.prev = NULL;
/* Zero the user-registered private data area behind the struct. */
401 memset(edge + 1, 0, edges_private_size);
403 new_edge = ir_edgeset_insert(edges, edge);
404 if (new_edge != edge) {
405 panic("new edge exists already");
409 list_add(&edge->list, head);
412 edge_change_cnt(tgt, kind, +1);
/* NOTE(review): '#ifndef DEBUG_libfirm' looks inverted — list-head
 * verification is debug-only elsewhere and would be expected under
 * '#ifdef DEBUG_libfirm' (likely also gated on edges_dbg; the gating
 * lines are elided here).  Confirm against the full source. */
415 #ifndef DEBUG_libfirm
416 /* verify list heads */
419 verify_list_head(tgt, kind);
421 verify_list_head(old_tgt, kind);
425 DBG((dbg, LEVEL_5, "announce out edge: %+F %d-> %+F(%+F): %s\n", src, pos, tgt, old_tgt, msg));
/* Public entry: forward an input change to the NORMAL edge bookkeeping and
 * derive the corresponding BLOCK (CFG successor) edge update. */
428 void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt,
431 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
432 edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);
435 if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
/* Map the node-level change to a block-level change; Bad targets are
 * kept as-is since they have no meaningful block. */
437 ir_node *bl_old = old_tgt ? get_nodes_block(old_tgt) : NULL;
438 ir_node *bl_tgt = NULL;
441 bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(tgt);
443 edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
444 } else if (get_irn_mode(src) == mode_X && old_tgt != NULL && is_Block(old_tgt)) {
445 /* moving a jump node from one block to another */
/* Every block that had old_tgt as CFG predecessor via this jump must be
 * re-pointed at tgt; skip predecessors that are not our jump 'src'. */
446 const ir_edge_t *edge;
447 const ir_edge_t *next;
448 foreach_out_edge_kind_safe(old_tgt, edge, next, EDGE_KIND_BLOCK) {
449 ir_node *succ = get_edge_src_irn(edge);
450 int succ_pos = get_edge_src_pos(edge);
451 ir_node *block_pred = get_Block_cfgpred(succ, succ_pos);
452 if (block_pred != src)
454 edges_notify_edge_kind(succ, succ_pos, tgt, old_tgt,
455 EDGE_KIND_BLOCK, irg);
462 * Delete all in edges of a given kind from the node old.
464 * @param old the node
465 * @param kind the kind of edges to remove
466 * @param irg the irg of the old node
468 static void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind)
471 ir_graph *irg = get_irn_irg(old);
473 if (!edges_activated_kind(irg, kind))
476 DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));
/* Report every outgoing target as rerouted to NULL, which deletes the
 * corresponding edges (see edges_notify_edge_kind). */
478 foreach_tgt(old, i, n, kind) {
479 ir_node *old_tgt = get_n(old, i, kind);
480 edges_notify_edge_kind(old, i, NULL, old_tgt, kind, irg);
485 * A node might be revivaled by CSE. Assure its edges.
487 * @param irn the node
488 * @param kind the kind of edges to remove
489 * @param irg the irg of the old node
491 static void edges_node_revival_kind(ir_node *irn, ir_edge_kind_t kind)
493 irn_edge_info_t *info;
495 ir_graph *irg = get_irn_irg(irn);
497 if (!edges_activated_kind(irg, kind))
/* If this node's edges were already built, nothing to do. */
500 info = _get_irn_edge_info(irn, kind);
501 if (info->edges_built)
504 DBG((dbg, LEVEL_5, "node revivaled (kind: %s): %+F\n", get_kind_str(kind), irn));
/* Create edges for every input as if they were just set (old target NULL). */
506 foreach_tgt(irn, i, n, kind) {
507 ir_node *tgt = get_n(irn, i, kind);
508 edges_notify_edge_kind(irn, i, tgt, NULL, kind, irg);
510 info->edges_built = 1;
/* Walker environment shared by the edge-building and verification walks.
 * NOTE(review): further members (e.g. kind, reachable bitset — both used
 * below) are elided in this excerpt. */
513 typedef struct build_walker {
516 unsigned problem_found;
520 * Post-Walker: notify all edges
522 static void build_edges_walker(ir_node *irn, void *data)
524 build_walker *w = (build_walker*)data;
526 ir_edge_kind_t kind = w->kind;
527 ir_graph *irg = get_irn_irg(irn);
/* Announce every input of irn as a freshly created edge, then mark the
 * node's edges as built. */
529 foreach_tgt(irn, i, n, kind) {
530 ir_node *pred = get_n(irn, i, kind);
531 edges_notify_edge_kind(irn, i, pred, NULL, kind, irg);
533 _get_irn_edge_info(irn, kind)->edges_built = 1;
537 * Pre-Walker: initializes the list-heads and set the out-count
540 static void init_lh_walker(ir_node *irn, void *data)
542 build_walker *w = (build_walker*)data;
543 ir_edge_kind_t kind = w->kind;
544 list_head *head = _get_irn_outs_head(irn, kind);
/* Reset per-node edge state: empty out-list, no edges built, count 0. */
545 INIT_LIST_HEAD(head);
546 _get_irn_edge_info(irn, kind)->edges_built = 0;
547 _get_irn_edge_info(irn, kind)->out_count = 0;
551 * Pre-Walker: initializes the list-heads and set the out-count
554 * Additionally touches DEP nodes, as they might be DEAD.
555 * THIS IS UGLY, but I don't find a better way until we
557 * a) ensure that dead nodes are not used as input
558 * b) it might be sufficient to add those stupid NO_REG nodes
561 static void init_lh_walker_dep(ir_node *irn, void *data)
563 build_walker *w = (build_walker*)data;
564 ir_edge_kind_t kind = w->kind;
565 list_head *head = _get_irn_outs_head(irn, kind);
/* Same reset as init_lh_walker ... */
568 INIT_LIST_HEAD(head);
569 _get_irn_edge_info(irn, kind)->edges_built = 0;
570 _get_irn_edge_info(irn, kind)->out_count = 0;
/* ... plus the same reset for every dependency target, since dead dep
 * targets would otherwise never be visited by the walker. */
572 for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
573 ir_node *dep = get_irn_dep(irn, i);
575 head = _get_irn_outs_head(dep, kind);
577 INIT_LIST_HEAD(head);
578 _get_irn_edge_info(dep, kind)->edges_built = 0;
579 _get_irn_edge_info(dep, kind)->out_count = 0;
/* Pairs a walker callback with its user data, for use as the environment
 * of visit_all_identities().  NOTE(review): the 'data' member referenced
 * below is elided in this excerpt. */
583 typedef struct visitor_info_t {
584 irg_walk_func *visit;
589 * Visitor: initializes the list-heads and set the out-count
590 * of all nodes to 0 of nodes that are not seen so far.
592 static void visitor(ir_node *irn, void *data)
594 visitor_info_t *info = (visitor_info_t*)data;
/* Skip nodes whose home block has been deleted. */
598 if (!is_Block(irn) && is_Deleted(get_nodes_block(irn)))
/* Only forward unvisited nodes; marks them visited as a side effect. */
601 if (!irn_visited_else_mark(irn)) {
602 info->visit(irn, info->data);
607 * Build the initial edge set.
608 * Beware, this is not a simple task because it suffers from two
610 * - the anchor set allows access to Nodes that may not be reachable from
612 * - the identities add nodes to the "root set" that are not yet reachable
613 * from End. However, after some transformations, the CSE may revival these
616 * These problems can be fixed using different strategies:
617 * - Add an age flag to every node. Whenever the edge of a node is older
618 * then the current edge, invalidate the edges of this node.
619 * While this would help for revivaled nodes, it increases memory and runtime.
620 * - Delete the identities set.
621 * Solves the revival problem, but may increase the memory consumption, as
622 * nodes cannot be revivaled at all.
623 * - Manually iterate over the identities root set. This did not consume more memory
624 * but increase the computation time because the |identities| >= |V|
626 * Currently, we use the last option.
628 void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
630 struct build_walker w;
631 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
632 visitor_info_t visit;
638 assert(!info->activated);
641 edges_init_graph_kind(irg, kind);
642 if (kind == EDGE_KIND_DEP) {
/* DEP edges: init list heads everywhere first (including possibly-dead
 * dep targets and identities), then build edges in a second walk. */
643 irg_walk_anchors(irg, init_lh_walker_dep, NULL, &w);
644 /* Argh: Dep nodes might be dead, so we MUST visit identities first */
645 visit.visit = init_lh_walker_dep;
646 visit_all_identities(irg, visitor, &visit);
647 irg_walk_anchors(irg, NULL, build_edges_walker, &w);
/* Other kinds: init (pre) and build (post) in one anchors walk, then
 * sweep the identities root set for nodes the walk missed. */
649 irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
650 visit.visit = init_lh_walker;
651 visit_all_identities(irg, visitor, &visit);
/* Tear down the edge information of the given kind: free the edge obstack
 * and destroy the hashset (only if they were ever allocated). */
655 void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
657 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
660 if (info->allocated) {
661 obstack_free(&info->edges_obst, NULL);
662 ir_edgeset_destroy(&info->edges);
/* Out-of-line wrapper around the _edges_activated_kind() macro/inline. */
667 int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
669 return _edges_activated_kind(irg, kind);
674 * Reroute all use-edges from a node to another.
675 * @param from The node whose use-edges shall be withdrawn.
676 * @param to The node to which all the use-edges of @p from shall be
678 * @param irg The graph.
680 void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind)
682 ir_graph *irg = get_irn_irg(from);
683 set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;
/* Only possible for kinds with a set_edge callback (BLOCK edges have
 * none, cf. edge_kind_info) and only when edges are active. */
685 if (set_edge && edges_activated_kind(irg, kind)) {
686 struct list_head *head = _get_irn_outs_head(from, kind);
688 DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));
/* Each set_edge() call triggers edges_notify_edge(), which unlinks the
 * first list entry — so looping until the list is empty terminates. */
690 while (head != head->next) {
691 ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
692 assert(edge->pos >= -1);
693 set_edge(edge->src, edge->pos, to);
/* Verifier walker: for every input of irn, check that a matching edge is
 * present in the hashset (and presumably mark it present — the lines that
 * set templ and e->present are elided in this excerpt). */
698 static void verify_set_presence(ir_node *irn, void *data)
700 build_walker *w = (build_walker*)data;
701 ir_graph *irg = get_irn_irg(irn);
702 ir_edgeset_t *edges = &_get_irg_edge_info(irg, w->kind)->edges;
705 foreach_tgt(irn, i, n, w->kind) {
711 e = ir_edgeset_find(edges, &templ);
715 w->problem_found = 1;
/* Verifier walker: record irn as reachable, verify its out-list structure,
 * and check that every listed edge actually corresponds to a real input
 * (e->src's e->pos input is irn — the comparison line is elided here). */
720 static void verify_list_presence(ir_node *irn, void *data)
722 build_walker *w = (build_walker*)data;
725 bitset_set(w->reachable, get_irn_idx(irn));
727 /* check list heads */
728 verify_list_head(irn, w->kind);
730 foreach_out_edge_kind(irn, e, w->kind) {
/* An edge position beyond the source's arity is stale. */
733 if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
734 w->problem_found = 1;
738 tgt = get_n(e->src, e->pos, w->kind);
741 w->problem_found = 1;
/* Full verification of one edge kind: walk the graph checking set- and
 * list-presence, then report edges left unmarked (superfluous entries).
 * @return nonzero iff a problem was found. */
746 int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
748 struct build_walker w;
749 ir_edgeset_t *edges = &_get_irg_edge_info(irg, kind)->edges;
751 ir_edgeset_iterator_t iter;
754 w.reachable = bitset_alloca(get_irg_last_idx(irg));
757 /* Clear the present bit in all edges available. */
758 foreach_ir_edgeset(edges, e, iter) {
762 irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);
765 * Dump all edges which are not invalid and not present.
766 * These edges are superfluous and their presence in the
769 foreach_ir_edgeset(edges, e, iter) {
/* Only complain about edges whose source node is actually reachable. */
770 if (! e->invalid && ! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {
772 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d is superfluous\n", edge_get_id(e), e->src, e->pos);
776 return w.problem_found;
/* Nodes excluded from the edge-counter verification below. */
779 #define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))
782 * Clear link field of all nodes.
784 static void clear_links(ir_node *irn, void *env)
/* Ignored nodes get a NULL link; all others get a fresh bitset (indexed by
 * node idx) in which count_user() records their users. */
790 if (IGNORE_NODE(irn)) {
791 set_irn_link(irn, NULL);
795 irg = get_irn_irg(irn);
796 bs = bitset_malloc(get_irg_last_idx(irg));
797 set_irn_link(irn, bs);
801 * Increases count (stored in link field) for all operands of a node.
803 static void count_user(ir_node *irn, void *env)
/* Non-block nodes also have the -1 block input; record irn as a user in
 * each operand's link bitset (ignored operands carry a NULL bitset —
 * the NULL check line is elided in this excerpt). */
809 first = is_Block(irn) ? 0 : -1;
810 for (i = get_irn_arity(irn) - 1; i >= first; --i) {
811 ir_node *op = get_irn_n(irn, i);
812 bitset_t *bs = (bitset_t*)get_irn_link(op);
815 bitset_set(bs, get_irn_idx(irn));
820 * Verifies if collected count, number of edges in list and stored edge count are in sync.
822 static void verify_edge_counter(ir_node *irn, void *env)
824 build_walker *w = (build_walker*)env;
830 const struct list_head *head;
831 const struct list_head *pos;
834 if (IGNORE_NODE(irn))
/* bs was filled by count_user() via the link field. */
837 bs = (bitset_t*)get_irn_link(irn);
840 edge_cnt = _get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
841 head = _get_irn_outs_head(irn, EDGE_KIND_NORMAL);
843 /* We can iterate safely here, list heads have already been verified. */
/* NOTE(review): the list_cnt increment inside this loop is elided in this
 * excerpt. */
844 list_for_each(pos, head) {
848 /* check all nodes that reference us and count edges that point number
849 * of ins that actually point to us */
850 irg = get_irn_irg(irn);
852 bitset_foreach(bs, idx) {
854 ir_node *src = get_idx_irn(irg, idx);
856 arity = get_irn_arity(src);
857 for (i = 0; i < arity; ++i) {
858 ir_node *in = get_irn_n(src, i);
/* (the 'in == irn -> ++ref_cnt' lines are elided in this excerpt) */
864 if (edge_cnt != list_cnt) {
865 w->problem_found = 1;
866 ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
867 edge_cnt, list_cnt, irn);
870 if (ref_cnt != list_cnt) {
871 w->problem_found = 1;
872 ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but the list contains %d edge(s)\n",
873 irn, ref_cnt, list_cnt);
880 * Verifies the out edges of an irg.
882 int edges_verify(ir_graph *irg)
884 struct build_walker w;
885 int problem_found = 0;
887 /* verify normal edges only */
888 problem_found = edges_verify_kind(irg, EDGE_KIND_NORMAL);
890 w.kind = EDGE_KIND_NORMAL;
/* Cross-check: collect users via link-field bitsets, then compare the
 * stored out_count, the list length and the actual reference count. */
894 irg_walk_anchors(irg, clear_links, count_user, &w);
895 irg_walk_anchors(irg, NULL, verify_edge_counter, &w);
897 return problem_found ? 1 : w.problem_found;
900 typedef struct pass_t {
901 ir_graph_pass_t pass;
902 unsigned assert_on_problem;
906 * Wrapper to edges_verify to be run as an ir_graph pass.
908 static int edges_verify_wrapper(ir_graph *irg, void *context)
910 pass_t *pass = (pass_t*)context;
911 int problems_found = edges_verify(irg);
912 /* do NOT rerun the pass if verify is ok :-) */
913 assert(problems_found && pass->assert_on_problem);
917 /* Creates an ir_graph pass for edges_verify(). */
918 ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem)
920 pass_t *pass = XMALLOCZ(pass_t);
922 def_graph_pass_constructor(
923 &pass->pass, name ? name : "edges_verify", edges_verify_wrapper);
925 /* neither dump nor verify */
/* A verifier pass must not dump or re-verify itself: stub both hooks. */
926 pass->pass.dump_irg = (DUMP_ON_IRG_FUNC)ir_prog_no_dump;
927 pass->pass.verify_irg = (RUN_ON_IRG_FUNC)ir_prog_no_verify;
929 pass->assert_on_problem = assert_on_problem;
/* NOTE(review): the 'return &pass->pass;' line is elided in this excerpt. */
/* One-time module initialization: register the debug channel. */
933 void init_edges(void)
935 FIRM_DBG_REGISTER(dbg, DBG_EDGES);
/* Toggle per-change list-head verification (edges_dbg); body elided in
 * this excerpt. */
938 void edges_init_dbg(int do_dbg)
/* Activate the standard edge kinds; DEP edges only exist in the backend
 * phase. */
943 void edges_activate(ir_graph *irg)
945 edges_activate_kind(irg, EDGE_KIND_NORMAL);
946 edges_activate_kind(irg, EDGE_KIND_BLOCK);
947 if (get_irg_phase_state(irg) == phase_backend)
948 edges_activate_kind(irg, EDGE_KIND_DEP);
/* Deactivate in reverse order of activation. */
951 void edges_deactivate(ir_graph *irg)
953 if (get_irg_phase_state(irg) == phase_backend)
954 edges_deactivate_kind(irg, EDGE_KIND_DEP);
955 edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
956 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
/* Make sure BLOCK and NORMAL edges are active; presumably returns whether
 * they already were (the return/flag lines are elided in this excerpt). */
960 int edges_assure(ir_graph *irg)
963 if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
966 edges_activate_kind(irg, EDGE_KIND_BLOCK);
968 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
971 edges_activate_kind(irg, EDGE_KIND_NORMAL);
/* Single-kind variant: activate if needed, report prior state. */
977 int edges_assure_kind(ir_graph *irg, ir_edge_kind_t kind)
979 int activated = edges_activated_kind(irg, kind);
982 edges_activate_kind(irg, kind);
/* Public wrappers over the *_kind helpers for NORMAL and BLOCK edges. */
987 void edges_node_deleted(ir_node *irn)
989 edges_node_deleted_kind(irn, EDGE_KIND_NORMAL);
990 edges_node_deleted_kind(irn, EDGE_KIND_BLOCK);
993 void edges_node_revival(ir_node *irn)
995 edges_node_revival_kind(irn, EDGE_KIND_NORMAL);
996 edges_node_revival_kind(irn, EDGE_KIND_BLOCK);
/* Out-of-line versions of the inline/macro accessors, for callers that
 * need an addressable function. */
999 const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind)
1001 return _get_irn_out_edge_first_kind(irn, kind);
1004 const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last)
1006 return _get_irn_out_edge_next(irn, last);
1009 ir_node *(get_edge_src_irn)(const ir_edge_t *edge)
1011 return _get_edge_src_irn(edge);
1014 int (get_edge_src_pos)(const ir_edge_t *edge)
1016 return _get_edge_src_pos(edge);
1019 int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind)
1021 return _get_irn_n_edges_kind(irn, kind);
/* Recursive pre/post walk along NORMAL out-edges, guarded by the visited
 * flag.  NOTE(review): the pre/post callback invocation lines are elided
 * in this excerpt. */
1024 static void irg_walk_edges2(ir_node *node, irg_walk_func *pre,
1025 irg_walk_func *post, void *env)
1027 const ir_edge_t *edge, *next;
1029 if (irn_visited_else_mark(node))
1035 foreach_out_edge_kind_safe(node, edge, next, EDGE_KIND_NORMAL) {
1036 /* find the corresponding successor block. */
1037 ir_node *pred = get_edge_src_irn(edge);
1038 irg_walk_edges2(pred, pre, post, env);
/* Public entry: walk forward along out-edges starting at a block, managing
 * the IRN_VISITED resource around the recursion. */
1045 void irg_walk_edges(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
1048 ir_graph *irg = get_irn_irg(node);
1050 assert(edges_activated(irg));
1051 assert(is_Block(node));
1053 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1055 inc_irg_visited(irg);
1056 irg_walk_edges2(node, pre, post, env);
1058 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/* Recursive pre/post walk over blocks along BLOCK (CFG successor) edges,
 * guarded by the block-visited flag.  NOTE(review): the pre/post callback
 * invocation lines are elided in this excerpt. */
1061 static void irg_block_edges_walk2(ir_node *bl, irg_walk_func *pre,
1062 irg_walk_func *post, void *env)
1064 const ir_edge_t *edge, *next;
1066 if (!Block_block_visited(bl)) {
1067 mark_Block_block_visited(bl);
1072 foreach_out_edge_kind_safe(bl, edge, next, EDGE_KIND_BLOCK) {
1073 /* find the corresponding successor block. */
1074 ir_node *pred = get_edge_src_irn(edge);
1075 irg_block_edges_walk2(pred, pre, post, env);
/* Public entry: CFG-successor walk starting at a block, managing the
 * BLOCK_VISITED resource around the recursion. */
1083 void irg_block_edges_walk(ir_node *node, irg_walk_func *pre,
1084 irg_walk_func *post, void *env)
1086 ir_graph *irg = get_irn_irg(node);
1088 assert(edges_activated(irg));
1089 assert(is_Block(node));
1091 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
1093 inc_irg_block_visited(irg);
1094 irg_block_edges_walk2(node, pre, post, env);
1096 ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);