 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 * @brief   Always available outs.
 * @author  Sebastian Hack, Michael Beck, Andreas Schoesser
 *
 * These are out-edges (also called def-use edges) that are dynamically
 * updated as the graph changes.
34 #include "iredgekinds.h"
35 #include "iredges_t.h"
45 #include "iredgeset.h"
50 #define HashSet ir_edgeset_t
51 #define HashSetIterator ir_edgeset_iterator_t
52 #define ValueType ir_edge_t*
53 #define NullValue NULL
54 #define DeletedValue ((ir_edge_t*)-1)
55 #define Hash(this,key) (HASH_PTR(key->src) ^ (key->pos * 40013))
56 #define KeysEqual(this,key1,key2) ((key1->src) == (key2->src) && (key1->pos == key2->pos))
57 #define SetRangeEmpty(ptr,size) memset(ptr, 0, (size) * sizeof((ptr)[0]))
59 #define hashset_init ir_edgeset_init
60 void ir_edgeset_init_size(ir_edgeset_t *self, size_t size);
61 #define hashset_init_size ir_edgeset_init_size
62 #define hashset_destroy ir_edgeset_destroy
63 #define hashset_insert ir_edgeset_insert
64 #define hashset_remove ir_edgeset_remove
65 ir_edge_t *ir_edgeset_find(const ir_edgeset_t *self, const ir_edge_t*);
66 #define hashset_find ir_edgeset_find
67 size_t ir_edgeset_size(const ir_edgeset_t *self);
68 #define hashset_size ir_edgeset_size
69 #define hashset_iterator_init ir_edgeset_iterator_init
70 #define hashset_iterator_next ir_edgeset_iterator_next
71 #define hashset_remove_iterator ir_edgeset_remove_iterator
76 * A function that allows for setting an edge.
77 * This abstraction is necessary since different edge kind have
78 * different methods of setting edges.
80 typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);
83 * A function that returns the "arity" of a given edge kind
86 typedef int (get_edge_src_arity_func_t)(const ir_node *src);
89 * A function that returns the pos'th edge of a given edge kind for a node.
91 typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);
94 * Additional data for an edge kind.
97 const char *name; /**< name of this edge kind */
98 set_edge_func_t *set_edge; /**< the set_edge function */
99 int first_idx; /**< index of the first possible edge */
100 get_edge_src_arity_func_t *get_arity; /**< the get_arity function */
101 get_edge_src_n_func_t *get_n; /**< the get_n function */
102 } ir_edge_kind_info_t;
105 * Get the predecessor block.
107 static ir_node *get_block_n(const ir_node *block, int pos)
110 return get_Block_cfgpred_block(block, pos);
115 static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
116 { "normal" , set_irn_n, -1, get_irn_arity, get_irn_n },
117 { "block succs", NULL, 0, get_irn_arity, get_block_n },
118 { "dependency", set_irn_dep, 0, get_irn_deps, get_irn_dep }
121 #define foreach_tgt(irn, i, n, kind) for (i = edge_kind_info[kind].first_idx, n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
122 #define get_n(irn, pos, kind) (edge_kind_info[kind].get_n(irn, pos))
123 #define get_kind_str(kind) (edge_kind_info[kind].name)
125 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
128 * This flag is set to 1, if the edges get initialized for an irg.
129 * Then register additional data is forbidden.
131 static int edges_used = 0;
134 * Summed size of all users private data
137 static size_t edges_private_size = 0;
138 #define EDGE_SIZE (sizeof(ir_edge_t) + edges_private_size)
141 * If set to 1, the list heads are checked every time an edge is changed.
143 static int edges_dbg = 0;
146 * Returns an ID for the given edge.
148 static inline long edge_get_id(const ir_edge_t *e)
154 * Announce to reserve extra space for each edge to be allocated.
156 * @param n: Size of the space to reserve
158 * @return Offset at which the private data will begin
160 * Several users can reserve extra space for private usage.
161 * Each user has to remember his given offset and the size of his private data.
162 * To be called before FIRM is initialized.
164 size_t edges_register_private_data(size_t n)
166 size_t res = edges_private_size;
168 assert(!edges_used && "you cannot register private edge data, if edges have been initialized");
170 edges_private_size += n;
175 * Reset the user's private data at offset 'offset'
176 * The user has to remember his offset and the size of his data!
177 * Caution: Using wrong values here can destroy other users private data!
179 void edges_reset_private_data(ir_graph *irg, int offset, unsigned size)
181 irg_edge_info_t *info = _get_irg_edge_info(irg, EDGE_KIND_NORMAL);
183 ir_edgeset_iterator_t iter;
185 foreach_ir_edgeset(&info->edges, edge, iter) {
186 memset(edge + sizeof(*edge) + offset, 0, size);
190 #define TIMES37(x) (((x) << 5) + ((x) << 2) + (x))
192 #define get_irn_out_list_head(irn) (&get_irn_out_info(irn)->outs)
194 #define edge_hash(edge) (TIMES37((edge)->pos) + HASH_PTR((edge)->src))
197 * Initialize the out information for a graph.
198 * @note Dead node elimination can call this on an already initialized graph.
200 void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
202 if (edges_activated_kind(irg, kind)) {
203 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
204 size_t amount = irg->estimated_node_count * 2;
207 if (info->allocated) {
208 amount = ir_edgeset_size(&info->edges);
209 ir_edgeset_destroy(&info->edges);
210 obstack_free(&info->edges_obst, NULL);
212 obstack_init(&info->edges_obst);
213 INIT_LIST_HEAD(&info->free_edges);
214 ir_edgeset_init_size(&info->edges, amount);
220 * Get the edge object of an outgoing edge at a node.
221 * @param irg The graph, the node is in.
222 * @param src The node at which the edge originates.
223 * @param pos The position of the edge.
224 * @param kind The kind of the edge.
225 * @return The corresponding edge object or NULL,
226 * if no such edge exists.
228 const ir_edge_t *get_irn_edge_kind(const ir_node *src, int pos, ir_edge_kind_t kind)
230 ir_graph *irg = get_irn_irg(src);
231 if (edges_activated_kind(irg, kind)) {
232 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
235 key.src = (ir_node *)src;
238 return ir_edgeset_find(&info->edges, &key);
245 * Change the out count
247 * @param tgt the edge target
248 * @param kind the kind of the edge
250 static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs)
252 irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
253 info->out_count += ofs;
257 * Verify the edge list of a node, ie. ensure it's a loop:
258 * head -> e_1 -> ... -> e_n -> head
260 static inline void verify_list_head(ir_node *irn, ir_edge_kind_t kind)
264 pset *lh_set = pset_new_ptr(16);
265 const struct list_head *head = _get_irn_outs_head(irn, kind);
266 const struct list_head *pos;
268 list_for_each(pos, head) {
269 if (pset_find_ptr(lh_set, pos)) {
270 const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
272 ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
273 fprintf(stderr, "- at list entry %d\n", num);
275 fprintf(stderr, "- edge(%ld) is invalid\n", edge_get_id(edge));
277 ir_fprintf(stderr, "- edge(%ld) %+F(%d)\n", edge_get_id(edge), edge->src, edge->pos);
282 pset_insert_ptr(lh_set, pos);
290 void edges_dump_kind(ir_graph *irg, ir_edge_kind_t kind)
292 irg_edge_info_t *info;
294 ir_edgeset_iterator_t iter;
297 if (!edges_activated_kind(irg, kind))
300 info = _get_irg_edge_info(irg, kind);
301 edges = &info->edges;
302 foreach_ir_edgeset(edges, e, iter) {
303 ir_printf("%+F %d %d\n", e->src, e->pos, e->invalid);
307 /* The edge from (src, pos) -> old_tgt is redirected to tgt */
308 void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
309 ir_node *old_tgt, ir_edge_kind_t kind,
312 const char *msg = "";
313 irg_edge_info_t *info;
317 assert(edges_activated_kind(irg, kind));
320 * Only do something, if the old and new target differ.
325 info = _get_irg_edge_info(irg, kind);
326 edges = &info->edges;
328 /* Initialize the edge template to search in the set. */
333 * If the target is NULL, the edge shall be deleted.
336 /* search the edge in the set. */
337 ir_edge_t *edge = ir_edgeset_find(edges, &templ);
339 /* mark the edge invalid if it was found */
342 list_del(&edge->list);
343 ir_edgeset_remove(edges, edge);
344 list_add(&edge->list, &info->free_edges);
348 edge_change_cnt(old_tgt, kind, -1);
350 /* If the edge was not found issue a warning on the debug stream */
351 msg = "edge to delete not found!\n";
355 * The target is not NULL and the old target differs
356 * from the new target, the edge shall be moved (if the
357 * old target was != NULL) or added (if the old target was
360 struct list_head *head = _get_irn_outs_head(tgt, kind);
362 assert(head->next && head->prev &&
363 "target list head must have been initialized");
365 /* If the old target is not null, the edge is moved. */
367 ir_edge_t *edge = ir_edgeset_find(edges, &templ);
368 assert(edge && "edge to redirect not found!");
369 assert(! edge->invalid && "Invalid edge encountered");
373 list_move(&edge->list, head);
374 edge_change_cnt(old_tgt, kind, -1);
376 /* The old target was NULL, thus, the edge is newly created. */
380 if (list_empty(&info->free_edges)) {
381 edge = (ir_edge_t*)obstack_alloc(&info->edges_obst, EDGE_SIZE);
383 edge = list_entry(info->free_edges.next, ir_edge_t, list);
384 list_del(&edge->list);
392 edge->list.next = NULL;
393 edge->list.prev = NULL;
394 memset(edge + 1, 0, edges_private_size);
396 new_edge = ir_edgeset_insert(edges, edge);
397 if (new_edge != edge) {
398 panic("new edge exists already");
402 list_add(&edge->list, head);
405 edge_change_cnt(tgt, kind, +1);
408 #ifndef DEBUG_libfirm
409 /* verify list heads */
412 verify_list_head(tgt, kind);
414 verify_list_head(old_tgt, kind);
418 DBG((dbg, LEVEL_5, "announce out edge: %+F %d-> %+F(%+F): %s\n", src, pos, tgt, old_tgt, msg));
421 void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt,
424 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
425 edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);
428 if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
430 ir_node *bl_old = old_tgt ? get_nodes_block(old_tgt) : NULL;
431 ir_node *bl_tgt = NULL;
434 bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(tgt);
436 edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
437 } else if (get_irn_mode(src) == mode_X && old_tgt != NULL && is_Block(old_tgt)) {
438 /* moving a jump node from one block to another */
439 const ir_edge_t *edge;
440 const ir_edge_t *next;
441 foreach_out_edge_kind_safe(old_tgt, edge, next, EDGE_KIND_BLOCK) {
442 ir_node *succ = get_edge_src_irn(edge);
443 int succ_pos = get_edge_src_pos(edge);
444 ir_node *block_pred = get_Block_cfgpred(succ, succ_pos);
445 if (block_pred != src)
447 edges_notify_edge_kind(succ, succ_pos, tgt, old_tgt,
448 EDGE_KIND_BLOCK, irg);
455 * Delete all in edges of a given kind from the node old.
457 * @param old the node
458 * @param kind the kind of edges to remove
459 * @param irg the irg of the old node
461 static void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind)
464 ir_graph *irg = get_irn_irg(old);
466 if (!edges_activated_kind(irg, kind))
469 DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));
471 foreach_tgt(old, i, n, kind) {
472 ir_node *old_tgt = get_n(old, i, kind);
473 edges_notify_edge_kind(old, i, NULL, old_tgt, kind, irg);
478 * A node might be revivaled by CSE. Assure its edges.
480 * @param irn the node
481 * @param kind the kind of edges to remove
482 * @param irg the irg of the old node
484 static void edges_node_revival_kind(ir_node *irn, ir_edge_kind_t kind)
486 irn_edge_info_t *info;
488 ir_graph *irg = get_irn_irg(irn);
490 if (!edges_activated_kind(irg, kind))
493 info = _get_irn_edge_info(irn, kind);
494 if (info->edges_built)
497 DBG((dbg, LEVEL_5, "node revivaled (kind: %s): %+F\n", get_kind_str(kind), irn));
499 foreach_tgt(irn, i, n, kind) {
500 ir_node *tgt = get_n(irn, i, kind);
501 edges_notify_edge_kind(irn, i, tgt, NULL, kind, irg);
503 info->edges_built = 1;
506 typedef struct build_walker {
509 unsigned problem_found;
513 * Post-Walker: notify all edges
515 static void build_edges_walker(ir_node *irn, void *data)
517 build_walker *w = (build_walker*)data;
519 ir_edge_kind_t kind = w->kind;
520 ir_graph *irg = get_irn_irg(irn);
522 foreach_tgt(irn, i, n, kind) {
523 ir_node *pred = get_n(irn, i, kind);
524 edges_notify_edge_kind(irn, i, pred, NULL, kind, irg);
526 _get_irn_edge_info(irn, kind)->edges_built = 1;
530 * Pre-Walker: initializes the list-heads and set the out-count
533 static void init_lh_walker(ir_node *irn, void *data)
535 build_walker *w = (build_walker*)data;
536 ir_edge_kind_t kind = w->kind;
537 list_head *head = _get_irn_outs_head(irn, kind);
538 INIT_LIST_HEAD(head);
539 _get_irn_edge_info(irn, kind)->edges_built = 0;
540 _get_irn_edge_info(irn, kind)->out_count = 0;
544 * Pre-Walker: initializes the list-heads and set the out-count
547 * Additionally touches DEP nodes, as they might be DEAD.
548 * THIS IS UGLY, but I don't find a better way until we
550 * a) ensure that dead nodes are not used as input
551 * b) it might be sufficient to add those stupid NO_REG nodes
554 static void init_lh_walker_dep(ir_node *irn, void *data)
556 build_walker *w = (build_walker*)data;
557 ir_edge_kind_t kind = w->kind;
558 list_head *head = _get_irn_outs_head(irn, kind);
561 INIT_LIST_HEAD(head);
562 _get_irn_edge_info(irn, kind)->edges_built = 0;
563 _get_irn_edge_info(irn, kind)->out_count = 0;
565 for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
566 ir_node *dep = get_irn_dep(irn, i);
568 head = _get_irn_outs_head(dep, kind);
570 INIT_LIST_HEAD(head);
571 _get_irn_edge_info(dep, kind)->edges_built = 0;
572 _get_irn_edge_info(dep, kind)->out_count = 0;
576 typedef struct visitor_info_t {
577 irg_walk_func *visit;
582 * Visitor: initializes the list-heads and set the out-count
583 * of all nodes to 0 of nodes that are not seen so far.
585 static void visitor(ir_node *irn, void *data)
587 visitor_info_t *info = (visitor_info_t*)data;
591 if (!is_Block(irn) && is_Deleted(get_nodes_block(irn)))
594 if (!irn_visited_else_mark(irn)) {
595 info->visit(irn, info->data);
600 * Build the initial edge set.
601 * Beware, this is not a simple task because it suffers from two
603 * - the anchor set allows access to Nodes that may not be reachable from
605 * - the identities add nodes to the "root set" that are not yet reachable
606 * from End. However, after some transformations, the CSE may revival these
609 * These problems can be fixed using different strategies:
610 * - Add an age flag to every node. Whenever the edge of a node is older
611 * then the current edge, invalidate the edges of this node.
612 * While this would help for revivaled nodes, it increases memory and runtime.
613 * - Delete the identities set.
614 * Solves the revival problem, but may increase the memory consumption, as
615 * nodes cannot be revivaled at all.
616 * - Manually iterate over the identities root set. This did not consume more memory
617 * but increase the computation time because the |identities| >= |V|
619 * Currently, we use the last option.
621 void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
623 struct build_walker w;
624 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
625 visitor_info_t visit;
631 assert(!info->activated);
634 edges_init_graph_kind(irg, kind);
635 if (kind == EDGE_KIND_DEP) {
636 irg_walk_anchors(irg, init_lh_walker_dep, NULL, &w);
637 /* Argh: Dep nodes might be dead, so we MUST visit identities first */
638 visit.visit = init_lh_walker_dep;
639 visit_all_identities(irg, visitor, &visit);
640 irg_walk_anchors(irg, NULL, build_edges_walker, &w);
642 irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
643 visit.visit = init_lh_walker;
644 visit_all_identities(irg, visitor, &visit);
648 void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
650 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
653 if (info->allocated) {
654 obstack_free(&info->edges_obst, NULL);
655 ir_edgeset_destroy(&info->edges);
660 int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
662 return _edges_activated_kind(irg, kind);
667 * Reroute all use-edges from a node to another.
668 * @param from The node whose use-edges shall be withdrawn.
669 * @param to The node to which all the use-edges of @p from shall be
671 * @param irg The graph.
673 void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind)
675 ir_graph *irg = get_irn_irg(from);
676 set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;
678 if (set_edge && edges_activated_kind(irg, kind)) {
679 struct list_head *head = _get_irn_outs_head(from, kind);
681 DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));
683 while (head != head->next) {
684 ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
685 assert(edge->pos >= -1);
686 set_edge(edge->src, edge->pos, to);
691 static void verify_set_presence(ir_node *irn, void *data)
693 build_walker *w = (build_walker*)data;
694 ir_graph *irg = get_irn_irg(irn);
695 ir_edgeset_t *edges = &_get_irg_edge_info(irg, w->kind)->edges;
698 foreach_tgt(irn, i, n, w->kind) {
704 e = ir_edgeset_find(edges, &templ);
708 w->problem_found = 1;
713 static void verify_list_presence(ir_node *irn, void *data)
715 build_walker *w = (build_walker*)data;
718 bitset_set(w->reachable, get_irn_idx(irn));
720 /* check list heads */
721 verify_list_head(irn, w->kind);
723 foreach_out_edge_kind(irn, e, w->kind) {
726 if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
727 w->problem_found = 1;
731 tgt = get_n(e->src, e->pos, w->kind);
734 w->problem_found = 1;
739 int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
741 struct build_walker w;
742 ir_edgeset_t *edges = &_get_irg_edge_info(irg, kind)->edges;
744 ir_edgeset_iterator_t iter;
747 w.reachable = bitset_alloca(get_irg_last_idx(irg));
750 /* Clear the present bit in all edges available. */
751 foreach_ir_edgeset(edges, e, iter) {
755 irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);
758 * Dump all edges which are not invalid and not present.
759 * These edges are superfluous and their presence in the
762 foreach_ir_edgeset(edges, e, iter) {
763 if (! e->invalid && ! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {
765 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d is superfluous\n", edge_get_id(e), e->src, e->pos);
769 return w.problem_found;
772 #define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))
775 * Clear link field of all nodes.
777 static void clear_links(ir_node *irn, void *env)
783 if (IGNORE_NODE(irn)) {
784 set_irn_link(irn, NULL);
788 irg = get_irn_irg(irn);
789 bs = bitset_malloc(get_irg_last_idx(irg));
790 set_irn_link(irn, bs);
794 * Increases count (stored in link field) for all operands of a node.
796 static void count_user(ir_node *irn, void *env)
802 first = is_Block(irn) ? 0 : -1;
803 for (i = get_irn_arity(irn) - 1; i >= first; --i) {
804 ir_node *op = get_irn_n(irn, i);
805 bitset_t *bs = (bitset_t*)get_irn_link(op);
808 bitset_set(bs, get_irn_idx(irn));
813 * Verifies if collected count, number of edges in list and stored edge count are in sync.
815 static void verify_edge_counter(ir_node *irn, void *env)
817 build_walker *w = (build_walker*)env;
823 const struct list_head *head;
824 const struct list_head *pos;
827 if (IGNORE_NODE(irn))
830 bs = (bitset_t*)get_irn_link(irn);
833 edge_cnt = _get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
834 head = _get_irn_outs_head(irn, EDGE_KIND_NORMAL);
836 /* We can iterate safely here, list heads have already been verified. */
837 list_for_each(pos, head) {
841 /* check all nodes that reference us and count edges that point number
842 * of ins that actually point to us */
843 irg = get_irn_irg(irn);
845 bitset_foreach(bs, idx) {
847 ir_node *src = get_idx_irn(irg, idx);
849 arity = get_irn_arity(src);
850 for (i = 0; i < arity; ++i) {
851 ir_node *in = get_irn_n(src, i);
857 if (edge_cnt != list_cnt) {
858 w->problem_found = 1;
859 ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
860 edge_cnt, list_cnt, irn);
863 if (ref_cnt != list_cnt) {
864 w->problem_found = 1;
865 ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but the list contains %d edge(s)\n",
866 irn, ref_cnt, list_cnt);
873 * Verifies the out edges of an irg.
875 int edges_verify(ir_graph *irg)
877 struct build_walker w;
878 int problem_found = 0;
880 /* verify normal edges only */
881 problem_found = edges_verify_kind(irg, EDGE_KIND_NORMAL);
883 w.kind = EDGE_KIND_NORMAL;
887 irg_walk_anchors(irg, clear_links, count_user, &w);
888 irg_walk_anchors(irg, NULL, verify_edge_counter, &w);
890 return problem_found ? 1 : w.problem_found;
893 typedef struct pass_t {
894 ir_graph_pass_t pass;
895 unsigned assert_on_problem;
899 * Wrapper to edges_verify to be run as an ir_graph pass.
901 static int edges_verify_wrapper(ir_graph *irg, void *context)
903 pass_t *pass = (pass_t*)context;
904 int problems_found = edges_verify(irg);
905 /* do NOT rerun the pass if verify is ok :-) */
906 assert(problems_found && pass->assert_on_problem);
910 /* Creates an ir_graph pass for edges_verify(). */
911 ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem)
913 pass_t *pass = XMALLOCZ(pass_t);
915 def_graph_pass_constructor(
916 &pass->pass, name ? name : "edges_verify", edges_verify_wrapper);
918 /* neither dump nor verify */
919 pass->pass.dump_irg = (DUMP_ON_IRG_FUNC)ir_prog_no_dump;
920 pass->pass.verify_irg = (RUN_ON_IRG_FUNC)ir_prog_no_verify;
922 pass->assert_on_problem = assert_on_problem;
926 void init_edges(void)
928 FIRM_DBG_REGISTER(dbg, DBG_EDGES);
931 void edges_init_dbg(int do_dbg)
936 void edges_activate(ir_graph *irg)
938 edges_activate_kind(irg, EDGE_KIND_NORMAL);
939 edges_activate_kind(irg, EDGE_KIND_BLOCK);
940 if (get_irg_phase_state(irg) == phase_backend)
941 edges_activate_kind(irg, EDGE_KIND_DEP);
944 void edges_deactivate(ir_graph *irg)
946 if (get_irg_phase_state(irg) == phase_backend)
947 edges_deactivate_kind(irg, EDGE_KIND_DEP);
948 edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
949 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
952 int edges_assure(ir_graph *irg)
956 if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
959 edges_activate_kind(irg, EDGE_KIND_BLOCK);
961 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
964 edges_activate_kind(irg, EDGE_KIND_NORMAL);
970 int edges_assure_kind(ir_graph *irg, ir_edge_kind_t kind)
972 int activated = edges_activated_kind(irg, kind);
975 edges_activate_kind(irg, kind);
980 void edges_node_deleted(ir_node *irn)
982 edges_node_deleted_kind(irn, EDGE_KIND_NORMAL);
983 edges_node_deleted_kind(irn, EDGE_KIND_BLOCK);
986 void edges_node_revival(ir_node *irn)
988 edges_node_revival_kind(irn, EDGE_KIND_NORMAL);
989 edges_node_revival_kind(irn, EDGE_KIND_BLOCK);
992 const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind)
994 return _get_irn_out_edge_first_kind(irn, kind);
997 const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last)
999 return _get_irn_out_edge_next(irn, last);
1002 ir_node *(get_edge_src_irn)(const ir_edge_t *edge)
1004 return _get_edge_src_irn(edge);
1007 int (get_edge_src_pos)(const ir_edge_t *edge)
1009 return _get_edge_src_pos(edge);
1012 int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind)
1014 return _get_irn_n_edges_kind(irn, kind);
1017 static void irg_walk_edges2(ir_node *node, irg_walk_func *pre,
1018 irg_walk_func *post, void *env)
1020 const ir_edge_t *edge, *next;
1022 if (irn_visited_else_mark(node))
1028 foreach_out_edge_kind_safe(node, edge, next, EDGE_KIND_NORMAL) {
1029 /* find the corresponding successor block. */
1030 ir_node *pred = get_edge_src_irn(edge);
1031 irg_walk_edges2(pred, pre, post, env);
1038 void irg_walk_edges(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
1041 ir_graph *irg = get_irn_irg(node);
1043 assert(edges_activated(irg));
1044 assert(is_Block(node));
1046 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1048 inc_irg_visited(irg);
1049 irg_walk_edges2(node, pre, post, env);
1051 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1054 static void irg_block_edges_walk2(ir_node *bl, irg_walk_func *pre,
1055 irg_walk_func *post, void *env)
1057 const ir_edge_t *edge, *next;
1059 if (!Block_block_visited(bl)) {
1060 mark_Block_block_visited(bl);
1065 foreach_out_edge_kind_safe(bl, edge, next, EDGE_KIND_BLOCK) {
1066 /* find the corresponding successor block. */
1067 ir_node *pred = get_edge_src_irn(edge);
1068 irg_block_edges_walk2(pred, pre, post, env);
1076 void irg_block_edges_walk(ir_node *node, irg_walk_func *pre,
1077 irg_walk_func *post, void *env)
1079 ir_graph *irg = get_irn_irg(node);
1081 assert(edges_activated(irg));
1082 assert(is_Block(node));
1084 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
1086 inc_irg_block_visited(irg);
1087 irg_block_edges_walk2(node, pre, post, env);
1089 ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);