 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Always available outs.
23 * @author Sebastian Hack, Michael Beck, Andreas Schoesser
 * These are out-edges (also called def-use edges) that are dynamically
 * updated as the graph changes.
34 #include "iredgekinds.h"
35 #include "iredges_t.h"
45 #include "iredgeset.h"
/*
 * Instantiate the generic hashset template as ir_edgeset_t: a set of
 * ir_edge_t* keyed on the (src, pos) pair — an out edge is uniquely
 * identified by its source node and the input position at that node.
 */
#define HashSet ir_edgeset_t
#define HashSetIterator ir_edgeset_iterator_t
#define ValueType ir_edge_t*
#define NullValue NULL
#define DeletedValue ((ir_edge_t*)-1)
/* 40013 is a prime used to mix the input position into the pointer hash */
#define Hash(this,key) (HASH_PTR(key->src) ^ (key->pos * 40013))
#define KeysEqual(this,key1,key2) ((key1->src) == (key2->src) && (key1->pos == key2->pos))
#define SetRangeEmpty(ptr,size) memset(ptr, 0, (size) * sizeof((ptr)[0]))

/* map the template's generic names onto the ir_edgeset_* API */
#define hashset_init ir_edgeset_init
void ir_edgeset_init_size(ir_edgeset_t *self, size_t size);
#define hashset_init_size ir_edgeset_init_size
#define hashset_destroy ir_edgeset_destroy
#define hashset_insert ir_edgeset_insert
#define hashset_remove ir_edgeset_remove
ir_edge_t *ir_edgeset_find(const ir_edgeset_t *self, const ir_edge_t*);
#define hashset_find ir_edgeset_find
size_t ir_edgeset_size(const ir_edgeset_t *self);
#define hashset_size ir_edgeset_size
#define hashset_iterator_init ir_edgeset_iterator_init
#define hashset_iterator_next ir_edgeset_iterator_next
#define hashset_remove_iterator ir_edgeset_remove_iterator
/**
 * A function that allows for setting an edge.
 * This abstraction is necessary since different edge kinds have
 * different methods of setting edges.
 */
typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);

/**
 * A function that returns the "arity" of a given edge kind,
 * i.e. the number of possible out-going targets of that kind.
 */
typedef int (get_edge_src_arity_func_t)(const ir_node *src);

/**
 * A function that returns the pos'th edge of a given edge kind for a node.
 */
typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);

/**
 * Additional data for an edge kind.
 * NOTE(review): the opening 'typedef struct {' line of this record is
 * not visible in this view of the file.
 */
	const char *name; /**< name of this edge kind */
	set_edge_func_t *set_edge; /**< the set_edge function */
	int first_idx; /**< index of the first possible edge */
	get_edge_src_arity_func_t *get_arity; /**< the get_arity function */
	get_edge_src_n_func_t *get_n; /**< the get_n function */
} ir_edge_kind_info_t;

/**
 * Get the predecessor block.
 */
static ir_node *get_block_n(const ir_node *block, int pos)
	/* cfg preds of a Block are control-flow nodes; map to their block */
	return get_Block_cfgpred_block(block, pos);

/* Dispatch table for the edge kinds, indexed by ir_edge_kind_t. */
static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
	{ "normal" , set_irn_n, -1, get_irn_arity, get_irn_n },
	{ "block succs", NULL, 0, get_irn_arity, get_block_n },
	{ "dependency", set_irn_dep, 0, get_irn_deps, get_irn_dep }

/* Iterate over all (kind-specific) targets i in [first_idx, arity) of irn. */
#define foreach_tgt(irn, i, n, kind) for (i = edge_kind_info[kind].first_idx, n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
#define get_n(irn, pos, kind) (edge_kind_info[kind].get_n(irn, pos))
#define get_kind_str(kind) (edge_kind_info[kind].name)
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

/**
 * This flag is set to 1, if the edges get initialized for an irg.
 * Then registering additional private data is forbidden.
 */
static int edges_used = 0;

/**
 * Summed size of all users' private data.
 */
static size_t edges_private_size = 0;
/* Full allocation size of one edge record: public part + private data. */
#define EDGE_SIZE (sizeof(ir_edge_t) + edges_private_size)

/**
 * If set to 1, the list heads are checked every time an edge is changed.
 */
static int edges_dbg = 0;

/**
 * Returns an ID for the given edge.
 * NOTE(review): the function body is not visible in this view.
 */
static inline long edge_get_id(const ir_edge_t *e)
154 * Announce to reserve extra space for each edge to be allocated.
156 * @param n: Size of the space to reserve
158 * @return Offset at which the private data will begin
160 * Several users can reserve extra space for private usage.
161 * Each user has to remember his given offset and the size of his private data.
162 * To be called before FIRM is initialized.
164 size_t edges_register_private_data(size_t n)
166 size_t res = edges_private_size;
168 assert(!edges_used && "you cannot register private edge data, if edges have been initialized");
170 edges_private_size += n;
175 * Reset the user's private data at offset 'offset'
176 * The user has to remember his offset and the size of his data!
177 * Caution: Using wrong values here can destroy other users private data!
179 void edges_reset_private_data(ir_graph *irg, int offset, unsigned size)
181 irg_edge_info_t *info = _get_irg_edge_info(irg, EDGE_KIND_NORMAL);
183 ir_edgeset_iterator_t iter;
185 foreach_ir_edgeset(&info->edges, edge, iter) {
186 memset(edge + sizeof(*edge) + offset, 0, size);
/* cheap x*37 via shifts */
#define TIMES37(x) (((x) << 5) + ((x) << 2) + (x))

#define get_irn_out_list_head(irn) (&get_irn_out_info(irn)->outs)

#define edge_hash(edge) (TIMES37((edge)->pos) + HASH_PTR((edge)->src))

/**
 * Initialize the out information for a graph.
 * @note Dead node elimination can call this on an already initialized graph.
 */
void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
	if (edges_activated_kind(irg, kind)) {
		irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
		size_t amount = irg->estimated_node_count * 2;

		/* On re-initialization: use the observed edge count as size hint
		 * and release the previously allocated memory first. */
		if (info->allocated) {
			amount = ir_edgeset_size(&info->edges);
			ir_edgeset_destroy(&info->edges);
			obstack_free(&info->edges_obst, NULL);
		obstack_init(&info->edges_obst);
		INIT_LIST_HEAD(&info->free_edges);
		ir_edgeset_init_size(&info->edges, amount);

/**
 * Get the edge object of an outgoing edge at a node.
 * @param irg  The graph, the node is in.
 * @param src  The node at which the edge originates.
 * @param pos  The position of the edge.
 * @param kind The kind of the edge.
 * @return The corresponding edge object or NULL,
 *         if no such edge exists.
 */
const ir_edge_t *get_irn_edge_kind(const ir_node *src, int pos, ir_edge_kind_t kind)
	ir_graph *irg = get_irn_irg(src);
	if (edges_activated_kind(irg, kind)) {
		irg_edge_info_t *info = _get_irg_edge_info(irg, kind);

		/* look the edge up in the set by its (src, pos) key */
		key.src = (ir_node *)src;

		return ir_edgeset_find(&info->edges, &key);

/**
 * Change the out count of a node.
 *
 * @param tgt  the edge target
 * @param kind the kind of the edge
 * @param ofs  delta to apply (+1 on add, -1 on remove)
 */
static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs)
	irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
	info->out_count += ofs;

/**
 * Verify the edge list of a node, ie. ensure it's a loop:
 * head -> e_1 -> ... -> e_n -> head
 */
static inline void verify_list_head(ir_node *irn, ir_edge_kind_t kind)
	pset *lh_set = pset_new_ptr(16);
	const struct list_head *head = _get_irn_outs_head(irn, kind);
	const struct list_head *pos;

	/* seeing any entry twice means the list loops somewhere other than
	 * back to the head */
	list_for_each(pos, head) {
		if (pset_find_ptr(lh_set, pos)) {
			const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);

			ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
			fprintf(stderr, "- at list entry %d\n", num);

			fprintf(stderr, "- edge(%ld) is invalid\n", edge_get_id(edge));

			ir_fprintf(stderr, "- edge(%ld) %+F(%d)\n", edge_get_id(edge), edge->src, edge->pos);

		pset_insert_ptr(lh_set, pos);

/* Dump all edges of the given kind (debugging aid). */
void edges_dump_kind(ir_graph *irg, ir_edge_kind_t kind)
	irg_edge_info_t *info;
	ir_edgeset_iterator_t iter;

	if (!edges_activated_kind(irg, kind))

	info = _get_irg_edge_info(irg, kind);
	edges = &info->edges;
	foreach_ir_edgeset(edges, e, iter) {
		ir_printf("%+F %d %d\n", e->src, e->pos, e->invalid);
307 /* The edge from (src, pos) -> old_tgt is redirected to tgt */
308 void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
309 ir_node *old_tgt, ir_edge_kind_t kind,
312 const char *msg = "";
313 irg_edge_info_t *info;
318 assert(edges_activated_kind(irg, kind));
321 * Only do something, if the old and new target differ.
326 info = _get_irg_edge_info(irg, kind);
327 edges = &info->edges;
329 /* Initialize the edge template to search in the set. */
334 * If the target is NULL, the edge shall be deleted.
337 /* search the edge in the set. */
338 edge = ir_edgeset_find(edges, &templ);
340 /* mark the edge invalid if it was found */
343 list_del(&edge->list);
344 ir_edgeset_remove(edges, edge);
345 list_add(&edge->list, &info->free_edges);
349 edge_change_cnt(old_tgt, kind, -1);
351 /* If the edge was not found issue a warning on the debug stream */
352 msg = "edge to delete not found!\n";
356 * The target is not NULL and the old target differs
357 * from the new target, the edge shall be moved (if the
358 * old target was != NULL) or added (if the old target was
361 struct list_head *head = _get_irn_outs_head(tgt, kind);
363 assert(head->next && head->prev &&
364 "target list head must have been initialized");
366 /* If the old target is not null, the edge is moved. */
368 edge = ir_edgeset_find(edges, &templ);
369 assert(edge && "edge to redirect not found!");
370 assert(! edge->invalid && "Invalid edge encountered");
374 list_move(&edge->list, head);
375 edge_change_cnt(old_tgt, kind, -1);
377 /* The old target was NULL, thus, the edge is newly created. */
381 if (list_empty(&info->free_edges)) {
382 edge = (ir_edge_t*)obstack_alloc(&info->edges_obst, EDGE_SIZE);
384 edge = list_entry(info->free_edges.next, ir_edge_t, list);
385 list_del(&edge->list);
393 edge->list.next = NULL;
394 edge->list.prev = NULL;
395 memset(edge + 1, 0, edges_private_size);
397 new_edge = ir_edgeset_insert(edges, edge);
398 if (new_edge != edge) {
399 panic("new edge exists already");
403 list_add(&edge->list, head);
406 edge_change_cnt(tgt, kind, +1);
409 #ifndef DEBUG_libfirm
410 /* verify list heads */
413 verify_list_head(tgt, kind);
415 verify_list_head(old_tgt, kind);
419 DBG((dbg, LEVEL_5, "announce out edge: %+F %d-> %+F(%+F): %s\n", src, pos, tgt, old_tgt, msg));
/* Public notification hook: forward the change to every active edge kind. */
void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt,
	if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
		edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);

	if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
		/* translate the node-level change into a block-level change */
		ir_node *bl_old = old_tgt ? get_nodes_block(old_tgt) : NULL;
		ir_node *bl_tgt = NULL;

			bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(tgt);

		edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
	} else if (get_irn_mode(src) == mode_X && old_tgt != NULL && is_Block(old_tgt)) {
		/* moving a jump node from one block to another */
		const ir_edge_t *edge;
		const ir_edge_t *next;
		foreach_out_edge_kind_safe(old_tgt, edge, next, EDGE_KIND_BLOCK) {
			ir_node *succ = get_edge_src_irn(edge);
			int pos = get_edge_src_pos(edge);
			ir_node *block_pred = get_Block_cfgpred(succ, pos);
			/* only retarget block edges that originate at src */
			if (block_pred != src)
			edges_notify_edge_kind(succ, pos, tgt, old_tgt,
			                       EDGE_KIND_BLOCK, irg);

/**
 * Delete all in edges of a given kind from the node old.
 *
 * @param old  the node
 * @param kind the kind of edges to remove
 * @param irg  the irg of the old node
 */
static void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind)
	ir_graph *irg = get_irn_irg(old);

	if (!edges_activated_kind(irg, kind))

	DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));

	/* announce deletion (tgt == NULL) for every edge starting at 'old' */
	foreach_tgt(old, i, n, kind) {
		ir_node *old_tgt = get_n(old, i, kind);
		edges_notify_edge_kind(old, i, NULL, old_tgt, kind, irg);

/**
 * A node might be revived by CSE. Assure its edges.
 *
 * @param irn  the node
 * @param kind the kind of edges to rebuild
 * @param irg  the irg of the node
 */
static void edges_node_revival_kind(ir_node *irn, ir_edge_kind_t kind)
	irn_edge_info_t *info;
	ir_graph *irg = get_irn_irg(irn);

	if (!edges_activated_kind(irg, kind))

	/* nothing to do if the edges of this node are already built */
	info = _get_irn_edge_info(irn, kind);
	if (info->edges_built)

	DBG((dbg, LEVEL_5, "node revivaled (kind: %s): %+F\n", get_kind_str(kind), irn));

	foreach_tgt(irn, i, n, kind) {
		ir_node *tgt = get_n(irn, i, kind);
		edges_notify_edge_kind(irn, i, tgt, NULL, kind, irg);
	info->edges_built = 1;
/** Environment for the edge-building and verification walkers. */
typedef struct build_walker {
	unsigned problem_found; /* set to 1 by verifiers on any inconsistency */

/**
 * Post-Walker: notify all edges of a node.
 */
static void build_edges_walker(ir_node *irn, void *data)
	build_walker *w = (build_walker*)data;
	ir_edge_kind_t kind = w->kind;
	ir_graph *irg = get_irn_irg(irn);
	get_edge_src_n_func_t *get_n;

	get_n = edge_kind_info[kind].get_n;
	foreach_tgt(irn, i, n, kind) {
		ir_node *pred = get_n(irn, i, kind);
		/* old_tgt == NULL -> edge is created */
		edges_notify_edge_kind(irn, i, pred, NULL, kind, irg);
	_get_irn_edge_info(irn, kind)->edges_built = 1;

/**
 * Pre-Walker: initializes the list-heads and sets the out-count
 * of all nodes to 0.
 */
static void init_lh_walker(ir_node *irn, void *data)
	build_walker *w = (build_walker*)data;
	ir_edge_kind_t kind = w->kind;
	list_head *head = _get_irn_outs_head(irn, kind);
	INIT_LIST_HEAD(head);
	_get_irn_edge_info(irn, kind)->edges_built = 0;
	_get_irn_edge_info(irn, kind)->out_count = 0;

/**
 * Pre-Walker: initializes the list-heads and sets the out-count
 * of all nodes to 0.
 *
 * Additionally touches DEP nodes, as they might be DEAD.
 * THIS IS UGLY, but I don't find a better way until we
 * a) ensure that dead nodes are not used as input
 * b) it might be sufficient to add those stupid NO_REG nodes
 */
static void init_lh_walker_dep(ir_node *irn, void *data)
	build_walker *w = (build_walker*)data;
	ir_edge_kind_t kind = w->kind;
	list_head *head = _get_irn_outs_head(irn, kind);

	INIT_LIST_HEAD(head);
	_get_irn_edge_info(irn, kind)->edges_built = 0;
	_get_irn_edge_info(irn, kind)->out_count = 0;

	/* also reset the out info of all dependency targets: they may be dead
	 * and therefore never reached by the regular walk */
	for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
		ir_node *dep = get_irn_dep(irn, i);

		head = _get_irn_outs_head(dep, kind);

		INIT_LIST_HEAD(head);
		_get_irn_edge_info(dep, kind)->edges_built = 0;
		_get_irn_edge_info(dep, kind)->out_count = 0;

/** Pairs a visit callback with its user data for visit_all_identities(). */
typedef struct visitor_info_t {
	irg_walk_func *visit;

/**
 * Visitor: calls the stored callback for nodes
 * that have not been seen so far.
 */
static void visitor(ir_node *irn, void *data)
	visitor_info_t *info = (visitor_info_t*)data;

	if (!irn_visited_else_mark(irn)) {
		info->visit(irn, info->data);
/**
 * Build the initial edge set.
 * Beware, this is not a simple task because it suffers from two
 * difficulties:
 * - the anchor set allows access to Nodes that may not be reachable from
 *   the End node
 * - the identities add nodes to the "root set" that are not yet reachable
 *   from End. However, after some transformations, the CSE may revive these
 *   nodes
 *
 * These problems can be fixed using different strategies:
 * - Add an age flag to every node. Whenever the edge of a node is older
 *   than the current edge, invalidate the edges of this node.
 *   While this would help for revived nodes, it increases memory and runtime.
 * - Delete the identities set.
 *   Solves the revival problem, but may increase the memory consumption, as
 *   nodes cannot be revived at all.
 * - Manually iterate over the identities root set. This does not consume
 *   more memory but increases the computation time because |identities| >= |V|.
 *
 * Currently, we use the last option.
 */
void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
	struct build_walker w;
	irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
	visitor_info_t visit;

	assert(!info->activated);

	edges_init_graph_kind(irg, kind);
	if (kind == EDGE_KIND_DEP) {
		irg_walk_anchors(irg, init_lh_walker_dep, NULL, &w);
		/* Argh: Dep nodes might be dead, so we MUST visit identities first */
		visit.visit = init_lh_walker_dep;
		visit_all_identities(irg, visitor, &visit);
		irg_walk_anchors(irg, NULL, build_edges_walker, &w);
		irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
		visit.visit = init_lh_walker;
		visit_all_identities(irg, visitor, &visit);
/* Deactivate edges of the given kind and release all associated memory. */
void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
	irg_edge_info_t *info = _get_irg_edge_info(irg, kind);

	if (info->allocated) {
		obstack_free(&info->edges_obst, NULL);
		ir_edgeset_destroy(&info->edges);

/* Out-of-line instance of the _edges_activated_kind() inline. */
int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
	return _edges_activated_kind(irg, kind);

/**
 * Reroute all use-edges from a node to another.
 * @param from The node whose use-edges shall be withdrawn.
 * @param to   The node to which all the use-edges of @p from shall be
 *             rerouted.
 * @param irg  The graph.
 */
void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind)
	ir_graph *irg = get_irn_irg(from);
	set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;

	if (set_edge && edges_activated_kind(irg, kind)) {
		struct list_head *head = _get_irn_outs_head(from, kind);

		DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));

		/* rewiring an edge removes it from 'from's out list, so the loop
		 * terminates once the list is empty */
		while (head != head->next) {
			ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
			assert(edge->pos >= -1);
			set_edge(edge->src, edge->pos, to);
/* Walker: check that every in-edge of irn is present in the edge set. */
static void verify_set_presence(ir_node *irn, void *data)
	build_walker *w = (build_walker*)data;
	ir_graph *irg = get_irn_irg(irn);
	ir_edgeset_t *edges = &_get_irg_edge_info(irg, w->kind)->edges;

	foreach_tgt(irn, i, n, w->kind) {

		e = ir_edgeset_find(edges, &templ);

			w->problem_found = 1;

/* Walker: check that every edge in irn's out list is consistent. */
static void verify_list_presence(ir_node *irn, void *data)
	build_walker *w = (build_walker*)data;

	/* remember reachability so superfluous-edge reporting can skip
	 * unreachable sources */
	bitset_set(w->reachable, get_irn_idx(irn));

	/* check list heads */
	verify_list_head(irn, w->kind);

	foreach_out_edge_kind(irn, e, w->kind) {

		/* an edge whose position exceeds its source's arity is stale */
		if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
			w->problem_found = 1;

		tgt = get_n(e->src, e->pos, w->kind);

			w->problem_found = 1;

/* Verify all edges of the given kind; non-zero return means problems. */
int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
	struct build_walker w;
	ir_edgeset_t *edges = &_get_irg_edge_info(irg, kind)->edges;
	ir_edgeset_iterator_t iter;

	w.reachable = bitset_alloca(get_irg_last_idx(irg));

	/* Clear the present bit in all edges available. */
	foreach_ir_edgeset(edges, e, iter) {

	irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);

	/*
	 * Dump all edges which are not invalid and not present.
	 * These edges are superfluous and their presence in the
	 * edge set is wrong.
	 */
	foreach_ir_edgeset(edges, e, iter) {
		if (! e->invalid && ! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {

			ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d is superfluous\n", edge_get_id(e), e->src, e->pos);

	return w.problem_found;
/* Bad and Block nodes are excluded from the edge-counter check. */
#define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))

/**
 * Clear link field of all nodes.
 */
static void clear_links(ir_node *irn, void *env)
	if (IGNORE_NODE(irn)) {
		set_irn_link(irn, NULL);

	/* every other node gets a bitset recording which nodes reference it */
	irg = get_irn_irg(irn);
	bs = bitset_malloc(get_irg_last_idx(irg));
	set_irn_link(irn, bs);

/**
 * Increases count (stored in link field) for all operands of a node.
 */
static void count_user(ir_node *irn, void *env)
	/* non-Block nodes additionally have the block input at pos -1 */
	first = is_Block(irn) ? 0 : -1;
	for (i = get_irn_arity(irn) - 1; i >= first; --i) {
		ir_node *op = get_irn_n(irn, i);
		bitset_t *bs = (bitset_t*)get_irn_link(op);

			bitset_set(bs, get_irn_idx(irn));

/**
 * Verifies if collected count, number of edges in list and stored edge count are in sync.
 */
static void verify_edge_counter(ir_node *irn, void *env)
	build_walker *w = (build_walker*)env;

	const struct list_head *head;
	const struct list_head *pos;

	if (IGNORE_NODE(irn))

	bs = (bitset_t*)get_irn_link(irn);

	edge_cnt = _get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
	head = _get_irn_outs_head(irn, EDGE_KIND_NORMAL);

	/* We can iterate safely here, list heads have already been verified. */
	list_for_each(pos, head) {

	/* check all nodes that reference us and count edges that point number
	 * of ins that actually point to us */
	irg = get_irn_irg(irn);

	bitset_foreach(bs, idx) {
		ir_node *src = get_idx_irn(irg, idx);

		arity = get_irn_arity(src);
		for (i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(src, i);

	if (edge_cnt != list_cnt) {
		w->problem_found = 1;
		ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
			edge_cnt, list_cnt, irn);

	if (ref_cnt != list_cnt) {
		w->problem_found = 1;
		ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but the list contains %d edge(s)\n",
			irn, ref_cnt, list_cnt);
/**
 * Verifies the out edges of an irg.
 */
int edges_verify(ir_graph *irg)
	struct build_walker w;
	int problem_found = 0;

	/* verify normal edges only */
	problem_found = edges_verify_kind(irg, EDGE_KIND_NORMAL);

	w.kind = EDGE_KIND_NORMAL;

	/* cross-check the per-node counters via link-field bitsets */
	irg_walk_anchors(irg, clear_links, count_user, &w);
	irg_walk_anchors(irg, NULL, verify_edge_counter, &w);

	return problem_found ? 1 : w.problem_found;

/** Configuration for the edges-verify graph pass. */
typedef struct pass_t {
	ir_graph_pass_t pass;
	unsigned assert_on_problem; /* abort (assert) when problems are found */
900 * Wrapper to edges_verify to be run as an ir_graph pass.
902 static int edges_verify_wrapper(ir_graph *irg, void *context)
904 pass_t *pass = (pass_t*)context;
905 int problems_found = edges_verify(irg);
906 /* do NOT rerun the pass if verify is ok :-) */
907 assert(problems_found && pass->assert_on_problem);
/* Creates an ir_graph pass for edges_verify(). */
ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem)
	pass_t *pass = XMALLOCZ(pass_t);

	def_graph_pass_constructor(
		&pass->pass, name ? name : "edges_verify", edges_verify_wrapper);

	/* neither dump nor verify */
	pass->pass.dump_irg = (DUMP_ON_IRG_FUNC)ir_prog_no_dump;
	pass->pass.verify_irg = (RUN_ON_IRG_FUNC)ir_prog_no_verify;

	pass->assert_on_problem = assert_on_problem;

/* Register the debug module; called once at libFirm startup. */
void init_edges(void)
	FIRM_DBG_REGISTER(dbg, DBG_EDGES);

/* Enable/disable the expensive list-head checking (see edges_dbg). */
void edges_init_dbg(int do_dbg)

/* Activate out edges for a graph: normal + block, plus dep in backend. */
void edges_activate(ir_graph *irg)
	edges_activate_kind(irg, EDGE_KIND_NORMAL);
	edges_activate_kind(irg, EDGE_KIND_BLOCK);
	if (get_irg_phase_state(irg) == phase_backend)
		edges_activate_kind(irg, EDGE_KIND_DEP);

/* Deactivate all edge kinds, in reverse activation order. */
void edges_deactivate(ir_graph *irg)
	if (get_irg_phase_state(irg) == phase_backend)
		edges_deactivate_kind(irg, EDGE_KIND_DEP);
	edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
	edges_deactivate_kind(irg, EDGE_KIND_NORMAL);

/* Assure normal and block edges are available for the graph. */
int edges_assure(ir_graph *irg)
	if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {

		edges_activate_kind(irg, EDGE_KIND_BLOCK);

	if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {

		edges_activate_kind(irg, EDGE_KIND_NORMAL);

/* Assure edges of one kind; reports whether they were already active. */
int edges_assure_kind(ir_graph *irg, ir_edge_kind_t kind)
	int activated = edges_activated_kind(irg, kind);

		edges_activate_kind(irg, kind);
/* Notify deletion of a node for all standard edge kinds. */
void edges_node_deleted(ir_node *irn)
	edges_node_deleted_kind(irn, EDGE_KIND_NORMAL);
	edges_node_deleted_kind(irn, EDGE_KIND_BLOCK);

/* Rebuild edges of a node revived by CSE, for all standard edge kinds. */
void edges_node_revival(ir_node *irn)
	edges_node_revival_kind(irn, EDGE_KIND_NORMAL);
	edges_node_revival_kind(irn, EDGE_KIND_BLOCK);

/* Out-of-line instances of the inline accessors from iredges_t.h.
 * The parenthesized names prevent macro expansion at the definitions. */
const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind)
	return _get_irn_out_edge_first_kind(irn, kind);

const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last)
	return _get_irn_out_edge_next(irn, last);

ir_node *(get_edge_src_irn)(const ir_edge_t *edge)
	return _get_edge_src_irn(edge);

int (get_edge_src_pos)(const ir_edge_t *edge)
	return _get_edge_src_pos(edge);

int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind)
	return _get_irn_n_edges_kind(irn, kind);

/* Recursive helper for irg_walk_edges(): DFS along normal out edges,
 * guarded by the visited flag. */
static void irg_walk_edges2(ir_node *node, irg_walk_func *pre,
                            irg_walk_func *post, void *env)
	const ir_edge_t *edge, *next;

	if (irn_visited_else_mark(node))

	foreach_out_edge_kind_safe(node, edge, next, EDGE_KIND_NORMAL) {
		/* find the corresponding successor block. */
		ir_node *pred = get_edge_src_irn(edge);
		irg_walk_edges2(pred, pre, post, env);

/* Walk the graph along out edges starting at a block. */
void irg_walk_edges(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
	ir_graph *irg = get_irn_irg(node);

	assert(edges_activated(irg));
	assert(is_Block(node));

	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);

	inc_irg_visited(irg);
	irg_walk_edges2(node, pre, post, env);

	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);

/* Recursive helper for irg_block_edges_walk(): DFS along block out edges,
 * guarded by the block-visited flag. */
static void irg_block_edges_walk2(ir_node *bl, irg_walk_func *pre,
                                  irg_walk_func *post, void *env)
	const ir_edge_t *edge, *next;

	if (!Block_block_visited(bl)) {
		mark_Block_block_visited(bl);

		foreach_out_edge_kind_safe(bl, edge, next, EDGE_KIND_BLOCK) {
			/* find the corresponding successor block. */
			ir_node *pred = get_edge_src_irn(edge);
			irg_block_edges_walk2(pred, pre, post, env);

/* Walk only the block graph along block out edges. */
void irg_block_edges_walk(ir_node *node, irg_walk_func *pre,
                          irg_walk_func *post, void *env)
	ir_graph *irg = get_irn_irg(node);

	assert(edges_activated(irg));
	assert(is_Block(node));

	ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);

	inc_irg_block_visited(irg);
	irg_block_edges_walk2(node, pre, post, env);

	ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);