2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Always available outs.
9 * @author Sebastian Hack, Michael Beck, Andreas Schoesser
12 * This are out-edges (also called def-use edges) that are dynamically
13 * updated as the graph changes.
19 #include "iredgekinds.h"
20 #include "iredges_t.h"
30 #include "iredgeset.h"
/* Instantiate a type-safe hash set of ir_edge_t* ("ir_edgeset_t") from the
 * generic hashset template in hashset.c.inl.  The #defines below configure
 * the template: key hashing combines the source node pointer with the input
 * position (40013 is just an odd multiplier to spread positions); equality
 * compares (src, pos) pairs.
 * NOTE(review): this excerpt has lines elided (standalone braces/blank
 * lines); the leading numerals on each line are residue of that extraction. */
35 #define HashSet ir_edgeset_t
36 #define HashSetIterator ir_edgeset_iterator_t
37 #define ValueType ir_edge_t*
38 #define NullValue NULL
/* Sentinel distinct from NULL marking tombstoned (deleted) slots. */
39 #define DeletedValue ((ir_edge_t*)-1)
40 #define Hash(this,key) (hash_ptr(key->src) ^ (key->pos * 40013))
41 #define KeysEqual(this,key1,key2) ((key1->src) == (key2->src) && (key1->pos == key2->pos))
42 #define SetRangeEmpty(ptr,size) memset(ptr, 0, (size) * sizeof((ptr)[0]))
/* Map the template's generic names to ir_edgeset_* so the generated
 * functions get module-specific, linkable names. */
44 #define hashset_init ir_edgeset_init
45 void ir_edgeset_init_size(ir_edgeset_t *self, size_t size);
46 #define hashset_init_size ir_edgeset_init_size
47 #define hashset_destroy ir_edgeset_destroy
48 #define hashset_insert ir_edgeset_insert
49 #define hashset_remove ir_edgeset_remove
50 ir_edge_t *ir_edgeset_find(const ir_edgeset_t *self, const ir_edge_t*);
51 #define hashset_find ir_edgeset_find
52 size_t ir_edgeset_size(const ir_edgeset_t *self);
53 #define hashset_size ir_edgeset_size
54 #define hashset_iterator_init ir_edgeset_iterator_init
55 #define hashset_iterator_next ir_edgeset_iterator_next
56 #define hashset_remove_iterator ir_edgeset_remove_iterator
/* Textual inclusion expands the template with the configuration above. */
58 #include "hashset.c.inl"
/* Per-edge-kind function table: each edge kind (normal def-use, block
 * successor, dependency) supplies its own setter, arity query, and
 * pos'th-target query so the generic edge code below can stay kind-agnostic. */
61 * A function that allows for setting an edge.
62 * This abstraction is necessary since different edge kind have
63 * different methods of setting edges.
65 typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);
68 * A function that returns the "arity" of a given edge kind
71 typedef int (get_edge_src_arity_func_t)(const ir_node *src);
74 * A function that returns the pos'th edge of a given edge kind for a node.
76 typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);
79 * Additional data for an edge kind.
/* NOTE(review): the `typedef struct {` opener (original line ~81) is missing
 * from this excerpt; the members and closing tag below are verbatim. */
82 const char *name; /**< name of this edge kind */
83 set_edge_func_t *set_edge; /**< the set_edge function */
84 int first_idx; /**< index of the first possible edge */
85 get_edge_src_arity_func_t *get_arity; /**< the get_arity function */
86 get_edge_src_n_func_t *get_n; /**< the get_n function */
87 } ir_edge_kind_info_t;
90 * Get the predecessor block.
92 static ir_node *get_block_n(const ir_node *block, int pos)
95 return get_Block_cfgpred_block(block, pos);
/* Variant of get_irn_n that tolerates the special -1 ("block") input of a
 * Block node.
 * NOTE(review): the statement taken when (n == -1 && is_Block(node)) holds
 * (original line ~103) is missing from this excerpt — presumably an early
 * return; confirm against the full source. */
100 static ir_node *get_irn_safe_n(const ir_node *node, int n)
102 if (n == -1 && is_Block(node))
104 return get_irn_n(node, n);
/* Dispatch table indexed by ir_edge_kind_t.  Normal edges start at index -1
 * (the block input); block-successor edges have no setter (set_edge == NULL,
 * so edges_reroute_kind skips them). */
107 static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
108 { "normal" , set_irn_n, -1, get_irn_arity, get_irn_safe_n },
109 { "block succs", NULL, 0, get_irn_arity, get_block_n },
110 { "dependency", set_irn_dep, 0, get_irn_deps, get_irn_dep }
/* Convenience macros over the table: iterate a node's targets of a kind,
 * fetch the pos'th target, and fetch the kind's printable name. */
113 #define foreach_tgt(irn, i, n, kind) for (i = edge_kind_info[kind].first_idx, n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
114 #define get_n(irn, pos, kind) (edge_kind_info[kind].get_n(irn, pos))
115 #define get_kind_str(kind) (edge_kind_info[kind].name)
/* Debug module handle; only present in DEBUG_libfirm builds. */
117 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
120 * This flag is set to 1, if the edges get initialized for an irg.
121 * Then register additional data is forbidden.
123 static int edges_used = 0;
126 * If set to 1, the list heads are checked every time an edge is changed.
128 static int edges_dbg = 0;
131 * Returns an ID for the given edge.
/* NOTE(review): the function body (original lines ~134-136) is missing from
 * this excerpt; only the signature is visible. */
133 static inline long edge_get_id(const ir_edge_t *e)
/* (Re)initialize the edge bookkeeping of one kind for a graph: a fresh
 * obstack for edge records, an empty free list, and an edge set pre-sized to
 * either 1.25x the graph's last node index or, on re-init, the previous set's
 * size.  No-op when edges of this kind are not activated. */
138 void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
140 if (edges_activated_kind(irg, kind)) {
141 irg_edge_info_t *info = get_irg_edge_info(irg, kind);
/* Heuristic initial capacity: last index * 5/4 to leave hash-set headroom. */
142 size_t amount = get_irg_last_idx(irg) * 5 / 4;
/* Re-initialization: recycle the old set's size and release old storage. */
145 if (info->allocated) {
146 amount = ir_edgeset_size(&info->edges);
147 ir_edgeset_destroy(&info->edges);
148 obstack_free(&info->edges_obst, NULL);
150 obstack_init(&info->edges_obst);
151 INIT_LIST_HEAD(&info->free_edges);
152 ir_edgeset_init_size(&info->edges, amount);
158 * Change the out count
160 * @param tgt the edge target
161 * @param kind the kind of the edge
/* ofs is +1 / -1 to adjust the cached out-edge count of tgt. */
163 static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs)
165 irn_edge_info_t *info = get_irn_edge_info(tgt, kind);
166 info->out_count += ofs;
170 * Verify the edge list of a node, i.e. ensure it's a loop:
171 * head -> e_1 -> ... -> e_n -> head
/* Walks irn's out-edge list, collecting visited list entries in a pset; a
 * revisit means the list closes a cycle somewhere other than the head, which
 * is reported to stderr.
 * NOTE(review): lines declaring/maintaining `num` and the error-path tail
 * (original ~174-176, 184, 187, 189-192, 194+) are elided in this excerpt. */
173 static inline void verify_list_head(ir_node *irn, ir_edge_kind_t kind)
177 pset *lh_set = pset_new_ptr(16);
178 const struct list_head *head = &get_irn_edge_info(irn, kind)->outs_head;
179 const struct list_head *pos;
181 list_for_each(pos, head) {
182 if (pset_find_ptr(lh_set, pos)) {
183 const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
185 ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
186 fprintf(stderr, "- at list entry %d\n", num);
188 ir_fprintf(stderr, "- edge(%ld) %+F(%d)\n", edge_get_id(edge), edge->src, edge->pos);
193 pset_insert_ptr(lh_set, pos);
/* Dump all (src, pos) pairs of the graph's edge set of the given kind;
 * returns early when that kind is not activated. */
201 void edges_dump_kind(ir_graph *irg, ir_edge_kind_t kind)
203 irg_edge_info_t *info;
205 ir_edgeset_iterator_t iter;
208 if (!edges_activated_kind(irg, kind))
211 info = get_irg_edge_info(irg, kind);
212 edges = &info->edges;
213 foreach_ir_edgeset(edges, e, iter) {
214 ir_printf("%+F %d\n", e->src, e->pos);
/* Create the out-edge (src, pos) -> tgt of the given kind: obtain an edge
 * record (recycled from the free list when possible, otherwise obstack-
 * allocated), register it in the graph's edge set, link it into tgt's
 * out-edge list, and bump tgt's out count.
 * NOTE(review): the lines filling in edge->src/edge->pos (original ~236-243)
 * are elided in this excerpt. */
218 static void add_edge(ir_node *src, int pos, ir_node *tgt, ir_edge_kind_t kind,
221 assert(edges_activated_kind(irg, kind));
222 irg_edge_info_t *info = get_irg_edge_info(irg, kind);
223 ir_edgeset_t *edges = &info->edges;
225 struct list_head *head = &get_irn_edge_info(tgt, kind)->outs_head;
226 assert(head->next && head->prev &&
227 "target list head must have been initialized");
229 /* The old target was NULL, thus, the edge is newly created. */
/* Prefer recycling a previously deleted edge record over fresh allocation. */
231 if (list_empty(&info->free_edges)) {
232 edge = OALLOC(&info->edges_obst, ir_edge_t);
234 edge = list_entry(info->free_edges.next, ir_edge_t, list);
235 list_del(&edge->list);
/* Insert must not find a duplicate: an identical (src, pos) edge would be
 * returned instead of the new record. */
244 ir_edge_t *new_edge = ir_edgeset_insert(edges, edge);
245 assert(new_edge == edge);
248 list_add(&edge->list, head);
250 edge_change_cnt(tgt, kind, +1);
/* Remove the out-edge (src, pos) -> old_tgt of the given kind: look it up in
 * the edge set via a stack template, unlink it from old_tgt's out list, take
 * it out of the set, park the record on the free list for reuse, and
 * decrement old_tgt's out count.
 * NOTE(review): the template initialization (original ~262-265) and the
 * guard around the found edge (~270-272) are elided in this excerpt. */
253 static void delete_edge(ir_node *src, int pos, ir_node *old_tgt,
254 ir_edge_kind_t kind, ir_graph *irg)
256 assert(edges_activated_kind(irg, kind));
258 irg_edge_info_t *info = get_irg_edge_info(irg, kind);
259 ir_edgeset_t *edges = &info->edges;
261 /* Initialize the edge template to search in the set. */
266 /* search the edge in the set. */
267 ir_edge_t *edge = ir_edgeset_find(edges, &templ);
269 /* mark the edge invalid if it was found */
273 list_del(&edge->list);
274 ir_edgeset_remove(edges, edge);
275 list_add(&edge->list, &info->free_edges);
278 edge_change_cnt(old_tgt, kind, -1);
/* Notify the edge machinery that input `pos` of `src` changed from old_tgt to
 * tgt.  Three cases: old_tgt == NULL -> a new edge is added; tgt == NULL ->
 * the edge is deleted; both non-NULL and different -> the existing edge
 * record is looked up and moved to tgt's out list, adjusting both nodes'
 * out counts.
 * FIX(review): the list-head verification below was guarded by
 * `#ifndef DEBUG_libfirm`, i.e. it ran only in RELEASE builds — inverted.
 * It belongs under `#ifdef DEBUG_libfirm`, consistent with the DEBUG_ONLY
 * dbg module and the edges_dbg "check list heads on every change" flag.
 * NOTE(review): several lines (braces, the `templ` setup ~313-316, the
 * edges_dbg condition ~326-329) are elided in this excerpt. */
281 void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
282 ir_node *old_tgt, ir_edge_kind_t kind,
285 if (old_tgt == NULL) {
286 add_edge(src, pos, tgt, kind, irg);
288 } else if (tgt == NULL) {
289 delete_edge(src, pos, old_tgt, kind, irg);
293 /* Only do something, if the old and new target differ. */
297 assert(edges_activated_kind(irg, kind));
298 irg_edge_info_t *info = get_irg_edge_info(irg, kind);
299 ir_edgeset_t *edges = &info->edges;
302 * The target is not NULL and the old target differs
303 * from the new target, the edge shall be moved (if the
304 * old target was != NULL) or added (if the old target was
307 struct list_head *head = &get_irn_edge_info(tgt, kind)->outs_head;
309 assert(head->next && head->prev &&
310 "target list head must have been initialized");
312 /* Initialize the edge template to search in the set. */
/* The (src, pos) key is unchanged, so the set entry stays put; only the
 * intrusive list membership moves from old_tgt to tgt. */
317 ir_edge_t *edge = ir_edgeset_find(edges, &templ);
318 assert(edge && "edge to redirect not found!");
320 list_move(&edge->list, head);
321 edge_change_cnt(old_tgt, kind, -1);
322 edge_change_cnt(tgt, kind, +1);
324 #ifdef DEBUG_libfirm
325 /* verify list heads */
328 verify_list_head(tgt, kind);
330 verify_list_head(old_tgt, kind);
/* Public notification entry: forwards the change to the normal-edge layer
 * and, when block-successor edges are active, mirrors it on the block level.
 * For a mode_X jump node moving between blocks, every control-flow successor
 * whose predecessor is src gets its block edge redirected as well.
 * NOTE(review): braces and some guards (e.g. the `if (tgt)` before line 348,
 * a `continue` after line 357) are elided in this excerpt. */
335 void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt,
338 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
339 edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);
342 if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
344 ir_node *bl_old = old_tgt ? get_nodes_block(old_tgt) : NULL;
345 ir_node *bl_tgt = NULL;
/* Bad targets have no block of their own; keep the Bad node itself. */
348 bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(tgt);
350 edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
351 } else if (get_irn_mode(src) == mode_X && old_tgt != NULL && is_Block(old_tgt)) {
352 /* moving a jump node from one block to another */
353 foreach_out_edge_kind_safe(old_tgt, edge, EDGE_KIND_BLOCK) {
354 ir_node *succ = get_edge_src_irn(edge);
355 int succ_pos = get_edge_src_pos(edge);
356 ir_node *block_pred = get_Block_cfgpred(succ, succ_pos);
357 if (block_pred != src)
359 edges_notify_edge_kind(succ, succ_pos, tgt, old_tgt,
360 EDGE_KIND_BLOCK, irg);
367 * Delete all in edges of a given kind from the node old.
369 * @param old the node
370 * @param kind the kind of edges to remove
371 * @param irg the irg of the old node
/* Iterates the node's targets of the kind and deletes each out-edge record
 * pointing at them; no-op if the kind is not activated. */
373 static void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind)
376 ir_graph *irg = get_irn_irg(old);
378 if (!edges_activated_kind(irg, kind))
381 DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));
383 foreach_tgt(old, i, n, kind) {
384 ir_node *old_tgt = get_n(old, i, kind);
385 delete_edge(old, i, old_tgt, kind, irg);
390 * A node might be revivaled by CSE. Assure its edges.
392 * @param irn the node
393 * @param kind the kind of edges to remove
394 * @param irg the irg of the old node
/* Rebuilds the out-edges of a node brought back by CSE; skips nodes whose
 * edges were already built (edges_built flag) or inactive kinds. */
396 static void edges_node_revival_kind(ir_node *irn, ir_edge_kind_t kind)
398 irn_edge_info_t *info;
400 ir_graph *irg = get_irn_irg(irn);
402 if (!edges_activated_kind(irg, kind))
405 info = get_irn_edge_info(irn, kind);
406 if (info->edges_built)
409 DBG((dbg, LEVEL_5, "node revivaled (kind: %s): %+F\n", get_kind_str(kind), irn));
411 foreach_tgt(irn, i, n, kind) {
412 ir_node *tgt = get_n(irn, i, kind);
413 add_edge(irn, i, tgt, kind, irg);
415 info->edges_built = 1;
/* Shared context for the graph walkers below: which edge kind to process and
 * whether any verification problem was found.
 * NOTE(review): additional members (original ~419-420, e.g. the reachable
 * bitset used by edges_verify_kind) are elided in this excerpt. */
418 typedef struct build_walker {
421 unsigned problem_found;
425 * Post-Walker: notify all edges
/* Adds one edge per input of irn for the walker's kind and marks the node's
 * edges as built. */
427 static void build_edges_walker(ir_node *irn, void *data)
429 build_walker *w = (build_walker*)data;
431 ir_edge_kind_t kind = w->kind;
432 ir_graph *irg = get_irn_irg(irn);
434 foreach_tgt(irn, i, n, kind) {
435 ir_node *pred = get_n(irn, i, kind);
437 add_edge(irn, i, pred, kind, irg);
439 get_irn_edge_info(irn, kind)->edges_built = 1;
443 * Pre-Walker: initializes the list-heads and set the out-count
446 static void init_lh_walker(ir_node *irn, void *data)
448 build_walker *w = (build_walker*)data;
449 ir_edge_kind_t kind = w->kind;
450 list_head *head = &get_irn_edge_info(irn, kind)->outs_head;
451 INIT_LIST_HEAD(head);
452 get_irn_edge_info(irn, kind)->edges_built = 0;
453 get_irn_edge_info(irn, kind)->out_count = 0;
457 * Pre-Walker: initializes the list-heads and set the out-count
460 * Additionally touches DEP nodes, as they might be DEAD.
461 * THIS IS UGLY, but I don't find a better way until we
463 * a) ensure that dead nodes are not used as input
464 * b) it might be sufficient to add those stupid NO_REG nodes
/* Same as init_lh_walker, but also initializes the edge info of every
 * dependency target, since dead DEP targets are never reached by the walk. */
467 static void init_lh_walker_dep(ir_node *irn, void *data)
469 build_walker *w = (build_walker*)data;
470 ir_edge_kind_t kind = w->kind;
471 list_head *head = &get_irn_edge_info(irn, kind)->outs_head;
474 INIT_LIST_HEAD(head);
475 get_irn_edge_info(irn, kind)->edges_built = 0;
476 get_irn_edge_info(irn, kind)->out_count = 0;
478 for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
479 ir_node *dep = get_irn_dep(irn, i);
481 head = &get_irn_edge_info(dep, kind)->outs_head;
483 INIT_LIST_HEAD(head);
484 get_irn_edge_info(dep, kind)->edges_built = 0;
485 get_irn_edge_info(dep, kind)->out_count = 0;
/* Pairs a walker callback with its environment so visit_all_identities can
 * forward to an irg_walk_func.
 * NOTE(review): the `void *data` member and closing of the struct (original
 * ~491-492) are elided in this excerpt. */
489 typedef struct visitor_info_t {
490 irg_walk_func *visit;
495 * Visitor: initializes the list-heads and set the out-count
496 * of all nodes to 0 of nodes that are not seen so far.
498 static void visitor(ir_node *irn, void *data)
500 visitor_info_t *info = (visitor_info_t*)data;
/* Skip nodes whose enclosing block was already deleted. */
504 if (!is_Block(irn) && is_Deleted(get_nodes_block(irn)))
/* Only process each node once; irn_visited_else_mark marks as a side effect. */
507 if (!irn_visited_else_mark(irn)) {
508 info->visit(irn, info->data);
/* Activate edges of one kind for a graph and build the initial edge set.
 * Initializes list heads for all nodes reachable via anchors AND via the
 * identities (CSE) set — see the long rationale comment below — then builds
 * the edges in a second pass.  DEP edges get special handling because dead
 * dependency targets are only reachable through the identities set. */
512 void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
515 * Build the initial edge set.
516 * Beware, this is not a simple task because it suffers from two
518 * - the anchor set allows access to Nodes that may not be reachable from
520 * - the identities add nodes to the "root set" that are not yet reachable
521 * from End. However, after some transformations, the CSE may revival these
524 * These problems can be fixed using different strategies:
525 * - Add an age flag to every node. Whenever the edge of a node is older
526 * then the current edge, invalidate the edges of this node.
527 * While this would help for revivaled nodes, it increases memory and runtime.
528 * - Delete the identities set.
529 * Solves the revival problem, but may increase the memory consumption, as
530 * nodes cannot be revivaled at all.
531 * - Manually iterate over the identities root set. This did not consume more memory
532 * but increase the computation time because the |identities| >= |V|
534 * Currently, we use the last option.
536 struct build_walker w;
537 irg_edge_info_t *info = get_irg_edge_info(irg, kind);
538 visitor_info_t visit;
/* Activating twice is a caller error.
 * NOTE(review): lines setting w.kind/visit.data/info->activated and the
 * edges_used flag (original ~539-546) are elided in this excerpt. */
544 assert(!info->activated);
547 edges_init_graph_kind(irg, kind);
548 if (kind == EDGE_KIND_DEP) {
549 irg_walk_anchors(irg, init_lh_walker_dep, NULL, &w);
550 /* Argh: Dep nodes might be dead, so we MUST visit identities first */
551 visit.visit = init_lh_walker_dep;
552 visit_all_identities(irg, visitor, &visit);
553 irg_walk_anchors(irg, NULL, build_edges_walker, &w);
555 visit.visit = init_lh_walker;
556 visit_all_identities(irg, visitor, &visit);
557 irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
/* Deactivate edges of one kind: free the obstack-backed edge records and the
 * edge set (when allocated), and drop the consistent-out-edges property. */
561 void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
563 irg_edge_info_t *info = get_irg_edge_info(irg, kind);
566 if (info->allocated) {
567 obstack_free(&info->edges_obst, NULL);
568 ir_edgeset_destroy(&info->edges);
571 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
/* Out-of-line definitions of the inline accessors; the parenthesized names
 * suppress macro expansion so a real function symbol is emitted. */
574 int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
576 return edges_activated_kind_(irg, kind);
579 int (edges_activated)(const ir_graph *irg)
581 return edges_activated_(irg);
/* Redirect every user of `from` to use `to` instead, for one edge kind.
 * Each set_edge call triggers edges_notify_edge_kind, which unlinks the edge
 * from `from`'s out list — hence the "while list non-empty" loop shape
 * instead of a normal iteration.  Kinds without a setter (block succs) and
 * inactive kinds are skipped. */
584 void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind)
586 ir_graph *irg = get_irn_irg(from);
587 set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;
589 if (set_edge && edges_activated_kind(irg, kind)) {
590 struct list_head *head = &get_irn_edge_info(from, kind)->outs_head;
592 DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));
/* Loop until the out list is empty; each set_edge removes head->next. */
594 while (head != head->next) {
595 ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
596 assert(edge->pos >= -1);
597 set_edge(edge->src, edge->pos, to);
602 void edges_reroute(ir_node *from, ir_node *to)
604 edges_reroute_kind(from, to, EDGE_KIND_NORMAL);
/* Like edges_reroute, but leaves edges whose source is `exception` intact.
 * NOTE(review): a `continue` after the guard (original ~612) is elided. */
607 void edges_reroute_except(ir_node *from, ir_node *to, ir_node *exception)
609 foreach_out_edge_safe(from, edge) {
610 ir_node *src = get_edge_src_irn(edge);
611 if (src == exception)
613 set_irn_n(src, edge->pos, to);
/* Verifier walker 1: for every input of irn, the corresponding edge must be
 * present in the graph's edge set; missing entries set problem_found.
 * NOTE(review): template setup, the found-edge bookkeeping (marking
 * e->present) and error printing lines are elided in this excerpt. */
618 static void verify_set_presence(ir_node *irn, void *data)
620 build_walker *w = (build_walker*)data;
621 ir_graph *irg = get_irn_irg(irn);
622 ir_edgeset_t *edges = &get_irg_edge_info(irg, w->kind)->edges;
625 foreach_tgt(irn, i, n, w->kind) {
631 e = ir_edgeset_find(edges, &templ);
635 w->problem_found = 1;
/* Verifier walker 2: record irn as reachable, check its list head, then for
 * every recorded out-edge check the position is in range and that following
 * (src, pos) actually leads back to irn. */
640 static void verify_list_presence(ir_node *irn, void *data)
642 build_walker *w = (build_walker*)data;
644 bitset_set(w->reachable, get_irn_idx(irn));
646 /* check list heads */
647 verify_list_head(irn, w->kind);
649 foreach_out_edge_kind(irn, e, w->kind) {
/* Out-of-range position: the source shrank below the recorded pos. */
652 if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
653 w->problem_found = 1;
657 tgt = get_n(e->src, e->pos, w->kind);
660 w->problem_found = 1;
/* Verify all edges of one kind: clear each edge's `present` bit, run the two
 * presence walkers over the graph, then report any edge that was never seen
 * during the walk although its source is reachable (a superfluous edge).
 * Returns nonzero when a problem was found.
 * NOTE(review): w.kind/w.problem_found initialization and the loop body
 * clearing e->present (original ~676-682) are elided in this excerpt. */
666 int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
669 struct build_walker w;
670 ir_edgeset_t *edges = &get_irg_edge_info(irg, kind)->edges;
672 ir_edgeset_iterator_t iter;
/* Stack-allocated reachability bitset, one bit per node index. */
675 w.reachable = bitset_alloca(get_irg_last_idx(irg));
678 /* Clear the present bit in all edges available. */
679 foreach_ir_edgeset(edges, e, iter) {
683 irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);
686 * Dump all edges which are not invalid and not present.
687 * These edges are superfluous and their presence in the
690 foreach_ir_edgeset(edges, e, iter) {
691 if (! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {
693 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d is superfluous\n", edge_get_id(e), e->src, e->pos);
697 return w.problem_found;
/* Nodes excluded from the edge-counter cross-check: Bad and Block nodes. */
705 #define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))
708 * Clear link field of all nodes.
/* Pre-walker: give every counted node a fresh bitset (indexed by user node
 * index) in its link field; ignored nodes get NULL. */
710 static void clear_links(ir_node *irn, void *env)
716 if (IGNORE_NODE(irn)) {
717 set_irn_link(irn, NULL);
721 irg = get_irn_irg(irn);
722 bs = bitset_malloc(get_irg_last_idx(irg));
723 set_irn_link(irn, bs);
727 * Increases count (stored in link field) for all operands of a node.
/* Post-walker: for each operand op of irn (including the block input when
 * irn is not a Block), set irn's bit in op's user bitset. */
729 static void count_user(ir_node *irn, void *env)
/* Blocks have no -1 (block) input; ordinary nodes start at -1. */
735 first = is_Block(irn) ? 0 : -1;
736 for (i = get_irn_arity(irn) - 1; i >= first; --i) {
737 ir_node *op = get_irn_n(irn, i);
738 bitset_t *bs = (bitset_t*)get_irn_link(op);
741 bitset_set(bs, get_irn_idx(irn));
746 * Verifies if collected count, number of edges in list and stored edge count are in sync.
/* Cross-checks three independent counts for each node: out_count (cached),
 * list_cnt (length of the out-edge list), and ref_cnt (inputs of other nodes
 * that actually point here, derived from the bitsets built by count_user).
 * Mismatches are reported and set problem_found.
 * NOTE(review): declarations/increments of list_cnt and ref_cnt and the
 * bitset cleanup (several original lines) are elided in this excerpt. */
748 static void verify_edge_counter(ir_node *irn, void *env)
750 build_walker *w = (build_walker*)env;
751 if (IGNORE_NODE(irn))
754 bitset_t *bs = (bitset_t*)get_irn_link(irn);
756 int edge_cnt = get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
757 const struct list_head *head
758 = &get_irn_edge_info(irn, EDGE_KIND_NORMAL)->outs_head;
760 /* We can iterate safely here, list heads have already been verified. */
761 const struct list_head *pos;
762 list_for_each(pos, head) {
766 /* check all nodes that reference us and count edges that point number
767 * of ins that actually point to us */
768 ir_graph *irg = get_irn_irg(irn);
770 bitset_foreach(bs, idx) {
772 ir_node *src = get_idx_irn(irg, idx);
774 arity = get_irn_arity(src);
775 for (i = 0; i < arity; ++i) {
776 ir_node *in = get_irn_n(src, i);
782 if (edge_cnt != list_cnt) {
783 w->problem_found = 1;
784 ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
785 edge_cnt, list_cnt, irn);
788 if (ref_cnt != list_cnt) {
789 w->problem_found = 1;
790 ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but the list contains %d edge(s)\n",
791 irn, ref_cnt, list_cnt);
/* Full edge verification for a graph: runs the set/list checks for normal
 * edges, then the counter cross-check via the anchor walks.  Returns nonzero
 * when any check failed. */
797 int edges_verify(ir_graph *irg)
799 struct build_walker w;
800 int problem_found = 0;
802 /* verify normal edges only */
803 problem_found = edges_verify_kind(irg, EDGE_KIND_NORMAL);
805 w.kind = EDGE_KIND_NORMAL;
/* First walk builds per-node user bitsets; second walk compares counts. */
809 irg_walk_anchors(irg, clear_links, count_user, &w);
810 irg_walk_anchors(irg, NULL, verify_edge_counter, &w);
812 return problem_found ? 1 : w.problem_found;
/* Pass wrapper state: the generic pass plus a flag requesting an assertion
 * failure when verification finds problems. */
815 typedef struct pass_t {
816 ir_graph_pass_t pass;
817 unsigned assert_on_problem;
821 * Wrapper to edges_verify to be run as an ir_graph pass.
/* Runs edges_verify on the graph.  When assert_on_problem is set, a failed
 * verification aborts via assert.
 * FIX(review): the assert was `assert(problems_found && pass->assert_on_problem)`,
 * which fires whenever verification SUCCEEDS (problems_found == 0) or the
 * flag is unset — the opposite of the intent.  It must assert that NOT
 * (problems were found while assert_on_problem is set). */
823 static int edges_verify_wrapper(ir_graph *irg, void *context)
825 pass_t *pass = (pass_t*)context;
826 int problems_found = edges_verify(irg);
827 /* do NOT rerun the pass if verify is ok :-) */
828 assert(!(problems_found && pass->assert_on_problem));
/* Construct the edge-verification ir_graph pass.  The pass neither dumps nor
 * re-verifies itself (no-op callbacks installed below); ownership of the
 * zero-initialized pass_t passes to the pass framework.
 * NOTE(review): the return statement (original ~844-845) is elided in this
 * excerpt. */
832 ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem)
834 pass_t *pass = XMALLOCZ(pass_t);
836 def_graph_pass_constructor(
837 &pass->pass, name ? name : "edges_verify", edges_verify_wrapper);
839 /* neither dump nor verify */
840 pass->pass.dump_irg = (DUMP_ON_IRG_FUNC)ir_prog_no_dump;
841 pass->pass.verify_irg = (RUN_ON_IRG_FUNC)ir_prog_no_verify;
843 pass->assert_on_problem = assert_on_problem;
/* Module initialization: register the debug channel. */
847 void init_edges(void)
849 FIRM_DBG_REGISTER(dbg, "firm.ir.edges");
/* Toggle the per-change list-head checking (edges_dbg flag).
 * NOTE(review): the body (original ~853-855) is elided in this excerpt. */
852 void edges_init_dbg(int do_dbg)
/* Activate all three edge kinds for a graph. */
857 void edges_activate(ir_graph *irg)
859 edges_activate_kind(irg, EDGE_KIND_NORMAL);
860 edges_activate_kind(irg, EDGE_KIND_BLOCK);
861 edges_activate_kind(irg, EDGE_KIND_DEP);
/* Deactivate all kinds, in reverse activation order. */
864 void edges_deactivate(ir_graph *irg)
866 edges_deactivate_kind(irg, EDGE_KIND_DEP);
867 edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
868 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
/* Ensure all kinds are active and record the consistency property. */
871 void assure_edges(ir_graph *irg)
873 assure_edges_kind(irg, EDGE_KIND_BLOCK);
874 assure_edges_kind(irg, EDGE_KIND_NORMAL);
875 assure_edges_kind(irg, EDGE_KIND_DEP);
876 add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
/* Activate a single kind only if not yet active (idempotent). */
879 void assure_edges_kind(ir_graph *irg, ir_edge_kind_t kind)
881 if (!edges_activated_kind(irg, kind))
882 edges_activate_kind(irg, kind);
/* Remove all out-edges of irn for every kind (node is being deleted). */
885 void edges_node_deleted(ir_node *irn)
887 edges_node_deleted_kind(irn, EDGE_KIND_NORMAL);
888 edges_node_deleted_kind(irn, EDGE_KIND_BLOCK);
889 edges_node_deleted_kind(irn, EDGE_KIND_DEP);
/* Rebuild edges of a CSE-revived node (normal + block kinds only here). */
892 void edges_node_revival(ir_node *irn)
894 edges_node_revival_kind(irn, EDGE_KIND_NORMAL);
895 edges_node_revival_kind(irn, EDGE_KIND_BLOCK);
/* Out-of-line definitions of the inline edge accessors; the parenthesized
 * function names suppress any same-named macro so real symbols are emitted.
 * Each simply forwards to the underscore-suffixed inline implementation. */
898 const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind)
900 return get_irn_out_edge_first_kind_(irn, kind);
903 const ir_edge_t *(get_irn_out_edge_first)(const ir_node *irn)
905 return get_irn_out_edge_first_kind_(irn, EDGE_KIND_NORMAL);
908 const ir_edge_t *(get_block_succ_first)(const ir_node *block)
910 return get_irn_out_edge_first_kind_(block, EDGE_KIND_BLOCK);
913 const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last, ir_edge_kind_t kind)
915 return get_irn_out_edge_next_(irn, last, kind);
918 ir_node *(get_edge_src_irn)(const ir_edge_t *edge)
920 return get_edge_src_irn_(edge);
923 int (get_edge_src_pos)(const ir_edge_t *edge)
925 return get_edge_src_pos_(edge);
928 int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind)
930 return get_irn_n_edges_kind_(irn, kind);
/* Recursive helper: depth-first walk of the graph along NORMAL out-edges,
 * calling pre before and post after descending (guarded by visited marks).
 * NOTE(review): the pre/post invocation lines (original ~937-941, 946+) are
 * elided in this excerpt. */
933 static void irg_walk_edges2(ir_node *node, irg_walk_func *pre,
934 irg_walk_func *post, void *env)
936 if (irn_visited_else_mark(node))
/* Safe iteration: the callbacks may modify the edge list while walking. */
942 foreach_out_edge_kind_safe(node, edge, EDGE_KIND_NORMAL) {
943 /* find the corresponding successor block. */
944 ir_node *pred = get_edge_src_irn(edge);
945 irg_walk_edges2(pred, pre, post, env);
/* Public entry: walk all nodes reachable from `node` (a Block) via out-edges.
 * Requires activated edges; reserves/releases the visited resource. */
952 void irg_walk_edges(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
955 ir_graph *irg = get_irn_irg(node);
957 assert(edges_activated(irg));
958 assert(is_Block(node));
960 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
962 inc_irg_visited(irg);
963 irg_walk_edges2(node, pre, post, env);
965 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/* Recursive helper: depth-first walk over BLOCK successor edges, using the
 * block-visited marks instead of the node marks. */
968 static void irg_block_edges_walk2(ir_node *bl, irg_walk_func *pre,
969 irg_walk_func *post, void *env)
971 if (!Block_block_visited(bl)) {
972 mark_Block_block_visited(bl);
977 foreach_out_edge_kind_safe(bl, edge, EDGE_KIND_BLOCK) {
978 /* find the corresponding successor block. */
979 ir_node *pred = get_edge_src_irn(edge);
980 irg_block_edges_walk2(pred, pre, post, env);
/* Public entry: walk the block graph starting at `node` along successor
 * edges; mirrors irg_walk_edges but with the block-visited resource. */
988 void irg_block_edges_walk(ir_node *node, irg_walk_func *pre,
989 irg_walk_func *post, void *env)
991 ir_graph *irg = get_irn_irg(node);
993 assert(edges_activated(irg));
994 assert(is_Block(node));
996 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
998 inc_irg_block_visited(irg);
999 irg_block_edges_walk2(node, pre, post, env);
1001 ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);