2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Always available outs.
23 * @author Sebastian Hack, Michael Beck, Andreas Schoesser
27 * This are out-edges (also called def-use edges) that are dynamically
28 * updated as the graph changes.
34 #include "iredgekinds.h"
35 #include "iredges_t.h"
43 #include "iredgeset.h"
/* Parameterization of the generic hashset template for sets of
 * ir_edge_t pointers: an edge's identity is its (src, pos) pair.
 * NOTE(review): the template instantiation (#include "hashset.c" or
 * similar) is not visible in this chunk — confirm it follows these
 * defines in the full file. */
#define HashSet                   ir_edgeset_t
#define HashSetIterator           ir_edgeset_iterator_t
#define ValueType                 ir_edge_t*
#define NullValue                 NULL
#define DeletedValue              ((ir_edge_t*)-1)
#define Hash(this,key)            (HASH_PTR(key->src) ^ (key->pos * 40013))
#define KeysEqual(this,key1,key2) ((key1->src) == (key2->src) && (key1->pos == key2->pos))
#define SetRangeEmpty(ptr,size)   memset(ptr, 0, (size) * sizeof((ptr)[0]))

#define hashset_init            ir_edgeset_init
#define hashset_init_size       ir_edgeset_init_size
#define hashset_destroy         ir_edgeset_destroy
#define hashset_insert          ir_edgeset_insert
#define hashset_remove          ir_edgeset_remove
#define hashset_find            ir_edgeset_find
#define hashset_size            ir_edgeset_size
#define hashset_iterator_init   ir_edgeset_iterator_init
#define hashset_iterator_next   ir_edgeset_iterator_next
#define hashset_remove_iterator ir_edgeset_remove_iterator
71 * A function that allows for setting an edge.
72 * This abstraction is necessary since different edge kind have
73 * different methods of setting edges.
75 typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);
77 typedef int (get_edge_src_arity_func_t)(const ir_node *src);
79 typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);
82 * Additional data for an edge kind.
85 const char *name; /**< name of this edge kind */
86 set_edge_func_t *set_edge; /**< the set_edge function */
87 int first_idx; /**< index of the first possible edge */
88 get_edge_src_arity_func_t *get_arity; /**< the get_arity function */
89 get_edge_src_n_func_t *get_n; /**< the get_n function */
90 } ir_edge_kind_info_t;
93 * Get the predecessor block.
95 static ir_node *get_block_n(const ir_node *irn, int pos) {
97 return get_Block_cfgpred_block(irn, pos);
102 static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
103 { "normal" , set_irn_n, -1, get_irn_arity, get_irn_n },
104 { "block succs", NULL, 0, get_irn_arity, get_block_n },
105 { "dependency", set_irn_dep, 0, get_irn_deps, get_irn_dep }
108 #define foreach_tgt(irn, i, n, kind) for(i = edge_kind_info[kind].first_idx, n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
109 #define get_n(irn, pos, kind) (edge_kind_info[kind].get_n(irn, pos))
110 #define get_kind_str(kind) (edge_kind_info[kind].name)
112 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
115 * This flag is set to 1, if the edges get initialized for an irg.
116 * Then register additional data is forbidden.
118 static int edges_used = 0;
121 * Summed size of all users private data
124 static int edges_private_size = 0;
125 #define EDGE_SIZE (sizeof(ir_edge_t) + edges_private_size)
128 * If set to 1, the list heads are checked every time an edge is changed.
130 static int edges_dbg = 0;
133 /* a static variable holding the last number assigned to a new edge */
134 static long last_edge_num = -1;
137 static INLINE long edge_get_id(const ir_edge_t *e) {
140 #else /* DEBUG_libfirm */
142 #endif /* DEBUG_libfirm */
146 * Announce to reserve extra space for each edge to be allocated.
148 * @param n: Size of the space to reserve
150 * @return Offset at which the private data will begin
152 * Several users can reserve extra space for private usage.
153 * Each user has to remember his given offset and the size of his private data.
154 * To be called before FIRM is initialized.
156 int edges_register_private_data(size_t n) {
157 int res = edges_private_size;
159 assert(!edges_used && "you cannot register private edge data, if edges have been initialized");
161 edges_private_size += n;
166 * Reset the user's private data at offset 'offset'
167 * The user has to remember his offset and the size of his data!
168 * Caution: Using wrong values here can destroy other users private data!
170 void edges_reset_private_data(ir_graph *irg, int offset, size_t size) {
171 irg_edge_info_t *info = _get_irg_edge_info(irg, EDGE_KIND_NORMAL);
173 ir_edgeset_iterator_t iter;
175 foreach_ir_edgeset(&info->edges, edge, iter) {
176 memset(edge + sizeof(*edge) + offset, 0, size);
180 #define TIMES37(x) (((x) << 5) + ((x) << 2) + (x))
182 #define get_irn_out_list_head(irn) (&get_irn_out_info(irn)->outs)
184 #define edge_hash(edge) (TIMES37((edge)->pos) + HASH_PTR((edge)->src))
187 * Initialize the out information for a graph.
188 * @note Dead node elimination can call this on an already initialized graph.
190 void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind) {
191 if (edges_activated_kind(irg, kind)) {
192 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
193 size_t amount = irg->estimated_node_count * 2;
196 if(info->allocated) {
197 amount = ir_edgeset_size(&info->edges);
198 ir_edgeset_destroy(&info->edges);
199 obstack_free(&info->edges_obst, NULL);
201 obstack_init(&info->edges_obst);
202 ir_edgeset_init_size(&info->edges, amount);
208 * Get the edge object of an outgoing edge at a node.
209 * @param irg The graph, the node is in.
210 * @param src The node at which the edge originates.
211 * @param pos The position of the edge.
212 * @return The corresponding edge object or NULL,
213 * if no such edge exists.
215 const ir_edge_t *get_irn_edge_kind(ir_graph *irg, const ir_node *src, int pos, ir_edge_kind_t kind)
217 if (edges_activated_kind(irg, kind)) {
218 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
221 key.src = (ir_node *)src;
224 return ir_edgeset_find(&info->edges, &key);
231 * Get the edge object of an outgoing edge at a node.
232 * Looks for an edge for all kinds.
234 const ir_edge_t *get_irn_edge(ir_graph *irg, const ir_node *src, int pos) {
235 const ir_edge_t *edge;
236 if((edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_NORMAL)) == NULL)
237 edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_BLOCK);
242 * Change the out count
244 * @param tgt the edge target
245 * @param kind the kind of the edge
247 static INLINE void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs) {
248 irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
249 info->out_count += ofs;
252 assert(info->out_count >= 0);
253 if (info->out_count == 0 && kind == EDGE_KIND_NORMAL) {
254 /* tgt lost it's last user */
257 for (i = get_irn_arity(tgt) - 1; i >= -1; --i) {
258 ir_node *prev = get_irn_n(tgt, i);
260 edges_notify_edge(tgt, i, NULL, prev, current_ir_graph);
262 for (i = get_irn_deps(tgt) - 1; i >= 0; --i) {
263 ir_node *prev = get_irn_dep(tgt, i);
265 edges_notify_edge_kind(tgt, i, NULL, prev, EDGE_KIND_DEP, current_ir_graph);
273 * Verify the edge list of a node, ie. ensure it's a loop:
274 * head -> e_1 -> ... -> e_n -> head
276 static INLINE void vrfy_list_head(ir_node *irn, ir_edge_kind_t kind) {
279 pset *lh_set = pset_new_ptr(16);
280 const struct list_head *head = _get_irn_outs_head(irn, kind);
281 const struct list_head *pos;
283 list_for_each(pos, head) {
284 if (pset_find_ptr(lh_set, pos)) {
285 const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
287 ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
288 fprintf(stderr, "- at list entry %d\n", num);
290 fprintf(stderr, "- edge(%ld) is invalid\n", edge_get_id(edge));
292 ir_fprintf(stderr, "- edge(%ld) %+F(%d)\n", edge_get_id(edge), edge->src, edge->pos);
297 pset_insert_ptr(lh_set, pos);
305 /* The edge from (src, pos) -> old_tgt is redirected to tgt */
306 void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
307 ir_node *old_tgt, ir_edge_kind_t kind,
310 const char *msg = "";
311 irg_edge_info_t *info;
316 assert(edges_activated_kind(irg, kind));
319 * Only do something, if the old and new target differ.
324 info = _get_irg_edge_info(irg, kind);
325 edges = &info->edges;
327 /* Initialize the edge template to search in the set. */
332 * If the target is NULL, the edge shall be deleted.
335 /* search the edge in the set. */
336 edge = ir_edgeset_find(edges, &templ);
338 /* mark the edge invalid if it was found */
341 list_del(&edge->list);
347 #endif /* DEBUG_libfirm */
348 edge_change_cnt(old_tgt, kind, -1);
351 /* If the edge was not found issue a warning on the debug stream */
353 msg = "edge to delete not found!\n";
358 * The target is not NULL and the old target differs
359 * from the new target, the edge shall be moved (if the
360 * old target was != NULL) or added (if the old target was
364 struct list_head *head = _get_irn_outs_head(tgt, kind);
366 assert(head->next && head->prev &&
367 "target list head must have been initialized");
369 /* If the old target is not null, the edge is moved. */
371 edge = ir_edgeset_find(edges, &templ);
372 assert(edge && "edge to redirect not found!");
373 assert(! edge->invalid && "Invalid edge encountered");
377 list_move(&edge->list, head);
378 edge_change_cnt(old_tgt, kind, -1);
381 /* The old target was null, thus, the edge is newly created. */
385 = obstack_alloc(&info->edges_obst, EDGE_SIZE);
386 memset(edge, 0, EDGE_SIZE);
390 DEBUG_ONLY(edge->src_nr = get_irn_node_nr(src));
392 new_edge = ir_edgeset_insert(edges, edge);
393 if(new_edge != edge) {
394 obstack_free(&info->edges_obst, edge);
397 assert(! edge->invalid && "Freshly inserted edge is invalid?!?");
398 assert(edge->list.next == NULL && edge->list.prev == NULL &&
399 "New edge must not have list head initialized");
402 list_add(&edge->list, head);
404 edge->edge_nr = ++last_edge_num;
405 #endif /* DEBUG_libfirm */
408 edge_change_cnt(tgt, kind, +1);
411 #ifndef DEBUG_libfirm
412 /* verify list heads */
415 vrfy_list_head(tgt, kind);
417 vrfy_list_head(old_tgt, kind);
421 DBG((dbg, LEVEL_5, "announce out edge: %+F %d-> %+F(%+F): %s\n", src, pos, tgt, old_tgt, msg));
424 void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt, ir_graph *irg)
426 if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
427 edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);
430 if (edges_activated_kind(irg, EDGE_KIND_BLOCK) && is_Block(src)) {
432 /* a MacroBlock edge: ignore it here */
434 ir_node *bl_old = old_tgt ? get_nodes_block(skip_Proj(old_tgt)) : NULL;
435 ir_node *bl_tgt = NULL;
438 bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(skip_Proj(tgt));
440 edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
446 * Delete all in edges of a given kind from the node old.
448 * @param old the node
449 * @param kind the kind of edges to remove
450 * @param irg the irg of the old node
452 static void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind, ir_graph *irg)
456 if (!edges_activated_kind(irg, kind))
459 DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));
461 foreach_tgt(old, i, n, kind) {
462 ir_node *old_tgt = get_n(old, i, kind);
463 edges_notify_edge_kind(old, i, NULL, old_tgt, kind, irg);
467 struct build_walker {
471 unsigned problem_found;
475 * Post-Walker: notify all edges
477 static void build_edges_walker(ir_node *irn, void *data) {
478 struct build_walker *w = data;
480 ir_edge_kind_t kind = w->kind;
481 ir_graph *irg = w->irg;
482 get_edge_src_n_func_t *get_n;
484 get_n = edge_kind_info[kind].get_n;
485 foreach_tgt(irn, i, n, kind) {
486 ir_node *pred = get_n(irn, i, kind);
487 edges_notify_edge_kind(irn, i, pred, NULL, kind, irg);
492 * Pre-Walker: initializes the list-heads and set the out-count
495 static void init_lh_walker(ir_node *irn, void *data) {
496 struct build_walker *w = data;
497 ir_edge_kind_t kind = w->kind;
498 list_head *head = _get_irn_outs_head(irn, kind);
499 INIT_LIST_HEAD(head);
500 _get_irn_edge_info(irn, kind)->out_count = 0;
504 * Pre-Walker: initializes the list-heads and set the out-count
507 * Additionally touches DEP nodes, as they might be DEAD.
508 * THIS IS UGLY, but I don't find a better way until we
510 * a) ensure that dead nodes are not used as input
511 * b) it might be sufficient to add those stupid NO_REG nodes
514 static void init_lh_walker_dep(ir_node *irn, void *data) {
515 struct build_walker *w = data;
516 ir_edge_kind_t kind = w->kind;
517 list_head *head = _get_irn_outs_head(irn, kind);
520 INIT_LIST_HEAD(head);
521 _get_irn_edge_info(irn, kind)->out_count = 0;
523 for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
524 ir_node *dep = get_irn_dep(irn, i);
526 head = _get_irn_outs_head(dep, kind);
528 INIT_LIST_HEAD(head);
529 _get_irn_edge_info(dep, kind)->out_count = 0;
533 typedef struct visitor_info_t {
534 irg_walk_func *visit;
539 * Visitor: initializes the list-heads and set the out-count
540 * of all nodes to 0 of nodes that are not seen so far.
542 static void visitor(ir_node *irn, void *data) {
543 visitor_info_t *info = data;
545 if (!irn_visited(irn)) {
546 mark_irn_visited(irn);
547 info->visit(irn, info->data);
552 * Build the initial edge set.
553 * Beware, this is not a simple task because it suffers from two
555 * - the anchor set allows access to Nodes that may not be reachable from
557 * - the identities add nodes to the "root set" that are not yet reachable
558 * from End. However, after some transformations, the CSE may revival these
561 * These problems can be fixed using different strategies:
562 * - Add an age flag to every node. Whenever the edge of a node is older
563 * then the current edge, invalidate the edges of this node.
564 * While this would help for revivaled nodes, it increases memory and runtime.
565 * - Delete the identities set.
566 * Solves the revival problem, but may increase the memory consumption, as
567 * nodes cannot be revivaled at all.
568 * - Manually iterate over the identities root set. This did not consume more memory
569 * but increase the computation time because the |identities| >= |V|
571 * Currently, we use the last option.
573 void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
575 struct build_walker w;
576 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
577 visitor_info_t visit;
585 edges_init_graph_kind(irg, kind);
586 if (kind == EDGE_KIND_DEP) {
587 irg_walk_anchors(irg, init_lh_walker_dep, NULL, &w);
588 /* Argh: Dep nodes might be dead, so we MUST visit identities first */
589 visit.visit = init_lh_walker_dep;
590 visit_all_identities(irg, visitor, &visit);
591 irg_walk_anchors(irg, NULL, build_edges_walker, &w);
593 irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
594 visit.visit = init_lh_walker;
595 visit_all_identities(irg, visitor, &visit);
599 void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
601 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
604 if (info->allocated) {
605 obstack_free(&info->edges_obst, NULL);
606 ir_edgeset_destroy(&info->edges);
611 int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
613 return _edges_activated_kind(irg, kind);
618 * Reroute all use-edges from a node to another.
619 * @param from The node whose use-edges shall be withdrawn.
620 * @param to The node to which all the use-edges of @p from shall be
622 * @param irg The graph.
624 void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind, ir_graph *irg)
626 set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;
628 if(set_edge && edges_activated_kind(irg, kind)) {
629 struct list_head *head = _get_irn_outs_head(from, kind);
631 DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));
633 while (head != head->next) {
634 ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
635 assert(edge->pos >= -1);
636 set_edge(edge->src, edge->pos, to);
641 static void verify_set_presence(ir_node *irn, void *data)
643 struct build_walker *w = data;
644 ir_edgeset_t *edges = &_get_irg_edge_info(w->irg, w->kind)->edges;
647 foreach_tgt(irn, i, n, w->kind) {
653 e = ir_edgeset_find(edges, &templ);
657 w->problem_found = 1;
659 ir_fprintf(stderr, "Edge Verifier: edge %+F,%d -> %+F (kind: \"%s\") is missing\n",
660 irn, i, get_n(irn, i, w->kind), get_kind_str(w->kind));
666 static void verify_list_presence(ir_node *irn, void *data)
668 struct build_walker *w = data;
671 bitset_set(w->reachable, get_irn_idx(irn));
673 /* check list heads */
674 vrfy_list_head(irn, w->kind);
676 foreach_out_edge_kind(irn, e, w->kind) {
679 if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
680 w->problem_found = 1;
682 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F -> %+F recorded at src position %d, but src has arity %d\n",
683 edge_get_id(e), e->src, irn, e->pos, get_irn_arity(e->src));
688 tgt = get_n(e->src, e->pos, w->kind);
691 w->problem_found = 1;
693 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d (kind \"%s\") is no out edge of %+F but of %+F\n",
694 edge_get_id(e), e->src, e->pos, get_kind_str(w->kind), irn, tgt);
700 int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
702 struct build_walker w;
703 ir_edgeset_t *edges = &_get_irg_edge_info(irg, kind)->edges;
705 ir_edgeset_iterator_t iter;
709 w.reachable = bitset_alloca(get_irg_last_idx(irg));
712 /* Clear the present bit in all edges available. */
713 foreach_ir_edgeset(edges, e, iter) {
717 irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);
720 * Dump all edges which are not invalid and not present.
721 * These edges are superfluous and their presence in the
724 foreach_ir_edgeset(edges, e, iter) {
725 if (! e->invalid && ! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {
727 ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d is superfluous\n", edge_get_id(e), e->src, e->pos);
731 return w.problem_found;
734 #define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))
737 * Clear link field of all nodes.
739 static void clear_links(ir_node *irn, void *env) {
740 struct build_walker *w = env;
743 if (IGNORE_NODE(irn)) {
744 set_irn_link(irn, NULL);
748 bs = bitset_malloc(get_irg_last_idx(w->irg));
749 set_irn_link(irn, bs);
753 * Increases count (stored in link field) for all operands of a node.
755 static void count_user(ir_node *irn, void *env) {
761 for (i = get_irn_arity(irn) - 1; i >= first; --i) {
762 ir_node *op = get_irn_n(irn, i);
763 bitset_t *bs = get_irn_link(op);
766 bitset_set(bs, get_irn_idx(irn));
771 * Verifies if collected count, number of edges in list and stored edge count are in sync.
773 static void verify_edge_counter(ir_node *irn, void *env) {
774 struct build_walker *w = env;
780 const struct list_head *head;
781 const struct list_head *pos;
783 if (IGNORE_NODE(irn))
786 bs = get_irn_link(irn);
789 edge_cnt = _get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
790 head = _get_irn_outs_head(irn, EDGE_KIND_NORMAL);
792 /* We can iterate safely here, list heads have already been verified. */
793 list_for_each(pos, head) {
797 /* check all nodes that reference us and count edges that point number
798 * of ins that actually point to us */
800 bitset_foreach(bs, idx) {
802 ir_node *src = get_idx_irn(w->irg, idx);
804 arity = get_irn_arity(src);
805 for (i = 0; i < arity; ++i) {
806 ir_node *in = get_irn_n(src, i);
812 if (edge_cnt != list_cnt) {
813 w->problem_found = 1;
814 ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
815 edge_cnt, list_cnt, irn);
818 if (ref_cnt != list_cnt) {
819 w->problem_found = 1;
820 ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but the list contains %d edge(s)\n",
821 irn, ref_cnt, list_cnt);
823 /* Matze: buggy if a node has multiple ins pointing at irn */
825 list_for_each(pos, head) {
826 ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
827 bitset_flip(bs, get_irn_idx(edge->src));
830 if (ref_cnt < list_cnt)
831 fprintf(stderr," following nodes are recorded in list, but not as user:\n");
833 fprintf(stderr," following nodes are user, but not recorded in list:\n");
836 bitset_foreach(bs, idx) {
837 ir_node *src = get_idx_irn(w->irg, idx);
838 ir_fprintf(stderr, " %+F", src);
840 fprintf(stderr, "\n");
848 * Verifies the out edges of an irg.
850 int edges_verify(ir_graph *irg) {
851 struct build_walker w;
852 int problem_found = 0;
854 /* verify normal edges only */
855 problem_found = edges_verify_kind(irg, EDGE_KIND_NORMAL);
858 w.kind = EDGE_KIND_NORMAL;
862 irg_walk_anchors(irg, clear_links, count_user, &w);
863 irg_walk_anchors(irg, NULL, verify_edge_counter, &w);
865 return problem_found ? 1 : w.problem_found;
868 void init_edges(void) {
869 FIRM_DBG_REGISTER(dbg, DBG_EDGES);
870 /* firm_dbg_set_mask(dbg, -1); */
873 void edges_init_dbg(int do_dbg) {
877 void edges_activate(ir_graph *irg) {
878 edges_activate_kind(irg, EDGE_KIND_NORMAL);
879 edges_activate_kind(irg, EDGE_KIND_BLOCK);
880 if (get_irg_phase_state(irg) == phase_backend)
881 edges_activate_kind(irg, EDGE_KIND_DEP);
884 void edges_deactivate(ir_graph *irg) {
885 if (get_irg_phase_state(irg) == phase_backend)
886 edges_deactivate_kind(irg, EDGE_KIND_DEP);
887 edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
888 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
891 int edges_assure(ir_graph *irg) {
892 int activated = edges_activated(irg);
900 int edges_assure_kind(ir_graph *irg, ir_edge_kind_t kind) {
901 int activated = edges_activated_kind(irg, kind);
904 edges_activate_kind(irg, kind);
909 void edges_node_deleted(ir_node *irn, ir_graph *irg) {
910 edges_node_deleted_kind(irn, EDGE_KIND_NORMAL, irg);
911 edges_node_deleted_kind(irn, EDGE_KIND_BLOCK, irg);
915 const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind) {
916 return _get_irn_out_edge_first_kind(irn, kind);
919 const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last) {
920 return _get_irn_out_edge_next(irn, last);
923 ir_node *(get_edge_src_irn)(const ir_edge_t *edge) {
924 return _get_edge_src_irn(edge);
927 int (get_edge_src_pos)(const ir_edge_t *edge) {
928 return _get_edge_src_pos(edge);
931 int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind) {
932 return _get_irn_n_edges_kind(irn, kind);
935 void dump_all_out_edges(ir_node *irn) {
937 for (i = 0; i < EDGE_KIND_LAST; ++i) {
938 const ir_edge_t *edge;
940 printf("kind \"%s\"\n", get_kind_str(i));
941 foreach_out_edge_kind(irn, edge, i) {
942 ir_printf("\t%+F(%d)\n", edge->src, edge->pos);
947 static void irg_block_edges_walk2(ir_node *bl,
948 irg_walk_func *pre, irg_walk_func *post,
950 const ir_edge_t *edge, *next;
952 if (!Block_block_visited(bl)) {
953 mark_Block_block_visited(bl);
958 foreach_out_edge_kind_safe(bl, edge, next, EDGE_KIND_BLOCK) {
959 /* find the corresponding successor block. */
960 ir_node *pred = get_edge_src_irn(edge);
961 irg_block_edges_walk2(pred, pre, post, env);
969 void irg_block_edges_walk(ir_node *node,
970 irg_walk_func *pre, irg_walk_func *post,
973 assert(edges_activated(current_ir_graph));
974 assert(is_Block(node));
976 ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);
978 inc_irg_block_visited(current_ir_graph);
979 irg_block_edges_walk2(node, pre, post, env);
981 ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_VISITED);