3 * File name: ir/ir/iredges.c
4 * Purpose: Always available outs.
5 * Author: Sebastian Hack
6 * Modified by: Michael Beck, Andreas Schoesser
9 * Copyright: (c) 1998-2006 Universität Karlsruhe
10 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
14 * Always available outs.
15 * @author Sebastian Hack
32 #include "iredgekinds.h"
33 #include "iredges_t.h"
43 * A function that allows for setting an edge.
44 * This abstraction is necessary since different edge kind have
45 * different methods of setting edges.
47 typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);
49 typedef int (get_edge_src_arity_func_t)(const ir_node *src);
51 typedef int (get_edge_src_first_func_t)(const ir_node *src);
53 typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);
56 * Additional data for an edge kind.
60 set_edge_func_t *set_edge;
61 get_edge_src_first_func_t *get_first;
62 get_edge_src_arity_func_t *get_arity;
63 get_edge_src_n_func_t *get_n;
64 } ir_edge_kind_info_t;
/* get_first callback for edge kinds whose source inputs start at index 0
 * (block successors, dependencies -- see edge_kind_info below).
 * Body not visible in this view; presumably "return 0;" -- TODO confirm. */
66 static int get_zero(const ir_node *irn)
/* get_first callback for normal edges: the first input index is -1 for
 * non-Block nodes (index -1 is the node's block input) and 0 for Blocks. */
71 static int get_irn_first(const ir_node *irn)
/* 0 - !is_Block(irn) evaluates to -1 for non-Blocks, 0 for Blocks. */
73 return 0 - !is_Block(irn);
/* get_n callback for block-successor edges: for a Block, return the
 * predecessor block at position pos; non-Blocks have none, return NULL. */
76 static ir_node *get_block_n(const ir_node *irn, int pos)
78 return is_Block(irn) ? get_Block_cfgpred_block((ir_node *) irn, pos) : 0;
/* Per-kind callback table, indexed by ir_edge_kind_t: a printable name
 * (see get_kind_str), the edge setter, and the first/arity/n-th accessors.
 * Block-successor edges have no setter: they cannot be redirected directly
 * (NULL set_edge is checked in edges_reroute_kind). */
81 static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
82 { "normal" , set_irn_n, get_irn_first, get_irn_arity, get_irn_n },
83 { "block succs", NULL, get_zero, get_irn_arity, get_block_n },
84 { "dependency", set_irn_dep, get_zero, get_irn_deps, get_irn_dep }
87 #define foreach_tgt(irn, i, n, kind) for(i = edge_kind_info[kind].get_first(irn), n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
88 #define get_n(irn, pos, kind) (edge_kind_info[kind].get_n(irn, pos))
89 #define get_kind_str(kind) (edge_kind_info[kind].name)
91 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
94 * This flag is set to 1, if the edges get initialized for an irg.
95 * Then register additional data is forbidden.
97 static int edges_used = 0;
100 * Summed size of all users private data
103 static int edges_private_size = 0;
104 #define EDGE_SIZE (sizeof(ir_edge_t) + edges_private_size)
107 * If set to 1, the list heads are checked every time an edge is changed.
109 static int edges_dbg = 0;
113 * Announce to reserve extra space for each edge to be allocated.
114 * @param n Size of the space to reserve.
115 * @return Offset at which the private data will begin.
116 * Several users can reserve extra space for private usage.
117 * Each user has to remember his given offset and the size of his private data.
118 * To be called before FIRM is initialized.
/* Reserve n extra bytes behind every edge object for the caller's private
 * use. Returns the offset of the caller's area (the previous total size);
 * must be called before any graph activates edges (see edges_used assert). */
121 int edges_register_private_data(size_t n)
123 int res = edges_private_size;
125 assert(!edges_used && "you cannot register private edge data, if edges have been initialized");
127 edges_private_size += n;
132 * Reset the user's private data at offset 'offset'
133 * The user has to remember his offset and the size of his data!
134 * Caution: Using wrong values here can destroy other users private data!
137 void edges_reset_private_data(ir_graph *irg, int offset, size_t size)
139 irg_edge_info_t *info = _get_irg_edge_info(irg, EDGE_KIND_NORMAL);
142 foreach_set(info->edges, edge)
144 memset(edge + sizeof(*edge) + offset, 0, size);
148 #define TIMES37(x) (((x) << 5) + ((x) << 2) + (x))
150 #define get_irn_out_list_head(irn) (&get_irn_out_info(irn)->outs)
/* Hash-set compare callback for edges: two edges are equal iff they have
 * the same source node and position (only (src, pos) identify an edge).
 * NOTE(review): set compare callbacks return 0 for "equal"; the final
 * "return !res;" presumably follows -- not visible in this view. */
152 static int edge_cmp(const void *p1, const void *p2, size_t len)
154 const ir_edge_t *e1 = p1;
155 const ir_edge_t *e2 = p2;
156 int res = e1->src == e2->src && e1->pos == e2->pos;
161 #define edge_hash(edge) (TIMES37((edge)->pos) + HASH_PTR((edge)->src))
164 * Initialize the out information for a graph.
165 * @note Dead node elimination can call this on an already initialized graph.
/* (Re)create the edge set of a graph for one edge kind, if edges of that
 * kind are active. The old set's element count is reused as the initial
 * size hint for the new set. Safe to call on an already initialized graph
 * (dead node elimination does so -- see comment above). */
167 void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
169 if(edges_activated_kind(irg, kind)) {
170 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
175 amount = set_count(info->edges);
176 del_set(info->edges);
178 info->edges = new_set(edge_cmp, amount);
183 * Get the edge object of an outgoing edge at a node.
184 * @param irg The graph, the node is in.
185 * @param src The node at which the edge originates.
186 * @param pos The position of the edge.
187 * @return The corresponding edge object or NULL,
188 * if no such edge exists.
/* Look up the edge object for the outgoing edge (src, pos) of the given
 * kind. Returns NULL if edges of that kind are not activated or no such
 * edge exists.
 * NOTE(review): EDGE_SIZE includes registered private data while the
 * search key is a plain local ir_edge_t -- relies on the set's find only
 * touching key bytes via edge_cmp (src/pos); verify against the set impl. */
190 const ir_edge_t *get_irn_edge_kind(ir_graph *irg, const ir_node *src, int pos, ir_edge_kind_t kind)
192 if (edges_activated_kind(irg, kind)) {
193 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
196 key.src = (ir_node *)src;
198 return set_find(info->edges, &key, EDGE_SIZE, edge_hash(&key));
205 * Get the edge object of an outgoing edge at a node.
206 * Looks for an edge for all kinds.
/* Look up the edge (src, pos) across kinds: try normal edges first and
 * fall back to block-successor edges if no normal edge was found. */
209 const ir_edge_t *get_irn_edge(ir_graph *irg, const ir_node *src, int pos)
211 const ir_edge_t *edge;
212 if((edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_NORMAL)) == NULL)
213 edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_BLOCK);
218 * Change the out count
/* Adjust the cached out-edge count of tgt for the given kind by ofs
 * (+1 when an edge is added, -1 when one is removed). */
220 static INLINE void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs) {
221 irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
222 info->out_count += ofs;
226 * Verify the edge list of a node, ie. ensure it's a loop:
227 * head -> e_1 -> ... -> e_n -> head
/* Debug helper: verify that the out-edge list of irn is a proper cycle
 * head -> e_1 -> ... -> e_n -> head. A pset records every visited list
 * entry; revisiting one means the list loops back somewhere other than
 * the head, which is reported on stderr. */
229 static INLINE void vrfy_list_head(ir_node *irn, ir_edge_kind_t kind) {
232 pset *lh_set = pset_new_ptr(16);
233 const struct list_head *head = _get_irn_outs_head(irn, kind);
234 const struct list_head *pos;
236 list_for_each(pos, head) {
237 if (pset_find_ptr(lh_set, pos)) {
238 const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
240 ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
241 fprintf(stderr, "- at list entry %d\n", num);
243 fprintf(stderr, "- edge is invalid\n");
245 ir_fprintf(stderr, "- edge %+F(%d)\n", edge->src, edge->pos);
/* first visit: remember this entry so a second visit is detected */
250 pset_insert_ptr(lh_set, pos);
258 /* The edge from (src, pos) -> old_tgt is redirected to tgt */
/**
 * Announce that the edge at (src, pos) of the given kind now points to tgt
 * instead of old_tgt. Three cases:
 *  - tgt == NULL: the edge is deleted (marked invalid, unlinked, counts
 *    decremented);
 *  - old_tgt != NULL: the existing edge object is moved to tgt's out list;
 *  - old_tgt == NULL: a fresh edge object is inserted into the set and
 *    added to tgt's out list.
 * If tgt == old_tgt nothing is done. Requires edges of this kind to be
 * activated for irg.
 */
259 void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt, ir_edge_kind_t kind, ir_graph *irg)
261 const char *msg = "";
263 assert(edges_activated_kind(irg, kind));
266 * Only do something, if the old and new target differ.
269 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
270 set *edges = info->edges;
/* template edge allocated on the stack including the private area,
 * since EDGE_SIZE may exceed sizeof(ir_edge_t) */
271 ir_edge_t *templ = alloca(EDGE_SIZE);
274 /* Initialize the edge template to search in the set. */
275 memset(templ, 0, EDGE_SIZE);
281 DEBUG_ONLY(templ->src_nr = get_irn_node_nr(src));
284 * If the target is NULL, the edge shall be deleted.
287 /* search the edge in the set. */
288 edge = set_find(edges, templ, EDGE_SIZE, edge_hash(templ));
290 /* mark the edge invalid if it was found */
293 list_del(&edge->list);
297 edge_change_cnt(old_tgt, kind, -1);
300 /* If the edge was not found issue a warning on the debug stream */
302 msg = "edge to delete not found!\n";
307 * The target is not NULL and the old target differs
308 * from the new target, the edge shall be moved (if the
309 * old target was != NULL) or added (if the old target was
313 struct list_head *head = _get_irn_outs_head(tgt, kind);
315 assert(head->next && head->prev &&
316 "target list head must have been initialized");
318 /* If the old target is not null, the edge is moved. */
320 edge = set_find(edges, templ, EDGE_SIZE, edge_hash(templ));
321 assert(edge && "edge to redirect not found!");
322 assert(! edge->invalid && "Invalid edge encountered");
326 list_move(&edge->list, head);
327 edge_change_cnt(old_tgt, kind, -1);
330 /* The old target was null, thus, the edge is newly created. */
332 edge = set_insert(edges, templ, EDGE_SIZE, edge_hash(templ));
334 assert(! edge->invalid && "Freshly inserted edge is invalid?!?");
335 assert(edge->list.next == NULL && edge->list.prev == NULL &&
336 "New edge must not have list head initialized");
339 list_add(&edge->list, head);
/* in both the move and insert case the new target gained an edge */
342 edge_change_cnt(tgt, kind, +1);
345 /* verify list heads */
348 vrfy_list_head(tgt, kind);
350 vrfy_list_head(old_tgt, kind);
354 /* If the target and the old target are equal, nothing is done. */
355 DBG((dbg, LEVEL_5, "announce out edge: %+F %d-> %+F(%+F): %s\n", src, pos, tgt, old_tgt, msg));
/* Announce an edge change for all active kinds: forward to the normal
 * edge kind, and -- when src is a Block and block edges are active --
 * derive and forward the corresponding block-successor edge. Block
 * targets are taken via get_irn_n(.., -1) rather than get_nodes_block
 * (see comment below); a Bad target is passed through unchanged. */
358 void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt, ir_graph *irg)
360 if(edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
361 edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);
364 if (edges_activated_kind(irg, EDGE_KIND_BLOCK) && is_Block(src)) {
365 /* do not use get_nodes_block() here, it fails when running unpinned */
366 ir_node *bl_old = old_tgt ? get_irn_n(skip_Proj(old_tgt), -1) : NULL;
367 ir_node *bl_tgt = NULL;
370 bl_tgt = is_Bad(tgt) ? tgt : get_irn_n(skip_Proj(tgt), -1);
372 edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
/* A node was deleted: remove every outgoing edge of the given kind by
 * announcing each (old, i) -> old_tgt edge with a NULL new target.
 * No-op if edges of this kind are not active. */
377 void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind, ir_graph *irg)
381 if(!edges_activated_kind(irg, kind))
384 DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));
386 foreach_tgt(old, i, n, kind) {
387 ir_node *old_tgt = get_n(old, i, kind);
388 edges_notify_edge_kind(old, i, NULL, old_tgt, kind, irg);
/* Walker state shared by the build and verify walkers below.
 * NOTE(review): further members (at least irg, kind, reachable -- used by
 * the walkers below) are not visible in this view; confirm in full file. */
392 struct build_walker {
/* set to 1 by the verifiers as soon as any inconsistency is reported */
396 unsigned problem_found;
400 * Post-Walker: notify all edges
/* Post-walker: announce every outgoing edge of irn (as newly created,
 * old target NULL) for the walker's edge kind. No-op if that kind is
 * not active on the walker's graph. */
402 static void build_edges_walker(ir_node *irn, void *data) {
403 struct build_walker *w = data;
406 if (! edges_activated_kind(w->irg, w->kind))
409 foreach_tgt(irn, i, n, w->kind)
410 edges_notify_edge_kind(irn, i, get_n(irn, i, w->kind), NULL, w->kind, w->irg);
414 * Pre-Walker: initializes the list-heads and set the out-count
/* Pre-walker: initialize irn's out-edge list head and reset its cached
 * out-count to 0 for the walker's edge kind. */
417 static void init_lh_walker(ir_node *irn, void *data) {
418 struct build_walker *w = data;
419 INIT_LIST_HEAD(_get_irn_outs_head(irn, w->kind));
420 _get_irn_edge_info(irn, w->kind)->out_count = 0;
424 * Visitor: initializes the list-heads and set the out-count
425 * of all nodes to 0 of nodes that are not seen so far.
/* Identities-set visitor: initialize list head and out-count only for
 * nodes not yet seen by the anchor walk (visited flag guards against
 * re-initializing nodes the irg walk already handled). */
427 static void visitor(ir_node *irn, void *data) {
428 if (irn_not_visited(irn)) {
429 mark_irn_visited(irn);
430 init_lh_walker(irn, data);
435 * Build the initial edge set.
436 * Beware, this is not a simple task because it suffers from two
438 * - the anchor set allows access to Nodes that may not be reachable from
440 * - the identities add nodes to the "root set" that are not yet reachable
441 * from End. However, after some transformations, the CSE may revive these
444 * These problems can be fixed using different strategies:
445 * - Add an age flag to every node. Whenever the edge of a node is older
446 * then the current edge, invalidate the edges of this node.
447 * While this would help for revived nodes, it increases memory and runtime.
448 * - Delete the identities set.
449 * Solves the revival problem, but may increase the memory consumption, as
450 * nodes cannot be revived at all.
451 * - Manually iterate over the identities root set. This does not consume more memory
452 * but increases the computation time because |identities| >= |V|.
454 * Currently, we use the last option.
/* Activate edges of one kind for a graph and build the initial edge set
 * (see strategy discussion above): initialize the per-graph set, walk
 * from the anchors to init list heads and announce all edges, then visit
 * the identities set to initialize nodes not reachable from the anchors. */
456 void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
458 struct build_walker w;
459 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
465 edges_init_graph_kind(irg, kind);
466 //irg_walk_graph(irg, init_lh_walker, build_edges_walker, &w); /* superseded by the anchor walk below */
467 inc_irg_visited(irg);
468 irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
469 visit_all_identities(irg, visitor, &w);
/* Deactivate edges of one kind for a graph and free its edge set. */
472 void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
474 irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
478 del_set(info->edges);
/* Out-of-line version of the inline _edges_activated_kind(); the
 * parenthesized name prevents a same-named macro from expanding here. */
483 int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
485 return _edges_activated_kind(irg, kind);
490 * Reroute all use-edges from a node to another.
491 * @param from The node whose use-edges shall be withdrawn.
492 * @param to The node to which all the use-edges of @p from shall be
494 * @param irg The graph.
/* Reroute all in-edges of 'from' (of the given kind) to point to 'to':
 * repeatedly rewrite the first edge on from's out list via the kind's
 * set_edge callback until the list is empty (each set_edge call moves
 * that edge off from's list via edges_notify_edge). Only possible for
 * kinds that have a setter (block succs have none, see edge_kind_info). */
496 void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind, ir_graph *irg)
498 set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;
500 if(set_edge && edges_activated_kind(irg, kind)) {
501 struct list_head *head = _get_irn_outs_head(from, kind);
503 DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));
505 while(head != head->next) {
506 ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
/* pos == -1 is the block input; anything smaller is corrupt */
507 assert(edge->pos >= -1);
508 set_edge(edge->src, edge->pos, to);
/* Verifier pre-walker: for every outgoing edge of irn, check that a
 * matching edge object exists in the graph's edge set; report missing
 * edges on stderr and flag the walker's problem_found. */
513 static void verify_set_presence(ir_node *irn, void *data)
515 struct build_walker *w = data;
516 set *edges = _get_irg_edge_info(w->irg, w->kind)->edges;
519 foreach_tgt(irn, i, n, w->kind) {
525 e = set_find(edges, &templ, EDGE_SIZE, edge_hash(&templ));
529 w->problem_found = 1;
530 ir_fprintf(stderr, "Edge Verifier: edge %+F,%d (kind: \"%s\") is missing\n", irn, i, get_kind_str(w->kind));
/* Verifier post-walker: record irn as reachable, verify its out-list
 * head, then check every listed edge really is an edge of irn -- its
 * position must be within the source's arity and the source's input at
 * that position must be irn itself. Problems are reported on stderr. */
535 static void verify_list_presence(ir_node *irn, void *data)
537 struct build_walker *w = data;
540 bitset_set(w->reachable, get_irn_idx(irn));
542 /* check list heads */
543 vrfy_list_head(irn, w->kind);
545 foreach_out_edge_kind(irn, e, w->kind) {
548 if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
549 w->problem_found = 1;
550 ir_fprintf(stderr, "Edge Verifier: edge %+F -> %+F recorded at src position %d, but src has arity %d\n",
551 e->src, irn, e->pos, get_irn_arity(e->src));
555 tgt = get_n(e->src, e->pos, w->kind);
558 w->problem_found = 1;
559 ir_fprintf(stderr, "Edge Verifier: edge %+F,%d (kind \"%s\") is no out edge of %+F but of %+F\n",
560 e->src, e->pos, get_kind_str(w->kind), irn, tgt);
/* Verify the edge information of one kind for a graph. First clear the
 * 'present' bit of all edges in the set, then walk the graph checking
 * set and list consistency, and finally report edges that are neither
 * invalid nor present but originate at a reachable node -- those are
 * superfluous leftovers. Returns nonzero if any problem was found. */
565 int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
567 struct build_walker w;
568 set *edges = _get_irg_edge_info(irg, kind)->edges;
573 w.reachable = bitset_alloca(get_irg_last_idx(irg));
576 /* Clear the present bit in all edges available. */
577 for (e = set_first(edges); e; e = set_next(edges))
580 irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);
583 * Dump all edges which are not invalid and not present.
584 * These edges are superfluous and their presence in the
587 for (e = set_first(edges); e; e = set_next(edges)) {
588 if (! e->invalid && ! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {
590 ir_fprintf(stderr, "Edge Verifier: edge %+F,%d is superfluous\n", e->src, e->pos);
594 return w.problem_found;
597 #define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))
600 * Clear link field of all nodes.
/* Counter pre-walker: reset irn's link field; for non-ignored nodes
 * (not Bad, not Block -- see IGNORE_NODE) attach a fresh bitset sized to
 * the graph's index range, which will collect the indices of irn's users. */
602 static void clear_links(ir_node *irn, void *env) {
603 struct build_walker *w = env;
606 set_irn_link(irn, NULL);
608 if (IGNORE_NODE(irn))
611 bs = bitset_malloc(get_irg_last_idx(w->irg));
612 set_irn_link(irn, bs);
616 * Increases count (stored in link field) for all operands of a node.
/* Counter post-walker: for every operand of irn (from the block input at
 * -1 for non-Blocks up to arity-1), set irn's index in the operand's
 * user bitset (attached to the link by clear_links). */
618 static void count_user(ir_node *irn, void *env) {
622 first = get_irn_first(irn);
623 for (i = get_irn_arity(irn) - 1; i >= first; --i) {
624 ir_node *op = get_irn_n(irn, i);
625 bitset_t *bs = get_irn_link(op);
628 bitset_set(bs, get_irn_idx(irn));
633 * Verifies if collected count, number of edges in list and stored edge count are in sync.
/* Verifier walker: cross-check three counts for irn's normal out edges --
 * the number of distinct users collected in the link bitset (ref_cnt),
 * the cached out_count (edge_cnt), and the actual list length (list_cnt).
 * Mismatches are reported on stderr; for ref/list mismatches the bitset
 * is XORed with the listed sources so the leftover bits name the exact
 * nodes that disagree. Bad and Block nodes are skipped (IGNORE_NODE). */
635 static void verify_edge_counter(ir_node *irn, void *env) {
636 struct build_walker *w = env;
641 const struct list_head *head;
642 const struct list_head *pos;
644 if (IGNORE_NODE(irn))
647 bs = get_irn_link(irn);
649 ref_cnt = bitset_popcnt(bs);
650 edge_cnt = _get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
651 head = _get_irn_outs_head(irn, EDGE_KIND_NORMAL);
653 /* We can iterate safely here, list heads have already been verified. */
654 list_for_each(pos, head) {
655 ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
/* Bad sources are ignored by the bitset too, keep counts comparable */
656 if (! is_Bad(edge->src))
660 if (edge_cnt != list_cnt) {
661 w->problem_found = 1;
662 ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
663 edge_cnt, list_cnt, irn);
666 if (ref_cnt != list_cnt) {
669 w->problem_found = 1;
670 ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but %d edge(s) recorded in list\n",
671 irn, ref_cnt, list_cnt);
/* flip listed sources out of the user bitset: remaining set bits
 * are exactly the disagreeing nodes */
673 list_for_each(pos, head) {
674 ir_edge_t *edge = list_entry(pos, ir_edge_t, list);
675 if (! is_Bad(edge->src))
676 bitset_flip(bs, get_irn_idx(edge->src));
679 if (ref_cnt < list_cnt)
680 fprintf(stderr," following nodes are recorded in list, but not as user:\n");
682 fprintf(stderr," following nodes are user, but not recorded in list:\n");
685 bitset_foreach(bs, idx) {
686 ir_node *src = get_idx_irn(w->irg, idx);
687 ir_fprintf(stderr, " %+F", src);
689 fprintf(stderr, "\n");
696 * Verifies the out edges of an irg.
/* Verify the normal out edges of a graph: first the set/list consistency
 * (edges_verify_kind), then the user counts via two anchor walks
 * (clear_links/count_user collect users, verify_edge_counter compares).
 * Returns nonzero if any problem was found by either phase. */
698 int edges_verify(ir_graph *irg) {
699 struct build_walker w;
700 int problem_found = 0;
705 /* verify normal edges only */
706 problem_found = edges_verify_kind(irg, EDGE_KIND_NORMAL);
709 w.kind = EDGE_KIND_NORMAL;
713 inc_irg_visited(irg);
714 irg_walk_anchors(irg, clear_links, count_user, &w);
715 irg_walk_anchors(irg, NULL, verify_edge_counter, &w);
717 return problem_found ? 1 : w.problem_found;
/* One-time module initialization: register the edge debug channel. */
720 void init_edges(void)
722 FIRM_DBG_REGISTER(dbg, DBG_EDGES);
723 /* firm_dbg_set_mask(dbg, -1); */
/* Switch the expensive per-change list-head verification on or off.
 * Body not visible here; presumably "edges_dbg = do_dbg;" -- TODO confirm. */
726 void edges_init_dbg(int do_dbg) {
/* Activate both the normal and the block-successor edges of a graph. */
730 void edges_activate(ir_graph *irg)
732 edges_activate_kind(irg, EDGE_KIND_NORMAL);
733 edges_activate_kind(irg, EDGE_KIND_BLOCK);
/* Deactivate both the normal and the block-successor edges of a graph. */
736 void edges_deactivate(ir_graph *irg)
738 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
739 edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
742 int edges_assure(ir_graph *irg)
744 int activated = edges_activated(irg);
/* Remove all out edges of a deleted node for both standard edge kinds. */
752 void edges_node_deleted(ir_node *irn, ir_graph *irg)
754 edges_node_deleted_kind(irn, EDGE_KIND_NORMAL, irg);
755 edges_node_deleted_kind(irn, EDGE_KIND_BLOCK, irg);
/* Out-of-line versions of the public edge accessors. Each merely forwards
 * to its inline _ counterpart; the parenthesized function names keep any
 * same-named macros from expanding at the definition. */
759 const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind)
761 return _get_irn_out_edge_first_kind(irn, kind);
764 const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last)
766 return _get_irn_out_edge_next(irn, last);
769 ir_node *(get_edge_src_irn)(const ir_edge_t *edge)
771 return _get_edge_src_irn(edge);
774 int (get_edge_src_pos)(const ir_edge_t *edge)
776 return _get_edge_src_pos(edge);
779 int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind)
781 return _get_irn_n_edges_kind(irn, kind);
/* Debug helper: print every outgoing edge of irn, grouped by edge kind,
 * as "src(pos)" lines to stdout. */
784 void dump_all_out_edges(ir_node *irn)
787 for(i = 0; i < EDGE_KIND_LAST; ++i) {
788 const ir_edge_t *edge;
790 printf("kind \"%s\"\n", get_kind_str(i));
791 foreach_out_edge_kind(irn, edge, i) {
792 ir_printf("\t%+F(%d)\n", edge->src, edge->pos);