2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Cliff Click's Combined Analysis/Optimization
23 * @author Michael Beck
26 * Note that we use the terminology from Click's work here, which is different
27 * in some cases from Firm terminology. Especially, Click's type is a
28 * Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
36 #include "iroptimize.h"
43 #include "irgraph_t.h"
58 /* define this to check that all type translations are monotone */
59 #define VERIFY_MONOTONE
61 typedef struct node_t node_t;
62 typedef struct partition_t partition_t;
63 typedef struct opcode_key_t opcode_key_t;
64 typedef struct listmap_entry_t listmap_entry_t;
66 /** The type of the compute function. */
67 typedef void (*compute_func)(node_t *node);
73 ir_opcode code; /**< The Firm opcode. */
74 ir_mode *mode; /**< The mode of all nodes in the partition. */
76 long proj; /**< For Proj nodes, its proj number */
77 ir_entity *ent; /**< For Sel Nodes, its entity */
82 * An entry in the list_map.
84 struct listmap_entry_t {
85 void *id; /**< The id. */
86 node_t *list; /**< The associated list for this id. */
87 listmap_entry_t *next; /**< Link to the next entry in the map. */
90 /** We must map id's to lists. */
91 typedef struct listmap_t {
92 set *map; /**< Map id's to listmap_entry_t's */
93 listmap_entry_t *values; /**< List of all values in the map. */
97 * A lattice element. Because we handle constants and symbolic constants differently, we
98 * have to use this union.
109 ir_node *node; /**< The IR-node itself. */
110 list_head node_list; /**< Double-linked list of entries. */
111 list_head cprop_list; /**< Double-linked partition.cprop list. */
112 partition_t *part; /**< points to the partition this node belongs to */
113 node_t *next; /**< Next node on local list (partition.touched, fallen). */
114 lattice_elem_t type; /**< The associated lattice element "type". */
115 int max_user_input; /**< Maximum input number of Def-Use edges. */
116 int next_edge; /**< Index of the next Def-Use edge to use. */
117 unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
118 unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
119 unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
123 * A partition containing congruent nodes.
126 list_head entries; /**< The head of partition node list. */
127 list_head cprop; /**< The head of partition.cprop list. */
128 list_head split_list; /**< Double-linked list of entries that must be processed by split_by(). */
129 partition_t *wl_next; /**< Next entry in the work list if any. */
130 partition_t *touched_next; /**< Points to the next partition in the touched set. */
131 partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
132 node_t *touched; /**< The partition.touched set of this partition. */
133 unsigned n_nodes; /**< Number of entries in this partition. */
134 unsigned n_touched; /**< Number of entries in the partition.touched. */
135 int max_arity; /**< Maximum arity of all entries. */
136 int max_user_inputs; /**< Maximum number of user inputs of all entries. */
137 unsigned on_worklist:1; /**< Set, if this partition is in the work list. */
138 unsigned on_touched:1; /**< Set, if this partition is on the touched set. */
139 unsigned on_cprop:1; /**< Set, if this partition is on the cprop list. */
141 partition_t *dbg_next; /**< Link all partitions for debugging */
142 unsigned nr; /**< A unique number for (what-)mapping, >0. */
146 typedef struct environment_t {
147 struct obstack obst; /**< obstack to allocate data structures. */
148 partition_t *worklist; /**< The work list. */
149 partition_t *cprop; /**< The constant propagation list. */
150 partition_t *touched; /**< the touched set. */
151 partition_t *initial; /**< The initial partition. */
152 set *opcode2id_map; /**< The opcodeMode->id map. */
153 pmap *type2id_map; /**< The type->id map. */
154 int end_idx; /**< -1 for local and 0 for global congruences. */
155 int lambda_input; /**< Captured argument for lambda_partition(). */
157 partition_t *dbg_list; /**< List of all partitions. */
161 /** Type of the what function. */
162 typedef void *(*what_func)(const node_t *node, environment_t *env);
164 #define get_irn_node(irn) ((node_t *)get_irn_link(irn))
165 #define set_irn_node(irn, node) set_irn_link(irn, node)
167 /* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
168 #undef tarval_unreachable
169 #define tarval_unreachable tarval_top
172 /** The debug module handle. */
173 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
175 /** Next partition number. */
176 DEBUG_ONLY(static unsigned part_nr = 0);
179 static INLINE lattice_elem_t get_partition_type(const partition_t *X);
182 * Dump partition to output.
/**
 * Debug helper: dump one partition (its number, node count, lattice type
 * and all member nodes) at debug level 2.
 */
184 static void dump_partition(const char *msg, const partition_t *part) {
187 lattice_elem_t type = get_partition_type(part);
189 DB((dbg, LEVEL_2, "%s part%u (%u, %+F) {\n ", msg, part->nr, part->n_nodes, type));
190 list_for_each_entry(node_t, node, &part->entries, node_list) {
/* print a separator before every node except the first */
191 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
194 DB((dbg, LEVEL_2, "\n}\n"));
198 * Dump all partitions.
/**
 * Debug helper: walk the debug-only chain of all partitions (env->dbg_list)
 * and dump each one.
 */
200 static void dump_all_partitions(const environment_t *env) {
201 const partition_t *P;
203 DB((dbg, LEVEL_2, "All partitions\n===============\n"));
204 for (P = env->dbg_list; P != NULL; P = P->dbg_next)
205 dump_partition("", P);
209 #define dump_partition(msg, part)
210 #define dump_all_partitions(env)
213 #if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
215 * Verify that a type transition is monotone
/**
 * Verify that a lattice type transition is monotone, i.e. only moves
 * downwards in the lattice (Top -> constant/reachable -> Bottom).
 * Identical old/new types are always fine; any other transition panics.
 */
217 static void verify_type(const lattice_elem_t old_type, const lattice_elem_t new_type) {
218 if (old_type.tv == new_type.tv) {
222 if (old_type.tv == tarval_top) {
223 /* from Top down-to is always allowed */
226 if (old_type.tv == tarval_reachable) {
/* reachable may never change into anything else */
227 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
229 if (new_type.tv == tarval_bottom || new_type.tv == tarval_reachable) {
/* falling to Bottom (or reaching reachable) is allowed; everything else is not */
233 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
236 #define verify_type(old_type, new_type)
240 * Compare two pointer values of a listmap.
242 static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
243 const listmap_entry_t *e1 = elt;
244 const listmap_entry_t *e2 = key;
247 return e1->id != e2->id;
248 } /* listmap_cmp_ptr */
251 * Initializes a listmap.
253 * @param map the listmap
/**
 * Initializes a listmap: create the backing pointer set with room for 16
 * entries.  NOTE(review): map->values presumably also starts out NULL --
 * confirm against listmap_find(), which relies on it.
 */
255 static void listmap_init(listmap_t *map) {
256 map->map = new_set(listmap_cmp_ptr, 16);
261 * Terminates a listmap.
263 * @param map the listmap
265 static void listmap_term(listmap_t *map) {
270 * Return the associated listmap entry for a given id.
272 * @param map the listmap
273 * @param id the id to search for
275 * @return the associated listmap entry for the given id
/**
 * Get-or-create lookup: set_insert() either returns the entry already
 * stored for this id or inserts the key (carrying this id).  A freshly
 * created entry is recognized by its empty node list and is then linked
 * into map->values so all entries can be iterated later.
 */
277 static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
278 listmap_entry_t key, *entry;
283 entry = set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
285 if (entry->list == NULL) {
286 /* a new entry, put into the list */
287 entry->next = map->values;
294 * Calculate the hash value for an opcode map entry.
296 * @param entry an opcode map entry
298 * @return a hash value for the given opcode map entry
/**
 * Hash an opcode map entry by combining its mode pointer, opcode and the
 * union payload (proj number / entity pointer).
 * NOTE(review): `entry->mode - (ir_mode *)0` converts a pointer to an
 * integer via null-pointer subtraction -- technically undefined behavior,
 * though it works on common ABIs; HASH_PTR would be the cleaner choice.
 */
300 static unsigned opcode_hash(const opcode_key_t *entry) {
301 return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent);
305 * Compare two entries in the opcode map.
/**
 * set-compare for opcode map entries: equal iff opcode, mode and the whole
 * union payload all match.  Returns 0 on equality (set convention).
 */
307 static int cmp_opcode(const void *elt, const void *key, size_t size) {
308 const opcode_key_t *o1 = elt;
309 const opcode_key_t *o2 = key;
312 return o1->code != o2->code || o1->mode != o2->mode ||
313 o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent;
317 * Compare two Def-Use edges for input position.
319 static int cmp_def_use_edge(const void *a, const void *b) {
320 const ir_def_use_edge *ea = a;
321 const ir_def_use_edge *eb = b;
323 /* no overrun, because range is [-1, MAXINT] */
324 return ea->pos - eb->pos;
325 } /* cmp_def_use_edge */
328 * We need the Def-Use edges sorted.
/**
 * Sort the Def-Use (out) edges of a node ascending by input position so
 * later congruence scans can walk them in order.  The edges live in
 * out[1..n_outs]; after sorting, the last edge carries the maximum user
 * input position, which is cached in node->max_user_input.
 */
330 static void sort_irn_outs(node_t *node) {
331 ir_node *irn = node->node;
332 int n_outs = get_irn_n_outs(irn);
335 qsort(&irn->out[1], n_outs, sizeof(irn->out[0]), cmp_def_use_edge);
/* highest position is in the last (largest) edge after the ascending sort */
337 node->max_user_input = irn->out[n_outs].pos;
338 } /* sort_irn_outs */
341 * Return the type of a node.
343 * @param irn an IR-node
345 * @return the associated type of this node
/* Return the lattice type currently associated with an IR-node. */
347 static INLINE lattice_elem_t get_node_type(const ir_node *irn) {
348 return get_irn_node(irn)->type;
349 } /* get_node_type */
352 * Return the tarval of a node.
354 * @param irn an IR-node
356 * @return the associated tarval of this node
/**
 * Return the tarval of a node: its lattice type if that is a tarval,
 * otherwise tarval_bottom (the type is then a symbolic constant).
 */
358 static INLINE tarval *get_node_tarval(const ir_node *irn) {
359 lattice_elem_t type = get_node_type(irn);
361 if (is_tarval(type.tv))
363 return tarval_bottom;
364 } /* get_node_tarval */
367 * Add a partition to the worklist.
/* Push partition X onto the environment's worklist; X must not be on it yet. */
369 static INLINE void add_to_worklist(partition_t *X, environment_t *env) {
370 assert(X->on_worklist == 0);
371 X->wl_next = env->worklist;
377 * Create a new empty partition.
379 * @param env the environment
381 * @return a newly allocated partition
/**
 * Allocate a new, empty partition on the environment's obstack and
 * default-initialize its list heads, links, counters and flags.
 */
383 static INLINE partition_t *new_partition(environment_t *env) {
384 partition_t *part = obstack_alloc(&env->obst, sizeof(*part));
386 INIT_LIST_HEAD(&part->entries);
387 INIT_LIST_HEAD(&part->cprop);
388 INIT_LIST_HEAD(&part->split_list);
389 part->wl_next = NULL;
390 part->touched_next = NULL;
391 part->cprop_next = NULL;
392 part->touched = NULL;
396 part->max_user_inputs = 0;
397 part->on_worklist = 0;
398 part->on_touched = 0;
/* debug-only bookkeeping: chain all partitions and give each a unique number */
401 part->dbg_next = env->dbg_list;
402 env->dbg_list = part;
403 part->nr = part_nr++;
407 } /* new_partition */
410 * Get the first node from a partition.
/* Return the first node of a (non-empty) partition's entry list. */
412 static INLINE node_t *get_first_node(const partition_t *X) {
413 return list_entry(X->entries.next, node_t, node_list);
417 * Return the type of a partition (assuming partition is non-empty and
418 * all elements have the same type).
420 * @param X a partition
422 * @return the type of the first element of the partition
/* All nodes of a partition share one type, so the first node's type suffices. */
424 static INLINE lattice_elem_t get_partition_type(const partition_t *X) {
425 const node_t *first = get_first_node(X);
427 } /* get_partition_type */
430 * Creates a partition node for the given IR-node and place it
431 * into the given partition.
433 * @param irn an IR-node
434 * @param part a partition to place the node in
435 * @param env the environment
437 * @return the created node
/**
 * Allocate a node_t for irn on the obstack, initialize it with lattice
 * type Top, attach it to irn via the link field, and append it to the
 * given partition's entry list.
 */
439 static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env) {
440 /* create a partition node and place it in the partition */
441 node_t *node = obstack_alloc(&env->obst, sizeof(*node));
443 INIT_LIST_HEAD(&node->node_list);
444 INIT_LIST_HEAD(&node->cprop_list);
/* every node starts optimistically at Top */
448 node->type.tv = tarval_top;
449 node->max_user_input = 0;
451 node->on_touched = 0;
454 set_irn_node(irn, node);
456 list_add_tail(&node->node_list, &part->entries);
460 } /* create_partition_node */
463 * Pre-Walker, init all Block-Phi lists.
/* Pre-walker: clear the Phi list of a Block so create_initial_partitions()
   can rebuild it.  NOTE(review): assumes irn is a Block at this point --
   confirm the guard condition. */
465 static void init_block_phis(ir_node *irn, void *env) {
469 set_Block_phis(irn, NULL);
474 * Post-Walker, initialize all Nodes' type to U or top and place
475 * all nodes into the TOP partition.
/**
 * Post-walker: put every node into the initial (TOP) partition, keep the
 * partition's max-arity / max-user-input counters current, and record each
 * Phi on its block's Phi list for later re-evaluation in compute_Phi().
 */
477 static void create_initial_partitions(ir_node *irn, void *ctx) {
478 environment_t *env = ctx;
479 partition_t *part = env->initial;
483 node = create_partition_node(irn, part, env);
485 arity = get_irn_arity(irn);
486 if (arity > part->max_arity)
487 part->max_arity = arity;
488 if (node->max_user_input > part->max_user_inputs)
489 part->max_user_inputs = node->max_user_input;
/* chain Phis onto their block so block reachability changes can revisit them */
492 add_Block_phi(get_nodes_block(irn), irn);
494 } /* create_initial_partitions */
497 * Add a partition to the touched set if not already there.
499 * @param part the partition
500 * @param env the environment
/* Link the partition into the environment's touched set, once only. */
502 static INLINE void add_to_touched(partition_t *part, environment_t *env) {
503 if (part->on_touched == 0) {
504 part->touched_next = env->touched;
506 part->on_touched = 1;
508 } /* add_to_touched */
511 * Add a node to the entry.partition.touched set if not already there.
/* Add node y to its own partition's local touched list, once only. */
515 static INLINE void add_to_partition_touched(node_t *y) {
516 if (y->on_touched == 0) {
517 partition_t *part = y->part;
519 y->next = part->touched;
524 } /* add_to_partition_touched */
527 * Update the worklist: If Z is on worklist then add Z' to worklist.
528 * Else add the smaller of Z and Z' to worklist.
530 * @param Z the Z partition
531 * @param Z_prime the Z' partition, a previous part of Z
532 * @param env the environment
/* Hopcroft worklist rule: if Z is already queued, queue Z' as well;
   otherwise queue whichever of the two split halves is smaller. */
534 static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env) {
535 if (Z->on_worklist || Z_prime->n_nodes < Z->n_nodes) {
536 add_to_worklist(Z_prime, env);
538 add_to_worklist(Z, env);
540 } /* update_worklist */
543 * Split a partition by a local list.
545 * @param Z the Z partition to split
546 * @param g a (non-empty) node list
547 * @param env the environment
549 * @return a new partition containing the nodes of g
/**
 * Split partition Z: move the nodes on the local list g out of Z into a
 * freshly created partition Z', recompute Z''s max arity and max user
 * input, and apply the worklist update rule.  Returns Z'.
 */
551 static partition_t *split(partition_t *Z, node_t *g, environment_t *env) {
552 partition_t *Z_prime;
555 int max_input, max_arity, arity;
557 dump_partition("Splitting ", Z);
561 /* Remove g from Z. */
562 for (node = g; node != NULL; node = node->next) {
563 list_del(&node->node_list);
/* g must be a proper subset: at least one node stays in Z */
566 assert(n < Z->n_nodes);
569 /* Move g to a new partition, Z'. */
570 Z_prime = new_partition(env);
571 max_arity = max_input = 0;
572 for (node = g; node != NULL; node = node->next) {
573 list_add(&node->node_list, &Z_prime->entries);
574 node->part = Z_prime;
575 arity = get_irn_arity(node->node);
576 if (arity > max_arity)
578 if (node->max_user_input > max_input)
579 max_input = node->max_user_input;
581 Z_prime->max_arity = max_arity;
582 Z_prime->max_user_inputs = max_input;
583 Z_prime->n_nodes = n;
585 update_worklist(Z, Z_prime, env);
587 dump_partition("Now ", Z);
588 dump_partition("Created new ", Z_prime);
593 * Returns non-zero if the i'th input of a Phi node is live.
595 * @param phi a Phi-node
596 * @param i an input number
598 * @return non-zero if the i'th input of the given Phi node is live
/**
 * For a data input (i >= 0) the input is live iff the matching CFG
 * predecessor of the Phi's block is not unreachable.  Input -1 is the
 * control input, which is always live.
 */
600 static int is_live_input(ir_node *phi, int i) {
602 ir_node *block = get_nodes_block(phi);
603 ir_node *pred = get_Block_cfgpred(block, i);
604 lattice_elem_t type = get_node_type(pred);
606 return type.tv != tarval_unreachable;
608 /* else it's the control input, always live */
610 } /* is_live_input */
613 * Return non-zero if a type is a constant.
/* A type is constant iff it is strictly between Bottom and Top. */
615 static int is_constant_type(lattice_elem_t type) {
616 if (type.tv != tarval_bottom && type.tv != tarval_top)
619 } /* is_constant_type */
622 * Place a node on the cprop list.
625 * @param env the environment
/**
 * Queue node y for constant propagation: put it on its partition's cprop
 * list and that partition on the environment's cprop list (each once only).
 * Recurses into the Projs of mode_T nodes and into the Phis of Blocks.
 */
627 static void add_node_to_cprop(node_t *y, environment_t *env) {
628 /* Add y to y.partition.cprop. */
629 if (y->on_cprop == 0) {
630 partition_t *Y = y->part;
632 list_add_tail(&y->cprop_list, &Y->cprop);
635 DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));
637 /* place its partition on the cprop list */
638 if (Y->on_cprop == 0) {
639 Y->cprop_next = env->cprop;
644 if (get_irn_mode(y->node) == mode_T) {
645 /* mode_T nodes always produce tarval_bottom, so we must explicitly
646 add its Projs to get constant evaluation to work */
649 for (i = get_irn_n_outs(y->node) - 1; i >= 0; --i) {
650 node_t *proj = get_irn_node(get_irn_out(y->node, i));
652 add_node_to_cprop(proj, env);
656 if (is_Block(y->node)) {
657 /* Due to the way we handle Phi's, we must place all Phis of a block on the list
658 * if someone placed the block. The Block is only placed if the reachability
659 * changes, and this must be re-evaluated in compute_Phi(). */
661 for (phi = get_Block_phis(y->node); phi != NULL; phi = get_Phi_next(phi)) {
662 node_t *p = get_irn_node(phi);
663 add_node_to_cprop(p, env);
666 } /* add_node_to_cprop */
669 * Check whether a type is neither Top nor a constant.
670 * Note: U is handled like Top here, R is a constant.
672 * @param type the type to check
/* True for Bottom-like tarval types: not Top and not a constant. */
674 static int type_is_neither_top_nor_const(const lattice_elem_t type) {
675 if (is_tarval(type.tv)) {
676 if (type.tv == tarval_top)
678 if (tarval_is_constant(type.tv))
688 * Split the partitions if caused by the first entry on the worklist.
690 * @param env the environment
/**
 * Core of the congruence machinery: take the first partition X off the
 * worklist and, for every input position i, walk the users (Def-Use
 * edges) of X's nodes.  Users whose partitions become inhomogeneous are
 * collected into the touched set and split afterwards.
 */
692 static void cause_splits(environment_t *env) {
693 partition_t *X, *Y, *Z;
699 /* remove the first partition from the worklist */
701 env->worklist = X->wl_next;
704 dump_partition("Cause_split: ", X);
705 end_idx = env->end_idx;
/* i == -1 is the control ("block") input */
706 for (i = -1; i <= X->max_user_inputs; ++i) {
707 /* empty the touched set: already done, just clear the list */
710 list_for_each_entry(node_t, x, &X->entries, node_list) {
716 num_edges = get_irn_n_outs(x->node);
718 while (x->next_edge <= num_edges) {
719 ir_def_use_edge *edge = &x->node->out[x->next_edge];
721 /* check if we have necessary edges */
729 /* ignore the "control input" for non-pinned nodes
730 if we are running in GCSE mode */
731 if (i < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
734 y = get_irn_node(succ);
/* re-queue Sub/Cmp users of constants: their value may now be computable */
735 if (is_constant_type(y->type)) {
736 code = get_irn_opcode(succ);
737 if (code == iro_Sub || code == iro_Cmp)
738 add_node_to_cprop(y, env);
741 /* Partitions of constants should not be split simply because their Nodes have unequal
742 functions or incongruent inputs. */
743 if (type_is_neither_top_nor_const(y->type) &&
744 (! is_Phi(y->node) || is_live_input(y->node, i))) {
746 add_to_touched(Y, env);
747 add_to_partition_touched(y);
/* now split every touched partition whose touched subset is proper */
752 for (Z = env->touched; Z != NULL; Z = Z->touched_next) {
753 /* remove it from the touched set */
756 if (Z->n_nodes != Z->n_touched) {
757 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
758 split(Z, Z->touched, env);
760 /* Empty local Z.touched. */
761 for (e = Z->touched; e != NULL; e = e->next) {
771 * Implements split_by_what(): Split a partition by characteristics given
772 * by the what function.
774 * @param X the partition to split
775 * @param What a function returning an Id for every node of the partition X
776 * @param P a list head to store the result partitions
777 * @param env the environment
/**
 * Split partition X by the characteristic returned by the What function:
 * bucket all nodes by What(x) in a listmap, then split every bucket except
 * the last one out of X.  All resulting partitions (including the rest of
 * X) are collected on list P, which is returned.
 */
781 static list_head *split_by_what(partition_t *X, what_func What,
782 list_head *P, environment_t *env) {
785 listmap_entry_t *iter;
788 /* Let map be an empty mapping from the range of What to (local) list of Nodes. */
790 list_for_each_entry(node_t, x, &X->entries, node_list) {
791 void *id = What(x, env);
792 listmap_entry_t *entry;
795 /* input not allowed, ignore */
798 /* Add x to map[What(x)]. */
799 entry = listmap_find(&map, id);
800 x->next = entry->list;
801 /* Let P be a set of Partitions. */
805 /* for all sets S except one in the range of map do */
806 for (iter = map.values; iter != NULL; iter = iter->next) {
807 if (iter->next == NULL) {
808 /* this is the last entry, ignore */
813 /* Add SPLIT( X, S ) to P. */
814 DB((dbg, LEVEL_2, "Split part%d by what\n", X->nr))
815 R = split(X, S, env);
816 list_add(&R->split_list, P);
/* the remainder of X belongs to the result as well */
819 list_add(&X->split_list, P);
823 } /* split_by_what */
825 /** lambda n.(n.type) */
/* WHAT function: characterize a node by its lattice type (env unused). */
826 static void *lambda_type(const node_t *node, environment_t *env) {
828 return node->type.tv;
831 /** lambda n.(n.opcode) */
/**
 * WHAT function: characterize a node by an interned (opcode, mode, payload)
 * key.  Proj nodes contribute their proj number, Sel nodes their entity;
 * the key is canonicalized through the opcode2id_map set so equal keys
 * yield pointer-identical ids.
 */
832 static void *lambda_opcode(const node_t *node, environment_t *env) {
833 opcode_key_t key, *entry;
834 ir_node *irn = node->node;
836 key.code = get_irn_opcode(irn);
837 key.mode = get_irn_mode(irn);
841 switch (get_irn_opcode(irn)) {
843 key.u.proj = get_Proj_proj(irn);
846 key.u.ent = get_Sel_entity(irn);
852 entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
854 } /* lambda_opcode */
856 /** lambda n.(n[i].partition) */
/**
 * WHAT function: characterize a node by the partition of its i-th input
 * (i = env->lambda_input).  For input -1 (the control input) the Proj is
 * skipped so the underlying node's block predecessor is used.
 */
857 static void *lambda_partition(const node_t *node, environment_t *env) {
858 ir_node *skipped = skip_Proj(node->node);
861 int i = env->lambda_input;
863 if (i >= get_irn_arity(node->node)) {
864 /* we are outside the allowed range */
868 /* ignore the "control input" for non-pinned nodes
869 if we are running in GCSE mode */
870 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
873 pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
874 p = get_irn_node(pred);
877 } /* lambda_partition */
880 * Checks whether a type is a constant.
/* Tarval types are constant iff tarval_is_constant(); symconsts always are. */
882 static int is_type_constant(lattice_elem_t type) {
883 if (is_tarval(type.tv))
884 return tarval_is_constant(type.tv);
885 /* else it is a symconst */
890 * Implements split_by().
892 * @param X the partition to split
893 * @param env the environment
/**
 * Full split cascade for partition X: first split by lattice type, then
 * (for non-TOP, non-constant partitions) by opcode, then repeatedly by the
 * partition of every input position until no further splits occur.
 */
895 static void split_by(partition_t *X, environment_t *env) {
901 DB((dbg, LEVEL_2, "WHAT = lambda n.(n.type) on part%d\n", X->nr));
902 P = split_by_what(X, lambda_type, P, env);
904 partition_t *Y = list_entry(P->next, partition_t, split_list);
906 list_del(&Y->split_list);
907 if (Y->n_nodes > 1) {
908 lattice_elem_t type = get_partition_type(Y);
910 /* we do not want to split the TOP or constant partitions */
911 if (type.tv != tarval_top && !is_type_constant(type)) {
916 DB((dbg, LEVEL_2, "WHAT = lambda n.(n.opcode) on part%d\n", Y->nr));
917 Q = split_by_what(Y, lambda_opcode, Q, env);
921 partition_t *Z = list_entry(Q->next, partition_t, split_list);
922 int max_arity = Z->max_arity;
923 list_head *R = &hR, *S = &hS, *T;
925 list_del(&Z->split_list);
927 if (Z->n_nodes > 1) {
932 * BEWARE: during splitting by input 2 for instance we might
933 * create new partitions which are different by input 1, so collect
934 * them and split further.
936 list_add(&Z->split_list, R);
937 for (input = max_arity - 1; input >= -1; --input) {
939 partition_t *Z_prime = list_entry(R->next, partition_t, split_list);
941 list_del(&Z_prime->split_list);
942 if (Z_prime->n_nodes > 1) {
943 env->lambda_input = input;
944 DB((dbg, LEVEL_2, "WHAT = lambda n.(n[%d].partition) on part%d\n", input, Z_prime->nr));
945 S = split_by_what(Z_prime, lambda_partition, S, env);
947 list_add(&Z_prime->split_list, S);
949 } while (!list_empty(R));
955 } while (!list_empty(Q));
958 } while (!list_empty(P));
962 * (Re-)compute the type for a given node.
964 * @param node the node
/**
 * Default transfer function: Top in an unreachable block or if any data
 * input is Top; otherwise mode_X nodes become reachable and everything
 * else is evaluated by computed_value().
 */
966 static void default_compute(node_t *node) {
968 ir_node *irn = node->node;
969 node_t *block = get_irn_node(get_nodes_block(irn));
971 if (block->type.tv == tarval_unreachable) {
972 node->type.tv = tarval_top;
976 /* if any of the data inputs have type top, the result is type top */
977 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
978 ir_node *pred = get_irn_n(irn, i);
979 node_t *p = get_irn_node(pred);
981 if (p->type.tv == tarval_top) {
982 node->type.tv = tarval_top;
987 if (get_irn_mode(node->node) == mode_X)
988 node->type.tv = tarval_reachable;
990 node->type.tv = computed_value(irn);
991 } /* default_compute */
994 * (Re-)compute the type for a Block node.
996 * @param node the node
/**
 * Transfer function for Block: reachable iff at least one CFG predecessor
 * is reachable, otherwise Top.
 */
998 static void compute_Block(node_t *node) {
1000 ir_node *block = node->node;
1002 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1003 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1005 if (pred->type.tv == tarval_reachable) {
1006 /* A block is reachable, if at least one predecessor is reachable. */
1007 node->type.tv = tarval_reachable;
1011 node->type.tv = tarval_top;
1012 } /* compute_Block */
1015 * (Re-)compute the type for a Bad node.
1017 * @param node the node
/* Transfer function for Bad: unconditionally Top. */
1019 static void compute_Bad(node_t *node) {
1020 /* Bad nodes ALWAYS compute Top */
1021 node->type.tv = tarval_top;
1025 * (Re-)compute the type for an Unknown node.
1027 * @param node the node
/* Transfer function for Unknown: pessimistically Bottom (see comment). */
1029 static void compute_Unknown(node_t *node) {
1030 /* While Unknown nodes compute Top, but this is dangerous:
1031 * a if (unknown) would lead to BOTH control flows unreachable.
1032 * While this is correct in the given semantics, it would destroy the Firm
1034 * For now, we compute bottom here.
1036 node->type.tv = tarval_bottom;
1037 } /* compute_Unknown */
1040 * (Re-)compute the type for a Jmp node.
1042 * @param node the node
/* Transfer function for Jmp: copies its block's reachability type. */
1044 static void compute_Jmp(node_t *node) {
1045 node_t *block = get_irn_node(get_nodes_block(node->node));
1047 node->type = block->type;
1051 * (Re-)compute the type for the End node.
1053 * @param node the node
/* Transfer function for End: always reachable. */
1055 static void compute_End(node_t *node) {
1056 /* the End node is NOT dead of course */
1057 node->type.tv = tarval_reachable;
1061 * (Re-)compute the type for a SymConst node.
1063 * @param node the node
/**
 * Transfer function for SymConst: Top in an unreachable block; entity
 * addresses become a symbolic-constant lattice element, everything else
 * is evaluated via computed_value().
 */
1065 static void compute_SymConst(node_t *node) {
1066 ir_node *irn = node->node;
1067 node_t *block = get_irn_node(get_nodes_block(irn));
1069 if (block->type.tv == tarval_unreachable) {
1070 node->type.tv = tarval_top;
1073 switch (get_SymConst_kind(irn)) {
1074 case symconst_addr_ent:
1075 /* case symconst_addr_name: cannot handle this yet */
1076 node->type.sym = get_SymConst_symbol(irn);
1079 node->type.tv = computed_value(irn);
1081 } /* compute_SymConst */
1084 * (Re-)compute the type for a Phi node.
1086 * @param node the node
/**
 * Transfer function for Phi: the Meet over all live inputs.  Inputs whose
 * controlling CFG predecessor is unreachable are ignored; a single live
 * constant keeps its value, differing constants or a Bottom input force
 * Bottom, and a Phi in an unreachable block is Top.
 */
1088 static void compute_Phi(node_t *node) {
1090 ir_node *phi = node->node;
1091 lattice_elem_t type;
1093 /* if a Phi is in a unreachable block, its type is TOP */
1094 node_t *block = get_irn_node(get_nodes_block(phi));
1096 if (block->type.tv == tarval_unreachable) {
1097 node->type.tv = tarval_top;
1101 /* Phi implements the Meet operation */
1102 type.tv = tarval_top;
1103 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
1104 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
1105 node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
1107 if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
1108 /* ignore TOP inputs: We must check here for unreachable blocks,
1109 because Firm constants living in the Start Block are NEVER Top.
1110 Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
1111 comes from a unreachable input. */
1114 if (pred->type.tv == tarval_bottom) {
1115 node->type.tv = tarval_bottom;
1117 } else if (type.tv == tarval_top) {
1118 /* first constant found */
1120 } else if (type.tv != pred->type.tv) {
1121 /* different constants or tarval_bottom */
1122 node->type.tv = tarval_bottom;
1125 /* else nothing, constants are the same */
1131 * (Re-)compute the type for an Add. Special case: one nodes is a Zero Const.
1133 * @param node the node
/**
 * Transfer function for Add, with the special case x + 0 = 0 + x = x.
 * (NOTE(review): the local is named `sub` but holds the Add node.)
 */
1135 static void compute_Add(node_t *node) {
1136 ir_node *sub = node->node;
1137 node_t *l = get_irn_node(get_Add_left(sub));
1138 node_t *r = get_irn_node(get_Add_right(sub));
1139 lattice_elem_t a = l->type;
1140 lattice_elem_t b = r->type;
1141 node_t *block = get_irn_node(get_nodes_block(sub));
1144 if (block->type.tv == tarval_unreachable) {
1145 node->type.tv = tarval_top;
1149 if (a.tv == tarval_top || b.tv == tarval_top) {
1150 node->type.tv = tarval_top;
1151 } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
1152 node->type.tv = tarval_bottom;
1154 /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
1155 must call tarval_add() first to handle this case! */
1156 if (is_tarval(a.tv)) {
1157 if (is_tarval(b.tv)) {
/* both constants: fold the addition */
1158 node->type.tv = tarval_add(a.tv, b.tv);
1161 mode = get_tarval_mode(a.tv);
1162 if (a.tv == get_mode_null(mode)) {
1166 } else if (is_tarval(b.tv)) {
1167 mode = get_tarval_mode(b.tv);
1168 if (b.tv == get_mode_null(mode)) {
1173 node->type.tv = tarval_bottom;
1178 * Returns true if a type is a constant.
/* A type is a constant if it is an entity symconst or a constant tarval. */
1180 static int is_con(const lattice_elem_t type) {
1181 return is_entity(type.sym.entity_p) || tarval_is_constant(type.tv);
1185 * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
1187 * @param node the node
/**
 * Transfer function for Sub, with the special case that congruent
 * operands (same partition) yield 0 -- except for float modes, where
 * NaN - NaN != 0.
 */
1189 static void compute_Sub(node_t *node) {
1190 ir_node *sub = node->node;
1191 node_t *l = get_irn_node(get_Sub_left(sub));
1192 node_t *r = get_irn_node(get_Sub_right(sub));
1193 lattice_elem_t a = l->type;
1194 lattice_elem_t b = r->type;
1195 node_t *block = get_irn_node(get_nodes_block(sub));
1197 if (block->type.tv == tarval_unreachable) {
1198 node->type.tv = tarval_top;
1201 if (a.tv == tarval_top || b.tv == tarval_top) {
1202 node->type.tv = tarval_top;
1203 } else if (is_con(a) && is_con(b)) {
1204 if (is_tarval(a.tv) && is_tarval(b.tv)) {
/* both constant tarvals: fold the subtraction */
1205 node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
1206 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
1208 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
1211 node->type.tv = tarval_bottom;
1213 } else if (r->part == l->part &&
1214 (!mode_is_float(get_irn_mode(l->node)))) {
1215 if (node->type.tv == tarval_top) {
1217 * BEWARE: a - a is NOT always 0 for floating Point values, as
1218 * NaN op NaN = NaN, so we must check this here.
1220 ir_mode *mode = get_irn_mode(sub);
1221 node->type.tv = get_mode_null(mode);
1223 node->type.tv = tarval_bottom;
1226 node->type.tv = tarval_bottom;
1231 * (Re-)compute the type for Cmp.
1233 * @param node the node
/**
 * Transfer function for Cmp (a mode_T node): when both operands are
 * constants or congruent, the type is set to b_true as an "evaluatable"
 * marker -- NOTE(review): the real per-relation evaluation happens in
 * compute_Proj_Cmp(); confirm this placeholder convention.
 */
1235 static void compute_Cmp(node_t *node) {
1236 ir_node *cmp = node->node;
1237 node_t *l = get_irn_node(get_Cmp_left(cmp));
1238 node_t *r = get_irn_node(get_Cmp_right(cmp));
1239 lattice_elem_t a = l->type;
1240 lattice_elem_t b = r->type;
1242 if (a.tv == tarval_top || b.tv == tarval_top) {
1243 node->type.tv = tarval_top;
1244 } else if (is_con(a) && is_con(b)) {
1245 /* both nodes are constants, we can probably do something */
1246 node->type.tv = tarval_b_true;
1247 } else if (r->part == l->part) {
1248 /* both nodes congruent, we can probably do something */
1249 node->type.tv = tarval_b_true;
1251 node->type.tv = tarval_bottom;
1253 } /* compute_Cmp */
1256 * (Re-)compute the type for a Proj(Cmp).
1258 * @param node the node
1259 * @param cond the predecessor Cmp node
/**
 * Transfer function for Proj(Cmp): fold constant comparisons via
 * default_compute(); congruent operands evaluate to (pnc & pn_Cmp_Eq),
 * i.e. true exactly for relations implied by equality -- guarded against
 * float modes because NaN != NaN.
 */
1261 static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
1262 ir_node *proj = node->node;
1263 node_t *l = get_irn_node(get_Cmp_left(cmp));
1264 node_t *r = get_irn_node(get_Cmp_right(cmp));
1265 lattice_elem_t a = l->type;
1266 lattice_elem_t b = r->type;
1267 pn_Cmp pnc = get_Proj_proj(proj);
1269 if (a.tv == tarval_top || b.tv == tarval_top) {
1270 node->type.tv = tarval_top;
1271 } else if (is_con(a) && is_con(b)) {
1272 default_compute(node);
1273 } else if (r->part == l->part &&
1274 (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
1275 if (node->type.tv == tarval_top) {
1277 * BEWARE: a == a is NOT always True for floating Point values, as
1278 * NaN != NaN is defined, so we must check this here.
1280 node->type.tv = new_tarval_from_long(pnc & pn_Cmp_Eq, mode_b);
1282 node->type.tv = tarval_bottom;
1285 node->type.tv = tarval_bottom;
1287 } /* compute_Proj_Cmp */
1290 * (Re-)compute the type for a Proj(Cond).
1292 * @param node the node
1293 * @param cond the predecessor Cond node
/**
 * Transfer function for Proj(Cond): decide reachability of a control
 * projection.  Boolean selectors map true/false/Bottom/Top onto
 * reachable/unreachable per projection; switch selectors compare the
 * constant selector against the case (or all cases, for defaultProj).
 */
1295 static void compute_Proj_Cond(node_t *node, ir_node *cond) {
1296 ir_node *proj = node->node;
1297 long pnc = get_Proj_proj(proj);
1298 ir_node *sel = get_Cond_selector(cond);
1299 node_t *selector = get_irn_node(sel);
1301 if (get_irn_mode(sel) == mode_b) {
1303 if (pnc == pn_Cond_true) {
1304 if (selector->type.tv == tarval_b_false) {
1305 node->type.tv = tarval_unreachable;
1306 } else if (selector->type.tv == tarval_b_true) {
1307 node->type.tv = tarval_reachable;
1308 } else if (selector->type.tv == tarval_bottom) {
/* unknown selector: both branches must be assumed reachable */
1309 node->type.tv = tarval_reachable;
1311 assert(selector->type.tv == tarval_top);
1312 node->type.tv = tarval_unreachable;
1315 assert(pnc == pn_Cond_false);
1317 if (selector->type.tv == tarval_b_false) {
1318 node->type.tv = tarval_reachable;
1319 } else if (selector->type.tv == tarval_b_true) {
1320 node->type.tv = tarval_unreachable;
1321 } else if (selector->type.tv == tarval_bottom) {
1322 node->type.tv = tarval_reachable;
1324 assert(selector->type.tv == tarval_top);
1325 node->type.tv = tarval_unreachable;
/* non-boolean (switch) selector */
1330 if (selector->type.tv == tarval_bottom) {
1331 node->type.tv = tarval_reachable;
1332 } else if (selector->type.tv == tarval_top) {
1333 node->type.tv = tarval_unreachable;
1335 long value = get_tarval_long(selector->type.tv);
1336 if (pnc == get_Cond_defaultProj(cond)) {
1337 /* default switch, have to check ALL other cases */
1340 for (i = get_irn_n_outs(cond) - 1; i >= 0; --i) {
1341 ir_node *succ = get_irn_out(cond, i);
1345 if (value == get_Proj_proj(succ)) {
1346 /* we found a match, will NOT take the default case */
1347 node->type.tv = tarval_unreachable;
1351 /* all cases checked, no match, will take default case */
1352 node->type.tv = tarval_reachable;
1355 node->type.tv = value == pnc ? tarval_reachable : tarval_unreachable;
1359 } /* compute_Proj_Cond */
1362  * (Re-)compute the type for a Proj node.
1364 * @param node the node
static void compute_Proj(node_t *node) {
	ir_node *proj  = node->node;
	ir_mode *mode  = get_irn_mode(proj);
	node_t  *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
	ir_node *pred  = get_Proj_pred(proj);

	if (get_Proj_proj(proj) == pn_Start_X_initial_exec && is_Start(pred)) {
		/* The initial_exec node is ALWAYS reachable. */
		node->type.tv = tarval_reachable;
	/* NOTE(review): "return;" / closing brace lines appear to be missing
	   between the following guard clauses in this extract */
	if (block->type.tv == tarval_unreachable) {
		/* a Proj in an unreachable Block stays Top */
		node->type.tv = tarval_top;
	if (get_irn_node(pred)->type.tv == tarval_top) {
		/* if the predecessor is Top, its Projs follow */
		node->type.tv = tarval_top;
	if (mode == mode_M) {
		/* mode M is always bottom */
		node->type.tv = tarval_bottom;
	if (mode != mode_X) {
		/* data Proj: dispatch Cmp specially, everything else generically
		   (NOTE(review): the is_Cmp() test / "} else" appears to be missing here) */
		compute_Proj_Cmp(node, pred);
		default_compute(node);
	/* handle mode_X nodes */
	switch (get_irn_opcode(pred)) {
		/* NOTE(review): the "case iro_Start:" label appears to be missing here */
		/* the Proj_X from the Start is always reachable.
		   However this is already handled at the top. */
		node->type.tv = tarval_reachable;
		/* case iro_Cond (label apparently dropped from this extract) */
		compute_Proj_Cond(node, pred);
		/* default: generic evaluation */
		default_compute(node);
} /* compute_Proj */
1418  * (Re-)compute the type for a Confirm node.
1420 * @param node the node
static void compute_Confirm(node_t *node) {
	ir_node *confirm = node->node;
	node_t  *pred    = get_irn_node(get_Confirm_value(confirm));

	if (get_Confirm_cmp(confirm) == pn_Cmp_Eq) {
		/* value is confirmed to be EQUAL to the bound */
		node_t *bound = get_irn_node(get_Confirm_bound(confirm));
		if (is_con(bound->type)) {
			/* is equal to a constant: the Confirm gets the constant's type */
			node->type = bound->type;
	/* NOTE(review): "return;" / closing braces appear to be missing here */
	/* a Confirm is a copy OR a Const */
	node->type = pred->type;
} /* compute_Confirm */
1440 * (Re-)compute the type for a given node.
1442 * @param node the node
static void compute(node_t *node) {
	/* fetch the opcode-specific compute function installed by set_compute_functions() */
	compute_func func = (compute_func)node->node->op->ops.generic;
	/* NOTE(review): the call through func and the closing brace are missing
	   from this extract */
1452 * Propagate constant evaluation.
1454 * @param env the environment
static void propagate(environment_t *env) {
	/* NOTE(review): declarations of X, Y, x, fallen, n_fallen, i appear to be
	   missing from this extract */
	lattice_elem_t old_type;

	/* process partitions queued on the cprop list until a fixpoint is reached */
	while (env->cprop != NULL) {
		/* remove the first partition X from cprop */
		env->cprop = X->cprop_next;
		DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));

		/* process all nodes queued on X's local cprop worklist */
		while (! list_empty(&X->cprop)) {
			/* remove the first Node x from X.cprop */
			x = list_entry(X->cprop.next, node_t, cprop_list);
			list_del(&x->cprop_list);

			/* compute a new type for x */
			DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
			if (x->type.tv != old_type.tv) {
				/* type changed: it may only move DOWN the lattice (verified) */
				verify_type(old_type, x->type);
				DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
				if (x->on_fallen == 0) {
					/* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
					   not already on the list. */
					DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
				/* re-schedule all users of x, their type might change too */
				for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
					ir_node *succ = get_irn_out(x->node, i);
					node_t  *y    = get_irn_node(succ);

					/* Add y to y.partition.cprop. */
					add_node_to_cprop(y, env);
		/* if only SOME nodes of X changed type, X must be split */
		if (n_fallen > 0 && n_fallen != X->n_nodes) {
			DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
			Y = split(X, fallen, env);
		/* remove the nodes from the fallen list */
		for (x = fallen; x != NULL; x = x->next)
1522 * Get the leader for a given node from its congruence class.
1524  * @param node  the node
static ir_node *get_leader(node_t *node) {
	partition_t *part = node->part;

	if (part->n_nodes > 1) {
		/* the congruence class is non-trivial: its first node is the leader */
		DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
		return get_first_node(part)->node;
	/* NOTE(review): the closing brace and the trivial-class return (the node
	   itself) appear to be missing from this extract */
1538 * Return non-zero if the control flow predecessor node pred
1539 * is the only reachable control flow exit of its block.
1541 * @param pred the control flow exit
static int can_exchange(ir_node *pred) {
	/* NOTE(review): the leading if-case (presumably for Start/Bad) and several
	   statement bodies appear to be missing from this extract */
	else if (is_Jmp(pred))
	else if (get_irn_mode(pred) == mode_T) {
		/* if the predecessor block has more than one
		   reachable outputs we cannot remove the block */
		for (i = get_irn_n_outs(pred) - 1; i >= 0; --i) {
			ir_node *proj = get_irn_out(pred, i);

			/* skip non-control flow Proj's */
			if (get_irn_mode(proj) != mode_X)
			node = get_irn_node(proj);
			if (node->type.tv == tarval_reachable) {
				/* count/record reachable control-flow exits here (body truncated) */
1574 * Block Post-Walker, apply the analysis results on control flow by
1575 * shortening Phi's and Block inputs.
static void apply_cf(ir_node *block, void *ctx) {
	node_t  *node = get_irn_node(block);
	/* NOTE(review): declarations of n, i, j, k appear to be missing from this extract */
	ir_node **ins, **in_X;
	ir_node *phi, *next;

	if (block == get_irg_end_block(current_ir_graph) ||
	    block == get_irg_start_block(current_ir_graph)) {
		/* the EndBlock is always reachable even if the analysis
		   finds out the opposite :-) */
	if (node->type.tv == tarval_unreachable) {
		/* mark dead blocks */
		set_Block_dead(block);
	n = get_Block_n_cfgpreds(block);
	/* NOTE(review): the "n == 1" test appears to be missing before this comment */
	/* only one predecessor combine */
	ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));

	if (can_exchange(pred))
		exchange(block, get_nodes_block(pred));
	/* collect the still-reachable control-flow inputs into in_X */
	NEW_ARR_A(ir_node *, in_X, n);
	for (i = 0; i < n; ++i) {
		ir_node *pred = get_Block_cfgpred(block, i);
		node_t  *node = get_irn_node(pred);

		if (node->type.tv == tarval_reachable) {
	/* shorten all Phis of this block accordingly */
	NEW_ARR_A(ir_node *, ins, n);
	for (phi = get_Block_phis(block); phi != NULL; phi = next) {
		node_t *node = get_irn_node(phi);

		next = get_Phi_next(phi);
		if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
			/* this Phi is replaced by a constant */
			tarval  *tv = node->type.tv;
			ir_node *c  = new_r_Const(current_ir_graph, block, get_tarval_mode(tv), tv);

			set_irn_node(c, node);
			DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
		/* keep only the Phi inputs coming from reachable predecessors */
		for (i = 0; i < n; ++i) {
			node_t *pred = get_irn_node(get_Block_cfgpred(block, i));

			if (pred->type.tv == tarval_reachable) {
				ins[j++] = get_Phi_pred(phi, i);
		/* NOTE(review): the "j == 1" test appears to be missing here */
		/* this Phi is replaced by a single predecessor */
		ir_node *s = ins[0];

		DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, s));
		/* otherwise shrink the Phi to the surviving inputs */
		set_irn_in(phi, j, ins);
	/* NOTE(review): the "k == 1" test appears to be missing here */
	/* this Block has only one live predecessor */
	ir_node *pred = skip_Proj(in_X[0]);

	if (can_exchange(pred))
		exchange(block, get_nodes_block(pred));
	/* otherwise shrink the Block to the surviving control-flow inputs */
	set_irn_in(block, k, in_X);
1668 * Post-Walker, apply the analysis results;
static void apply_result(ir_node *irn, void *ctx) {
	node_t *node = get_irn_node(irn);

	if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
		/* blocks already handled, do not touch the End node */
		/* NOTE(review): an "} else {" line appears to be missing here */
		node_t *block = get_irn_node(get_nodes_block(irn));

		if (block->type.tv == tarval_unreachable) {
			/* whole block is dead: replace the node by Bad */
			ir_node *bad = get_irg_bad(current_ir_graph);

			/* here, bad might already have a node, but this can be safely ignored
			   as long as bad has at least ONE valid node */
			set_irn_node(bad, node);
			DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
		}
		else if (node->type.tv == tarval_unreachable) {
			/* the node itself was proven unreachable */
			ir_node *bad = get_irg_bad(current_ir_graph);

			/* see comment above */
			set_irn_node(bad, node);
			DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
		}
		else if (get_irn_mode(irn) == mode_X) {
			/* control flow: a Proj of a Cond with constant selector becomes a Jmp */
			ir_node *cond = get_Proj_pred(irn);

			if (is_Cond(cond)) {
				node_t *sel = get_irn_node(get_Cond_selector(cond));

				if (is_tarval(sel->type.tv) && tarval_is_constant(sel->type.tv)) {
					/* Cond selector is a constant, make a Jmp */
					ir_node *jmp = new_r_Jmp(current_ir_graph, block->node);
					set_irn_node(jmp, node);
					DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
			/* normal data node */
			if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
				tarval *tv = node->type.tv;

				if (! is_Const(irn)) {
					/* can be replaced by a constant */
					ir_node *c = new_r_Const(current_ir_graph, block->node, get_tarval_mode(tv), tv);
					set_irn_node(c, node);
					DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
			} else if (is_entity(node->type.sym.entity_p)) {
				/* type is an address of an entity */
				if (! is_SymConst(irn)) {
					/* can be replaced by a Symconst */
					ir_node *symc = new_r_SymConst(current_ir_graph, block->node, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
					set_irn_node(symc, node);

					DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
					exchange(irn, symc);
				/* NOTE(review): an "} else {" line appears to be missing here */
				/* neither constant nor entity: try the congruence-class leader */
				ir_node *leader = get_leader(node);

				if (leader != irn) {
					DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
					exchange(irn, leader);
} /* apply_result */
1751 #define SET(code) op_##code->ops.generic = (op_func)compute_##code
1754 * sets the generic functions to compute.
static void set_compute_functions(void) {
	/* NOTE(review): the declaration of i appears to be missing from this extract */

	/* set the default compute function */
	for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
		ir_op *op = get_irp_opcode(i);
		op->ops.generic = (op_func)default_compute;
	/* set specific functions */
	/* NOTE(review): the SET(...) invocations installing the per-opcode
	   compute functions appear to be missing from this extract */
} /* set_compute_functions */
/* vcg dump hook: annotate every node with its partition number and type */
static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
	/* prefer the local copy of the node if one exists */
	ir_node *irn  = local != NULL ? local : n;
	node_t  *node = get_irn_node(irn);

	ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
	/* NOTE(review): the "return 1;" and closing brace appear to be missing
	   from this extract */
1788 void combo(ir_graph *irg) {
1792 ir_graph *rem = current_ir_graph;
1794 current_ir_graph = irg;
1796 /* register a debug mask */
1797 FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
1798 //firm_dbg_set_mask(dbg, SET_LEVEL_3);
1800 DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
1802 obstack_init(&env.obst);
1803 env.worklist = NULL;
1807 #ifdef DEBUG_libfirm
1808 env.dbg_list = NULL;
1810 env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
1811 env.type2id_map = pmap_create();
1812 env.end_idx = get_opt_global_cse() ? 0 : -1;
1813 env.lambda_input = 0;
1815 assure_irg_outs(irg);
1817 /* we have our own value_of function */
1818 set_value_of_func(get_node_tarval);
1820 set_compute_functions();
1821 DEBUG_ONLY(part_nr = 0);
1823 /* create the initial partition and place it on the work list */
1824 env.initial = new_partition(&env);
1825 add_to_worklist(env.initial, &env);
1826 irg_walk_graph(irg, init_block_phis, create_initial_partitions, &env);
1828 /* Place the START Node's partition on cprop.
1829 Place the START Node on its local worklist. */
1830 initial_X = get_irg_initial_exec(irg);
1831 start = get_irn_node(initial_X);
1832 add_node_to_cprop(start, &env);
1836 if (env.worklist != NULL)
1838 } while (env.cprop != NULL || env.worklist != NULL);
1840 dump_all_partitions(&env);
1843 set_dump_node_vcgattr_hook(dump_partition_hook);
1844 dump_ir_block_graph(irg, "-partition");
1845 set_dump_node_vcgattr_hook(NULL);
1847 (void)dump_partition_hook;
1850 /* apply the result */
1851 irg_block_walk_graph(irg, NULL, apply_cf, &env);
1852 irg_walk_graph(irg, NULL, apply_result, &env);
1854 pmap_destroy(env.type2id_map);
1855 del_set(env.opcode2id_map);
1856 obstack_free(&env.obst, NULL);
1858 /* restore value_of() default behavior */
1859 set_value_of_func(NULL);
1860 current_ir_graph = rem;