2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Cliff Click's Combined Analysis/Optimization
23 * @author Michael Beck
26 * Note that the current implementation lacks the leaders/followers
29 * Note further that we use the terminology from Click's work here, which is different
30 * in some cases from Firm terminology. Especially, Click's type is a
31 * Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
39 #include "iroptimize.h"
46 #include "irgraph_t.h"
61 /* define this to check that all type translations are monotone */
62 #define VERIFY_MONOTONE
64 typedef struct node_t node_t;
65 typedef struct partition_t partition_t;
66 typedef struct opcode_key_t opcode_key_t;
67 typedef struct listmap_entry_t listmap_entry_t;
69 /** The type of the compute function. */
70 typedef void (*compute_func)(node_t *node);
76 ir_opcode code; /**< The Firm opcode. */
77 ir_mode *mode; /**< The mode of all nodes in the partition. */
79 long proj; /**< For Proj nodes, its proj number */
80 ir_entity *ent; /**< For Sel Nodes, its entity */
85 * An entry in the list_map.
87 struct listmap_entry_t {
88 void *id; /**< The id. */
89 node_t *list; /**< The associated list for this id. */
90 listmap_entry_t *next; /**< Link to the next entry in the map. */
93 /** We must map id's to lists. */
94 typedef struct listmap_t {
95 set *map; /**< Map id's to listmap_entry_t's */
96 listmap_entry_t *values; /**< List of all values in the map. */
100 * A lattice element. Because we handle constants and symbolic constants different, we
101 * have to use this union.
112 ir_node *node; /**< The IR-node itself. */
113 list_head node_list; /**< Double-linked list of entries. */
114 list_head cprop_list; /**< Double-linked partition.cprop list. */
115 partition_t *part; /**< points to the partition this node belongs to */
116 node_t *next; /**< Next node on local list (partition.touched, fallen). */
117 lattice_elem_t type; /**< The associated lattice element "type". */
118 int max_user_input; /**< Maximum input number of Def-Use edges. */
119 int next_edge; /**< Index of the next Def-Use edge to use. */
120 unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
121 unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
122 unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
126 * A partition containing congruent nodes.
129 list_head entries; /**< The head of partition node list. */
130 list_head cprop; /**< The head of partition.cprop list. */
131 list_head split_list; /**< Double-linked list of entries that must be processed by split_by(). */
132 partition_t *wl_next; /**< Next entry in the work list if any. */
133 partition_t *touched_next; /**< Points to the next partition in the touched set. */
134 partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
135 node_t *touched; /**< The partition.touched set of this partition. */
136 unsigned n_nodes; /**< Number of entries in this partition. */
137 unsigned n_touched; /**< Number of entries in the partition.touched. */
138 int max_arity; /**< Maximum arity of all entries. */
139 int max_user_inputs; /**< Maximum number of user inputs of all entries. */
140 unsigned on_worklist:1; /**< Set, if this partition is in the work list. */
141 unsigned on_touched:1; /**< Set, if this partition is on the touched set. */
142 unsigned on_cprop:1; /**< Set, if this partition is on the cprop list. */
144 partition_t *dbg_next; /**< Link all partitions for debugging */
145 unsigned nr; /**< A unique number for (what-)mapping, >0. */
149 typedef struct environment_t {
150 struct obstack obst; /**< obstack to allocate data structures. */
151 partition_t *worklist; /**< The work list. */
152 partition_t *cprop; /**< The constant propagation list. */
153 partition_t *touched; /**< the touched set. */
154 partition_t *initial; /**< The initial partition. */
155 set *opcode2id_map; /**< The opcodeMode->id map. */
156 pmap *type2id_map; /**< The type->id map. */
157 int end_idx; /**< -1 for local and 0 for global congruences. */
158 int lambda_input; /**< Captured argument for lambda_partition(). */
160 partition_t *dbg_list; /**< List of all partitions. */
164 /** Type of the what function. */
165 typedef void *(*what_func)(const node_t *node, environment_t *env);
167 #define get_irn_node(irn) ((node_t *)get_irn_link(irn))
168 #define set_irn_node(irn, node) set_irn_link(irn, node)
170 /* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
171 #undef tarval_unreachable
172 #define tarval_unreachable tarval_top
175 /** The debug module handle. */
176 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
178 /** Next partition number. */
179 DEBUG_ONLY(static unsigned part_nr = 0);
182 static INLINE lattice_elem_t get_partition_type(const partition_t *X);
185 * Dump partition to output.
187 static void dump_partition(const char *msg, const partition_t *part) {
/* NOTE(review): a 'first' flag is declared and toggled on lines not visible
   here; it suppresses the leading ", " before the first node. */
190 lattice_elem_t type = get_partition_type(part);
192 DB((dbg, LEVEL_2, "%s part%u (%u, %+F) {\n ", msg, part->nr, part->n_nodes, type));
193 list_for_each_entry(node_t, node, &part->entries, node_list) {
194 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
197 DB((dbg, LEVEL_2, "\n}\n"));
201 * Dump all partitions.
203 static void dump_all_partitions(const environment_t *env) {
204 const partition_t *P;
206 DB((dbg, LEVEL_2, "All partitions\n===============\n"));
207 for (P = env->dbg_list; P != NULL; P = P->dbg_next)
208 dump_partition("", P);
/* Non-debug builds: dumping is compiled out to nothing. */
212 #define dump_partition(msg, part)
213 #define dump_all_partitions(env)
216 #if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
218 * Verify that a type transition is monotone
/* Panics when a node's lattice type moves in a non-monotone direction
   (anything except Top -> X or X -> Bottom/Reachable). */
220 static void verify_type(const lattice_elem_t old_type, const lattice_elem_t new_type) {
221 if (old_type.tv == new_type.tv) {
225 if (old_type.tv == tarval_top) {
226 /* from Top down-to is always allowed */
229 if (old_type.tv == tarval_reachable) {
230 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
232 if (new_type.tv == tarval_bottom || new_type.tv == tarval_reachable) {
236 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
/* Verification compiled out in non-debug builds. */
239 #define verify_type(old_type, new_type)
243 * Compare two pointer values of a listmap.
245 static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
246 const listmap_entry_t *e1 = elt;
247 const listmap_entry_t *e2 = key;
250 return e1->id != e2->id;
251 } /* listmap_cmp_ptr */
254 * Initializes a listmap.
256 * @param map the listmap
/* Creates the underlying pointer set; the values list starts empty
   (initialized on a line not visible here -- confirm map->values = NULL). */
258 static void listmap_init(listmap_t *map) {
259 map->map = new_set(listmap_cmp_ptr, 16);
264 * Terminates a listmap.
266 * @param map the listmap
/* Releases the underlying set; the entries themselves live on the obstack. */
268 static void listmap_term(listmap_t *map) {
273 * Return the associated listmap entry for a given id.
275 * @param map the listmap
276 * @param id the id to search for
278 * @return the associated listmap entry for the given id
/* Uses set_insert(), so a lookup for an unknown id creates the entry;
   a freshly created entry (list == NULL) is linked into map->values. */
280 static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
281 listmap_entry_t key, *entry;
286 entry = set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
288 if (entry->list == NULL) {
289 /* a new entry, put into the list */
290 entry->next = map->values;
297 * Calculate the hash value for an opcode map entry.
299 * @param entry an opcode map entry
301 * @return a hash value for the given opcode map entry
/* NOTE(review): '(entry->mode - (ir_mode *)0)' converts the mode pointer to
   an integer via pointer subtraction from a null pointer -- formally
   undefined behavior in ISO C; HASH_PTR() would be the cleaner idiom. */
303 static unsigned opcode_hash(const opcode_key_t *entry) {
304 return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent);
308 * Compare two entries in the opcode map.
310 static int cmp_opcode(const void *elt, const void *key, size_t size) {
311 const opcode_key_t *o1 = elt;
312 const opcode_key_t *o2 = key;
315 return o1->code != o2->code || o1->mode != o2->mode ||
316 o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent;
320 * Compare two Def-Use edges for input position.
322 static int cmp_def_use_edge(const void *a, const void *b) {
323 const ir_def_use_edge *ea = a;
324 const ir_def_use_edge *eb = b;
326 /* no overrun, because range is [-1, MAXINT] */
327 return ea->pos - eb->pos;
328 } /* cmp_def_use_edge */
331 * We need the Def-Use edges sorted.
/* Sorts the node's out edges by input position and caches the maximum
   position. NOTE(review): out[0] appears to hold the edge count, so the
   payload is out[1..n_outs] and out[n_outs] is the maximum after sorting. */
333 static void sort_irn_outs(node_t *node) {
334 ir_node *irn = node->node;
335 int n_outs = get_irn_n_outs(irn);
338 qsort(&irn->out[1], n_outs, sizeof(irn->out[0]), cmp_def_use_edge);
340 node->max_user_input = irn->out[n_outs].pos;
341 } /* sort_irn_outs */
344 * Return the type of a node.
346 * @param irn an IR-node
348 * @return the associated type of this node
/* Convenience accessor: fetches the lattice element stored in the node_t
   attached to the IR-node's link field. */
350 static INLINE lattice_elem_t get_node_type(const ir_node *irn) {
351 return get_irn_node(irn)->type;
352 } /* get_node_type */
355 * Return the tarval of a node.
357 * @param irn an IR-node
359 * @return the associated tarval of this node, or tarval_bottom if the
359 *         node's type is a symbolic constant rather than a tarval
361 static INLINE tarval *get_node_tarval(const ir_node *irn) {
362 lattice_elem_t type = get_node_type(irn);
364 if (is_tarval(type.tv))
366 return tarval_bottom;
367 } /* get_node_tarval */
370 * Add a partition to the worklist.
/* Precondition: the partition is not already on the worklist (asserted).
   Pushes onto the singly linked env->worklist and sets the flag. */
372 static INLINE void add_to_worklist(partition_t *X, environment_t *env) {
373 assert(X->on_worklist == 0);
374 X->wl_next = env->worklist;
380 * Create a new empty partition.
382 * @param env the environment
384 * @return a newly allocated partition
/* Allocates on the environment obstack (freed wholesale at the end of the
   run) and zero-initializes all list heads, links, counters and flags. */
386 static INLINE partition_t *new_partition(environment_t *env) {
387 partition_t *part = obstack_alloc(&env->obst, sizeof(*part));
389 INIT_LIST_HEAD(&part->entries);
390 INIT_LIST_HEAD(&part->cprop);
391 INIT_LIST_HEAD(&part->split_list);
392 part->wl_next = NULL;
393 part->touched_next = NULL;
394 part->cprop_next = NULL;
395 part->touched = NULL;
399 part->max_user_inputs = 0;
400 part->on_worklist = 0;
401 part->on_touched = 0;
/* Debug builds only: link into the global debug list and number it. */
404 part->dbg_next = env->dbg_list;
405 env->dbg_list = part;
406 part->nr = part_nr++;
410 } /* new_partition */
413 * Get the first node from a partition.
/* Precondition: the partition is non-empty (entries list has a member). */
415 static INLINE node_t *get_first_node(const partition_t *X) {
416 return list_entry(X->entries.next, node_t, node_list);
420 * Return the type of a partition (assuming partition is non-empty and
421 * all elements have the same type).
423 * @param X a partition
425 * @return the type of the first element of the partition
427 static INLINE lattice_elem_t get_partition_type(const partition_t *X) {
428 const node_t *first = get_first_node(X);
430 } /* get_partition_type */
433 * Creates a partition node for the given IR-node and place it
434 * into the given partition.
436 * @param irn an IR-node
437 * @param part a partition to place the node in
438 * @param env the environment
440 * @return the created node
442 static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env) {
443 /* create a partition node and place it in the partition */
444 node_t *node = obstack_alloc(&env->obst, sizeof(*node));
446 INIT_LIST_HEAD(&node->node_list);
447 INIT_LIST_HEAD(&node->cprop_list);
/* Every node starts at Top; it can only fall down the lattice from here. */
451 node->type.tv = tarval_top;
452 node->max_user_input = 0;
454 node->on_touched = 0;
/* Make the node_t reachable from the IR-node via its link field. */
457 set_irn_node(irn, node);
459 list_add_tail(&node->node_list, &part->entries);
463 } /* create_partition_node */
466 * Pre-Walker, init all Block-Phi lists.
/* Resets the block's Phi list so Phis can be collected in the post-walk. */
468 static void init_block_phis(ir_node *irn, void *env) {
472 set_Block_phis(irn, NULL);
477 * Post-Walker, initialize all Nodes' type to U or top and place
478 * all nodes into the TOP partition.
480 static void create_initial_partitions(ir_node *irn, void *ctx) {
481 environment_t *env = ctx;
482 partition_t *part = env->initial;
486 node = create_partition_node(irn, part, env);
/* Track the partition-wide maxima needed later by cause_splits()/split_by(). */
488 arity = get_irn_arity(irn);
489 if (arity > part->max_arity)
490 part->max_arity = arity;
491 if (node->max_user_input > part->max_user_inputs)
492 part->max_user_inputs = node->max_user_input;
/* Phis are additionally chained onto their block's Phi list (see
   add_node_to_cprop() for why). */
495 add_Block_phi(get_nodes_block(irn), irn);
497 } /* create_initial_partitions */
500 * Add a partition to the touched set if not already there.
502 * @param part the partition
503 * @param env the environment
/* The on_touched flag makes this idempotent; the set is a singly linked
   list threaded through touched_next. */
505 static INLINE void add_to_touched(partition_t *part, environment_t *env) {
506 if (part->on_touched == 0) {
507 part->touched_next = env->touched;
509 part->on_touched = 1;
511 } /* add_to_touched */
514 * Add a node to the entry.partition.touched set if not already there.
/* Idempotent via the node's on_touched flag; pushes onto the partition's
   local touched list (threaded through node->next). */
518 static INLINE void add_to_partition_touched(node_t *y) {
519 if (y->on_touched == 0) {
520 partition_t *part = y->part;
522 y->next = part->touched;
527 } /* add_to_partition_touched */
530 * Update the worklist: If Z is on worklist then add Z' to worklist.
531 * Else add the smaller of Z and Z' to worklist.
533 * @param Z the Z partition
534 * @param Z_prime the Z' partition, a previous part of Z
535 * @param env the environment
/* Hopcroft's trick: processing the smaller half keeps the overall
   algorithm O(n log n). */
537 static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env) {
538 if (Z->on_worklist || Z_prime->n_nodes < Z->n_nodes) {
539 add_to_worklist(Z_prime, env);
541 add_to_worklist(Z, env);
543 } /* update_worklist */
546 * Split a partition by a local list.
548 * @param Z the Z partition to split
549 * @param g a (non-empty) node list
550 * @param env the environment
552 * @return a new partition containing the nodes of g
554 static partition_t *split(partition_t *Z, node_t *g, environment_t *env) {
555 partition_t *Z_prime;
558 int max_input, max_arity, arity;
560 dump_partition("Splitting ", Z);
564 /* Remove g from Z. */
565 for (node = g; node != NULL; node = node->next) {
566 list_del(&node->node_list);
/* g must be a proper subset: at least one node has to stay in Z. */
569 assert(n < Z->n_nodes);
572 /* Move g to a new partition, Z'. */
573 Z_prime = new_partition(env);
574 max_arity = max_input = 0;
575 for (node = g; node != NULL; node = node->next) {
576 list_add(&node->node_list, &Z_prime->entries);
577 node->part = Z_prime;
/* Recompute the maxima for the new partition while moving nodes. */
578 arity = get_irn_arity(node->node);
579 if (arity > max_arity)
581 if (node->max_user_input > max_input)
582 max_input = node->max_user_input;
584 Z_prime->max_arity = max_arity;
585 Z_prime->max_user_inputs = max_input;
586 Z_prime->n_nodes = n;
588 update_worklist(Z, Z_prime, env);
590 dump_partition("Now ", Z);
591 dump_partition("Created new ", Z_prime);
596 * Returns non-zero if the i'th input of a Phi node is live.
598 * @param phi a Phi-node
599 * @param i an input number
601 * @return non-zero if the i'th input of the given Phi node is live
/* NOTE(review): the i >= 0 guard branch is on a line not visible here;
   i == -1 denotes the control input, which is always live. */
603 static int is_live_input(ir_node *phi, int i) {
605 ir_node *block = get_nodes_block(phi);
606 ir_node *pred = get_Block_cfgpred(block, i);
607 lattice_elem_t type = get_node_type(pred);
609 return type.tv != tarval_unreachable;
611 /* else it's the control input, always live */
613 } /* is_live_input */
616 * Return non-zero if a type is a constant.
/* Constant means any lattice element strictly between Top and Bottom
   (tarval constant or symbolic constant). */
618 static int is_constant_type(lattice_elem_t type) {
619 if (type.tv != tarval_bottom && type.tv != tarval_top)
622 } /* is_constant_type */
625 * Place a node on the cprop list.
628 * @param env the environment
630 static void add_node_to_cprop(node_t *y, environment_t *env) {
631 /* Add y to y.partition.cprop. */
632 if (y->on_cprop == 0) {
633 partition_t *Y = y->part;
635 list_add_tail(&y->cprop_list, &Y->cprop);
638 DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));
640 /* place its partition on the cprop list */
641 if (Y->on_cprop == 0) {
642 Y->cprop_next = env->cprop;
647 if (get_irn_mode(y->node) == mode_T) {
648 /* mode_T nodes always produce tarval_bottom, so we must explicitly
649 add its Projs to get constant evaluation to work */
652 for (i = get_irn_n_outs(y->node) - 1; i >= 0; --i) {
653 node_t *proj = get_irn_node(get_irn_out(y->node, i));
655 add_node_to_cprop(proj, env);
659 if (is_Block(y->node)) {
660 /* Due to the way we handle Phi's, we must place all Phis of a block on the list
661 * if someone placed the block. The Block is only placed if the reachability
662 * changes, and this must be re-evaluated in compute_Phi(). */
664 for (phi = get_Block_phis(y->node); phi != NULL; phi = get_Phi_next(phi)) {
665 node_t *p = get_irn_node(phi);
666 add_node_to_cprop(p, env);
669 } /* add_node_to_cprop */
672 * Check whether a type is neither Top nor a constant.
673 * Note: U is handled like Top here, R is a constant.
675 * @param type the type to check
/* Returns non-zero only for Bottom/Reachable tarvals; Top, tarval
   constants and symbolic constants all return zero. */
677 static int type_is_neither_top_nor_const(const lattice_elem_t type) {
678 if (is_tarval(type.tv)) {
679 if (type.tv == tarval_top)
681 if (tarval_is_constant(type.tv))
691 * Split the partitions if caused by the first entry on the worklist.
693 * @param env the environment
695 static void cause_splits(environment_t *env) {
696 partition_t *X, *Y, *Z;
702 /* remove the first partition from the worklist */
704 env->worklist = X->wl_next;
707 dump_partition("Cause_split: ", X);
708 end_idx = env->end_idx;
/* i == -1 is the control ("block") input; data inputs follow. */
709 for (i = -1; i <= X->max_user_inputs; ++i) {
710 /* empty the touched set: already done, just clear the list */
713 list_for_each_entry(node_t, x, &X->entries, node_list) {
/* Def-Use edges are sorted by pos (see sort_irn_outs()), so each node's
   next_edge cursor only ever moves forward across the i-loop. */
719 num_edges = get_irn_n_outs(x->node);
721 while (x->next_edge <= num_edges) {
722 ir_def_use_edge *edge = &x->node->out[x->next_edge];
724 /* check if we have necessary edges */
732 /* ignore the "control input" for non-pinned nodes
733 if we are running in GCSE mode */
734 if (i < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
737 y = get_irn_node(succ);
738 if (is_constant_type(y->type)) {
/* A changed Sub/Cmp user might now compute a constant: re-evaluate it. */
739 code = get_irn_opcode(succ);
740 if (code == iro_Sub || code == iro_Cmp)
741 add_node_to_cprop(y, env);
744 /* Partitions of constants should not be split simply because their Nodes have unequal
745 functions or incongruent inputs. */
746 if (type_is_neither_top_nor_const(y->type) &&
747 (! is_Phi(y->node) || is_live_input(y->node, i))) {
749 add_to_touched(Y, env);
750 add_to_partition_touched(y);
/* Now split every touched partition whose touched subset is proper. */
755 for (Z = env->touched; Z != NULL; Z = Z->touched_next) {
756 /* remove it from the touched set */
759 if (Z->n_nodes != Z->n_touched) {
760 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
761 split(Z, Z->touched, env);
763 /* Empty local Z.touched. */
764 for (e = Z->touched; e != NULL; e = e->next) {
774 * Implements split_by_what(): Split a partition by characteristics given
775 * by the what function.
777 * @param X the partition to split
778 * @param What a function returning an Id for every node of the partition X
779 * @param P a list head to store the result partitions
780 * @param env the environment
/* @return the list P extended by the partitions split off from X plus X
   itself; the listmap groups nodes by their What()-id. */
784 static list_head *split_by_what(partition_t *X, what_func What,
785 list_head *P, environment_t *env) {
788 listmap_entry_t *iter;
791 /* Let map be an empty mapping from the range of What to (local) list of Nodes. */
793 list_for_each_entry(node_t, x, &X->entries, node_list) {
794 void *id = What(x, env);
795 listmap_entry_t *entry;
/* What() returns NULL for nodes outside the current input range. */
798 /* input not allowed, ignore */
801 /* Add x to map[What(x)]. */
802 entry = listmap_find(&map, id);
803 x->next = entry->list;
806 /* Let P be a set of Partitions. */
808 /* for all sets S except one in the range of map do */
809 for (iter = map.values; iter != NULL; iter = iter->next) {
810 if (iter->next == NULL) {
/* The last group stays in X -- splitting all groups would leave X empty. */
811 /* this is the last entry, ignore */
816 /* Add SPLIT( X, S ) to P. */
817 DB((dbg, LEVEL_2, "Split part%d by what\n", X->nr));
818 R = split(X, S, env);
819 list_add(&R->split_list, P);
822 list_add(&X->split_list, P);
826 } /* split_by_what */
828 /** lambda n.(n.type) */
/* What-function: group nodes by their lattice type (tarval pointer). */
829 static void *lambda_type(const node_t *node, environment_t *env) {
831 return node->type.tv;
834 /** lambda n.(n.opcode) */
/* What-function: group nodes by (opcode, mode) plus the Proj number for
   Proj nodes and the entity for Sel nodes; ids are interned in the
   opcode2id_map so equal keys yield pointer-equal ids. */
835 static void *lambda_opcode(const node_t *node, environment_t *env) {
836 opcode_key_t key, *entry;
837 ir_node *irn = node->node;
839 key.code = get_irn_opcode(irn);
840 key.mode = get_irn_mode(irn);
844 switch (get_irn_opcode(irn)) {
846 key.u.proj = get_Proj_proj(irn);
849 key.u.ent = get_Sel_entity(irn);
855 entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
857 } /* lambda_opcode */
859 /** lambda n.(n[i].partition) */
/* What-function: group nodes by the partition of their env->lambda_input'th
   predecessor; i == -1 selects the control input of the Proj-skipped node. */
860 static void *lambda_partition(const node_t *node, environment_t *env) {
861 ir_node *skipped = skip_Proj(node->node);
864 int i = env->lambda_input;
866 if (i >= get_irn_arity(node->node)) {
867 /* we are outside the allowed range */
871 /* ignore the "control input" for non-pinned nodes
872 if we are running in GCSE mode */
873 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
876 pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
877 p = get_irn_node(pred);
880 } /* lambda_partition */
883 * Checks whether a type is a constant.
/* Unlike is_constant_type(), this treats any symbolic constant as constant
   and asks the tarval module for tarval elements. */
885 static int is_type_constant(lattice_elem_t type) {
886 if (is_tarval(type.tv))
887 return tarval_is_constant(type.tv);
888 /* else it is a symconst */
893 * Implements split_by().
895 * @param X the partition to split
896 * @param env the environment
/* Three-stage split: first by lattice type, then by opcode, finally by the
   partitions of every input position (fixpoint per position). */
898 static void split_by(partition_t *X, environment_t *env) {
904 DB((dbg, LEVEL_2, "WHAT = lambda n.(n.type) on part%d\n", X->nr));
905 P = split_by_what(X, lambda_type, P, env);
907 partition_t *Y = list_entry(P->next, partition_t, split_list);
909 list_del(&Y->split_list);
910 if (Y->n_nodes > 1) {
911 lattice_elem_t type = get_partition_type(Y);
913 /* we do not want to split the TOP or constant partitions */
914 if (type.tv != tarval_top && !is_type_constant(type)) {
919 DB((dbg, LEVEL_2, "WHAT = lambda n.(n.opcode) on part%d\n", Y->nr));
920 Q = split_by_what(Y, lambda_opcode, Q, env);
924 partition_t *Z = list_entry(Q->next, partition_t, split_list);
925 int max_arity = Z->max_arity;
926 list_head *R = &hR, *S = &hS, *T;
928 list_del(&Z->split_list);
930 if (Z->n_nodes > 1) {
935 * BEWARE: during splitting by input 2 for instance we might
936 * create new partitions which are different by input 1, so collect
937 * them and split further.
939 list_add(&Z->split_list, R);
/* Iterate input positions from max_arity-1 down to -1 (control input). */
940 for (input = max_arity - 1; input >= -1; --input) {
942 partition_t *Z_prime = list_entry(R->next, partition_t, split_list);
944 list_del(&Z_prime->split_list);
945 if (Z_prime->n_nodes > 1) {
946 env->lambda_input = input;
947 DB((dbg, LEVEL_2, "WHAT = lambda n.(n[%d].partition) on part%d\n", input, Z_prime->nr));
948 S = split_by_what(Z_prime, lambda_partition, S, env);
950 list_add(&Z_prime->split_list, S);
952 } while (!list_empty(R));
958 } while (!list_empty(Q));
961 } while (!list_empty(P));
965 * (Re-)compute the type for a given node.
967 * @param node the node
/* Fallback transfer function: Top in an unreachable block, Top if any data
   input is Top, Reachable for mode_X results, else ask computed_value(). */
969 static void default_compute(node_t *node) {
971 ir_node *irn = node->node;
972 node_t *block = get_irn_node(get_nodes_block(irn));
974 if (block->type.tv == tarval_unreachable) {
975 node->type.tv = tarval_top;
979 /* if any of the data inputs have type top, the result is type top */
980 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
981 ir_node *pred = get_irn_n(irn, i);
982 node_t *p = get_irn_node(pred);
984 if (p->type.tv == tarval_top) {
985 node->type.tv = tarval_top;
990 if (get_irn_mode(node->node) == mode_X)
991 node->type.tv = tarval_reachable;
993 node->type.tv = computed_value(irn);
994 } /* default_compute */
997 * (Re-)compute the type for a Block node.
999 * @param node the node
/* NOTE(review): the special handling for the Start/End block appears to be
   on lines not visible here; the loop below covers the general case. */
1001 static void compute_Block(node_t *node) {
1003 ir_node *block = node->node;
1005 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1006 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1008 if (pred->type.tv == tarval_reachable) {
1009 /* A block is reachable, if at least one predecessor is reachable. */
1010 node->type.tv = tarval_reachable;
1014 node->type.tv = tarval_top;
1015 } /* compute_Block */
1018 * (Re-)compute the type for a Bad node.
1020 * @param node the node
1022 static void compute_Bad(node_t *node) {
1023 /* Bad nodes ALWAYS compute Top */
1024 node->type.tv = tarval_top;
1028 * (Re-)compute the type for an Unknown node.
1030 * @param node the node
1032 static void compute_Unknown(node_t *node) {
1033 /* While Unknown nodes compute Top, but this is dangerous:
1034 * a if (unknown) would lead to BOTH control flows unreachable.
1035 * While this is correct in the given semantics, it would destroy the Firm
1037 * For now, we compute bottom here.
/* Bottom (not Top) keeps both successors of an if(unknown) reachable. */
1039 node->type.tv = tarval_bottom;
1040 } /* compute_Unknown */
1043 * (Re-)compute the type for a Jmp node.
1045 * @param node the node
/* A Jmp simply propagates the reachability of its block. */
1047 static void compute_Jmp(node_t *node) {
1048 node_t *block = get_irn_node(get_nodes_block(node->node));
1050 node->type = block->type;
1054 * (Re-)compute the type for the End node.
1056 * @param node the node
1058 static void compute_End(node_t *node) {
1059 /* the End node is NOT dead of course */
1060 node->type.tv = tarval_reachable;
1064 * (Re-)compute the type for a SymConst node.
1066 * @param node the node
/* Address-entity SymConsts get a symbolic-constant lattice element; all
   other kinds fall back to computed_value(). */
1068 static void compute_SymConst(node_t *node) {
1069 ir_node *irn = node->node;
1070 node_t *block = get_irn_node(get_nodes_block(irn));
1072 if (block->type.tv == tarval_unreachable) {
1073 node->type.tv = tarval_top;
1076 switch (get_SymConst_kind(irn)) {
1077 case symconst_addr_ent:
1078 /* case symconst_addr_name: cannot handle this yet */
1079 node->type.sym = get_SymConst_symbol(irn);
1082 node->type.tv = computed_value(irn);
1084 } /* compute_SymConst */
1087 * (Re-)compute the type for a Phi node.
1089 * @param node the node
1091 static void compute_Phi(node_t *node) {
1093 ir_node *phi = node->node;
1094 lattice_elem_t type;
1096 /* if a Phi is in an unreachable block, its type is TOP */
1097 node_t *block = get_irn_node(get_nodes_block(phi));
1099 if (block->type.tv == tarval_unreachable) {
1100 node->type.tv = tarval_top;
1104 /* Phi implements the Meet operation */
1105 type.tv = tarval_top;
1106 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
1107 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
1108 node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
1110 if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
1111 /* ignore TOP inputs: We must check here for unreachable blocks,
1112 because Firm constants living in the Start Block are NEVER Top.
1113 Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
1114 comes from an unreachable input. */
1117 if (pred->type.tv == tarval_bottom) {
/* Bottom absorbs: the meet cannot recover, stop early. */
1118 node->type.tv = tarval_bottom;
1120 } else if (type.tv == tarval_top) {
1121 /* first constant found */
1123 } else if (type.tv != pred->type.tv) {
1124 /* different constants or tarval_bottom */
1125 node->type.tv = tarval_bottom;
1128 /* else nothing, constants are the same */
1134 * (Re-)compute the type for an Add. Special case: one node is a Zero Const.
1136 * @param node the node
1138 static void compute_Add(node_t *node) {
/* NOTE(review): the local is named 'sub' but holds the Add node. */
1139 ir_node *sub = node->node;
1140 node_t *l = get_irn_node(get_Add_left(sub));
1141 node_t *r = get_irn_node(get_Add_right(sub));
1142 lattice_elem_t a = l->type;
1143 lattice_elem_t b = r->type;
1144 node_t *block = get_irn_node(get_nodes_block(sub));
1147 if (block->type.tv == tarval_unreachable) {
1148 node->type.tv = tarval_top;
1152 if (a.tv == tarval_top || b.tv == tarval_top) {
1153 node->type.tv = tarval_top;
1154 } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
1155 node->type.tv = tarval_bottom;
1157 /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
1158 must call tarval_add() first to handle this case! */
1159 if (is_tarval(a.tv)) {
1160 if (is_tarval(b.tv)) {
1161 node->type.tv = tarval_add(a.tv, b.tv);
/* One side is a symbolic constant: result is the symconst iff the
   tarval side is the mode's zero element. */
1164 mode = get_tarval_mode(a.tv);
1165 if (a.tv == get_mode_null(mode)) {
1169 } else if (is_tarval(b.tv)) {
1170 mode = get_tarval_mode(b.tv);
1171 if (b.tv == get_mode_null(mode)) {
1176 node->type.tv = tarval_bottom;
1181 * Returns true if a type is a constant.
/* Either a symbolic (entity) constant or a constant tarval. */
1183 static int is_con(const lattice_elem_t type) {
1184 return is_entity(type.sym.entity_p) || tarval_is_constant(type.tv);
1188 * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
1190 * @param node the node
1192 static void compute_Sub(node_t *node) {
1193 ir_node *sub = node->node;
1194 node_t *l = get_irn_node(get_Sub_left(sub));
1195 node_t *r = get_irn_node(get_Sub_right(sub));
1196 lattice_elem_t a = l->type;
1197 lattice_elem_t b = r->type;
1198 node_t *block = get_irn_node(get_nodes_block(sub));
1200 if (block->type.tv == tarval_unreachable) {
1201 node->type.tv = tarval_top;
1204 if (a.tv == tarval_top || b.tv == tarval_top) {
1205 node->type.tv = tarval_top;
1206 } else if (is_con(a) && is_con(b)) {
1207 if (is_tarval(a.tv) && is_tarval(b.tv)) {
1208 node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
/* symconst cases: 0 - sym and sym - 0 (handled on lines not shown). */
1209 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
1211 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
1214 node->type.tv = tarval_bottom;
/* Congruent operands: a - a == 0, but only for non-float modes. */
1216 } else if (r->part == l->part &&
1217 (!mode_is_float(get_irn_mode(l->node)))) {
1218 if (node->type.tv == tarval_top) {
1220 * BEWARE: a - a is NOT always 0 for floating Point values, as
1221 * NaN op NaN = NaN, so we must check this here.
1223 ir_mode *mode = get_irn_mode(sub);
1224 node->type.tv = get_mode_null(mode);
1226 node->type.tv = tarval_bottom;
1229 node->type.tv = tarval_bottom;
1234 * (Re-)compute the type for Cmp.
1236 * @param node the node
/* The Cmp node itself (mode_T) only signals "evaluable"; the real result
   is computed per-Proj in compute_Proj_Cmp(). tarval_b_true here just
   marks that the Projs should be (re-)evaluated. */
1238 static void compute_Cmp(node_t *node) {
1239 ir_node *cmp = node->node;
1240 node_t *l = get_irn_node(get_Cmp_left(cmp));
1241 node_t *r = get_irn_node(get_Cmp_right(cmp));
1242 lattice_elem_t a = l->type;
1243 lattice_elem_t b = r->type;
1245 if (a.tv == tarval_top || b.tv == tarval_top) {
1246 node->type.tv = tarval_top;
1247 } else if (is_con(a) && is_con(b)) {
1248 /* both nodes are constants, we can probably do something */
1249 node->type.tv = tarval_b_true;
1250 } else if (r->part == l->part) {
1251 /* both nodes congruent, we can probably do something */
1252 node->type.tv = tarval_b_true;
1254 node->type.tv = tarval_bottom;
1256 } /* compute_Cmp */
1259 * (Re-)compute the type for a Proj(Cmp).
1261 * @param node the node
1262 * @param cond the predecessor Cmp node
1264 static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
1265 ir_node *proj = node->node;
1266 node_t *l = get_irn_node(get_Cmp_left(cmp));
1267 node_t *r = get_irn_node(get_Cmp_right(cmp));
1268 lattice_elem_t a = l->type;
1269 lattice_elem_t b = r->type;
1270 pn_Cmp pnc = get_Proj_proj(proj);
1272 if (a.tv == tarval_top || b.tv == tarval_top) {
1273 node->type.tv = tarval_top;
1274 } else if (is_con(a) && is_con(b)) {
1275 default_compute(node);
/* Congruent operands compare as equal -- but for floats only the
   strict orderings Lt/Gt are safely decidable (NaN != NaN). */
1276 } else if (r->part == l->part &&
1277 (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
1278 if (node->type.tv == tarval_top) {
1280 * BEWARE: a == a is NOT always True for floating Point values, as
1281 * NaN != NaN is defined, so we must check this here.
/* x pnc x is true iff pnc includes the Eq bit. */
1283 node->type.tv = new_tarval_from_long(pnc & pn_Cmp_Eq, mode_b);
1285 node->type.tv = tarval_bottom;
1288 node->type.tv = tarval_bottom;
1290 } /* compute_Proj_Cmp */
1293 * (Re-)compute the type for a Proj(Cond).
1295 * @param node the node
1296 * @param cond the predecessor Cond node
1298 static void compute_Proj_Cond(node_t *node, ir_node *cond) {
1299 ir_node *proj = node->node;
1300 long pnc = get_Proj_proj(proj);
1301 ir_node *sel = get_Cond_selector(cond);
1302 node_t *selector = get_irn_node(sel);
/* Boolean Cond: decide reachability of the true/false projections. */
1304 if (get_irn_mode(sel) == mode_b) {
1306 if (pnc == pn_Cond_true) {
1307 if (selector->type.tv == tarval_b_false) {
1308 node->type.tv = tarval_unreachable;
1309 } else if (selector->type.tv == tarval_b_true) {
1310 node->type.tv = tarval_reachable;
1311 } else if (selector->type.tv == tarval_bottom) {
/* Unknown selector: both branches must be considered reachable. */
1312 node->type.tv = tarval_reachable;
1314 assert(selector->type.tv == tarval_top);
1315 node->type.tv = tarval_unreachable;
1318 assert(pnc == pn_Cond_false);
1320 if (selector->type.tv == tarval_b_false) {
1321 node->type.tv = tarval_reachable;
1322 } else if (selector->type.tv == tarval_b_true) {
1323 node->type.tv = tarval_unreachable;
1324 } else if (selector->type.tv == tarval_bottom) {
1325 node->type.tv = tarval_reachable;
1327 assert(selector->type.tv == tarval_top);
1328 node->type.tv = tarval_unreachable;
/* Switch Cond: compare the constant selector against the case numbers. */
1333 if (selector->type.tv == tarval_bottom) {
1334 node->type.tv = tarval_reachable;
1335 } else if (selector->type.tv == tarval_top) {
1336 node->type.tv = tarval_unreachable;
1338 long value = get_tarval_long(selector->type.tv);
1339 if (pnc == get_Cond_defaultProj(cond)) {
1340 /* default switch, have to check ALL other cases */
1343 for (i = get_irn_n_outs(cond) - 1; i >= 0; --i) {
1344 ir_node *succ = get_irn_out(cond, i);
1348 if (value == get_Proj_proj(succ)) {
1349 /* we found a match, will NOT take the default case */
1350 node->type.tv = tarval_unreachable;
1354 /* all cases checked, no match, will take default case */
1355 node->type.tv = tarval_reachable;
/* Non-default case: reachable iff the selector equals this case number. */
1358 node->type.tv = value == pnc ? tarval_reachable : tarval_unreachable;
1362 } /* compute_Proj_Cond */
1365 * (Re-)compute the type for a Proj-Nodes.
1367 * @param node the node
1369 static void compute_Proj(node_t *node) {
1370 ir_node *proj = node->node;
1371 ir_mode *mode = get_irn_mode(proj);
1372 node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
1373 ir_node *pred = get_Proj_pred(proj);
1375 if (get_Proj_proj(proj) == pn_Start_X_initial_exec && is_Start(pred)) {
1376 /* The initial_exec node is ALWAYS reachable. */
1377 node->type.tv = tarval_reachable;
1381 if (block->type.tv == tarval_unreachable) {
1382 /* a Proj in a unreachable Block stay Top */
1383 node->type.tv = tarval_top;
1386 if (get_irn_node(pred)->type.tv == tarval_top) {
1387 /* if the predecessor is Top, its Proj follow */
1388 node->type.tv = tarval_top;
1392 if (mode == mode_M) {
1393 /* mode M is always bottom */
1394 node->type.tv = tarval_bottom;
1397 if (mode != mode_X) {
1399 compute_Proj_Cmp(node, pred);
1401 default_compute(node);
1404 /* handle mode_X nodes */
1406 switch (get_irn_opcode(pred)) {
1408 /* the Proj_X from the Start is always reachable.
1409 However this is already handled at the top. */
1410 node->type.tv = tarval_reachable;
1413 compute_Proj_Cond(node, pred);
1416 default_compute(node);
1418 } /* compute_Proj */
1421 * (Re-)compute the type for a Confirm-Nodes.
1423 * @param node the node
1425 static void compute_Confirm(node_t *node) {
1426 ir_node *confirm = node->node;
1427 node_t *pred = get_irn_node(get_Confirm_value(confirm));
1429 if (get_Confirm_cmp(confirm) == pn_Cmp_Eq) {
1430 node_t *bound = get_irn_node(get_Confirm_bound(confirm));
1432 if (is_con(bound->type)) {
1433 /* is equal to a constant */
1434 node->type = bound->type;
1438 /* a Confirm is a copy OR a Const */
1439 node->type = pred->type;
1440 } /* compute_Confirm */
1443 * (Re-)compute the type for a given node.
1445 * @param node the node
1447 static void compute(node_t *node) {
1448 compute_func func = (compute_func)node->node->op->ops.generic;
1455 * Propagate constant evaluation.
1457 * @param env the environment
1459 static void propagate(environment_t *env) {
1462 lattice_elem_t old_type;
1467 while (env->cprop != NULL) {
1468 /* remove the first partition X from cprop */
1471 env->cprop = X->cprop_next;
1473 DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
1476 while (! list_empty(&X->cprop)) {
1477 /* remove the first Node x from X.cprop */
1478 x = list_entry(X->cprop.next, node_t, cprop_list);
1479 list_del(&x->cprop_list);
1482 /* compute a new type for x */
1484 DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
1486 if (x->type.tv != old_type.tv) {
1487 verify_type(old_type, x->type);
1488 DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
1490 if (x->on_fallen == 0) {
1491 /* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
1492 not already on the list. */
1497 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
1499 for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
1500 ir_node *succ = get_irn_out(x->node, i);
1501 node_t *y = get_irn_node(succ);
1503 /* Add y to y.partition.cprop. */
1504 add_node_to_cprop(y, env);
1509 if (n_fallen > 0 && n_fallen != X->n_nodes) {
1510 DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
1511 Y = split(X, fallen, env);
1515 /* remove the nodes from the fallen list */
1516 for (x = fallen; x != NULL; x = x->next)
1525 * Get the leader for a given node from its congruence class.
1527 * @param irn the node
1529 static ir_node *get_leader(node_t *node) {
1530 partition_t *part = node->part;
1532 if (part->n_nodes > 1) {
1533 DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
1535 return get_first_node(part)->node;
1541 * Return non-zero if the control flow predecessor node pred
1542 * is the only reachable control flow exit of its block.
1544 * @param pred the control flow exit
1546 static int can_exchange(ir_node *pred) {
1549 else if (is_Jmp(pred))
1551 else if (get_irn_mode(pred) == mode_T) {
1554 /* if the predecessor block has more than one
1555 reachable outputs we cannot remove the block */
1557 for (i = get_irn_n_outs(pred) - 1; i >= 0; --i) {
1558 ir_node *proj = get_irn_out(pred, i);
1561 /* skip non-control flow Proj's */
1562 if (get_irn_mode(proj) != mode_X)
1565 node = get_irn_node(proj);
1566 if (node->type.tv == tarval_reachable) {
1577 * Block Post-Walker, apply the analysis results on control flow by
1578 * shortening Phi's and Block inputs.
1580 static void apply_cf(ir_node *block, void *ctx) {
1581 node_t *node = get_irn_node(block);
1583 ir_node **ins, **in_X;
1584 ir_node *phi, *next;
1587 if (block == get_irg_end_block(current_ir_graph) ||
1588 block == get_irg_start_block(current_ir_graph)) {
1589 /* the EndBlock is always reachable even if the analysis
1590 finds out the opposite :-) */
1593 if (node->type.tv == tarval_unreachable) {
1594 /* mark dead blocks */
1595 set_Block_dead(block);
1599 n = get_Block_n_cfgpreds(block);
1602 /* only one predecessor combine */
1603 ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));
1605 if (can_exchange(pred))
1606 exchange(block, get_nodes_block(pred));
1610 NEW_ARR_A(ir_node *, in_X, n);
1612 for (i = 0; i < n; ++i) {
1613 ir_node *pred = get_Block_cfgpred(block, i);
1614 node_t *node = get_irn_node(pred);
1616 if (node->type.tv == tarval_reachable) {
1623 NEW_ARR_A(ir_node *, ins, n);
1624 for (phi = get_Block_phis(block); phi != NULL; phi = next) {
1625 node_t *node = get_irn_node(phi);
1627 next = get_Phi_next(phi);
1628 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
1629 /* this Phi is replaced by a constant */
1630 tarval *tv = node->type.tv;
1631 ir_node *c = new_r_Const(current_ir_graph, block, get_tarval_mode(tv), tv);
1633 set_irn_node(c, node);
1635 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
1639 for (i = 0; i < n; ++i) {
1640 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1642 if (pred->type.tv == tarval_reachable) {
1643 ins[j++] = get_Phi_pred(phi, i);
1647 /* this Phi is replaced by a single predecessor */
1648 ir_node *s = ins[0];
1651 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, s));
1654 set_irn_in(phi, j, ins);
1660 /* this Block has only one live predecessor */
1661 ir_node *pred = skip_Proj(in_X[0]);
1663 if (can_exchange(pred))
1664 exchange(block, get_nodes_block(pred));
1666 set_irn_in(block, k, in_X);
1671 * Post-Walker, apply the analysis results;
1673 static void apply_result(ir_node *irn, void *ctx) {
1674 node_t *node = get_irn_node(irn);
1677 if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
1678 /* blocks already handled, do not touch the End node */
1680 node_t *block = get_irn_node(get_nodes_block(irn));
1682 if (block->type.tv == tarval_unreachable) {
1683 ir_node *bad = get_irg_bad(current_ir_graph);
1685 /* here, bad might already have a node, but this can be safely ignored
1686 as long as bad has at least ONE valid node */
1687 set_irn_node(bad, node);
1689 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
1692 else if (node->type.tv == tarval_unreachable) {
1693 ir_node *bad = get_irg_bad(current_ir_graph);
1695 /* see comment above */
1696 set_irn_node(bad, node);
1698 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
1701 else if (get_irn_mode(irn) == mode_X) {
1704 ir_node *cond = get_Proj_pred(irn);
1706 if (is_Cond(cond)) {
1707 node_t *sel = get_irn_node(get_Cond_selector(cond));
1709 if (is_tarval(sel->type.tv) && tarval_is_constant(sel->type.tv)) {
1710 /* Cond selector is a constant, make a Jmp */
1711 ir_node *jmp = new_r_Jmp(current_ir_graph, block->node);
1712 set_irn_node(jmp, node);
1714 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
1720 /* normal data node */
1721 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
1722 tarval *tv = node->type.tv;
1724 if (! is_Const(irn)) {
1725 /* can be replaced by a constant */
1726 ir_node *c = new_r_Const(current_ir_graph, block->node, get_tarval_mode(tv), tv);
1727 set_irn_node(c, node);
1729 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
1732 } else if (is_entity(node->type.sym.entity_p)) {
1733 if (! is_SymConst(irn)) {
1734 /* can be replaced by a Symconst */
1735 ir_node *symc = new_r_SymConst(current_ir_graph, block->node, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
1736 set_irn_node(symc, node);
1739 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
1740 exchange(irn, symc);
1743 ir_node *leader = get_leader(node);
1745 if (leader != irn) {
1746 DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
1747 exchange(irn, leader);
1752 } /* apply_result */
1754 #define SET(code) op_##code->ops.generic = (op_func)compute_##code
1757 * sets the generic functions to compute.
1759 static void set_compute_functions(void) {
1762 /* set the default compute function */
1763 for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
1764 ir_op *op = get_irp_opcode(i);
1765 op->ops.generic = (op_func)default_compute;
1768 /* set specific functions */
1781 } /* set_compute_functions */
1783 static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
1784 ir_node *irn = local != NULL ? local : n;
1785 node_t *node = get_irn_node(irn);
1787 ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
1791 void combo(ir_graph *irg) {
1795 ir_graph *rem = current_ir_graph;
1797 current_ir_graph = irg;
1799 /* register a debug mask */
1800 FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
1801 //firm_dbg_set_mask(dbg, SET_LEVEL_3);
1803 DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
1805 obstack_init(&env.obst);
1806 env.worklist = NULL;
1810 #ifdef DEBUG_libfirm
1811 env.dbg_list = NULL;
1813 env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
1814 env.type2id_map = pmap_create();
1815 env.end_idx = get_opt_global_cse() ? 0 : -1;
1816 env.lambda_input = 0;
1818 assure_irg_outs(irg);
1820 /* we have our own value_of function */
1821 set_value_of_func(get_node_tarval);
1823 set_compute_functions();
1824 DEBUG_ONLY(part_nr = 0);
1826 /* create the initial partition and place it on the work list */
1827 env.initial = new_partition(&env);
1828 add_to_worklist(env.initial, &env);
1829 irg_walk_graph(irg, init_block_phis, create_initial_partitions, &env);
1831 /* Place the START Node's partition on cprop.
1832 Place the START Node on its local worklist. */
1833 initial_X = get_irg_initial_exec(irg);
1834 start = get_irn_node(initial_X);
1835 add_node_to_cprop(start, &env);
1839 if (env.worklist != NULL)
1841 } while (env.cprop != NULL || env.worklist != NULL);
1843 dump_all_partitions(&env);
1846 set_dump_node_vcgattr_hook(dump_partition_hook);
1847 dump_ir_block_graph(irg, "-partition");
1848 set_dump_node_vcgattr_hook(NULL);
1850 (void)dump_partition_hook;
1853 /* apply the result */
1854 irg_block_walk_graph(irg, NULL, apply_cf, &env);
1855 irg_walk_graph(irg, NULL, apply_result, &env);
1857 pmap_destroy(env.type2id_map);
1858 del_set(env.opcode2id_map);
1859 obstack_free(&env.obst, NULL);
1861 /* restore value_of() default behavior */
1862 set_value_of_func(NULL);
1863 current_ir_graph = rem;