 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Cliff Click's Combined Analysis/Optimization
23 * @author Michael Beck
26 * Note further that we use the terminology from Click's work here, which is different
27 * in some cases from Firm terminology. Especially, Click's type is a
28 * Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
36 #include "iroptimize.h"
44 #include "irgraph_t.h"
51 #include "iropt_dbg.h"
61 /* define this to check that all type translations are monotone */
62 #undef VERIFY_MONOTONE
64 /* define this to check the consistency of partitions */
65 #define CHECK_PARTITIONS
67 typedef struct node_t node_t;
68 typedef struct partition_t partition_t;
69 typedef struct opcode_key_t opcode_key_t;
70 typedef struct listmap_entry_t listmap_entry_t;
72 /** The type of the compute function. */
73 typedef void (*compute_func)(node_t *node);
79 ir_opcode code; /**< The Firm opcode. */
80 ir_mode *mode; /**< The mode of all nodes in the partition. */
81 int arity; /**< The arity of this opcode (needed for Phi etc. */
83 long proj; /**< For Proj nodes, its proj number */
84 ir_entity *ent; /**< For Sel Nodes, its entity */
89 * An entry in the list_map.
91 struct listmap_entry_t {
92 void *id; /**< The id. */
93 node_t *list; /**< The associated list for this id. */
94 listmap_entry_t *next; /**< Link to the next entry in the map. */
97 /** We must map id's to lists. */
98 typedef struct listmap_t {
99 set *map; /**< Map id's to listmap_entry_t's */
100 listmap_entry_t *values; /**< List of all values in the map. */
104 * A lattice element. Because we handle constants and symbolic constants different, we
105 * have to use this union.
116 ir_node *node; /**< The IR-node itself. */
117 list_head node_list; /**< Double-linked list of leader/follower entries. */
118 list_head cprop_list; /**< Double-linked partition.cprop list. */
119 partition_t *part; /**< points to the partition this node belongs to */
120 node_t *next; /**< Next node on local list (partition.touched, fallen). */
121 node_t *race_next; /**< Next node on race list. */
122 lattice_elem_t type; /**< The associated lattice element "type". */
123 int max_user_input; /**< Maximum input number of Def-Use edges. */
124 int next_edge; /**< Index of the next Def-Use edge to use. */
125 int n_followers; /**< Number of Follower in the outs set. */
126 unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
127 unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
128 unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
129 unsigned is_follower:1; /**< Set, if this node is a follower. */
130 unsigned by_all_const:1; /**< Set, if this node was once evaluated by all constants. */
131 unsigned flagged:2; /**< 2 Bits, set if this node was visited by race 1 or 2. */
135 * A partition containing congruent nodes.
138 list_head Leader; /**< The head of partition Leader node list. */
139 list_head Follower; /**< The head of partition Follower node list. */
140 list_head cprop; /**< The head of partition.cprop list. */
141 partition_t *wl_next; /**< Next entry in the work list if any. */
142 partition_t *touched_next; /**< Points to the next partition in the touched set. */
143 partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
144 partition_t *split_next; /**< Points to the next partition in the list that must be split by split_by(). */
145 node_t *touched; /**< The partition.touched set of this partition. */
146 unsigned n_leader; /**< Number of entries in this partition.Leader. */
147 unsigned n_touched; /**< Number of entries in the partition.touched. */
148 int max_user_inputs; /**< Maximum number of user inputs of all entries. */
149 unsigned on_worklist:1; /**< Set, if this partition is in the work list. */
150 unsigned on_touched:1; /**< Set, if this partition is on the touched set. */
151 unsigned on_cprop:1; /**< Set, if this partition is on the cprop list. */
152 unsigned type_is_T_or_C:1;/**< Set, if all nodes in this partition have type Top or Constant. */
154 partition_t *dbg_next; /**< Link all partitions for debugging */
155 unsigned nr; /**< A unique number for (what-)mapping, >0. */
159 typedef struct environment_t {
160 struct obstack obst; /**< obstack to allocate data structures. */
161 partition_t *worklist; /**< The work list. */
162 partition_t *cprop; /**< The constant propagation list. */
163 partition_t *touched; /**< the touched set. */
164 partition_t *initial; /**< The initial partition. */
165 set *opcode2id_map; /**< The opcodeMode->id map. */
166 pmap *type2id_map; /**< The type->id map. */
167 int end_idx; /**< -1 for local and 0 for global congruences. */
168 int lambda_input; /**< Captured argument for lambda_partition(). */
169 char nonstd_cond; /**< Set, if a Condb note has a non-Cmp predecessor. */
170 char modified; /**< Set, if the graph was modified. */
172 partition_t *dbg_list; /**< List of all partitions. */
176 /** Type of the what function. */
177 typedef void *(*what_func)(const node_t *node, environment_t *env);
179 #define get_irn_node(follower) ((node_t *)get_irn_link(follower))
180 #define set_irn_node(follower, node) set_irn_link(follower, node)
182 /* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
183 #undef tarval_unreachable
184 #define tarval_unreachable tarval_top
187 /** The debug module handle. */
188 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
190 /** Next partition number. */
191 DEBUG_ONLY(static unsigned part_nr = 0);
193 /** The tarval returned by Unknown nodes. */
194 static tarval *tarval_UNKNOWN;
197 static node_t *identity(node_t *node);
199 #ifdef CHECK_PARTITIONS
/**
 * Consistency check for one partition T: every Leader entry must be a
 * non-follower, unflagged node belonging to T; every Follower entry must
 * be a flagged-as-follower node belonging to T.
 */
static void check_partition(const partition_t *T) {
	list_for_each_entry(node_t, node, &T->Leader, node_list) {
		assert(node->is_follower == 0);
		assert(node->flagged == 0);
		assert(node->part == T);
	/* the counted leader entries must match the cached size */
	assert(n == T->n_leader);
	list_for_each_entry(node_t, node, &T->Follower, node_list) {
		assert(node->is_follower == 1);
		assert(node->flagged == 0);
		assert(node->part == T);
} /* check_partition */
/**
 * Consistency check for all partitions on the debug list: every Follower
 * must resolve (via identity()) to a Leader other than itself in the same
 * partition.
 */
static void check_all_partitions(environment_t *env) {
	for (P = env->dbg_list; P != NULL; P = P->dbg_next) {
		list_for_each_entry(node_t, node, &P->Follower, node_list) {
			node_t *leader = identity(node);

			assert(leader != node && leader->part == node->part);
/**
 * Check a local single-linked list: every entry (linked through the
 * pointer at byte offset ofs inside node_t) must belong to partition Z.
 */
static void do_check_list(const node_t *list, int ofs, const partition_t *Z) {
#define NEXT(e) *((const node_t **)((char *)(e) + (ofs)))
	for (e = list; e != NULL; e = NEXT(e)) {
		assert(e->part == Z);
} /* do_check_list */
/**
 * Check a local list linked via node_t::next.
 */
static void check_list(const node_t *list, const partition_t *Z) {
	do_check_list(list, offsetof(node_t, next), Z);
259 #define check_partition(T)
260 #define check_list(list, Z)
261 #define check_all_partitions(env)
262 #endif /* CHECK_PARTITIONS */
265 static INLINE lattice_elem_t get_partition_type(const partition_t *X);
/**
 * Dump a partition to the debug output: its number, size, type and the
 * Leader nodes, followed by the Follower nodes (if any) after a "---".
 */
static void dump_partition(const char *msg, const partition_t *part) {
	lattice_elem_t type = get_partition_type(part);

	/* a "*" after the number marks an all-Top-or-Constant partition */
	DB((dbg, LEVEL_2, "%s part%u%s (%u, %+F) {\n ",
		msg, part->nr, part->type_is_T_or_C ? "*" : "",
		part->n_leader, type));
	list_for_each_entry(node_t, node, &part->Leader, node_list) {
		DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
	if (! list_empty(&part->Follower)) {
		DB((dbg, LEVEL_2, "\n---\n "));
		list_for_each_entry(node_t, node, &part->Follower, node_list) {
			DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
	DB((dbg, LEVEL_2, "\n}\n"));
} /* dump_partition */
/**
 * Dump a local single-linked node list (linked through the pointer at
 * byte offset ofs) to the debug output.
 */
static void do_dump_list(const char *msg, const node_t *node, int ofs) {
#define GET_LINK(p, ofs) *((const node_t **)((char *)(p) + (ofs)))
	DB((dbg, LEVEL_3, "%s = {\n ", msg));
	for (p = node; p != NULL; p = GET_LINK(p, ofs)) {
		DB((dbg, LEVEL_3, "%s%+F", first ? "" : ", ", p->node));
	DB((dbg, LEVEL_3, "\n}\n"));
/** Dump a race list (linked via node_t::race_next). */
static void dump_race_list(const char *msg, const node_t *list) {
	do_dump_list(msg, list, offsetof(node_t, race_next));
/**
 * Dump a local list (linked via node_t::next).
 */
static void dump_list(const char *msg, const node_t *list) {
	do_dump_list(msg, list, offsetof(node_t, next));
/**
 * Dump all partitions on the debug list.
 */
static void dump_all_partitions(const environment_t *env) {
	const partition_t *P;

	DB((dbg, LEVEL_2, "All partitions\n===============\n"));
	for (P = env->dbg_list; P != NULL; P = P->dbg_next)
		dump_partition("", P);
338 #define dump_partition(msg, part)
339 #define dump_race_list(msg, list)
340 #define dump_list(msg, list)
341 #define dump_all_partitions(env)
344 #if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
/**
 * Verify that a type transition is monotone: a node's type may only move
 * downwards in the lattice (Top -> constant/reachable -> Bottom); any
 * upward move indicates a bug and triggers a panic.
 */
static void verify_type(const lattice_elem_t old_type, const lattice_elem_t new_type) {
	if (old_type.tv == new_type.tv) {
	if (old_type.tv == tarval_top) {
		/* from Top down-to is always allowed */
	if (old_type.tv == tarval_reachable) {
		panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
	if (new_type.tv == tarval_bottom || new_type.tv == tarval_reachable) {
	panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
367 #define verify_type(old_type, new_type)
371 * Compare two pointer values of a listmap.
373 static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
374 const listmap_entry_t *e1 = elt;
375 const listmap_entry_t *e2 = key;
378 return e1->id != e2->id;
379 } /* listmap_cmp_ptr */
/**
 * Initializes a listmap: creates the id -> entry hash set.
 *
 * @param map  the listmap
 */
static void listmap_init(listmap_t *map) {
	map->map = new_set(listmap_cmp_ptr, 16);
/**
 * Terminates a listmap and frees its resources.
 *
 * @param map  the listmap
 */
static void listmap_term(listmap_t *map) {
/**
 * Return the associated listmap entry for a given id, creating and
 * registering a fresh entry on first lookup.
 *
 * @param map  the listmap
 * @param id   the id to search for
 *
 * @return the associated listmap entry for the given id
 */
static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
	listmap_entry_t key, *entry;

	entry = set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
	if (entry->list == NULL) {
		/* a new entry, put into the list */
		entry->next = map->values;
425 * Calculate the hash value for an opcode map entry.
427 * @param entry an opcode map entry
429 * @return a hash value for the given opcode map entry
431 static unsigned opcode_hash(const opcode_key_t *entry) {
432 return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent);
/**
 * Compare two entries in the opcode map.
 *
 * @return 0 if both keys describe the same opcode, mode, arity and
 *         opcode-specific attributes (set convention)
 */
static int cmp_opcode(const void *elt, const void *key, size_t size) {
	const opcode_key_t *o1 = elt;
	const opcode_key_t *o2 = key;

	return o1->code != o2->code || o1->mode != o2->mode ||
	       o1->arity != o2->arity ||
	       o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent;
449 * Compare two Def-Use edges for input position.
451 static int cmp_def_use_edge(const void *a, const void *b) {
452 const ir_def_use_edge *ea = a;
453 const ir_def_use_edge *eb = b;
455 /* no overrun, because range is [-1, MAXINT] */
456 return ea->pos - eb->pos;
457 } /* cmp_def_use_edge */
/**
 * Sort the Def-Use (out) edges of a node by input position; afterwards
 * the last edge carries the maximum user input position, which is cached
 * in node->max_user_input.
 */
static void sort_irn_outs(node_t *node) {
	ir_node *irn = node->node;
	int n_outs = get_irn_n_outs(irn);

	/* edges are stored 1-based in the out array */
	qsort(&irn->out[1], n_outs, sizeof(irn->out[0]), cmp_def_use_edge);
	/* ascending sort: the last edge has the biggest input position */
	node->max_user_input = irn->out[n_outs].pos;
} /* sort_irn_outs */
473 * Return the type of a node.
475 * @param irn an IR-node
477 * @return the associated type of this node
479 static INLINE lattice_elem_t get_node_type(const ir_node *irn) {
480 return get_irn_node(irn)->type;
481 } /* get_node_type */
484 * Return the tarval of a node.
486 * @param irn an IR-node
488 * @return the associated type of this node
490 static INLINE tarval *get_node_tarval(const ir_node *irn) {
491 lattice_elem_t type = get_node_type(irn);
493 if (is_tarval(type.tv))
495 return tarval_bottom;
496 } /* get_node_type */
/**
 * Add a partition to the worklist; the partition must not be on it yet.
 */
static INLINE void add_to_worklist(partition_t *X, environment_t *env) {
	assert(X->on_worklist == 0);
	/* push X onto the single-linked worklist */
	X->wl_next = env->worklist;
} /* add_to_worklist */
/**
 * Create a new empty partition on the environment's obstack with all
 * lists initialized and all flags cleared.
 *
 * @param env  the environment
 *
 * @return a newly allocated partition
 */
static INLINE partition_t *new_partition(environment_t *env) {
	partition_t *part = obstack_alloc(&env->obst, sizeof(*part));

	INIT_LIST_HEAD(&part->Leader);
	INIT_LIST_HEAD(&part->Follower);
	INIT_LIST_HEAD(&part->cprop);
	part->wl_next = NULL;
	part->touched_next = NULL;
	part->cprop_next = NULL;
	part->split_next = NULL;
	part->touched = NULL;
	part->max_user_inputs = 0;
	part->on_worklist = 0;
	part->on_touched = 0;
	part->type_is_T_or_C = 0;
	/* debugging support: link into the global list, assign a unique number */
	part->dbg_next = env->dbg_list;
	env->dbg_list = part;
	part->nr = part_nr++;
} /* new_partition */
543 * Get the first node from a partition.
545 static INLINE node_t *get_first_node(const partition_t *X) {
546 return list_entry(X->Leader.next, node_t, node_list);
547 } /* get_first_node */
/**
 * Return the type of a partition (assuming the partition is non-empty and
 * all of its elements have the same type).
 *
 * @param X  a partition
 *
 * @return the type of the first element of the partition
 */
static INLINE lattice_elem_t get_partition_type(const partition_t *X) {
	const node_t *first = get_first_node(X);
	/* NOTE(review): the return statement is missing in this view of the file */
} /* get_partition_type */
/**
 * Creates a partition node for the given IR-node and places it
 * into the given partition's Leader list.
 *
 * @param irn   an IR-node
 * @param part  a partition to place the node in
 * @param env   the environment
 *
 * @return the created node
 */
static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env) {
	/* create a partition node and place it in the partition */
	node_t *node = obstack_alloc(&env->obst, sizeof(*node));

	INIT_LIST_HEAD(&node->node_list);
	INIT_LIST_HEAD(&node->cprop_list);
	node->race_next = NULL;
	/* every node starts optimistically at Top */
	node->type.tv = tarval_top;
	node->max_user_input = 0;
	node->n_followers = 0;
	node->on_touched = 0;
	node->is_follower = 0;
	node->by_all_const = 0;
	/* backlink from the IR-node to its partition node */
	set_irn_node(irn, node);
	list_add_tail(&node->node_list, &part->Leader);
} /* create_partition_node */
/**
 * Pre-Walker: initialize all Block-Phi lists to empty.
 */
static void init_block_phis(ir_node *irn, void *env) {
	set_Block_phis(irn, NULL);
} /* init_block_phis */
/**
 * Post-Walker: initialize all Nodes' type to U or top and place
 * all nodes into the initial TOP partition.
 */
static void create_initial_partitions(ir_node *irn, void *ctx) {
	environment_t *env = ctx;
	partition_t *part = env->initial;

	node = create_partition_node(irn, part, env);
	/* track the biggest user input position over the whole partition */
	if (node->max_user_input > part->max_user_inputs)
		part->max_user_inputs = node->max_user_input;
		add_Block_phi(get_nodes_block(irn), irn);
	} else if (is_Cond(irn)) {
		/* check if all Cond's have a Cmp predecessor. */
		if (get_irn_mode(irn) == mode_b && !is_Cmp(skip_Proj(get_Cond_selector(irn))))
			env->nonstd_cond = 1;
} /* create_initial_partitions */
/**
 * Add a node to its partition's touched set, and the partition itself
 * to the global touched set if it is not already there.
 *
 * @param y    the node
 * @param env  the environment
 */
static INLINE void add_to_touched(node_t *y, environment_t *env) {
	if (y->on_touched == 0) {
		partition_t *part = y->part;

		/* push y onto its partition's local touched list */
		y->next = part->touched;
		if (part->on_touched == 0) {
			/* and the partition onto the global touched set */
			part->touched_next = env->touched;
			part->on_touched = 1;
		check_list(part->touched, part);
} /* add_to_touched */
/**
 * Place a node on the cprop (constant propagation) list, together with
 * its partition; recurses into Projs of mode_T nodes and into the Phis
 * of a Block.
 *
 * @param y    the node
 * @param env  the environment
 */
static void add_to_cprop(node_t *y, environment_t *env) {
	/* Add y to y.partition.cprop. */
	if (y->on_cprop == 0) {
		partition_t *Y = y->part;

		list_add_tail(&y->cprop_list, &Y->cprop);
		DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));

		/* place its partition on the cprop list */
		if (Y->on_cprop == 0) {
			Y->cprop_next = env->cprop;
	if (get_irn_mode(y->node) == mode_T) {
		/* mode_T nodes always produce tarval_bottom, so we must explicitly
		   add its Proj's to get constant evaluation to work */
		for (i = get_irn_n_outs(y->node) - 1; i >= 0; --i) {
			node_t *proj = get_irn_node(get_irn_out(y->node, i));

			add_to_cprop(proj, env);
	} else if (is_Block(y->node)) {
		/* Due to the way we handle Phi's, we must place all Phis of a block on the list
		 * if someone placed the block. The Block is only placed if the reachability
		 * changes, and this must be re-evaluated in compute_Phi(). */
		for (phi = get_Block_phis(y->node); phi != NULL; phi = get_Phi_next(phi)) {
			node_t *p = get_irn_node(phi);
			add_to_cprop(p, env);
/**
 * Update the worklist: If Z is on the worklist then add Z' to the
 * worklist. Else add the smaller of Z and Z' to the worklist.
 *
 * @param Z        the Z partition
 * @param Z_prime  the Z' partition, a previous part of Z
 * @param env      the environment
 */
static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env) {
	if (Z->on_worklist || Z_prime->n_leader < Z->n_leader) {
		add_to_worklist(Z_prime, env);
		add_to_worklist(Z, env);
} /* update_worklist */
/**
 * Make all inputs to x no longer be Follower def-use edges: for every
 * predecessor, the matching edge is removed from the Follower region of
 * its out array and sorted back into the Leader region.
 *
 * @param x  the node
 */
static void move_edges_to_leader(node_t *x) {
	ir_node *irn = x->node;

	for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
		node_t *pred = get_irn_node(get_irn_n(irn, i));

		n = get_irn_n_outs(p);
		/* follower edges occupy out[1 .. n_followers] */
		for (j = 1; j <= pred->n_followers; ++j) {
			if (p->out[j].pos == i && p->out[j].use == irn) {
				/* found a follower edge to x, move it to the Leader */
				ir_def_use_edge edge = p->out[j];

				/* remove this edge from the Follower set */
				p->out[j] = p->out[pred->n_followers];
				/* sort it into the leader set */
				for (k = pred->n_followers + 2; k <= n; ++k) {
					if (p->out[k].pos >= edge.pos)
					p->out[k - 1] = p->out[k];
				/* place the new edge here */
				p->out[k - 1] = edge;

				/* edge found and moved */
} /* move_edges_to_leader */
/**
 * Split a partition that has NO followers by a local list: fast path
 * that avoids the race algorithm.
 *
 * @param Z    partition to split
 * @param g    a (non-empty) node list
 * @param env  the environment
 *
 * @return a new partition containing the nodes of g
 */
static partition_t *split_no_followers(partition_t *Z, node_t *g, environment_t *env) {
	partition_t *Z_prime;

	dump_partition("Splitting ", Z);
	dump_list("by list ", g);

	/* Remove g from Z. */
	for (node = g; node != NULL; node = node->next) {
		assert(node->part == Z);
		list_del(&node->node_list);
	/* g must be a proper subset of Z's leaders */
	assert(n < Z->n_leader);

	/* Move g to a new partition, Z'. */
	Z_prime = new_partition(env);
	for (node = g; node != NULL; node = node->next) {
		list_add_tail(&node->node_list, &Z_prime->Leader);
		node->part = Z_prime;
		if (node->max_user_input > max_input)
			max_input = node->max_user_input;
	Z_prime->max_user_inputs = max_input;
	Z_prime->n_leader = n;

	check_partition(Z_prime);

	/* for now, copy the type info tag, it will be adjusted in split_by(). */
	Z_prime->type_is_T_or_C = Z->type_is_T_or_C;

	update_worklist(Z, Z_prime, env);

	dump_partition("Now ", Z);
	dump_partition("Created new ", Z_prime);
} /* split_no_followers */
/**
 * Make the Follower -> Leader transition for a node: move its def-use
 * edges into the Leader region and relink it onto the Leader list of
 * its partition.
 *
 * @param n  the node
 */
static void follower_to_leader(node_t *n) {
	assert(n->is_follower == 1);

	DB((dbg, LEVEL_2, "%+F make the follower -> leader transition\n", n->node));
	move_edges_to_leader(n);
	list_del(&n->node_list);
	list_add_tail(&n->node_list, &n->part->Leader);
} /* follower_to_leader */
/**
 * The environment for one side of the race between the two halves of a
 * partition split.
 */
typedef struct step_env {
	node_t   *initial;   /**< The initial node list. */
	node_t   *unwalked;  /**< The not-yet-walked node list. */
	node_t   *walked;    /**< The already-walked node list. */
	int      index;      /**< Next index of the Follower use_def edge. */
	unsigned side;       /**< Side number (bit used in node_t::flagged). */
/**
 * Return non-zero if an input is a real follower edge; some opcode
 * inputs (Confirm bound, Mux sel, dead Block cfgpreds, non-neutral
 * operands) are never followers.
 *
 * @param irn    the node to check
 * @param input  number of the input
 */
static int is_real_follower(const ir_node *irn, int input) {
	switch (get_irn_opcode(irn)) {
		/* ignore the Confirm bound input */
		/* ignore the Mux sel input */
		/* dead inputs are not follower edges */
		ir_node *block = get_nodes_block(irn);
		node_t *pred = get_irn_node(get_Block_cfgpred(block, input));

		if (pred->type.tv == tarval_unreachable)
		/* only a Sub x,0 / Shift x,0 might be a follower */
		pred = get_irn_node(get_irn_n(irn, input));
		if (is_tarval(pred->type.tv) && tarval_is_null(pred->type.tv))
		pred = get_irn_node(get_irn_n(irn, input));
		if (is_tarval(pred->type.tv) && tarval_is_one(pred->type.tv))
		pred = get_irn_node(get_irn_n(irn, input));
		if (is_tarval(pred->type.tv) && tarval_is_all_one(pred->type.tv))
		/* all inputs are followers */
		assert(!"opcode not implemented yet");
/**
 * Do one step in the race: take a node from the initial/unwalked lists,
 * flag the real followers of its partition reachable through def-use
 * edges, then move the node to the walked list.
 */
static int step(step_env *env) {
	if (env->initial != NULL) {
		/* Move node from initial to unwalked */
		env->initial = n->race_next;
		n->race_next = env->unwalked;
	while (env->unwalked != NULL) {
		/* let n be the first node in unwalked */
		while (env->index < n->n_followers) {
			const ir_def_use_edge *edge = &n->node->out[1 + env->index];

			/* let m be n.F.def_use[index] */
			node_t *m = get_irn_node(edge->use);

			assert(m->is_follower);
			/*
			 * Some inputs, like the get_Confirm_bound are NOT
			 * real followers, sort them out.
			 */
			if (! is_real_follower(m->node, edge->pos)) {
			/* only followers from our partition */
			if (m->part != n->part)
			if ((m->flagged & env->side) == 0) {
				m->flagged |= env->side;

				if (m->flagged != 3) {
					/* visited the first time */
					/* add m to unwalked not as first node (we might still need to
					   check for more follower node */
					m->race_next = n->race_next;
			/* else already visited by the other side and on the other list */
		/* move n to walked */
		env->unwalked = n->race_next;
		n->race_next = env->walked;
/**
 * Clear the race flags from a list and check for nodes that were
 * touched from both sides: such followers become leaders.
 *
 * @param list  the list
 */
static int clear_flags(node_t *list) {
	for (n = list; n != NULL; n = n->race_next) {
		if (n->flagged == 3) {
			/* we reached a follower from both sides, this will split congruent
			 * inputs and make it a leader. */
			follower_to_leader(n);
/**
 * Split a partition by a local list using the race: the two halves race
 * over their follower closures, the winner (smaller side) is moved into
 * a new partition X'.
 *
 * @param pX   pointer to the partition to split, might be changed!
 * @param gg   a (non-empty) node list
 * @param env  the environment
 *
 * @return a new partition containing the nodes of gg
 */
static partition_t *split(partition_t **pX, node_t *gg, environment_t *env) {
	partition_t *X = *pX;
	partition_t *X_prime;
	step_env env1, env2, *winner;
	node_t *g, *h, *node, *t;
	int max_input, transitions;
	DEBUG_ONLY(static int run = 0;)

	DB((dbg, LEVEL_2, "Run %d ", run++));
	if (list_empty(&X->Follower)) {
		/* if the partition has NO follower, we can use the fast
		   splitting algorithm. */
		return split_no_followers(X, gg, env);
	/* else do the race */
	dump_partition("Splitting ", X);
	dump_list("by list ", gg);

	INIT_LIST_HEAD(&tmp);

	/* Remove gg from X.Leader and put into g */
	for (node = gg; node != NULL; node = node->next) {
		assert(node->part == X);
		assert(node->is_follower == 0);

		list_del(&node->node_list);
		list_add_tail(&node->node_list, &tmp);
		node->race_next = g;
	/* the remaining leaders form the h list */
	list_for_each_entry(node_t, node, &X->Leader, node_list) {
		node->race_next = h;
	/* restore X.Leader */
	list_splice(&tmp, &X->Leader);

	env1.unwalked = NULL;
	env2.unwalked = NULL;

	assert(winner->initial == NULL);
	assert(winner->unwalked == NULL);

	/* clear flags from walked/unwalked */
	transitions = clear_flags(env1.unwalked);
	transitions |= clear_flags(env1.walked);
	transitions |= clear_flags(env2.unwalked);
	transitions |= clear_flags(env2.walked);

	dump_race_list("winner ", winner->walked);

	/* Move walked_{winner} to a new partition, X'. */
	X_prime = new_partition(env);
	for (node = winner->walked; node != NULL; node = node->race_next) {
		list_del(&node->node_list);
		node->part = X_prime;
		if (node->is_follower) {
			list_add_tail(&node->node_list, &X_prime->Follower);
			list_add_tail(&node->node_list, &X_prime->Leader);
		if (node->max_user_input > max_input)
			max_input = node->max_user_input;
	X_prime->n_leader = n;
	X_prime->max_user_inputs = max_input;
	X->n_leader -= X_prime->n_leader;

	/* for now, copy the type info tag, it will be adjusted in split_by(). */
	X_prime->type_is_T_or_C = X->type_is_T_or_C;

	/*
	 * Even if a follower was not checked by both sides, it might have
	 * lost its congruence, so we need to check this case for all followers.
	 */
	list_for_each_entry_safe(node_t, node, t, &X_prime->Follower, node_list) {
		if (identity(node) == node) {
			follower_to_leader(node);
	check_partition(X_prime);

	/* X' is the smaller part */
	add_to_worklist(X_prime, env);

	/*
	 * If there were follower to leader transitions, ensure that the nodes
	 * can be split out if necessary.
	 */
	/* place partitions on the cprop list */
	if (X_prime->on_cprop == 0) {
		X_prime->cprop_next = env->cprop;
		env->cprop = X_prime;
		X_prime->on_cprop = 1;
	dump_partition("Now ", X);
	dump_partition("Created new ", X_prime);

	/* we have to ensure that the partition containing g is returned */
	if (winner == &env2) {
/**
 * Returns non-zero if the i'th input of a Phi node is live, i.e. its
 * controlling cfg predecessor is reachable.
 *
 * @param phi  a Phi-node
 * @param i    an input number
 *
 * @return non-zero if the i'th input of the given Phi node is live
 */
static int is_live_input(ir_node *phi, int i) {
	ir_node *block = get_nodes_block(phi);
	ir_node *pred = get_Block_cfgpred(block, i);
	lattice_elem_t type = get_node_type(pred);

	return type.tv != tarval_unreachable;
	/* else it's the control input, always live */
} /* is_live_input */
/**
 * Return non-zero if a type is a constant, i.e. neither Bottom nor Top.
 */
static int is_constant_type(lattice_elem_t type) {
	if (type.tv != tarval_bottom && type.tv != tarval_top)
} /* is_constant_type */
/**
 * Check whether a type is neither Top nor a constant.
 * Note: U is handled like Top here, R is a constant.
 *
 * @param type  the type to check
 */
static int type_is_neither_top_nor_const(const lattice_elem_t type) {
	if (is_tarval(type.tv)) {
		if (type.tv == tarval_top)
		if (tarval_is_constant(type.tv))
/**
 * Collect nodes reached over the idx'th def-use edge of all nodes on a
 * Leader/Follower list into the touched set.
 *
 * @param list  the list which contains the nodes that must be evaluated
 * @param idx   the index of the def_use edge to evaluate
 * @param env   the environment
 */
static void collect_touched(list_head *list, int idx, environment_t *env) {
	int end_idx = env->end_idx;

	list_for_each_entry(node_t, x, list, node_list) {
		/* leader edges start AFTER follower edges */
		x->next_edge = x->n_followers + 1;
		num_edges = get_irn_n_outs(x->node);

		/* for all edges in x.L.def_use_{idx} */
		while (x->next_edge <= num_edges) {
			const ir_def_use_edge *edge = &x->node->out[x->next_edge];

			/* check if we have necessary edges */
			if (edge->pos > idx)
			/* ignore the "control input" for non-pinned nodes
			   if we are running in GCSE mode */
			if (idx < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
			y = get_irn_node(succ);
			assert(get_irn_n(succ, idx) == x->node);

			/* ignore block edges touching followers */
			if (idx == -1 && y->is_follower)
			if (is_constant_type(y->type)) {
				ir_opcode code = get_irn_opcode(succ);
				if (code == iro_Sub || code == iro_Eor || code == iro_Cmp)
					add_to_cprop(y, env);
			/* Partitions of constants should not be split simply because their Nodes have unequal
			   functions or incongruent inputs. */
			if (type_is_neither_top_nor_const(y->type) &&
			    (! is_Phi(y->node) || is_live_input(y->node, idx))) {
				add_to_touched(y, env);
} /* collect_touched */
/**
 * Split the partitions if caused by the first entry on the worklist:
 * for each input index, collect the touched partitions and split each
 * one that is only partially touched.
 *
 * @param env  the environment
 */
static void cause_splits(environment_t *env) {
	partition_t *X, *Z, *N;

	/* remove the first partition from the worklist */
	env->worklist = X->wl_next;

	dump_partition("Cause_split: ", X);

	/* combine temporary leader and follower list */
	for (idx = -1; idx <= X->max_user_inputs; ++idx) {
		/* empty the touched set: already done, just clear the list */
		env->touched = NULL;

		collect_touched(&X->Leader, idx, env);
		collect_touched(&X->Follower, idx, env);

		for (Z = env->touched; Z != NULL; Z = N) {
			node_t *touched = Z->touched;
			unsigned n_touched = Z->n_touched;

			assert(Z->touched != NULL);

			/* beware, split might change Z */
			N = Z->touched_next;

			/* remove it from the touched set */

			/* Empty local Z.touched. */
			for (e = touched; e != NULL; e = e->next) {
				assert(e->is_follower == 0);
			/* only split if a proper non-empty subset was touched */
			if (0 < n_touched && n_touched < Z->n_leader) {
				DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
				split(&Z, touched, env);
				assert(n_touched <= Z->n_leader);
} /* cause_splits */
/**
 * Implements split_by_what(): Split a partition by the characteristics
 * given by the What function — nodes mapping to the same id stay together.
 *
 * @param X     the partition to split
 * @param What  a function returning an Id for every node of the partition X
 * @param P     a list to store the result partitions
 * @param env   the environment
 */
static partition_t *split_by_what(partition_t *X, what_func What,
                                  partition_t **P, environment_t *env) {
	listmap_entry_t *iter;

	/* Let map be an empty mapping from the range of What to (local) list of Nodes. */
	list_for_each_entry(node_t, x, &X->Leader, node_list) {
		void *id = What(x, env);
		listmap_entry_t *entry;

		/* input not allowed, ignore */
		/* Add x to map[What(x)]. */
		entry = listmap_find(&map, id);
		x->next = entry->list;
	/* Let P be a set of Partitions. */
	/* for all sets S except one in the range of map do */
	for (iter = map.values; iter != NULL; iter = iter->next) {
		if (iter->next == NULL) {
			/* this is the last entry, ignore */
		/* Add SPLIT( X, S ) to P. */
		DB((dbg, LEVEL_2, "Split part%d by what\n", X->nr));
		R = split(&X, S, env);
} /* split_by_what */
/** lambda n.(n.type): What-function using the node's lattice type as id. */
static void *lambda_type(const node_t *node, environment_t *env) {
	return node->type.tv;
/** lambda n.(n.opcode): What-function using opcode, mode, arity and
 *  opcode-specific attributes (Proj number / Sel entity) as id. */
static void *lambda_opcode(const node_t *node, environment_t *env) {
	opcode_key_t key, *entry;
	ir_node *irn = node->node;

	key.code = get_irn_opcode(irn);
	key.mode = get_irn_mode(irn);
	key.arity = get_irn_arity(irn);
	switch (get_irn_opcode(irn)) {
		key.u.proj = get_Proj_proj(irn);
		key.u.ent = get_Sel_entity(irn);
	/* intern the key: equal keys yield the same pointer id */
	entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
} /* lambda_opcode */
1401 /** lambda n.(n[i].partition) */
1402 static void *lambda_partition(const node_t *node, environment_t *env) {
1403 ir_node *skipped = skip_Proj(node->node);
1406 int i = env->lambda_input;
1408 if (i >= get_irn_arity(node->node)) {
1409 /* we are outside the allowed range */
1413 /* ignore the "control input" for non-pinned nodes
1414 if we are running in GCSE mode */
1415 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
1418 pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
1419 p = get_irn_node(pred);
1422 } /* lambda_partition */
1425 * Returns true if a type is a constant.
1427 static int is_con(const lattice_elem_t type) {
1428 /* be conservative */
1429 if (is_tarval(type.tv))
1430 return tarval_is_constant(type.tv);
1431 return is_entity(type.sym.entity_p);
1435 * Implements split_by().
1437 * @param X the partition to split
1438 * @param env the environment
1440 static void split_by(partition_t *X, environment_t *env) {
1441 partition_t *I, *P = NULL;
1444 dump_partition("split_by", X);
1446 if (X->n_leader == 1) {
1447 /* we have only one leader, no need to split, just check it's type */
1448 node_t *x = get_first_node(X);
1449 X->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1453 DB((dbg, LEVEL_2, "WHAT = lambda n.(n.type) on part%d\n", X->nr));
1454 P = split_by_what(X, lambda_type, &P, env);
1456 /* adjust the type tags, we have split partitions by type */
1457 for (I = P; I != NULL; I = I->split_next) {
1458 node_t *x = get_first_node(I);
1459 I->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1466 if (Y->n_leader > 1) {
1467 /* we do not want split the TOP or constant partitions */
1468 if (! Y->type_is_T_or_C) {
1469 partition_t *Q = NULL;
1471 DB((dbg, LEVEL_2, "WHAT = lambda n.(n.opcode) on part%d\n", Y->nr));
1472 Q = split_by_what(Y, lambda_opcode, &Q, env);
1478 if (Z->n_leader > 1) {
1479 const node_t *first = get_first_node(Z);
1480 int arity = get_irn_arity(first->node);
1484 * BEWARE: during splitting by input 2 for instance we might
1485 * create new partitions which are different by input 1, so collect
1486 * them and split further.
1488 Z->split_next = NULL;
1491 for (input = arity - 1; input >= -1; --input) {
1493 partition_t *Z_prime = R;
1496 if (Z_prime->n_leader > 1) {
1497 env->lambda_input = input;
1498 DB((dbg, LEVEL_2, "WHAT = lambda n.(n[%d].partition) on part%d\n", input, Z_prime->nr));
1499 S = split_by_what(Z_prime, lambda_partition, &S, env);
1501 Z_prime->split_next = S;
1504 } while (R != NULL);
1509 } while (Q != NULL);
1512 } while (P != NULL);
1516 * (Re-)compute the type for a given node.
1518 * @param node the node
1520 static void default_compute(node_t *node) {
1522 ir_node *irn = node->node;
1523 node_t *block = get_irn_node(get_nodes_block(irn));
1525 if (block->type.tv == tarval_unreachable) {
1526 node->type.tv = tarval_top;
1530 /* if any of the data inputs have type top, the result is type top */
1531 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
1532 ir_node *pred = get_irn_n(irn, i);
1533 node_t *p = get_irn_node(pred);
1535 if (p->type.tv == tarval_top) {
1536 node->type.tv = tarval_top;
1541 if (get_irn_mode(node->node) == mode_X)
1542 node->type.tv = tarval_reachable;
1544 node->type.tv = computed_value(irn);
1545 } /* default_compute */
1548 * (Re-)compute the type for a Block node.
1550 * @param node the node
1552 static void compute_Block(node_t *node) {
1554 ir_node *block = node->node;
1556 if (block == get_irg_start_block(current_ir_graph)) {
1557 /* start block is always reachable */
1558 node->type.tv = tarval_reachable;
1562 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1563 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1565 if (pred->type.tv == tarval_reachable) {
1566 /* A block is reachable, if at least of predecessor is reachable. */
1567 node->type.tv = tarval_reachable;
1571 node->type.tv = tarval_top;
1572 } /* compute_Block */
1575 * (Re-)compute the type for a Bad node.
1577 * @param node the node
1579 static void compute_Bad(node_t *node) {
1580 /* Bad nodes ALWAYS compute Top */
1581 node->type.tv = tarval_top;
1585 * (Re-)compute the type for an Unknown node.
1587 * @param node the node
1589 static void compute_Unknown(node_t *node) {
1590 /* While Unknown nodes should compute Top this is dangerous:
1591 * a Top input to a Cond would lead to BOTH control flows unreachable.
1592 * While this is correct in the given semantics, it would destroy the Firm
1595 * It would be safe to compute Top IF it can be assured, that only Cmp
1596 * nodes are inputs to Conds. We check that first.
1597 * This is the way Frontends typically build Firm, but some optimizations
1598 * (cond_eval for instance) might replace them by Phib's...
1600 node->type.tv = tarval_UNKNOWN;
1601 } /* compute_Unknown */
1604 * (Re-)compute the type for a Jmp node.
1606 * @param node the node
1608 static void compute_Jmp(node_t *node) {
1609 node_t *block = get_irn_node(get_nodes_block(node->node));
1611 node->type = block->type;
1615 * (Re-)compute the type for the End node.
1617 * @param node the node
1619 static void compute_End(node_t *node) {
1620 /* the End node is NOT dead of course */
1621 node->type.tv = tarval_reachable;
1625 * (Re-)compute the type for a SymConst node.
1627 * @param node the node
1629 static void compute_SymConst(node_t *node) {
1630 ir_node *irn = node->node;
1631 node_t *block = get_irn_node(get_nodes_block(irn));
1633 if (block->type.tv == tarval_unreachable) {
1634 node->type.tv = tarval_top;
1637 switch (get_SymConst_kind(irn)) {
1638 case symconst_addr_ent:
1639 /* case symconst_addr_name: cannot handle this yet */
1640 node->type.sym = get_SymConst_symbol(irn);
1643 node->type.tv = computed_value(irn);
1645 } /* compute_SymConst */
1648 * (Re-)compute the type for a Phi node.
1650 * @param node the node
1652 static void compute_Phi(node_t *node) {
1654 ir_node *phi = node->node;
1655 lattice_elem_t type;
1657 /* if a Phi is in a unreachable block, its type is TOP */
1658 node_t *block = get_irn_node(get_nodes_block(phi));
1660 if (block->type.tv == tarval_unreachable) {
1661 node->type.tv = tarval_top;
1665 /* Phi implements the Meet operation */
1666 type.tv = tarval_top;
1667 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
1668 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
1669 node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
1671 if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
1672 /* ignore TOP inputs: We must check here for unreachable blocks,
1673 because Firm constants live in the Start Block are NEVER Top.
1674 Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
1675 comes from a unreachable input. */
1678 if (pred->type.tv == tarval_bottom) {
1679 node->type.tv = tarval_bottom;
1681 } else if (type.tv == tarval_top) {
1682 /* first constant found */
1684 } else if (type.tv != pred->type.tv) {
1685 /* different constants or tarval_bottom */
1686 node->type.tv = tarval_bottom;
1689 /* else nothing, constants are the same */
1695 * (Re-)compute the type for an Add. Special case: one nodes is a Zero Const.
1697 * @param node the node
1699 static void compute_Add(node_t *node) {
1700 ir_node *sub = node->node;
1701 node_t *l = get_irn_node(get_Add_left(sub));
1702 node_t *r = get_irn_node(get_Add_right(sub));
1703 lattice_elem_t a = l->type;
1704 lattice_elem_t b = r->type;
1707 if (a.tv == tarval_top || b.tv == tarval_top) {
1708 node->type.tv = tarval_top;
1709 } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
1710 node->type.tv = tarval_bottom;
1712 /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
1713 must call tarval_add() first to handle this case! */
1714 if (is_tarval(a.tv)) {
1715 if (is_tarval(b.tv)) {
1716 node->type.tv = tarval_add(a.tv, b.tv);
1719 mode = get_tarval_mode(a.tv);
1720 if (a.tv == get_mode_null(mode)) {
1724 } else if (is_tarval(b.tv)) {
1725 mode = get_tarval_mode(b.tv);
1726 if (b.tv == get_mode_null(mode)) {
1731 node->type.tv = tarval_bottom;
1736 * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
1738 * @param node the node
1740 static void compute_Sub(node_t *node) {
1741 ir_node *sub = node->node;
1742 node_t *l = get_irn_node(get_Sub_left(sub));
1743 node_t *r = get_irn_node(get_Sub_right(sub));
1744 lattice_elem_t a = l->type;
1745 lattice_elem_t b = r->type;
1748 if (a.tv == tarval_top || b.tv == tarval_top) {
1749 node->type.tv = tarval_top;
1750 } else if (is_con(a) && is_con(b)) {
1751 if (is_tarval(a.tv) && is_tarval(b.tv)) {
1752 node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
1753 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
1755 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
1758 node->type.tv = tarval_bottom;
1760 node->by_all_const = 1;
1761 } else if (r->part == l->part &&
1762 (!mode_is_float(get_irn_mode(l->node)))) {
1764 * BEWARE: a - a is NOT always 0 for floating Point values, as
1765 * NaN op NaN = NaN, so we must check this here.
1767 ir_mode *mode = get_irn_mode(sub);
1768 tv = get_mode_null(mode);
1770 /* if the node was ONCE evaluated by all constants, but now
1771 this breakes AND we cat by partition a different result, switch to bottom.
1772 This happens because initially all nodes are in the same partition ... */
1773 if (node->by_all_const && node->type.tv != tv)
1777 node->type.tv = tarval_bottom;
1782 * (Re-)compute the type for an Eor. Special case: both nodes are congruent.
1784 * @param node the node
1786 static void compute_Eor(node_t *node) {
1787 ir_node *eor = node->node;
1788 node_t *l = get_irn_node(get_Eor_left(eor));
1789 node_t *r = get_irn_node(get_Eor_right(eor));
1790 lattice_elem_t a = l->type;
1791 lattice_elem_t b = r->type;
1794 if (a.tv == tarval_top || b.tv == tarval_top) {
1795 node->type.tv = tarval_top;
1796 } else if (is_con(a) && is_con(b)) {
1797 if (is_tarval(a.tv) && is_tarval(b.tv)) {
1798 node->type.tv = tarval_eor(a.tv, b.tv);
1799 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
1801 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
1804 node->type.tv = tarval_bottom;
1806 node->by_all_const = 1;
1807 } else if (r->part == l->part) {
1808 ir_mode *mode = get_irn_mode(eor);
1809 tv = get_mode_null(mode);
1811 /* if the node was ONCE evaluated by all constants, but now
1812 this breakes AND we cat by partition a different result, switch to bottom.
1813 This happens because initially all nodes are in the same partition ... */
1814 if (node->by_all_const && node->type.tv != tv)
1818 node->type.tv = tarval_bottom;
1823 * (Re-)compute the type for Cmp.
1825 * @param node the node
1827 static void compute_Cmp(node_t *node) {
1828 ir_node *cmp = node->node;
1829 node_t *l = get_irn_node(get_Cmp_left(cmp));
1830 node_t *r = get_irn_node(get_Cmp_right(cmp));
1831 lattice_elem_t a = l->type;
1832 lattice_elem_t b = r->type;
1834 if (a.tv == tarval_top || b.tv == tarval_top) {
1837 * Top is congruent to any other value, we can
1838 * calculate the compare result.
1840 node->type.tv = tarval_b_true;
1842 node->type.tv = tarval_top;
1844 } else if (is_con(a) && is_con(b)) {
1845 /* both nodes are constants, we can probably do something */
1846 node->type.tv = tarval_b_true;
1847 } else if (r->part == l->part) {
1848 /* both nodes congruent, we can probably do something */
1849 node->type.tv = tarval_b_true;
1851 node->type.tv = tarval_bottom;
1853 } /* compute_Proj_Cmp */
1856 * (Re-)compute the type for a Proj(Cmp).
1858 * @param node the node
1859 * @param cond the predecessor Cmp node
1861 static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
1862 ir_node *proj = node->node;
1863 node_t *l = get_irn_node(get_Cmp_left(cmp));
1864 node_t *r = get_irn_node(get_Cmp_right(cmp));
1865 lattice_elem_t a = l->type;
1866 lattice_elem_t b = r->type;
1867 pn_Cmp pnc = get_Proj_proj(proj);
1870 if (a.tv == tarval_top || b.tv == tarval_top) {
1873 tv = new_tarval_from_long((pnc & pn_Cmp_Eq) ^ pn_Cmp_Eq, mode_b);
1876 node->type.tv = tarval_top;
1878 } else if (is_con(a) && is_con(b)) {
1879 default_compute(node);
1880 node->by_all_const = 1;
1881 } else if (r->part == l->part &&
1882 (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
1884 * BEWARE: a == a is NOT always True for floating Point values, as
1885 * NaN != NaN is defined, so we must check this here.
1887 tv = new_tarval_from_long(pnc & pn_Cmp_Eq, mode_b);
1892 /* if the node was ONCE evaluated by all constants, but now
1893 this breakes AND we cat by partition a different result, switch to bottom.
1894 This happens because initially all nodes are in the same partition ... */
1895 if (node->by_all_const && node->type.tv != tv)
1899 node->type.tv = tarval_bottom;
1901 } /* compute_Proj_Cmp */
1904 * (Re-)compute the type for a Proj(Cond).
1906 * @param node the node
1907 * @param cond the predecessor Cond node
1909 static void compute_Proj_Cond(node_t *node, ir_node *cond) {
1910 ir_node *proj = node->node;
1911 long pnc = get_Proj_proj(proj);
1912 ir_node *sel = get_Cond_selector(cond);
1913 node_t *selector = get_irn_node(sel);
1915 if (get_irn_mode(sel) == mode_b) {
1917 if (pnc == pn_Cond_true) {
1918 if (selector->type.tv == tarval_b_false) {
1919 node->type.tv = tarval_unreachable;
1920 } else if (selector->type.tv == tarval_b_true) {
1921 node->type.tv = tarval_reachable;
1922 } else if (selector->type.tv == tarval_bottom) {
1923 node->type.tv = tarval_reachable;
1925 assert(selector->type.tv == tarval_top);
1926 node->type.tv = tarval_unreachable;
1929 assert(pnc == pn_Cond_false);
1931 if (selector->type.tv == tarval_b_false) {
1932 node->type.tv = tarval_reachable;
1933 } else if (selector->type.tv == tarval_b_true) {
1934 node->type.tv = tarval_unreachable;
1935 } else if (selector->type.tv == tarval_bottom) {
1936 node->type.tv = tarval_reachable;
1938 assert(selector->type.tv == tarval_top);
1939 node->type.tv = tarval_unreachable;
1944 if (selector->type.tv == tarval_bottom) {
1945 node->type.tv = tarval_reachable;
1946 } else if (selector->type.tv == tarval_top) {
1947 node->type.tv = tarval_unreachable;
1949 long value = get_tarval_long(selector->type.tv);
1950 if (pnc == get_Cond_defaultProj(cond)) {
1951 /* default switch, have to check ALL other cases */
1954 for (i = get_irn_n_outs(cond) - 1; i >= 0; --i) {
1955 ir_node *succ = get_irn_out(cond, i);
1959 if (value == get_Proj_proj(succ)) {
1960 /* we found a match, will NOT take the default case */
1961 node->type.tv = tarval_unreachable;
1965 /* all cases checked, no match, will take default case */
1966 node->type.tv = tarval_reachable;
1969 node->type.tv = value == pnc ? tarval_reachable : tarval_unreachable;
1973 } /* compute_Proj_Cond */
1976 * (Re-)compute the type for a Proj-Node.
1978 * @param node the node
1980 static void compute_Proj(node_t *node) {
1981 ir_node *proj = node->node;
1982 ir_mode *mode = get_irn_mode(proj);
1983 node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
1984 ir_node *pred = get_Proj_pred(proj);
1986 if (block->type.tv == tarval_unreachable) {
1987 /* a Proj in a unreachable Block stay Top */
1988 node->type.tv = tarval_top;
1991 if (get_irn_node(pred)->type.tv == tarval_top) {
1992 /* if the predecessor is Top, its Proj follow */
1993 node->type.tv = tarval_top;
1997 if (mode == mode_M) {
1998 /* mode M is always bottom */
1999 node->type.tv = tarval_bottom;
2002 if (mode != mode_X) {
2004 compute_Proj_Cmp(node, pred);
2006 default_compute(node);
2009 /* handle mode_X nodes */
2011 switch (get_irn_opcode(pred)) {
2013 /* the Proj_X from the Start is always reachable.
2014 However this is already handled at the top. */
2015 node->type.tv = tarval_reachable;
2018 compute_Proj_Cond(node, pred);
2021 default_compute(node);
2023 } /* compute_Proj */
2026 * (Re-)compute the type for a Confirm.
2028 * @param node the node
2030 static void compute_Confirm(node_t *node) {
2031 ir_node *confirm = node->node;
2032 node_t *pred = get_irn_node(get_Confirm_value(confirm));
2034 if (get_Confirm_cmp(confirm) == pn_Cmp_Eq) {
2035 node_t *bound = get_irn_node(get_Confirm_bound(confirm));
2037 if (is_con(bound->type)) {
2038 /* is equal to a constant */
2039 node->type = bound->type;
2043 /* a Confirm is a copy OR a Const */
2044 node->type = pred->type;
2045 } /* compute_Confirm */
2048 * (Re-)compute the type for a Max.
2050 * @param node the node
2052 static void compute_Max(node_t *node) {
2053 ir_node *op = node->node;
2054 node_t *l = get_irn_node(get_binop_left(op));
2055 node_t *r = get_irn_node(get_binop_right(op));
2056 lattice_elem_t a = l->type;
2057 lattice_elem_t b = r->type;
2059 if (a.tv == tarval_top || b.tv == tarval_top) {
2060 node->type.tv = tarval_top;
2061 } else if (is_con(a) && is_con(b)) {
2062 /* both nodes are constants, we can probably do something */
2064 /* this case handles symconsts as well */
2067 ir_mode *mode = get_irn_mode(op);
2068 tarval *tv_min = get_mode_min(mode);
2072 else if (b.tv == tv_min)
2074 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2075 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2076 node->type.tv = a.tv;
2078 node->type.tv = b.tv;
2080 node->type.tv = tarval_bad;
2083 } else if (r->part == l->part) {
2084 /* both nodes congruent, we can probably do something */
2087 node->type.tv = tarval_bottom;
2092 * (Re-)compute the type for a Min.
2094 * @param node the node
2096 static void compute_Min(node_t *node) {
2097 ir_node *op = node->node;
2098 node_t *l = get_irn_node(get_binop_left(op));
2099 node_t *r = get_irn_node(get_binop_right(op));
2100 lattice_elem_t a = l->type;
2101 lattice_elem_t b = r->type;
2103 if (a.tv == tarval_top || b.tv == tarval_top) {
2104 node->type.tv = tarval_top;
2105 } else if (is_con(a) && is_con(b)) {
2106 /* both nodes are constants, we can probably do something */
2108 /* this case handles symconsts as well */
2111 ir_mode *mode = get_irn_mode(op);
2112 tarval *tv_max = get_mode_max(mode);
2116 else if (b.tv == tv_max)
2118 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2119 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2120 node->type.tv = a.tv;
2122 node->type.tv = b.tv;
2124 node->type.tv = tarval_bad;
2127 } else if (r->part == l->part) {
2128 /* both nodes congruent, we can probably do something */
2131 node->type.tv = tarval_bottom;
2136 * (Re-)compute the type for a given node.
2138 * @param node the node
2140 static void compute(node_t *node) {
2143 if (is_no_Block(node->node)) {
2144 node_t *block = get_irn_node(get_nodes_block(node->node));
2146 if (block->type.tv == tarval_unreachable) {
2147 node->type.tv = tarval_top;
2152 func = (compute_func)node->node->op->ops.generic;
 * Identity functions: Note that one might think that identity() is just a
 * synonym for equivalent_node(). While this is true, we cannot use it for the algorithm
 * here, because it expects that the identity node is one of the inputs, which is NOT
 * always true for equivalent_node() which can handle (and does sometimes) DAGs.
 * So, we have our own implementation, which copies some parts of equivalent_node()
2166 * Calculates the Identity for Phi nodes
2168 static node_t *identity_Phi(node_t *node) {
2169 ir_node *phi = node->node;
2170 ir_node *block = get_nodes_block(phi);
2171 node_t *n_part = NULL;
2174 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
2175 node_t *pred_X = get_irn_node(get_Block_cfgpred(block, i));
2177 if (pred_X->type.tv == tarval_reachable) {
2178 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
2182 else if (n_part->part != pred->part) {
2183 /* incongruent inputs, not a follower */
2188 /* if n_part is NULL here, all inputs path are dead, the Phi computes
2189 * tarval_top, is in the TOP partition and should NOT being split! */
2190 assert(n_part != NULL);
2192 } /* identity_Phi */
2195 * Calculates the Identity for commutative 0 neutral nodes.
2197 static node_t *identity_comm_zero_binop(node_t *node) {
2198 ir_node *op = node->node;
2199 node_t *a = get_irn_node(get_binop_left(op));
2200 node_t *b = get_irn_node(get_binop_right(op));
2201 ir_mode *mode = get_irn_mode(op);
2204 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2205 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2208 /* node: no input should be tarval_top, else the binop would be also
2209 * Top and not being split. */
2210 zero = get_mode_null(mode);
2211 if (a->type.tv == zero)
2213 if (b->type.tv == zero)
2216 } /* identity_comm_zero_binop */
2219 * Calculates the Identity for Shift nodes.
2221 static node_t *identity_shift(node_t *node) {
2222 ir_node *op = node->node;
2223 node_t *b = get_irn_node(get_binop_right(op));
2224 ir_mode *mode = get_irn_mode(b->node);
2227 /* node: no input should be tarval_top, else the binop would be also
2228 * Top and not being split. */
2229 zero = get_mode_null(mode);
2230 if (b->type.tv == zero)
2231 return get_irn_node(get_binop_left(op));
2233 } /* identity_shift */
2236 * Calculates the Identity for Mul nodes.
2238 static node_t *identity_Mul(node_t *node) {
2239 ir_node *op = node->node;
2240 node_t *a = get_irn_node(get_Mul_left(op));
2241 node_t *b = get_irn_node(get_Mul_right(op));
2242 ir_mode *mode = get_irn_mode(op);
2245 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2246 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2249 /* node: no input should be tarval_top, else the binop would be also
2250 * Top and not being split. */
2251 one = get_mode_one(mode);
2252 if (a->type.tv == one)
2254 if (b->type.tv == one)
2257 } /* identity_Mul */
2260 * Calculates the Identity for Sub nodes.
2262 static node_t *identity_Sub(node_t *node) {
2263 ir_node *sub = node->node;
2264 node_t *b = get_irn_node(get_Sub_right(sub));
2265 ir_mode *mode = get_irn_mode(sub);
2267 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2268 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2271 /* node: no input should be tarval_top, else the binop would be also
2272 * Top and not being split. */
2273 if (b->type.tv == get_mode_null(mode))
2274 return get_irn_node(get_Sub_left(sub));
2276 } /* identity_Mul */
2279 * Calculates the Identity for And nodes.
2281 static node_t *identity_And(node_t *node) {
2282 ir_node *and = node->node;
2283 node_t *a = get_irn_node(get_And_left(and));
2284 node_t *b = get_irn_node(get_And_right(and));
2285 tarval *neutral = get_mode_all_one(get_irn_mode(and));
2287 /* node: no input should be tarval_top, else the And would be also
2288 * Top and not being split. */
2289 if (a->type.tv == neutral)
2291 if (b->type.tv == neutral)
2294 } /* identity_And */
2297 * Calculates the Identity for Confirm nodes.
2299 static node_t *identity_Confirm(node_t *node) {
2300 ir_node *confirm = node->node;
2302 /* a Confirm is always a Copy */
2303 return get_irn_node(get_Confirm_value(confirm));
2304 } /* identity_Confirm */
2307 * Calculates the Identity for Mux nodes.
2309 static node_t *identity_Mux(node_t *node) {
2310 ir_node *mux = node->node;
2311 node_t *t = get_irn_node(get_Mux_true(mux));
2312 node_t *f = get_irn_node(get_Mux_false(mux));
2315 if (t->part == f->part)
2318 /* for now, the 1-input identity is not supported */
2320 sel = get_irn_node(get_Mux_sel(mux));
2322 /* Mux sel input is mode_b, so it is always a tarval */
2323 if (sel->type.tv == tarval_b_true)
2325 if (sel->type.tv == tarval_b_false)
2329 } /* identity_Mux */
2332 * Calculates the Identity for Min nodes.
2334 static node_t *identity_Min(node_t *node) {
2335 ir_node *op = node->node;
2336 node_t *a = get_irn_node(get_binop_left(op));
2337 node_t *b = get_irn_node(get_binop_right(op));
2338 ir_mode *mode = get_irn_mode(op);
2341 if (a->part == b->part) {
2342 /* leader of multiple predecessors */
2346 /* works even with NaN */
2347 tv_max = get_mode_max(mode);
2348 if (a->type.tv == tv_max)
2350 if (b->type.tv == tv_max)
2353 } /* identity_Min */
2356 * Calculates the Identity for Max nodes.
2358 static node_t *identity_Max(node_t *node) {
2359 ir_node *op = node->node;
2360 node_t *a = get_irn_node(get_binop_left(op));
2361 node_t *b = get_irn_node(get_binop_right(op));
2362 ir_mode *mode = get_irn_mode(op);
2365 if (a->part == b->part) {
2366 /* leader of multiple predecessors */
2370 /* works even with NaN */
2371 tv_min = get_mode_min(mode);
2372 if (a->type.tv == tv_min)
2374 if (b->type.tv == tv_min)
2377 } /* identity_Max */
2380 * Calculates the Identity for nodes.
2382 static node_t *identity(node_t *node) {
2383 ir_node *irn = node->node;
2385 switch (get_irn_opcode(irn)) {
2387 return identity_Phi(node);
2389 return identity_Mul(node);
2393 return identity_comm_zero_binop(node);
2398 return identity_shift(node);
2400 return identity_And(node);
2402 return identity_Sub(node);
2404 return identity_Confirm(node);
2406 return identity_Mux(node);
2408 return identity_Min(node);
2410 return identity_Max(node);
2417 * Node follower is a (new) follower of leader, segregate Leader
2420 static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader) {
2421 ir_node *l = leader->node;
2422 int j, i, n = get_irn_n_outs(l);
2424 DB((dbg, LEVEL_2, "%+F is a follower of %+F\n", follower, leader->node));
2425 /* The leader edges must remain sorted, but follower edges can
2427 for (i = leader->n_followers + 1; i <= n; ++i) {
2428 if (l->out[i].use == follower) {
2429 ir_def_use_edge t = l->out[i];
2431 for (j = i - 1; j >= leader->n_followers + 1; --j)
2432 l->out[j + 1] = l->out[j];
2433 ++leader->n_followers;
2434 l->out[leader->n_followers] = t;
2438 } /* segregate_def_use_chain_1 */
2441 * Node follower is a (new) follower of leader, segregate Leader
2442 * out edges. If follower is a n-congruent Input identity, all follower
2443 * inputs congruent to follower are also leader.
2445 * @param follower the follower IR node
2447 static void segregate_def_use_chain(const ir_node *follower) {
2450 for (i = get_irn_arity(follower) - 1; i >= 0; --i) {
2451 node_t *pred = get_irn_node(get_irn_n(follower, i));
2453 segregate_def_use_chain_1(follower, pred);
2455 } /* segregate_def_use_chain */
2458 * Propagate constant evaluation.
2460 * @param env the environment
2462 static void propagate(environment_t *env) {
2465 lattice_elem_t old_type;
2467 unsigned n_fallen, old_type_was_T_or_C;
2470 while (env->cprop != NULL) {
2471 void *oldopcode = NULL;
2473 /* remove the first partition X from cprop */
2476 env->cprop = X->cprop_next;
2478 old_type_was_T_or_C = X->type_is_T_or_C;
2480 DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
2483 while (! list_empty(&X->cprop)) {
2484 /* remove the first Node x from X.cprop */
2485 x = list_entry(X->cprop.next, node_t, cprop_list);
2486 //assert(x->part == X);
2487 list_del(&x->cprop_list);
2490 if (x->is_follower && identity(x) == x) {
2491 /* check the opcode first */
2492 if (oldopcode == NULL) {
2493 oldopcode = lambda_opcode(get_first_node(X), env);
2495 if (oldopcode != lambda_opcode(x, env)) {
2496 if (x->on_fallen == 0) {
2497 /* different opcode -> x falls out of this partition */
2502 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
2506 /* x will make the follower -> leader transition */
2507 follower_to_leader(x);
2510 /* compute a new type for x */
2512 DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
2514 if (x->type.tv != old_type.tv) {
2515 verify_type(old_type, x->type);
2516 DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
2518 if (x->on_fallen == 0) {
2519 /* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
2520 not already on the list. */
2525 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
2527 for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
2528 ir_node *succ = get_irn_out(x->node, i);
2529 node_t *y = get_irn_node(succ);
2531 /* Add y to y.partition.cprop. */
2532 add_to_cprop(y, env);
2537 if (n_fallen > 0 && n_fallen != X->n_leader) {
2538 DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
2539 Y = split(&X, fallen, env);
2541 * We have split out fallen node. The type of the result
2542 * partition is NOT set yet.
2544 Y->type_is_T_or_C = 0;
2548 /* remove the flags from the fallen list */
2549 for (x = fallen; x != NULL; x = x->next)
2552 if (old_type_was_T_or_C) {
2555 if (Y->on_worklist == 0)
2556 add_to_worklist(Y, env);
2558 /* check if some nodes will make the leader -> follower transition */
2559 list_for_each_entry_safe(node_t, y, tmp, &Y->Leader, node_list) {
2560 if (y->type.tv != tarval_top && ! is_con(y->type)) {
2561 node_t *eq_node = identity(y);
2563 if (eq_node != y && eq_node->part == y->part) {
2564 DB((dbg, LEVEL_2, "Node %+F is a follower of %+F\n", y->node, eq_node->node));
2565 /* move to Follower */
2567 list_del(&y->node_list);
2568 list_add_tail(&y->node_list, &Y->Follower);
2571 segregate_def_use_chain(y->node);
2581 * Get the leader for a given node from its congruence class.
2583 * @param irn the node
2585 static ir_node *get_leader(node_t *node) {
2586 partition_t *part = node->part;
2588 if (part->n_leader > 1 || node->is_follower) {
2589 if (node->is_follower) {
2590 DB((dbg, LEVEL_2, "Replacing follower %+F\n", node->node));
2593 DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
2595 return get_first_node(part)->node;
2601 * Return non-zero if the control flow predecessor node pred
2602 * is the only reachable control flow exit of its block.
2604 * @param pred the control flow exit
2606 static int can_exchange(ir_node *pred) {
2609 else if (is_Jmp(pred))
2611 else if (get_irn_mode(pred) == mode_T) {
2614 /* if the predecessor block has more than one
2615 reachable outputs we cannot remove the block */
2617 for (i = get_irn_n_outs(pred) - 1; i >= 0; --i) {
2618 ir_node *proj = get_irn_out(pred, i);
2621 /* skip non-control flow Proj's */
2622 if (get_irn_mode(proj) != mode_X)
2625 node = get_irn_node(proj);
2626 if (node->type.tv == tarval_reachable) {
2634 } /* can_exchange */
2637 * Block Post-Walker, apply the analysis results on control flow by
2638 * shortening Phi's and Block inputs.
2640 static void apply_cf(ir_node *block, void *ctx) {
/* Block post-walker: rewires one Block according to the reachability
 * types computed by the analysis.  An unreachable block is killed, a
 * block with exactly one live predecessor may be fused with it, and
 * otherwise dead control-flow inputs (and the matching Phi inputs)
 * are removed.
 * NOTE(review): this excerpt is elided (gaps in the original line
 * numbering) — several declarations and closing braces are not visible. */
2641 environment_t *env = ctx;
2642 node_t *node = get_irn_node(block);
2644 ir_node **ins, **in_X;
2645 ir_node *phi, *next;
2647 n = get_Block_n_cfgpreds(block);
/* Case 1: the block itself was computed unreachable. */
2649 if (node->type.tv == tarval_unreachable) {
2652 for (i = n - 1; i >= 0; --i) {
2653 ir_node *pred = get_Block_cfgpred(block, i);
2655 if (! is_Bad(pred)) {
2656 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
/* flagged is used here as a visited marker so every predecessor
 * block is processed at most once (value 3 presumably encodes
 * "seen" — TODO confirm against the elided reset code). */
2658 if (pred_bl->flagged == 0) {
2659 pred_bl->flagged = 3;
2661 if (pred_bl->type.tv == tarval_reachable) {
2663 * We will remove an edge from block to its pred.
2664 * This might leave the pred block as an endless loop
/* Backedges do not need the keep-alive: removing them cannot
 * orphan the predecessor in the same way. */
2666 if (! is_backedge(block, i))
2667 keep_alive(pred_bl->node);
2673 /* the EndBlock is always reachable even if the analysis
2674 finds out the opposite :-) */
2675 if (block != get_irg_end_block(current_ir_graph)) {
2676 /* mark dead blocks */
2677 set_Block_dead(block);
2678 DB((dbg, LEVEL_1, "Removing dead %+F\n", block));
/* End block case: cannot be marked dead, so just clear its inputs. */
2680 /* the endblock is unreachable */
2681 set_irn_in(block, 0, NULL);
/* Case 2 (elided condition above): exactly one predecessor — try to
 * merge this block into the predecessor's block. */
2687 /* only one predecessor combine */
2688 ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));
2690 if (can_exchange(pred)) {
2691 ir_node *new_block = get_nodes_block(pred);
2692 DB((dbg, LEVEL_1, "Fuse %+F with %+F\n", block, new_block));
2693 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
2694 exchange(block, new_block);
/* keep the analysis node pointing at the surviving block */
2695 node->node = new_block;
/* Case 3: general case — collect the still-reachable control-flow
 * inputs into in_X (on-stack array via NEW_ARR_A). */
2701 NEW_ARR_A(ir_node *, in_X, n);
2703 for (i = 0; i < n; ++i) {
2704 ir_node *pred = get_Block_cfgpred(block, i);
2705 node_t *node = get_irn_node(pred);
2707 if (node->type.tv == tarval_reachable) {
2710 DB((dbg, LEVEL_1, "Removing dead input %d from %+F (%+F)\n", i, block, pred));
2711 if (! is_Bad(pred)) {
2712 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
/* same visited-marker / keep-alive dance as in the unreachable case */
2714 if (pred_bl->flagged == 0) {
2715 pred_bl->flagged = 3;
2717 if (pred_bl->type.tv == tarval_reachable) {
2719 * We will remove an edge from block to its pred.
2720 * This might leave the pred block as an endless loop
2722 if (! is_backedge(block, i))
2723 keep_alive(pred_bl->node);
/* Shorten the Phi list of this block: each Phi either becomes a
 * constant, collapses to a single surviving input, or keeps only
 * the inputs coming from reachable predecessors. */
2732 NEW_ARR_A(ir_node *, ins, n);
2733 for (phi = get_Block_phis(block); phi != NULL; phi = next) {
2734 node_t *node = get_irn_node(phi);
/* fetch next before the Phi is possibly exchanged away */
2736 next = get_Phi_next(phi);
2737 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
2738 /* this Phi is replaced by a constant */
2739 tarval *tv = node->type.tv;
2740 ir_node *c = new_r_Const(current_ir_graph, block, get_tarval_mode(tv), tv);
/* transfer the analysis node to the replacement so later passes
 * still find the computed type */
2742 set_irn_node(c, node);
2744 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
2745 DBG_OPT_COMBO(phi, c, FS_OPT_COMBO_CONST);
/* keep only Phi inputs whose control-flow predecessor is reachable */
2750 for (i = 0; i < n; ++i) {
2751 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
2753 if (pred->type.tv == tarval_reachable) {
2754 ins[j++] = get_Phi_pred(phi, i);
2758 /* this Phi is replaced by a single predecessor */
2759 ir_node *s = ins[0];
2760 node_t *phi_node = get_irn_node(phi);
2763 DB((dbg, LEVEL_1, "%+F is replaced by %+F because of cf change\n", phi, s));
2764 DBG_OPT_COMBO(phi, s, FS_OPT_COMBO_FOLLOWER);
/* more than one live input: just shrink the Phi's in-array */
2769 set_irn_in(phi, j, ins);
/* After Phi shortening: if only one live control-flow input remains,
 * try to fuse the block with its predecessor (k presumably counts the
 * live inputs collected above — declaration elided, TODO confirm). */
2776 /* this Block has only one live predecessor */
2777 ir_node *pred = skip_Proj(in_X[0]);
2779 if (can_exchange(pred)) {
2780 ir_node *new_block = get_nodes_block(pred);
2781 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
2782 exchange(block, new_block);
2783 node->node = new_block;
/* otherwise install the shortened control-flow input array */
2787 set_irn_in(block, k, in_X);
2793 * Post-Walker, apply the analysis results;
2795 static void apply_result(ir_node *irn, void *ctx) {
/* Post-walker: applies the analysis result to one non-block node.
 * Unreachable nodes are routed to Bad, reachable Projs of constant
 * Conds become Jmps, constant-typed data nodes become Const/SymConst,
 * and congruent nodes are replaced by their partition leader.
 * NOTE(review): this excerpt is elided — several closing braces and
 * else-branches are not visible. */
2796 environment_t *env = ctx;
2797 node_t *node = get_irn_node(irn);
/* Blocks were already rewritten by apply_cf; End and Bad must survive. */
2799 if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
2800 /* blocks already handled, do not touch the End node */
2802 node_t *block = get_irn_node(get_nodes_block(irn));
/* Node sits in an unreachable block: replace it by Bad. */
2804 if (block->type.tv == tarval_unreachable) {
2805 ir_node *bad = get_irg_bad(current_ir_graph);
2807 /* here, bad might already have a node, but this can be safely ignored
2808 as long as bad has at least ONE valid node */
2809 set_irn_node(bad, node);
2811 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
/* The node itself was computed unreachable (dead control flow). */
2815 else if (node->type.tv == tarval_unreachable) {
2816 /* don't kick away Unknown */
2817 if (! is_Unknown(irn)) {
2818 ir_node *bad = get_irg_bad(current_ir_graph);
2820 /* see comment above */
2821 set_irn_node(bad, node);
2823 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
/* Reachable control-flow node (mode_X): a Proj of a Cond whose
 * selector is a known constant can be turned into a plain Jmp. */
2828 else if (get_irn_mode(irn) == mode_X) {
2831 ir_node *cond = get_Proj_pred(irn);
2833 if (is_Cond(cond)) {
2834 node_t *sel = get_irn_node(get_Cond_selector(cond));
2836 if (is_tarval(sel->type.tv) && tarval_is_constant(sel->type.tv)) {
2837 /* Cond selector is a constant and the Proj is reachable, make a Jmp */
2838 ir_node *jmp = new_r_Jmp(current_ir_graph, block->node);
2839 set_irn_node(jmp, node);
2841 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
2842 DBG_OPT_COMBO(irn, jmp, FS_OPT_COMBO_CF);
2849 /* normal data node */
2850 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
2851 tarval *tv = node->type.tv;
2854 * Beware: never replace mode_T nodes by constants. Currently we must mark
2855 * mode_T nodes with constants, but do NOT replace them.
2857 if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
2858 /* can be replaced by a constant */
2859 ir_node *c = new_r_Const(current_ir_graph, block->node, get_tarval_mode(tv), tv);
/* move the analysis node onto the replacement */
2860 set_irn_node(c, node);
2862 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
2863 DBG_OPT_COMBO(irn, c, FS_OPT_COMBO_CONST);
/* Click-type is an entity: the node computes an address and can be
 * replaced by a SymConst of that entity. */
2867 } else if (is_entity(node->type.sym.entity_p)) {
2868 if (! is_SymConst(irn)) {
2869 /* can be replaced by a Symconst */
2870 ir_node *symc = new_r_SymConst(current_ir_graph, block->node, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
2871 set_irn_node(symc, node);
2874 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
2875 DBG_OPT_COMBO(irn, symc, FS_OPT_COMBO_CONST);
2876 exchange(irn, symc);
2879 } else if (is_Confirm(irn)) {
2880 /* Confirms are always follower, but do not kill them here */
/* Congruence case: replace the node by its partition leader. */
2882 ir_node *leader = get_leader(node);
2884 if (leader != irn) {
2885 DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
/* distinguish follower vs. congruent replacement only for the
 * optimization statistics */
2886 if (node->is_follower)
2887 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_FOLLOWER);
2889 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_CONGRUENT);
2890 exchange(irn, leader);
2896 } /* apply_result */
2899 * Fix the keep-alives by deleting unreachable ones.
2901 static void apply_end(ir_node *end, environment_t *env) {
/* Rebuilds the End node's keep-alive list, dropping every keep-alive
 * whose (block) analysis type is unreachable.
 * NOTE(review): excerpt is elided — the declaration of `in` and parts
 * of the loop body (the non-block branch, the store into in[]) are
 * not visible here. */
2902 int i, j, n = get_End_n_keepalives(end);
2906 NEW_ARR_A(ir_node *, in, n);
2908 /* fix the keep alive */
2909 for (i = j = 0; i < n; i++) {
2910 ir_node *ka = get_End_keepalive(end, i);
2911 node_t *node = get_irn_node(ka);
/* for a non-block keep-alive, judge reachability by its block */
2914 node = get_irn_node(get_nodes_block(ka));
/* keep only reachable entries; j counts the survivors */
2916 if (node->type.tv != tarval_unreachable)
2920 set_End_keepalives(end, j, in);
2925 #define SET(code) op_##code->ops.generic = (op_func)compute_##code
2928 * sets the generic functions to compute.
2930 static void set_compute_functions(void) {
/* Installs the transfer functions for the fixpoint iteration: every
 * opcode gets default_compute, then specific opcodes are overridden
 * via the SET() macro (those SET lines are elided in this excerpt). */
2933 /* set the default compute function */
2934 for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
2935 ir_op *op = get_irp_opcode(i);
/* ops.generic is the per-opcode dispatch slot used by the walker */
2936 op->ops.generic = (op_func)default_compute;
2939 /* set specific functions */
2959 } /* set_compute_functions */
2961 static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
/* VCG dump hook: annotates each dumped node with its partition number
 * and computed type (debug builds only; the return statements and the
 * #else branch are elided in this excerpt). */
2962 #ifdef DEBUG_libfirm
/* `local` is the node inside a block-local view, if any; prefer it */
2963 ir_node *irn = local != NULL ? local : n;
2964 node_t *node = get_irn_node(irn);
2966 ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
2971 void combo(ir_graph *irg) {
2973 ir_node *initial_bl;
2975 ir_graph *rem = current_ir_graph;
2977 current_ir_graph = irg;
2979 /* register a debug mask */
2980 FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
2981 //firm_dbg_set_mask(dbg, SET_LEVEL_3);
2983 DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
2985 obstack_init(&env.obst);
2986 env.worklist = NULL;
2990 #ifdef DEBUG_libfirm
2991 env.dbg_list = NULL;
2993 env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
2994 env.type2id_map = pmap_create();
2995 env.end_idx = get_opt_global_cse() ? 0 : -1;
2996 env.lambda_input = 0;
2997 env.nonstd_cond = 0;
3000 assure_irg_outs(irg);
3001 assure_cf_loop(irg);
3004 /* we have our own value_of function */
3005 set_value_of_func(get_node_tarval);
3007 set_compute_functions();
3008 DEBUG_ONLY(part_nr = 0);
3010 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
3012 /* create the initial partition and place it on the work list */
3013 env.initial = new_partition(&env);
3014 add_to_worklist(env.initial, &env);
3015 irg_walk_graph(irg, init_block_phis, create_initial_partitions, &env);
3018 tarval_UNKNOWN = env.nonstd_cond ? tarval_bad : tarval_top;
3020 tarval_UNKNOWN = tarval_bad;
3023 /* all nodes on the initial partition have type Top */
3024 env.initial->type_is_T_or_C = 1;
3026 /* Place the START Node's partition on cprop.
3027 Place the START Node on its local worklist. */
3028 initial_bl = get_irg_start_block(irg);
3029 start = get_irn_node(initial_bl);
3030 add_to_cprop(start, &env);
3034 if (env.worklist != NULL)
3036 } while (env.cprop != NULL || env.worklist != NULL);
3038 dump_all_partitions(&env);
3039 check_all_partitions(&env);
3042 set_dump_node_vcgattr_hook(dump_partition_hook);
3043 dump_ir_block_graph(irg, "-partition");
3044 set_dump_node_vcgattr_hook(NULL);
3046 (void)dump_partition_hook;
3049 /* apply the result */
3050 irg_block_walk_graph(irg, NULL, apply_cf, &env);
3051 irg_walk_graph(irg, NULL, apply_result, &env);
3052 apply_end(get_irg_end(irg), &env);
3055 /* control flow might changed */
3056 set_irg_outs_inconsistent(irg);
3057 set_irg_extblk_inconsistent(irg);
3058 set_irg_doms_inconsistent(irg);
3059 set_irg_loopinfo_inconsistent(irg);
3062 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
3064 pmap_destroy(env.type2id_map);
3065 del_set(env.opcode2id_map);
3066 obstack_free(&env.obst, NULL);
3068 /* restore value_of() default behavior */
3069 set_value_of_func(NULL);
3070 current_ir_graph = rem;