2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Cliff Click's Combined Analysis/Optimization
23 * @author Michael Beck
26 * This is a slightly enhanced version of Cliff Click's combo algorithm
27 * - support for commutative nodes is added, Add(a,b) and Add(b,a) ARE congruent
28 * - supports all Firm direct (by a data edge) identities except Mux
29 * (Mux can be a 2-input or 1-input identity, only 2-input is implemented yet)
30 * - supports Confirm nodes (handle them like Copies but do NOT remove them)
31 * - support for global congruences is implemented but not tested yet
33 * Note further that we use the terminology from Click's work here, which is different
34 * in some cases from Firm terminology. Especially, Click's type is a
35 * Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
43 #include "iroptimize.h"
51 #include "irgraph_t.h"
58 #include "iropt_dbg.h"
68 /* define this to check that all type translations are monotone */
69 #undef VERIFY_MONOTONE
71 /* define this to check the consistency of partitions */
72 #define CHECK_PARTITIONS
74 typedef struct node_t node_t;
75 typedef struct partition_t partition_t;
76 typedef struct opcode_key_t opcode_key_t;
77 typedef struct listmap_entry_t listmap_entry_t;
79 /** The type of the compute function. */
80 typedef void (*compute_func)(node_t *node);
86 ir_opcode code; /**< The Firm opcode. */
87 ir_mode *mode; /**< The mode of all nodes in the partition. */
88 int arity; /**< The arity of this opcode (needed for Phi etc. */
90 long proj; /**< For Proj nodes, its proj number */
91 ir_entity *ent; /**< For Sel Nodes, its entity */
96 * An entry in the list_map.
98 struct listmap_entry_t {
99 void *id; /**< The id. */
100 node_t *list; /**< The associated list for this id. */
101 listmap_entry_t *next; /**< Link to the next entry in the map. */
104 /** We must map id's to lists. */
105 typedef struct listmap_t {
106 set *map; /**< Map id's to listmap_entry_t's */
107 listmap_entry_t *values; /**< List of all values in the map. */
111 * A lattice element. Because we handle constants and symbolic constants different, we
112 * have to use this union.
123 ir_node *node; /**< The IR-node itself. */
124 list_head node_list; /**< Double-linked list of leader/follower entries. */
125 list_head cprop_list; /**< Double-linked partition.cprop list. */
126 partition_t *part; /**< points to the partition this node belongs to */
127 node_t *next; /**< Next node on local list (partition.touched, fallen). */
128 node_t *race_next; /**< Next node on race list. */
129 lattice_elem_t type; /**< The associated lattice element "type". */
130 int max_user_input; /**< Maximum input number of Def-Use edges. */
131 int next_edge; /**< Index of the next Def-Use edge to use. */
132 int n_followers; /**< Number of Follower in the outs set. */
133 unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
134 unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
135 unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
136 unsigned is_follower:1; /**< Set, if this node is a follower. */
137 unsigned by_all_const:1; /**< Set, if this node was once evaluated by all constants. */
138 unsigned flagged:2; /**< 2 Bits, set if this node was visited by race 1 or 2. */
142 * A partition containing congruent nodes.
145 list_head Leader; /**< The head of partition Leader node list. */
146 list_head Follower; /**< The head of partition Follower node list. */
147 list_head cprop; /**< The head of partition.cprop list. */
148 partition_t *wl_next; /**< Next entry in the work list if any. */
149 partition_t *touched_next; /**< Points to the next partition in the touched set. */
150 partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
151 partition_t *split_next; /**< Points to the next partition in the list that must be split by split_by(). */
152 node_t *touched; /**< The partition.touched set of this partition. */
153 unsigned n_leader; /**< Number of entries in this partition.Leader. */
154 unsigned n_touched; /**< Number of entries in the partition.touched. */
155 int max_user_inputs; /**< Maximum number of user inputs of all entries. */
156 unsigned on_worklist:1; /**< Set, if this partition is in the work list. */
157 unsigned on_touched:1; /**< Set, if this partition is on the touched set. */
158 unsigned on_cprop:1; /**< Set, if this partition is on the cprop list. */
159 unsigned type_is_T_or_C:1;/**< Set, if all nodes in this partition have type Top or Constant. */
161 partition_t *dbg_next; /**< Link all partitions for debugging */
162 unsigned nr; /**< A unique number for (what-)mapping, >0. */
166 typedef struct environment_t {
167 struct obstack obst; /**< obstack to allocate data structures. */
168 partition_t *worklist; /**< The work list. */
169 partition_t *cprop; /**< The constant propagation list. */
170 partition_t *touched; /**< the touched set. */
171 partition_t *initial; /**< The initial partition. */
172 set *opcode2id_map; /**< The opcodeMode->id map. */
173 pmap *type2id_map; /**< The type->id map. */
174 int end_idx; /**< -1 for local and 0 for global congruences. */
175 int lambda_input; /**< Captured argument for lambda_partition(). */
176 char nonstd_cond; /**< Set, if a Condb note has a non-Cmp predecessor. */
177 char modified; /**< Set, if the graph was modified. */
178 char commutative; /**< Set, if commutation nodes should be handled specially. */
180 partition_t *dbg_list; /**< List of all partitions. */
184 /** Type of the what function. */
185 typedef void *(*what_func)(const node_t *node, environment_t *env);
187 #define get_irn_node(follower) ((node_t *)get_irn_link(follower))
188 #define set_irn_node(follower, node) set_irn_link(follower, node)
190 /* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
191 #undef tarval_unreachable
192 #define tarval_unreachable tarval_top
195 /** The debug module handle. */
196 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
198 /** The what reason. */
199 DEBUG_ONLY(static const char *what_reason;)
201 /** Next partition number. */
202 DEBUG_ONLY(static unsigned part_nr = 0);
204 /** The tarval returned by Unknown nodes. */
205 static tarval *tarval_UNKNOWN;
208 static node_t *identity(node_t *node);
210 #ifdef CHECK_PARTITIONS
/* Debug helper: verify the internal consistency of one partition.
 * Every Leader entry must be a non-follower with cleared race flags that
 * points back to T, and the Leader count must match T->n_leader; every
 * Follower entry must be marked as a follower. */
214 static void check_partition(const partition_t *T) {
218 list_for_each_entry(node_t, node, &T->Leader, node_list) {
219 assert(node->is_follower == 0);
220 assert(node->flagged == 0);
221 assert(node->part == T);
/* the number of Leader entries visited must match the cached count */
224 assert(n == T->n_leader);
226 list_for_each_entry(node_t, node, &T->Follower, node_list) {
227 assert(node->is_follower == 1);
228 assert(node->flagged == 0);
229 assert(node->part == T);
231 } /* check_partition */
234 * check that all leader nodes in the partition have the same opcode.
/* Debug helper: verify that all Leader nodes of partition Z agree on
 * opcode, mode, arity and (for Proj/Sel) the proj number / entity. */
236 static void check_opcode(const partition_t *Z) {
241 list_for_each_entry(node_t, node, &Z->Leader, node_list) {
242 ir_node *irn = node->node;
/* first node: record the reference opcode key */
245 key.code = get_irn_opcode(irn);
246 key.mode = get_irn_mode(irn);
247 key.arity = get_irn_arity(irn);
251 switch (get_irn_opcode(irn)) {
253 key.u.proj = get_Proj_proj(irn);
256 key.u.ent = get_Sel_entity(irn);
/* subsequent nodes: must agree with the reference key */
263 assert(key.code == get_irn_opcode(irn));
264 assert(key.mode == get_irn_mode(irn));
265 assert(key.arity == get_irn_arity(irn));
267 switch (get_irn_opcode(irn)) {
269 assert(key.u.proj == get_Proj_proj(irn));
272 assert(key.u.ent == get_Sel_entity(irn));
/* Debug helper: run the consistency checks over all partitions on the
 * debug list and verify that every Follower is congruent to some other
 * node (its leader) inside the same partition. */
281 static void check_all_partitions(environment_t *env) {
286 for (P = env->dbg_list; P != NULL; P = P->dbg_next) {
288 if (! P->type_is_T_or_C)
290 list_for_each_entry(node_t, node, &P->Follower, node_list) {
291 node_t *leader = identity(node);
/* a follower must have a leader distinct from itself in its own partition */
293 assert(leader != node && leader->part == node->part);
/* Debug helper: walk a singly linked local list whose link pointer sits at
 * byte offset ofs inside node_t and assert all entries belong to Z. */
302 static void do_check_list(const node_t *list, int ofs, const partition_t *Z) {
305 #define NEXT(e) *((const node_t **)((char *)(e) + (ofs)))
306 for (e = list; e != NULL; e = NEXT(e)) {
307 assert(e->part == Z);
310 } /* do_check_list */
313 * Check a local list.
315 static void check_list(const node_t *list, const partition_t *Z) {
316 do_check_list(list, offsetof(node_t, next), Z);
320 #define check_partition(T)
321 #define check_list(list, Z)
322 #define check_all_partitions(env)
323 #endif /* CHECK_PARTITIONS */
326 static INLINE lattice_elem_t get_partition_type(const partition_t *X);
329 * Dump partition to output.
/* Dump the Leader and Follower members of a partition to the debug output,
 * prefixed by msg; a '*' after the number marks a Top-or-Constant partition. */
331 static void dump_partition(const char *msg, const partition_t *part) {
334 lattice_elem_t type = get_partition_type(part);
336 DB((dbg, LEVEL_2, "%s part%u%s (%u, %+F) {\n ",
337 msg, part->nr, part->type_is_T_or_C ? "*" : "",
338 part->n_leader, type));
339 list_for_each_entry(node_t, node, &part->Leader, node_list) {
340 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
/* print the followers (if any) separated from the leaders */
343 if (! list_empty(&part->Follower)) {
344 DB((dbg, LEVEL_2, "\n---\n "));
346 list_for_each_entry(node_t, node, &part->Follower, node_list) {
347 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
351 DB((dbg, LEVEL_2, "\n}\n"));
352 } /* dump_partition */
/* Dump a linked node list to the debug output; the next pointer is located
 * at byte offset ofs inside node_t. */
357 static void do_dump_list(const char *msg, const node_t *node, int ofs) {
361 #define GET_LINK(p, ofs) *((const node_t **)((char *)(p) + (ofs)))
363 DB((dbg, LEVEL_3, "%s = {\n ", msg));
364 for (p = node; p != NULL; p = GET_LINK(p, ofs)) {
365 DB((dbg, LEVEL_3, "%s%+F", first ? "" : ", ", p->node));
368 DB((dbg, LEVEL_3, "\n}\n"));
/* Dump a race list (linked via node_t::race_next). */
376 static void dump_race_list(const char *msg, const node_t *list) {
377 do_dump_list(msg, list, offsetof(node_t, race_next));
378 } /* dump_race_list */
381 * Dumps a local list.
383 static void dump_list(const char *msg, const node_t *list) {
384 do_dump_list(msg, list, offsetof(node_t, next));
388 * Dump all partitions.
390 static void dump_all_partitions(const environment_t *env) {
391 const partition_t *P;
393 DB((dbg, LEVEL_2, "All partitions\n===============\n"));
/* dbg_list chains every partition ever created (debug builds only) */
394 for (P = env->dbg_list; P != NULL; P = P->dbg_next)
395 dump_partition("", P);
396 } /* dump_all_partitions */
/* Dump the chain of partitions (linked via split_next) produced by one
 * split, together with the current split reason. */
401 static void dump_split_list(const partition_t *list) {
402 const partition_t *p;
404 DB((dbg, LEVEL_2, "Split by %s produced = {\n", what_reason));
405 for (p = list; p != NULL; p = p->split_next)
406 DB((dbg, LEVEL_2, "part%u, ", p->nr));
407 DB((dbg, LEVEL_2, "\n}\n"));
408 } /* dump_split_list */
411 #define dump_partition(msg, part)
412 #define dump_race_list(msg, list)
413 #define dump_list(msg, list)
414 #define dump_all_partitions(env)
415 #define dump_split_list(list)
418 #if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
420 * Verify that a type transition is monotone
/* Panic if the lattice transition from old_type to new_type is not monotone,
 * i.e. does not move downwards in the lattice. Staying on the same type or
 * leaving Top is always legal. */
422 static void verify_type(const lattice_elem_t old_type, const lattice_elem_t new_type) {
423 if (old_type.tv == new_type.tv) {
/* no transition at all: always fine */
427 if (old_type.tv == tarval_top) {
428 /* from Top down-to is always allowed */
431 if (old_type.tv == tarval_reachable) {
432 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
434 if (new_type.tv == tarval_bottom || new_type.tv == tarval_reachable) {
438 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
441 #define verify_type(old_type, new_type)
445 * Compare two pointer values of a listmap.
/* set compare function for listmap entries: returns 0 (equal) iff the ids
 * match, following the set API convention. */
447 static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
448 const listmap_entry_t *e1 = elt;
449 const listmap_entry_t *e2 = key;
452 return e1->id != e2->id;
453 } /* listmap_cmp_ptr */
456 * Initializes a listmap.
458 * @param map the listmap
460 static void listmap_init(listmap_t *map) {
/* create the id -> list set with a small initial size */
461 map->map = new_set(listmap_cmp_ptr, 16);
466 * Terminates a listmap.
468 * @param map the listmap
470 static void listmap_term(listmap_t *map) {
475 * Return the associated listmap entry for a given id.
477 * @param map the listmap
478 * @param id the id to search for
480 * @return the associated listmap entry for the given id
482 static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
483 listmap_entry_t key, *entry;
/* set_insert() returns the already present entry or inserts the fresh key */
488 entry = set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
490 if (entry->list == NULL) {
491 /* a new entry, put into the list */
492 entry->next = map->values;
499 * Calculate the hash value for an opcode map entry.
501 * @param entry an opcode map entry
503 * @return a hash value for the given opcode map entry
/* Hash an opcode map entry by combining mode pointer, opcode, proj number,
 * entity pointer and arity. */
505 static unsigned opcode_hash(const opcode_key_t *entry) {
506 return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent) + entry->arity;
510 * Compare two entries in the opcode map.
/* set compare function for opcode map entries: returns 0 (equal) iff all
 * key fields match. */
512 static int cmp_opcode(const void *elt, const void *key, size_t size) {
513 const opcode_key_t *o1 = elt;
514 const opcode_key_t *o2 = key;
517 return o1->code != o2->code || o1->mode != o2->mode ||
518 o1->arity != o2->arity ||
519 o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent;
523 * Compare two Def-Use edges for input position.
/* qsort comparator: order Def-Use edges ascending by input position. */
525 static int cmp_def_use_edge(const void *a, const void *b) {
526 const ir_def_use_edge *ea = a;
527 const ir_def_use_edge *eb = b;
529 /* no overrun, because range is [-1, MAXINT] */
530 return ea->pos - eb->pos;
531 } /* cmp_def_use_edge */
534 * We need the Def-Use edges sorted.
/* Sort the out (Def-Use) edges of a node by input position and cache the
 * largest user input position in node->max_user_input. */
536 static void sort_irn_outs(node_t *node) {
537 ir_node *irn = node->node;
538 int n_outs = get_irn_n_outs(irn);
/* the edges are stored starting at out[1] */
541 qsort(&irn->out[1], n_outs, sizeof(irn->out[0]), cmp_def_use_edge);
/* after sorting, the last edge carries the maximum position */
543 node->max_user_input = irn->out[n_outs].pos;
544 } /* sort_irn_outs */
547 * Return the type of a node.
549 * @param irn an IR-node
551 * @return the associated type of this node
/* Return the lattice type associated with an IR node. */
553 static INLINE lattice_elem_t get_node_type(const ir_node *irn) {
554 return get_irn_node(irn)->type;
555 } /* get_node_type */
558 * Return the tarval of a node.
560 * @param irn an IR-node
562 * @return the associated type of this node
/* Return the tarval of a node's type, or tarval_bottom if the type is not
 * a tarval (i.e. it is the symbolic-constant member of the union). */
564 static INLINE tarval *get_node_tarval(const ir_node *irn) {
565 lattice_elem_t type = get_node_type(irn);
567 if (is_tarval(type.tv))
569 return tarval_bottom;
570 } /* get_node_tarval */
573 * Add a partition to the worklist.
/* Push partition X onto the environment's worklist; X must not already be
 * on it. */
575 static INLINE void add_to_worklist(partition_t *X, environment_t *env) {
576 assert(X->on_worklist == 0);
577 DB((dbg, LEVEL_2, "Adding part%d to worklist\n", X->nr));
578 X->wl_next = env->worklist;
581 } /* add_to_worklist */
584 * Create a new empty partition.
586 * @param env the environment
588 * @return a newly allocated partition
590 static INLINE partition_t *new_partition(environment_t *env) {
/* partitions live on the environment's obstack, freed all at once */
591 partition_t *part = obstack_alloc(&env->obst, sizeof(*part));
593 INIT_LIST_HEAD(&part->Leader);
594 INIT_LIST_HEAD(&part->Follower);
595 INIT_LIST_HEAD(&part->cprop);
596 part->wl_next = NULL;
597 part->touched_next = NULL;
598 part->cprop_next = NULL;
599 part->split_next = NULL;
600 part->touched = NULL;
603 part->max_user_inputs = 0;
604 part->on_worklist = 0;
605 part->on_touched = 0;
607 part->type_is_T_or_C = 0;
/* debug only: chain all partitions and hand out a unique number */
609 part->dbg_next = env->dbg_list;
610 env->dbg_list = part;
611 part->nr = part_nr++;
615 } /* new_partition */
618 * Get the first node from a partition.
/* Return the first Leader node of partition X (X must be non-empty). */
620 static INLINE node_t *get_first_node(const partition_t *X) {
621 return list_entry(X->Leader.next, node_t, node_list);
622 } /* get_first_node */
625 * Return the type of a partition (assuming partition is non-empty and
626 * all elements have the same type).
628 * @param X a partition
630 * @return the type of the first element of the partition
632 static INLINE lattice_elem_t get_partition_type(const partition_t *X) {
/* all nodes of a partition share the same type, so the first one suffices */
633 const node_t *first = get_first_node(X);
635 } /* get_partition_type */
638 * Creates a partition node for the given IR-node and place it
639 * into the given partition.
641 * @param irn an IR-node
642 * @param part a partition to place the node in
643 * @param env the environment
645 * @return the created node
647 static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env) {
648 /* create a partition node and place it in the partition */
649 node_t *node = obstack_alloc(&env->obst, sizeof(*node));
651 INIT_LIST_HEAD(&node->node_list);
652 INIT_LIST_HEAD(&node->cprop_list);
656 node->race_next = NULL;
/* all nodes start optimistically as Top */
657 node->type.tv = tarval_top;
658 node->max_user_input = 0;
660 node->n_followers = 0;
661 node->on_touched = 0;
664 node->is_follower = 0;
665 node->by_all_const = 0;
/* attach the partition node to the IR node and enter it as a Leader */
667 set_irn_node(irn, node);
669 list_add_tail(&node->node_list, &part->Leader);
673 } /* create_partition_node */
676 * Pre-Walker, init all Block-Phi lists.
/* Pre-walker: clear the Phi list of every visited Block. */
678 static void init_block_phis(ir_node *irn, void *env) {
682 set_Block_phis(irn, NULL);
684 } /* init_block_phis */
687 * Post-Walker, initialize all Nodes' type to U or top and place
688 * all nodes into the TOP partition.
690 static void create_initial_partitions(ir_node *irn, void *ctx) {
691 environment_t *env = ctx;
692 partition_t *part = env->initial;
695 node = create_partition_node(irn, part, env);
/* track the maximum user input over the whole initial partition */
697 if (node->max_user_input > part->max_user_inputs)
698 part->max_user_inputs = node->max_user_input;
/* collect Phis on their Block's Phi list */
701 add_Block_phi(get_nodes_block(irn), irn);
702 } else if (is_Cond(irn)) {
703 /* check if all Cond's have a Cmp predecessor. */
704 if (get_irn_mode(irn) == mode_b && !is_Cmp(skip_Proj(get_Cond_selector(irn))))
705 env->nonstd_cond = 1;
707 } /* create_initial_partitions */
710 * Add a node to the entry.partition.touched set and
711 * node->partition to the touched set if not already there.
714 * @param env the environment
716 static INLINE void add_to_touched(node_t *y, environment_t *env) {
717 if (y->on_touched == 0) {
718 partition_t *part = y->part;
/* push y onto its partition's local touched list */
720 y->next = part->touched;
/* and make sure the partition itself is in the global touched set */
725 if (part->on_touched == 0) {
726 part->touched_next = env->touched;
728 part->on_touched = 1;
731 check_list(part->touched, part);
733 } /* add_to_touched */
736 * Place a node on the cprop list.
739 * @param env the environment
741 static void add_to_cprop(node_t *y, environment_t *env) {
742 /* Add y to y.partition.cprop. */
743 if (y->on_cprop == 0) {
744 partition_t *Y = y->part;
746 list_add_tail(&y->cprop_list, &Y->cprop);
749 DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));
751 /* place its partition on the cprop list */
752 if (Y->on_cprop == 0) {
753 Y->cprop_next = env->cprop;
758 if (get_irn_mode(y->node) == mode_T) {
759 /* mode_T nodes always produce tarval_bottom, so we must explicitly
760 add its Projs to get constant evaluation to work */
763 for (i = get_irn_n_outs(y->node) - 1; i >= 0; --i) {
764 node_t *proj = get_irn_node(get_irn_out(y->node, i));
766 add_to_cprop(proj, env);
768 } else if (is_Block(y->node)) {
769 /* Due to the way we handle Phi's, we must place all Phis of a block on the list
770 * if someone placed the block. The Block is only placed if the reachability
771 * changes, and this must be re-evaluated in compute_Phi(). */
773 for (phi = get_Block_phis(y->node); phi != NULL; phi = get_Phi_next(phi)) {
774 node_t *p = get_irn_node(phi);
775 add_to_cprop(p, env);
781 * Update the worklist: If Z is on worklist then add Z' to worklist.
782 * Else add the smaller of Z and Z' to worklist.
784 * @param Z the Z partition
785 * @param Z_prime the Z' partition, a previous part of Z
786 * @param env the environment
788 static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env) {
/* if Z is already queued, queue the split-off part; otherwise queue the
 * smaller of the two partitions */
789 if (Z->on_worklist || Z_prime->n_leader < Z->n_leader) {
790 add_to_worklist(Z_prime, env);
792 add_to_worklist(Z, env);
794 } /* update_worklist */
797 * Make all inputs to x no longer be F.def_use edges.
/* For every predecessor of x, move the Def-Use edge pointing at x from the
 * predecessor's Follower edge region into its (sorted) Leader edge region. */
801 static void move_edges_to_leader(node_t *x) {
802 ir_node *irn = x->node;
805 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
806 node_t *pred = get_irn_node(get_irn_n(irn, i));
/* follower edges occupy out[1 .. n_followers] */
811 n = get_irn_n_outs(p);
812 for (j = 1; j <= pred->n_followers; ++j) {
813 if (p->out[j].pos == i && p->out[j].use == irn) {
814 /* found a follower edge to x, move it to the Leader */
815 ir_def_use_edge edge = p->out[j];
817 /* remove this edge from the Follower set */
818 p->out[j] = p->out[pred->n_followers];
821 /* sort it into the leader set */
822 for (k = pred->n_followers + 2; k <= n; ++k) {
823 if (p->out[k].pos >= edge.pos)
825 p->out[k - 1] = p->out[k];
827 /* place the new edge here */
828 p->out[k - 1] = edge;
830 /* edge found and moved */
835 } /* move_edges_to_leader */
838 * Split a partition that has NO followers by a local list.
840 * @param Z partition to split
841 * @param g a (non-empty) node list
842 * @param env the environment
844 * @return a new partition containing the nodes of g
846 static partition_t *split_no_followers(partition_t *Z, node_t *g, environment_t *env) {
847 partition_t *Z_prime;
852 dump_partition("Splitting ", Z);
853 dump_list("by list ", g);
857 /* Remove g from Z. */
858 for (node = g; node != NULL; node = node->next) {
859 assert(node->part == Z);
860 list_del(&node->node_list);
/* g must be a proper subset of Z's leaders */
863 assert(n < Z->n_leader);
866 /* Move g to a new partition, Z'. */
867 Z_prime = new_partition(env);
869 for (node = g; node != NULL; node = node->next) {
870 list_add_tail(&node->node_list, &Z_prime->Leader);
871 node->part = Z_prime;
872 if (node->max_user_input > max_input)
873 max_input = node->max_user_input;
875 Z_prime->max_user_inputs = max_input;
876 Z_prime->n_leader = n;
879 check_partition(Z_prime);
881 /* for now, copy the type info tag, it will be adjusted in split_by(). */
882 Z_prime->type_is_T_or_C = Z->type_is_T_or_C;
/* queue the proper partition for further splitting */
884 update_worklist(Z, Z_prime, env);
886 dump_partition("Now ", Z);
887 dump_partition("Created new ", Z_prime);
889 } /* split_no_followers */
892 * Make the Follower -> Leader transition for a node.
/* Turn Follower n into a Leader: move its incoming Follower edges into the
 * Leader edge regions and move n onto its partition's Leader list. */
896 static void follower_to_leader(node_t *n) {
897 assert(n->is_follower == 1);
899 DB((dbg, LEVEL_2, "%+F make the follower -> leader transition\n", n->node));
901 move_edges_to_leader(n);
902 list_del(&n->node_list);
903 list_add_tail(&n->node_list, &n->part->Leader);
905 } /* follower_to_leader */
908 * The environment for one race step.
910 typedef struct step_env {
911 node_t *initial; /**< The initial node list. */
912 node_t *unwalked; /**< The unwalked node list. */
913 node_t *walked; /**< The walked node list. */
914 int index; /**< Next index of Follower use_def edge. */
915 unsigned side; /**< side number. */
919 * Return non-zero, if a input is a real follower
921 * @param irn the node to check
922 * @param input number of the input
924 static int is_real_follower(const ir_node *irn, int input) {
927 switch (get_irn_opcode(irn)) {
930 /* ignore the Confirm bound input */
936 /* ignore the Mux sel input */
941 /* dead inputs are not follower edges */
942 ir_node *block = get_nodes_block(irn);
943 node_t *pred = get_irn_node(get_Block_cfgpred(block, input));
945 if (pred->type.tv == tarval_unreachable)
955 /* only a Sub x,0 / Shift x,0 might be a follower */
962 pred = get_irn_node(get_irn_n(irn, input));
963 if (is_tarval(pred->type.tv) && tarval_is_null(pred->type.tv))
/* identity element one (presumably the Mul case) -- TODO confirm */
967 pred = get_irn_node(get_irn_n(irn, input));
968 if (is_tarval(pred->type.tv) && tarval_is_one(pred->type.tv))
/* identity element all-ones (presumably the And case) -- TODO confirm */
972 pred = get_irn_node(get_irn_n(irn, input));
973 if (is_tarval(pred->type.tv) && tarval_is_all_one(pred->type.tv))
978 /* all inputs are followers */
981 assert(!"opcode not implemented yet");
985 } /* is_real_follower */
988 * Do one step in the race.
990 static int step(step_env *env) {
993 if (env->initial != NULL) {
994 /* Move node from initial to unwalked */
996 env->initial = n->race_next;
998 n->race_next = env->unwalked;
1004 while (env->unwalked != NULL) {
1005 /* let n be the first node in unwalked */
1007 while (env->index < n->n_followers) {
1008 const ir_def_use_edge *edge = &n->node->out[1 + env->index];
1010 /* let m be n.F.def_use[index] */
1011 node_t *m = get_irn_node(edge->use);
1013 assert(m->is_follower);
1015 * Some inputs, like the get_Confirm_bound are NOT
1016 * real followers, sort them out.
1018 if (! is_real_follower(m->node, edge->pos)) {
1024 /* only followers from our partition */
1025 if (m->part != n->part)
/* mark m as visited by this side */
1028 if ((m->flagged & env->side) == 0) {
1029 m->flagged |= env->side;
/* flagged == 3 means m was already reached by the other side as well */
1031 if (m->flagged != 3) {
1032 /* visited the first time */
1033 /* add m to unwalked not as first node (we might still need to
1034 check for more follower node */
1035 m->race_next = n->race_next;
1039 /* else already visited by the other side and on the other list */
1042 /* move n to walked */
1043 env->unwalked = n->race_next;
1044 n->race_next = env->walked;
1052 * Clear the flags from a list and check for
1053 * nodes that where touched from both sides.
1055 * @param list the list
/* Clear the race flags of all nodes on list; any follower that was reached
 * from both sides of the race (flagged == 3) is promoted to a Leader. */
1057 static int clear_flags(node_t *list) {
1061 for (n = list; n != NULL; n = n->race_next) {
1062 if (n->flagged == 3) {
1063 /* we reach a follower from both sides, this will split congruent
1064 * inputs and make it a leader. */
1065 follower_to_leader(n);
1074 * Split a partition by a local list using the race.
1076 * @param pX pointer to the partition to split, might be changed!
1077 * @param gg a (non-empty) node list
1078 * @param env the environment
1080 * @return a new partition containing the nodes of gg
/* Split partition *pX by the node list gg. If X has no Followers the fast
 * path is used; otherwise the two-sided race decides which half (gg-side
 * env1 vs. remainder-side env2) becomes the new, smaller partition X'. */
1082 static partition_t *split(partition_t **pX, node_t *gg, environment_t *env) {
1083 partition_t *X = *pX;
1084 partition_t *X_prime;
1086 step_env env1, env2, *winner;
1087 node_t *g, *h, *node, *t;
1088 int max_input, transitions;
1090 DEBUG_ONLY(static int run = 0;)
1092 DB((dbg, LEVEL_2, "Run %d ", run++));
1093 if (list_empty(&X->Follower)) {
1094 /* if the partition has NO follower, we can use the fast
1095 splitting algorithm. */
1096 return split_no_followers(X, gg, env);
1098 /* else do the race */
1100 dump_partition("Splitting ", X);
1101 dump_list("by list ", gg);
1103 INIT_LIST_HEAD(&tmp);
1105 /* Remove gg from X.Leader and put into g */
1107 for (node = gg; node != NULL; node = node->next) {
1108 assert(node->part == X);
1109 assert(node->is_follower == 0);
1111 list_del(&node->node_list);
1112 list_add_tail(&node->node_list, &tmp);
1113 node->race_next = g;
/* the remaining leaders form the list h for the other racer */
1118 list_for_each_entry(node_t, node, &X->Leader, node_list) {
1119 node->race_next = h;
1122 /* restore X.Leader */
1123 list_splice(&tmp, &X->Leader);
1126 env1.unwalked = NULL;
1132 env2.unwalked = NULL;
/* after the race, the winner has consumed its initial and unwalked lists */
1147 assert(winner->initial == NULL);
1148 assert(winner->unwalked == NULL);
1150 /* clear flags from walked/unwalked */
1151 transitions = clear_flags(env1.unwalked);
1152 transitions |= clear_flags(env1.walked);
1153 transitions |= clear_flags(env2.unwalked);
1154 transitions |= clear_flags(env2.walked);
1156 dump_race_list("winner ", winner->walked);
1158 /* Move walked_{winner} to a new partition, X'. */
1159 X_prime = new_partition(env);
1162 for (node = winner->walked; node != NULL; node = node->race_next) {
1163 list_del(&node->node_list);
1164 node->part = X_prime;
1165 if (node->is_follower) {
1166 list_add_tail(&node->node_list, &X_prime->Follower);
1168 list_add_tail(&node->node_list, &X_prime->Leader);
1171 if (node->max_user_input > max_input)
1172 max_input = node->max_user_input;
1174 X_prime->n_leader = n;
1175 X_prime->max_user_inputs = max_input;
1176 X->n_leader -= X_prime->n_leader;
1178 /* for now, copy the type info tag, it will be adjusted in split_by(). */
1179 X_prime->type_is_T_or_C = X->type_is_T_or_C;
1182 * Even if a follower was not checked by both sides, it might have
1183 * lose its congruence, so we need to check this case for all follower.
1185 list_for_each_entry_safe(node_t, node, t, &X_prime->Follower, node_list) {
1186 if (identity(node) == node) {
1187 follower_to_leader(node);
1193 check_partition(X_prime);
1195 /* X' is the smaller part */
1196 add_to_worklist(X_prime, env);
1199 * If there were follower to leader transitions, ensure that the nodes
1200 * can be split out if necessary.
1203 /* place partitions on the cprop list */
1204 if (X_prime->on_cprop == 0) {
1205 X_prime->cprop_next = env->cprop;
1206 env->cprop = X_prime;
1207 X_prime->on_cprop = 1;
1211 dump_partition("Now ", X);
1212 dump_partition("Created new ", X_prime);
1214 /* we have to ensure that the partition containing g is returned */
1215 if (winner == &env2) {
1224 * Returns non-zero if the i'th input of a Phi node is live.
1226 * @param phi a Phi-node
1227 * @param i an input number
1229 * @return non-zero if the i'th input of the given Phi node is live
/* Return non-zero if the i'th input of the Phi is live, i.e. its block
 * predecessor is not typed unreachable. */
1231 static int is_live_input(ir_node *phi, int i) {
1233 ir_node *block = get_nodes_block(phi);
1234 ir_node *pred = get_Block_cfgpred(block, i);
1235 lattice_elem_t type = get_node_type(pred);
1237 return type.tv != tarval_unreachable;
1239 /* else it's the control input, always live */
1241 } /* is_live_input */
1244 * Return non-zero if a type is a constant.
/* Return non-zero if type is a constant, i.e. neither Bottom nor Top. */
1246 static int is_constant_type(lattice_elem_t type) {
1247 if (type.tv != tarval_bottom && type.tv != tarval_top)
1250 } /* is_constant_type */
1253 * Check whether a type is neither Top or a constant.
1254 * Note: U is handled like Top here, R is a constant.
1256 * @param type the type to check
1258 static int type_is_neither_top_nor_const(const lattice_elem_t type) {
1259 if (is_tarval(type.tv)) {
1260 if (type.tv == tarval_top)
1262 if (tarval_is_constant(type.tv))
1269 } /* type_is_neither_top_nor_const */
1272 * Collect nodes to the touched list.
1274 * @param list the list which contains the nodes that must be evaluated
1275 * @param idx the index of the def_use edge to evaluate
1276 * @param env the environment
1278 static void collect_touched(list_head *list, int idx, environment_t *env) {
1280 int end_idx = env->end_idx;
1282 list_for_each_entry(node_t, x, list, node_list) {
1286 /* leader edges start AFTER follower edges */
1287 x->next_edge = x->n_followers + 1;
1289 num_edges = get_irn_n_outs(x->node);
1291 /* for all edges in x.L.def_use_{idx} */
1292 while (x->next_edge <= num_edges) {
1293 const ir_def_use_edge *edge = &x->node->out[x->next_edge];
1296 /* check if we have necessary edges */
/* edges are sorted by position, so we can stop once past idx */
1297 if (edge->pos > idx)
1304 /* only non-commutative nodes */
1305 if (env->commutative &&
1306 (idx == 0 || idx == 1) && is_op_commutative(get_irn_op(succ)))
1309 /* ignore the "control input" for non-pinned nodes
1310 if we are running in GCSE mode */
1311 if (idx < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
1314 y = get_irn_node(succ);
1315 assert(get_irn_n(succ, idx) == x->node);
1317 /* ignore block edges touching followers */
1318 if (idx == -1 && y->is_follower)
1321 if (is_constant_type(y->type)) {
1322 ir_opcode code = get_irn_opcode(succ);
1323 if (code == iro_Sub || code == iro_Cmp)
1324 add_to_cprop(y, env);
1327 /* Partitions of constants should not be split simply because their Nodes have unequal
1328 functions or incongruent inputs. */
1329 if (type_is_neither_top_nor_const(y->type) &&
1330 (! is_Phi(y->node) || is_live_input(y->node, idx))) {
1331 add_to_touched(y, env);
1335 } /* collect_touched */
1338 * Collect commutative nodes to the touched list.
1340 * @param list the list which contains the nodes that must be evaluated
1341 * @param env the environment
1343 static void collect_commutative_touched(list_head *list, environment_t *env) {
1346 list_for_each_entry(node_t, x, list, node_list) {
1349 num_edges = get_irn_n_outs(x->node);
/* leader edges start AFTER follower edges */
1351 x->next_edge = x->n_followers + 1;
1353 /* for all edges in x.L.def_use_{idx} */
1354 while (x->next_edge <= num_edges) {
1355 const ir_def_use_edge *edge = &x->node->out[x->next_edge];
1358 /* check if we have necessary edges */
1368 /* only commutative nodes */
1369 if (!is_op_commutative(get_irn_op(succ)))
1372 y = get_irn_node(succ);
1373 if (is_constant_type(y->type)) {
1374 ir_opcode code = get_irn_opcode(succ);
1375 if (code == iro_Eor)
1376 add_to_cprop(y, env);
1379 /* Partitions of constants should not be split simply because their Nodes have unequal
1380 functions or incongruent inputs. */
1381 if (type_is_neither_top_nor_const(y->type)) {
1382 add_to_touched(y, env);
1386 } /* collect_commutative_touched */
1389 * Split the partitions if caused by the first entry on the worklist.
1391 * @param env the environment
/*
 * Pops partition X from the worklist, collects all nodes "touched" by X's
 * def-use edges (commutative users first, then per-input-index users), and
 * splits every touched partition Z whose touched subset is a proper,
 * non-empty subset of Z's leaders.
 * NOTE(review): lines are elided here (gaps in embedded numbering) — the
 * pop of X, the declarations of e/idx, and loop-closing braces are missing
 * from this view; verify against the full file.
 */
1393 static void cause_splits(environment_t *env) {
1394 partition_t *X, *Z, *N;
1397 /* remove the first partition from the worklist */
1399 env->worklist = X->wl_next;
1402 dump_partition("Cause_split: ", X);
1404 if (env->commutative) {
1405 /* handle commutative nodes first */
1407 /* empty the touched set: already done, just clear the list */
1408 env->touched = NULL;
1410 collect_commutative_touched(&X->Leader, env);
1411 collect_commutative_touched(&X->Follower, env);
1413 for (Z = env->touched; Z != NULL; Z = N) {
1415 node_t *touched = Z->touched;
1416 unsigned n_touched = Z->n_touched;
1418 assert(Z->touched != NULL);
1420 /* beware, split might change Z */
1421 N = Z->touched_next;
1423 /* remove it from the touched set */
1426 /* Empty local Z.touched. */
1427 for (e = touched; e != NULL; e = e->next) {
/* only leaders may be on the touched list */
1428 assert(e->is_follower == 0);
/* split only proper, non-empty subsets; a full subset means no change */
1434 if (0 < n_touched && n_touched < Z->n_leader) {
1435 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
1436 split(&Z, touched, env);
1438 assert(n_touched <= Z->n_leader);
1442 /* combine temporary leader and follower list */
/* idx == -1 denotes the block input; user data inputs follow */
1443 for (idx = -1; idx <= X->max_user_inputs; ++idx) {
1444 /* empty the touched set: already done, just clear the list */
1445 env->touched = NULL;
1447 collect_touched(&X->Leader, idx, env);
1448 collect_touched(&X->Follower, idx, env);
1450 for (Z = env->touched; Z != NULL; Z = N) {
1452 node_t *touched = Z->touched;
1453 unsigned n_touched = Z->n_touched;
1455 assert(Z->touched != NULL);
1457 /* beware, split might change Z */
1458 N = Z->touched_next;
1460 /* remove it from the touched set */
1463 /* Empty local Z.touched. */
1464 for (e = touched; e != NULL; e = e->next) {
1465 assert(e->is_follower == 0);
1471 if (0 < n_touched && n_touched < Z->n_leader) {
1472 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
1473 split(&Z, touched, env);
1475 assert(n_touched <= Z->n_leader);
1478 } /* cause_splits */
1481 * Implements split_by_what(): Split a partition by characteristics given
1482 * by the what function.
1484 * @param X the partition to split
1485 * @param What a function returning an Id for every node of the partition X
1486 * @param P a list to store the result partitions
1487 * @param env the environment
/*
 * Buckets X's leaders by the id returned from What (using a listmap), then
 * splits off every bucket except the last one (which stays in X).
 * @return presumably the updated P list head — the return statement is
 *         elided from this view (gap in embedded numbering); confirm.
 */
1491 static partition_t *split_by_what(partition_t *X, what_func What,
1492 partition_t **P, environment_t *env) {
1495 listmap_entry_t *iter;
1498 /* Let map be an empty mapping from the range of What to (local) list of Nodes. */
1500 list_for_each_entry(node_t, x, &X->Leader, node_list) {
1501 void *id = What(x, env);
1502 listmap_entry_t *entry;
1505 /* input not allowed, ignore */
1508 /* Add x to map[What(x)]. */
1509 entry = listmap_find(&map, id);
/* push x onto the front of the bucket's node list */
1510 x->next = entry->list;
1513 /* Let P be a set of Partitions. */
1515 /* for all sets S except one in the range of map do */
1516 for (iter = map.values; iter != NULL; iter = iter->next) {
1517 if (iter->next == NULL) {
/* the last bucket remains in X — splitting it too would be redundant */
1518 /* this is the last entry, ignore */
1523 /* Add SPLIT( X, S ) to P. */
1524 DB((dbg, LEVEL_2, "Split part%d by WHAT = %s\n", X->nr, what_reason));
1525 R = split(&X, S, env);
1535 } /* split_by_what */
1537 /** lambda n.(n.type) */
/* What-function: the node's lattice type (the tarval member of the union)
 * serves as the split id. NOTE(review): env appears unused here; the line
 * suppressing the warning (and the closing brace) is elided from this view. */
1538 static void *lambda_type(const node_t *node, environment_t *env) {
1540 return node->type.tv;
1543 /** lambda n.(n.opcode) */
/* What-function: an interned opcode key (opcode, mode, arity, plus
 * opcode-specific payload such as Proj number or Sel entity) serves as the
 * split id. Interning via set_insert guarantees pointer equality for equal
 * keys. NOTE(review): switch cases/breaks and the return are elided from
 * this view (gaps in embedded numbering). */
1544 static void *lambda_opcode(const node_t *node, environment_t *env) {
1545 opcode_key_t key, *entry;
1546 ir_node *irn = node->node;
1548 key.code = get_irn_opcode(irn);
1549 key.mode = get_irn_mode(irn);
1550 key.arity = get_irn_arity(irn);
1554 switch (get_irn_opcode(irn)) {
/* Proj nodes are distinguished by their projection number */
1556 key.u.proj = get_Proj_proj(irn);
/* Sel nodes are distinguished by the selected entity */
1559 key.u.ent = get_Sel_entity(irn);
1565 entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
1567 } /* lambda_opcode */
1569 /** lambda n.(n[i].partition) */
/* What-function: the partition of input env->lambda_input serves as the
 * split id. Input -1 denotes the block input, taken from the Proj-skipped
 * node. NOTE(review): the early-return bodies of the guards are elided
 * from this view (gaps in embedded numbering). */
1570 static void *lambda_partition(const node_t *node, environment_t *env) {
1571 ir_node *skipped = skip_Proj(node->node);
1574 int i = env->lambda_input;
1576 if (i >= get_irn_arity(node->node)) {
1578 * We are outside the allowed range: This can happen even
1579 * if we have split by opcode first: doing so might move Followers
1580 * to Leaders and those will have a different opcode!
1581 * Note that in this case the partition is on the cprop list and will be
1587 /* ignore the "control input" for non-pinned nodes
1588 if we are running in GCSE mode */
1589 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
/* block input (-1) is read from the Proj-skipped node, data inputs from the node itself */
1592 pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
1593 p = get_irn_node(pred);
1596 } /* lambda_partition */
1598 /** lambda n.(n[i].partition) for commutative nodes */
/* Like lambda_partition, but for commutative binops the two input
 * partitions are ordered by pointer value so Add(a,b) and Add(b,a) yield
 * the same id pair: input 0 maps to the "smaller", input 1 to the "bigger"
 * partition pointer. NOTE(review): guard bodies and the selection between
 * the i==0 / i==1 returns are partially elided from this view. */
1599 static void *lambda_commutative_partition(const node_t *node, environment_t *env) {
1600 ir_node *irn = node->node;
1601 ir_node *skipped = skip_Proj(irn);
1602 ir_node *pred, *left, *right;
1604 partition_t *pl, *pr;
1605 int i = env->lambda_input;
1607 if (i >= get_irn_arity(node->node)) {
1609 * We are outside the allowed range: This can happen even
1610 * if we have split by opcode first: doing so might move Followers
1611 * to Leaders and those will have a different opcode!
1612 * Note that in this case the partition is on the cprop list and will be
1618 /* ignore the "control input" for non-pinned nodes
1619 if we are running in GCSE mode */
1620 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
1624 pred = get_irn_n(skipped, i);
1625 p = get_irn_node(pred);
1629 if (is_op_commutative(get_irn_op(irn))) {
1630 /* normalize partition order by returning the "smaller" on input 0,
1631 the "bigger" on input 1. */
1632 left = get_binop_left(irn);
1633 pl = get_irn_node(left)->part;
1634 right = get_binop_right(irn);
1635 pr = get_irn_node(right)->part;
/* pointer comparison is an arbitrary but stable total order on partitions */
1638 return pl < pr ? pl : pr;
1640 return pl > pr ? pl : pr;
1642 /* a not split out Follower */
1643 pred = get_irn_n(irn, i);
1644 p = get_irn_node(pred);
1648 } /* lambda_commutative_partition */
1651 * Returns true if a type is a constant.
/* A lattice element is "constant" if its tarval member is a proper
 * constant tarval, or (for the symconst case) its symbol is an entity.
 * NOTE(review): the closing brace is elided from this view. */
1653 static int is_con(const lattice_elem_t type) {
1654 /* be conservative */
1655 if (is_tarval(type.tv))
1656 return tarval_is_constant(type.tv);
1657 return is_entity(type.sym.entity_p);
1661 * Implements split_by().
1663 * @param X the partition to split
1664 * @param env the environment
/*
 * Cascade of splits: first by lattice type (lambda_type), then — for
 * partitions that are neither TOP nor constant — by opcode
 * (lambda_opcode), and finally, repeatedly, by the partition of every
 * input index from arity-1 down to -1 (block input). Splitting by one
 * input can create partitions that differ by another input, hence the
 * do/while re-collection loops over P, Q and R.
 * NOTE(review): the do-loop heads, several pops (Y, Z, R) and the
 * early return are elided from this view (gaps in embedded numbering).
 */
1666 static void split_by(partition_t *X, environment_t *env) {
1667 partition_t *I, *P = NULL;
1670 dump_partition("split_by", X);
1672 if (X->n_leader == 1) {
1673 /* we have only one leader, no need to split, just check it's type */
1674 node_t *x = get_first_node(X);
1675 X->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1679 DEBUG_ONLY(what_reason = "lambda n.(n.type)";)
1680 P = split_by_what(X, lambda_type, &P, env);
1683 /* adjust the type tags, we have split partitions by type */
1684 for (I = P; I != NULL; I = I->split_next) {
1685 node_t *x = get_first_node(I);
1686 I->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1693 if (Y->n_leader > 1) {
1694 /* we do not want split the TOP or constant partitions */
1695 if (! Y->type_is_T_or_C) {
1696 partition_t *Q = NULL;
1698 DEBUG_ONLY(what_reason = "lambda n.(n.opcode)";)
1699 Q = split_by_what(Y, lambda_opcode, &Q, env);
1706 if (Z->n_leader > 1) {
1707 const node_t *first = get_first_node(Z);
1708 int arity = get_irn_arity(first->node);
1710 what_func what = lambda_partition;
1711 DEBUG_ONLY(char buf[64];)
/* commutative ops need the order-normalizing What function */
1713 if (env->commutative && is_op_commutative(get_irn_op(first->node)))
1714 what = lambda_commutative_partition;
1717 * BEWARE: during splitting by input 2 for instance we might
1718 * create new partitions which are different by input 1, so collect
1719 * them and split further.
1721 Z->split_next = NULL;
/* iterate inputs from last data input down to the block input (-1) */
1724 for (input = arity - 1; input >= -1; --input) {
1726 partition_t *Z_prime = R;
1729 if (Z_prime->n_leader > 1) {
1730 env->lambda_input = input;
1731 DEBUG_ONLY(snprintf(buf, sizeof(buf), "lambda n.(n[%d].partition)", input);)
1732 DEBUG_ONLY(what_reason = buf;)
1733 S = split_by_what(Z_prime, what, &S, env);
1736 Z_prime->split_next = S;
1739 } while (R != NULL);
1744 } while (Q != NULL);
1747 } while (P != NULL);
1751 * (Re-)compute the type for a given node.
1753 * @param node the node
/*
 * Generic transfer function: TOP in an unreachable block, TOP if any data
 * input is TOP; otherwise mode_X nodes become "reachable" and all others
 * get computed_value() of the IR node. NOTE(review): the early returns
 * after the TOP assignments and the else-branch lines are elided from
 * this view (gaps in embedded numbering).
 */
1755 static void default_compute(node_t *node) {
1757 ir_node *irn = node->node;
1758 node_t *block = get_irn_node(get_nodes_block(irn));
1760 if (block->type.tv == tarval_unreachable) {
1761 node->type.tv = tarval_top;
1765 /* if any of the data inputs have type top, the result is type top */
1766 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
1767 ir_node *pred = get_irn_n(irn, i);
1768 node_t *p = get_irn_node(pred);
1770 if (p->type.tv == tarval_top) {
1771 node->type.tv = tarval_top;
1776 if (get_irn_mode(node->node) == mode_X)
1777 node->type.tv = tarval_reachable;
1779 node->type.tv = computed_value(irn);
1780 } /* default_compute */
1783 * (Re-)compute the type for a Block node.
1785 * @param node the node
/*
 * A block is "reachable" if it is the start block or any control-flow
 * predecessor is reachable; otherwise it stays TOP (unreachable so far).
 * NOTE(review): the early returns after the reachable assignments are
 * elided from this view.
 */
1787 static void compute_Block(node_t *node) {
1789 ir_node *block = node->node;
1791 if (block == get_irg_start_block(current_ir_graph)) {
1792 /* start block is always reachable */
1793 node->type.tv = tarval_reachable;
1797 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1798 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1800 if (pred->type.tv == tarval_reachable) {
1801 /* A block is reachable, if at least of predecessor is reachable. */
1802 node->type.tv = tarval_reachable;
/* no reachable predecessor found: block remains TOP */
1806 node->type.tv = tarval_top;
1807 } /* compute_Block */
1810 * (Re-)compute the type for a Bad node.
1812 * @param node the node
1814 static void compute_Bad(node_t *node) {
1815 /* Bad nodes ALWAYS compute Top */
1816 node->type.tv = tarval_top;
1820 * (Re-)compute the type for an Unknown node.
1822 * @param node the node
1824 static void compute_Unknown(node_t *node) {
1825 /* While Unknown nodes should compute Top this is dangerous:
1826 * a Top input to a Cond would lead to BOTH control flows unreachable.
1827 * While this is correct in the given semantics, it would destroy the Firm
1830 * It would be safe to compute Top IF it can be assured, that only Cmp
1831 * nodes are inputs to Conds. We check that first.
1832 * This is the way Frontends typically build Firm, but some optimizations
1833 * (cond_eval for instance) might replace them by Phib's...
/* tarval_UNKNOWN is the conservative choice selected by the discussion above */
1835 node->type.tv = tarval_UNKNOWN;
1836 } /* compute_Unknown */
1839 * (Re-)compute the type for a Jmp node.
1841 * @param node the node
/* A Jmp simply propagates the reachability of its block. */
1843 static void compute_Jmp(node_t *node) {
1844 node_t *block = get_irn_node(get_nodes_block(node->node));
1846 node->type = block->type;
1850 * (Re-)compute the type for the End node.
1852 * @param node the node
1854 static void compute_End(node_t *node) {
1855 /* the End node is NOT dead of course */
1856 node->type.tv = tarval_reachable;
1860 * (Re-)compute the type for a SymConst node.
1862 * @param node the node
/*
 * TOP in an unreachable block; for address-of-entity SymConsts the type is
 * the symbol itself (an entity constant), otherwise fall back to
 * computed_value(). NOTE(review): early returns/breaks and the default
 * case label are elided from this view.
 */
1864 static void compute_SymConst(node_t *node) {
1865 ir_node *irn = node->node;
1866 node_t *block = get_irn_node(get_nodes_block(irn));
1868 if (block->type.tv == tarval_unreachable) {
1869 node->type.tv = tarval_top;
1872 switch (get_SymConst_kind(irn)) {
1873 case symconst_addr_ent:
1874 /* case symconst_addr_name: cannot handle this yet */
1875 node->type.sym = get_SymConst_symbol(irn);
1878 node->type.tv = computed_value(irn);
1880 } /* compute_SymConst */
1883 * (Re-)compute the type for a Phi node.
1885 * @param node the node
/*
 * Phi implements the lattice Meet over its live inputs: ignore inputs from
 * unreachable predecessors and TOP inputs; any BOTTOM input or two
 * different constants force BOTTOM; otherwise the common constant wins.
 * NOTE(review): the 'continue'/'return' bodies of several branches and the
 * final assignment of the meet result are elided from this view.
 */
1887 static void compute_Phi(node_t *node) {
1889 ir_node *phi = node->node;
1890 lattice_elem_t type;
1892 /* if a Phi is in a unreachable block, its type is TOP */
1893 node_t *block = get_irn_node(get_nodes_block(phi));
1895 if (block->type.tv == tarval_unreachable) {
1896 node->type.tv = tarval_top;
1900 /* Phi implements the Meet operation */
1901 type.tv = tarval_top;
1902 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
1903 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
/* the control-flow predecessor matching this data input */
1904 node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
1906 if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
1907 /* ignore TOP inputs: We must check here for unreachable blocks,
1908 because Firm constants live in the Start Block are NEVER Top.
1909 Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
1910 comes from a unreachable input. */
1913 if (pred->type.tv == tarval_bottom) {
1914 node->type.tv = tarval_bottom;
1916 } else if (type.tv == tarval_top) {
1917 /* first constant found */
1919 } else if (type.tv != pred->type.tv) {
1920 /* different constants or tarval_bottom */
1921 node->type.tv = tarval_bottom;
1924 /* else nothing, constants are the same */
1930 * (Re-)compute the type for an Add. Special case: one nodes is a Zero Const.
1932 * @param node the node
/*
 * TOP dominates, then BOTTOM; for two tarval constants use tarval_add();
 * a zero tarval on either side makes the Add an identity of the other
 * operand (the type is copied — assignment lines are elided from this
 * view). NOTE(review): several branch bodies are missing here (gaps in
 * embedded numbering); verify against the full file.
 */
1934 static void compute_Add(node_t *node) {
1935 ir_node *sub = node->node;
1936 node_t *l = get_irn_node(get_Add_left(sub));
1937 node_t *r = get_irn_node(get_Add_right(sub));
1938 lattice_elem_t a = l->type;
1939 lattice_elem_t b = r->type;
1942 if (a.tv == tarval_top || b.tv == tarval_top) {
1943 node->type.tv = tarval_top;
1944 } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
1945 node->type.tv = tarval_bottom;
1947 /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
1948 must call tarval_add() first to handle this case! */
1949 if (is_tarval(a.tv)) {
1950 if (is_tarval(b.tv)) {
1951 node->type.tv = tarval_add(a.tv, b.tv);
1954 mode = get_tarval_mode(a.tv);
1955 if (a.tv == get_mode_null(mode)) {
1959 } else if (is_tarval(b.tv)) {
1960 mode = get_tarval_mode(b.tv);
1961 if (b.tv == get_mode_null(mode)) {
1966 node->type.tv = tarval_bottom;
1971 * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
1973 * @param node the node
/*
 * TOP dominates; two constants evaluate via tarval_sub() (with identity
 * shortcuts for zero operands); congruent non-float operands yield the
 * zero of the Sub's mode. The by_all_const flag records that the node was
 * once fully constant-evaluated, so a later congruence-derived value that
 * differs must fall to BOTTOM instead of oscillating.
 * NOTE(review): several branch bodies and the final assignment from tv
 * are elided from this view (gaps in embedded numbering).
 */
1975 static void compute_Sub(node_t *node) {
1976 ir_node *sub = node->node;
1977 node_t *l = get_irn_node(get_Sub_left(sub));
1978 node_t *r = get_irn_node(get_Sub_right(sub));
1979 lattice_elem_t a = l->type;
1980 lattice_elem_t b = r->type;
1983 if (a.tv == tarval_top || b.tv == tarval_top) {
1984 node->type.tv = tarval_top;
1985 } else if (is_con(a) && is_con(b)) {
1986 if (is_tarval(a.tv) && is_tarval(b.tv)) {
1987 node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
1988 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
1990 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
1993 node->type.tv = tarval_bottom;
1995 node->by_all_const = 1;
1996 } else if (r->part == l->part &&
1997 (!mode_is_float(get_irn_mode(l->node)))) {
1999 * BEWARE: a - a is NOT always 0 for floating Point values, as
2000 * NaN op NaN = NaN, so we must check this here.
2002 ir_mode *mode = get_irn_mode(sub);
2003 tv = get_mode_null(mode);
2005 /* if the node was ONCE evaluated by all constants, but now
2006 this breaks AND we get from the argument partitions a different
2007 result, switch to bottom.
2008 This happens because initially all nodes are in the same partition ... */
2009 if (node->by_all_const && node->type.tv != tv)
2013 node->type.tv = tarval_bottom;
2018 * (Re-)compute the type for an Eor. Special case: both nodes are congruent.
2020 * @param node the node
/*
 * Mirrors compute_Sub for exclusive-or: two constants evaluate via
 * tarval_eor() (zero is the neutral element), congruent operands yield
 * the zero of the mode (x ^ x == 0 holds for all integer modes, so no
 * float guard is needed here). Same by_all_const fall-to-BOTTOM rule.
 * NOTE(review): several branch bodies and the tv assignment are elided
 * from this view (gaps in embedded numbering).
 */
2022 static void compute_Eor(node_t *node) {
2023 ir_node *eor = node->node;
2024 node_t *l = get_irn_node(get_Eor_left(eor));
2025 node_t *r = get_irn_node(get_Eor_right(eor));
2026 lattice_elem_t a = l->type;
2027 lattice_elem_t b = r->type;
2030 if (a.tv == tarval_top || b.tv == tarval_top) {
2031 node->type.tv = tarval_top;
2032 } else if (is_con(a) && is_con(b)) {
2033 if (is_tarval(a.tv) && is_tarval(b.tv)) {
2034 node->type.tv = tarval_eor(a.tv, b.tv);
2035 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
2037 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
2040 node->type.tv = tarval_bottom;
2042 node->by_all_const = 1;
2043 } else if (r->part == l->part) {
2044 ir_mode *mode = get_irn_mode(eor);
2045 tv = get_mode_null(mode);
2047 /* if the node was ONCE evaluated by all constants, but now
2048 this breaks AND we get from the argument partitions a different
2049 result, switch to bottom.
2050 This happens because initially all nodes are in the same partition ... */
2051 if (node->by_all_const && node->type.tv != tv)
2055 node->type.tv = tarval_bottom;
2060 * (Re-)compute the type for Cmp.
2062 * @param node the node
/*
 * Cmp itself carries a boolean-ish summary type: b_true when a result can
 * probably be computed (TOP input, both constants, or congruent inputs),
 * BOTTOM otherwise. NOTE(review): the condition selecting between the
 * b_true and top assignments in the first branch is elided from this view.
 */
2064 static void compute_Cmp(node_t *node) {
2065 ir_node *cmp = node->node;
2066 node_t *l = get_irn_node(get_Cmp_left(cmp));
2067 node_t *r = get_irn_node(get_Cmp_right(cmp));
2068 lattice_elem_t a = l->type;
2069 lattice_elem_t b = r->type;
2071 if (a.tv == tarval_top || b.tv == tarval_top) {
2074 * Top is congruent to any other value, we can
2075 * calculate the compare result.
2077 node->type.tv = tarval_b_true;
2079 node->type.tv = tarval_top;
2081 } else if (is_con(a) && is_con(b)) {
2082 /* both nodes are constants, we can probably do something */
2083 node->type.tv = tarval_b_true;
2084 } else if (r->part == l->part) {
2085 /* both nodes congruent, we can probably do something */
2086 node->type.tv = tarval_b_true;
2088 node->type.tv = tarval_bottom;
2090 } /* compute_Cmp */
2093 * (Re-)compute the type for a Proj(Cmp).
2095 * @param node the node
2096 * @param cond the predecessor Cmp node
/*
 * Evaluates the comparison projection: with a TOP input the Eq-bit trick
 * gives an optimistic answer; two constants use default_compute; congruent
 * operands answer the Eq-bit of pnc directly — guarded against float
 * equality because NaN != NaN (Lt/Gt are still safe). Same by_all_const
 * fall-to-BOTTOM rule as compute_Sub. NOTE(review): branch bodies and the
 * final assignment from tv are elided from this view.
 */
2098 static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
2099 ir_node *proj = node->node;
2100 node_t *l = get_irn_node(get_Cmp_left(cmp));
2101 node_t *r = get_irn_node(get_Cmp_right(cmp));
2102 lattice_elem_t a = l->type;
2103 lattice_elem_t b = r->type;
2104 pn_Cmp pnc = get_Proj_proj(proj);
2107 if (a.tv == tarval_top || b.tv == tarval_top) {
/* (pnc & Eq) ^ Eq: assume TOP equal to anything, so Eq-projs are true, others false */
2110 tv = new_tarval_from_long((pnc & pn_Cmp_Eq) ^ pn_Cmp_Eq, mode_b);
2113 node->type.tv = tarval_top;
2115 } else if (is_con(a) && is_con(b)) {
2116 default_compute(node);
2117 node->by_all_const = 1;
2118 } else if (r->part == l->part &&
2119 (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
2121 * BEWARE: a == a is NOT always True for floating Point values, as
2122 * NaN != NaN is defined, so we must check this here.
2124 tv = new_tarval_from_long(pnc & pn_Cmp_Eq, mode_b);
2129 /* if the node was ONCE evaluated by all constants, but now
2130 this breaks AND we get from the argument partitions a different
2131 result, switch to bottom.
2132 This happens because initially all nodes are in the same partition ... */
2133 if (node->by_all_const && node->type.tv != tv)
2137 node->type.tv = tarval_bottom;
2139 } /* compute_Proj_Cmp */
2142 * (Re-)compute the type for a Proj(Cond).
2144 * @param node the node
2145 * @param cond the predecessor Cond node
/*
 * Reachability of a Cond projection. Boolean selector: the proj matching
 * the selector value (or a BOTTOM selector) is reachable, the other —
 * and any proj of a TOP selector — unreachable. Switch selector: a
 * constant value reaches exactly one case; the default proj is reachable
 * only when NO other out-proj matches the value. NOTE(review): some
 * loop/branch closers are elided from this view.
 */
2147 static void compute_Proj_Cond(node_t *node, ir_node *cond) {
2148 ir_node *proj = node->node;
2149 long pnc = get_Proj_proj(proj);
2150 ir_node *sel = get_Cond_selector(cond);
2151 node_t *selector = get_irn_node(sel);
2153 if (get_irn_mode(sel) == mode_b) {
2155 if (pnc == pn_Cond_true) {
2156 if (selector->type.tv == tarval_b_false) {
2157 node->type.tv = tarval_unreachable;
2158 } else if (selector->type.tv == tarval_b_true) {
2159 node->type.tv = tarval_reachable;
2160 } else if (selector->type.tv == tarval_bottom) {
/* unknown selector: must assume both branches can be taken */
2161 node->type.tv = tarval_reachable;
2163 assert(selector->type.tv == tarval_top);
2164 node->type.tv = tarval_unreachable;
2167 assert(pnc == pn_Cond_false);
2169 if (selector->type.tv == tarval_b_false) {
2170 node->type.tv = tarval_reachable;
2171 } else if (selector->type.tv == tarval_b_true) {
2172 node->type.tv = tarval_unreachable;
2173 } else if (selector->type.tv == tarval_bottom) {
2174 node->type.tv = tarval_reachable;
2176 assert(selector->type.tv == tarval_top);
2177 node->type.tv = tarval_unreachable;
/* non-boolean selector: a switch Cond */
2182 if (selector->type.tv == tarval_bottom) {
2183 node->type.tv = tarval_reachable;
2184 } else if (selector->type.tv == tarval_top) {
2185 node->type.tv = tarval_unreachable;
2187 long value = get_tarval_long(selector->type.tv);
2188 if (pnc == get_Cond_defaultProj(cond)) {
2189 /* default switch, have to check ALL other cases */
2192 for (i = get_irn_n_outs(cond) - 1; i >= 0; --i) {
2193 ir_node *succ = get_irn_out(cond, i);
2197 if (value == get_Proj_proj(succ)) {
2198 /* we found a match, will NOT take the default case */
2199 node->type.tv = tarval_unreachable;
2203 /* all cases checked, no match, will take default case */
2204 node->type.tv = tarval_reachable;
/* non-default proj: reachable exactly when the selector value matches */
2207 node->type.tv = value == pnc ? tarval_reachable : tarval_unreachable;
2211 } /* compute_Proj_Cond */
2214 * (Re-)compute the type for a Proj-Node.
2216 * @param node the node
/*
 * Dispatcher for Proj nodes: TOP in unreachable blocks or for TOP
 * predecessors; mode_M is always BOTTOM; non-X modes dispatch to
 * compute_Proj_Cmp (when the pred is a Cmp — that guard is elided from
 * this view) or default_compute; mode_X dispatches on the pred opcode
 * (Start: reachable, Cond: compute_Proj_Cond, else default).
 * NOTE(review): case labels, early returns and breaks are elided here.
 */
2218 static void compute_Proj(node_t *node) {
2219 ir_node *proj = node->node;
2220 ir_mode *mode = get_irn_mode(proj);
2221 node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
2222 ir_node *pred = get_Proj_pred(proj);
2224 if (block->type.tv == tarval_unreachable) {
2225 /* a Proj in a unreachable Block stay Top */
2226 node->type.tv = tarval_top;
2229 if (get_irn_node(pred)->type.tv == tarval_top) {
2230 /* if the predecessor is Top, its Proj follow */
2231 node->type.tv = tarval_top;
2235 if (mode == mode_M) {
2236 /* mode M is always bottom */
2237 node->type.tv = tarval_bottom;
2240 if (mode != mode_X) {
2242 compute_Proj_Cmp(node, pred);
2244 default_compute(node);
2247 /* handle mode_X nodes */
2249 switch (get_irn_opcode(pred)) {
2251 /* the Proj_X from the Start is always reachable.
2252 However this is already handled at the top. */
2253 node->type.tv = tarval_reachable;
2256 compute_Proj_Cond(node, pred);
2259 default_compute(node);
2261 } /* compute_Proj */
2264 * (Re-)compute the type for a Confirm.
2266 * @param node the node
/*
 * Confirm(x == bound) with a constant bound takes the bound's constant
 * type; every other Confirm behaves as a Copy of its value input.
 * NOTE(review): the early return after copying the bound type is elided
 * from this view.
 */
2268 static void compute_Confirm(node_t *node) {
2269 ir_node *confirm = node->node;
2270 node_t *pred = get_irn_node(get_Confirm_value(confirm));
2272 if (get_Confirm_cmp(confirm) == pn_Cmp_Eq) {
2273 node_t *bound = get_irn_node(get_Confirm_bound(confirm));
2275 if (is_con(bound->type)) {
2276 /* is equal to a constant */
2277 node->type = bound->type;
2281 /* a Confirm is a copy OR a Const */
2282 node->type = pred->type;
2283 } /* compute_Confirm */
2286 * (Re-)compute the type for a Max.
2288 * @param node the node
/*
 * Max transfer function: TOP dominates; with two constants, the mode's
 * minimum is the neutral element (Max(min, x) == x), otherwise compare the
 * tarvals and keep the greater; congruent operands make Max(x, x) an
 * identity. NOTE(review): several assignment bodies (e.g. after the
 * tv_min tests and in the congruent branch) are elided from this view.
 */
2290 static void compute_Max(node_t *node) {
2291 ir_node *op = node->node;
2292 node_t *l = get_irn_node(get_binop_left(op));
2293 node_t *r = get_irn_node(get_binop_right(op));
2294 lattice_elem_t a = l->type;
2295 lattice_elem_t b = r->type;
2297 if (a.tv == tarval_top || b.tv == tarval_top) {
2298 node->type.tv = tarval_top;
2299 } else if (is_con(a) && is_con(b)) {
2300 /* both nodes are constants, we can probably do something */
2302 /* this case handles SymConsts as well */
2305 ir_mode *mode = get_irn_mode(op);
2306 tarval *tv_min = get_mode_min(mode);
2310 else if (b.tv == tv_min)
2312 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2313 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2314 node->type.tv = a.tv;
2316 node->type.tv = b.tv;
/* incomparable tarvals (e.g. different kinds): no constant result */
2318 node->type.tv = tarval_bad;
2321 } else if (r->part == l->part) {
2322 /* both nodes congruent, we can probably do something */
2325 node->type.tv = tarval_bottom;
2330 * (Re-)compute the type for a Min.
2332 * @param node the node
/*
 * Min transfer function: TOP dominates; with two constants, the mode's
 * maximum is the neutral element (Min(max, x) == x); congruent operands
 * make Min(x, x) an identity.
 * NOTE(review): the visible comparison uses pn_Cmp_Gt and selects a.tv
 * when a > b — for a Min that picks the LARGER operand, which looks
 * inverted (pn_Cmp_Lt expected); intervening lines are elided from this
 * view, so verify against the full file before relying on it.
 */
2334 static void compute_Min(node_t *node) {
2335 ir_node *op = node->node;
2336 node_t *l = get_irn_node(get_binop_left(op));
2337 node_t *r = get_irn_node(get_binop_right(op));
2338 lattice_elem_t a = l->type;
2339 lattice_elem_t b = r->type;
2341 if (a.tv == tarval_top || b.tv == tarval_top) {
2342 node->type.tv = tarval_top;
2343 } else if (is_con(a) && is_con(b)) {
2344 /* both nodes are constants, we can probably do something */
2346 /* this case handles SymConsts as well */
2349 ir_mode *mode = get_irn_mode(op);
2350 tarval *tv_max = get_mode_max(mode);
2354 else if (b.tv == tv_max)
2356 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2357 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2358 node->type.tv = a.tv;
2360 node->type.tv = b.tv;
2362 node->type.tv = tarval_bad;
2365 } else if (r->part == l->part) {
2366 /* both nodes congruent, we can probably do something */
2369 node->type.tv = tarval_bottom;
2374 * (Re-)compute the type for a given node.
2376 * @param node the node
/*
 * Central dispatch: non-Block nodes in unreachable blocks become TOP
 * directly; otherwise the per-opcode compute function installed in
 * op->ops.generic is looked up (the call itself is elided from this view).
 */
2378 static void compute(node_t *node) {
2381 if (is_no_Block(node->node)) {
2382 node_t *block = get_irn_node(get_nodes_block(node->node));
2384 if (block->type.tv == tarval_unreachable) {
2385 node->type.tv = tarval_top;
/* generic slot holds the opcode-specific compute_* function */
2390 func = (compute_func)node->node->op->ops.generic;
2396 * Identity functions: Note that one might thing that identity() is just a
2397 * synonym for equivalent_node(). While this is true, we cannot use it for the algorithm
2398 * here, because it expects that the identity node is one of the inputs, which is NOT
2399 * always true for equivalent_node() which can handle (and does sometimes) DAGs.
2400 * So, we have our own implementation, which copies some parts of equivalent_node()
2404 * Calculates the Identity for Phi nodes
/*
 * A Phi is a follower (identity) if all its live inputs (those whose
 * control-flow predecessor is reachable) lie in the SAME partition; that
 * common input node is returned. Incongruent live inputs mean the Phi is
 * a proper leader. NOTE(review): the first-input capture and the
 * "not a follower" return are elided from this view.
 */
2406 static node_t *identity_Phi(node_t *node) {
2407 ir_node *phi = node->node;
2408 ir_node *block = get_nodes_block(phi);
2409 node_t *n_part = NULL;
2412 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
2413 node_t *pred_X = get_irn_node(get_Block_cfgpred(block, i));
2415 if (pred_X->type.tv == tarval_reachable) {
2416 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
2420 else if (n_part->part != pred->part) {
2421 /* incongruent inputs, not a follower */
2426 /* if n_part is NULL here, all inputs path are dead, the Phi computes
2427 * tarval_top, is in the TOP partition and should NOT being split! */
2428 assert(n_part != NULL);
2430 } /* identity_Phi */
2433 * Calculates the Identity for commutative 0 neutral nodes.
/*
 * For commutative binops whose neutral element is zero (e.g. Or, Add-like
 * ops registered to this handler): if one operand's type is the mode's
 * zero, the node is an identity of the other operand. FP ops only qualify
 * when fp_strict_algebraic is off (−0.0 + x issues). NOTE(review): the
 * return statements of the zero cases are elided from this view.
 */
2435 static node_t *identity_comm_zero_binop(node_t *node) {
2436 ir_node *op = node->node;
2437 node_t *a = get_irn_node(get_binop_left(op));
2438 node_t *b = get_irn_node(get_binop_right(op));
2439 ir_mode *mode = get_irn_mode(op);
2442 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2443 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2446 /* node: no input should be tarval_top, else the binop would be also
2447 * Top and not being split. */
2448 zero = get_mode_null(mode);
2449 if (a->type.tv == zero)
2451 if (b->type.tv == zero)
2454 } /* identity_comm_zero_binop */
2457 * Calculates the Identity for Shift nodes.
/*
 * x SHIFT 0 == x: if the right (shift-amount) operand's type is the zero
 * of ITS mode (shift count mode may differ from the value mode), the node
 * is an identity of the left operand. NOTE(review): the fall-through
 * "return node" for the non-identity case is elided from this view.
 */
2459 static node_t *identity_shift(node_t *node) {
2460 ir_node *op = node->node;
2461 node_t *b = get_irn_node(get_binop_right(op));
2462 ir_mode *mode = get_irn_mode(b->node);
2465 /* node: no input should be tarval_top, else the binop would be also
2466 * Top and not being split. */
2467 zero = get_mode_null(mode);
2468 if (b->type.tv == zero)
2469 return get_irn_node(get_binop_left(op));
2471 } /* identity_shift */
2474 * Calculates the Identity for Mul nodes.
/*
 * x * 1 == 1 * x == x: if either operand's type is the mode's one, the
 * Mul is an identity of the other operand. FP Muls only qualify when
 * fp_strict_algebraic is off. NOTE(review): the return statements of the
 * one-cases are elided from this view.
 */
2476 static node_t *identity_Mul(node_t *node) {
2477 ir_node *op = node->node;
2478 node_t *a = get_irn_node(get_Mul_left(op));
2479 node_t *b = get_irn_node(get_Mul_right(op));
2480 ir_mode *mode = get_irn_mode(op);
2483 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2484 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2487 /* node: no input should be tarval_top, else the binop would be also
2488 * Top and not being split. */
2489 one = get_mode_one(mode);
2490 if (a->type.tv == one)
2492 if (b->type.tv == one)
2495 } /* identity_Mul */
2498 * Calculates the Identity for Sub nodes.
/*
 * x - 0 == x: if the right operand's type is the mode's zero, the Sub is
 * an identity of the left operand. FP Subs only qualify when
 * fp_strict_algebraic is off (x - 0.0 differs for −0.0 under strict FP).
 */
2500 static node_t *identity_Sub(node_t *node) {
2501 ir_node *sub = node->node;
2502 node_t *b = get_irn_node(get_Sub_right(sub));
2503 ir_mode *mode = get_irn_mode(sub);
2505 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2506 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2509 /* node: no input should be tarval_top, else the binop would be also
2510 * Top and not being split. */
2511 if (b->type.tv == get_mode_null(mode))
2512 return get_irn_node(get_Sub_left(sub));
2514 } /* identity_Sub */
2517 * Calculates the Identity for And nodes.
/*
 * x & ~0 == x: the all-ones value of the mode is And's neutral element;
 * if either operand's type equals it, the And is an identity of the
 * other operand. NOTE(review): the return statements of the neutral
 * cases are elided from this view.
 */
2519 static node_t *identity_And(node_t *node) {
2520 ir_node *and = node->node;
2521 node_t *a = get_irn_node(get_And_left(and));
2522 node_t *b = get_irn_node(get_And_right(and));
2523 tarval *neutral = get_mode_all_one(get_irn_mode(and));
2525 /* node: no input should be tarval_top, else the And would be also
2526 * Top and not being split. */
2527 if (a->type.tv == neutral)
2529 if (b->type.tv == neutral)
2532 } /* identity_And */
2535 * Calculates the Identity for Confirm nodes.
/* A Confirm never changes its value: it is always an identity (copy) of
 * its value input. (The node itself is kept — see file header note that
 * Confirms are handled like Copies but never removed.) */
2537 static node_t *identity_Confirm(node_t *node) {
2538 ir_node *confirm = node->node;
2540 /* a Confirm is always a Copy */
2541 return get_irn_node(get_Confirm_value(confirm));
2542 } /* identity_Confirm */
2545 * Calculates the Identity for Mux nodes.
/*
 * Mux(sel, t, f) is an identity of t/f when both inputs are congruent
 * (2-input identity), or when the boolean selector is a known constant
 * selecting one side. The 1-input identity is explicitly unsupported
 * (see file header). NOTE(review): the return statements of the selected
 * branches are elided from this view.
 */
2547 static node_t *identity_Mux(node_t *node) {
2548 ir_node *mux = node->node;
2549 node_t *t = get_irn_node(get_Mux_true(mux));
2550 node_t *f = get_irn_node(get_Mux_false(mux));
2553 if (t->part == f->part)
2556 /* for now, the 1-input identity is not supported */
2558 sel = get_irn_node(get_Mux_sel(mux));
2560 /* Mux sel input is mode_b, so it is always a tarval */
2561 if (sel->type.tv == tarval_b_true)
2563 if (sel->type.tv == tarval_b_false)
2567 } /* identity_Mux */
2570 * Calculates the Identity for Min nodes.
/*
 * Min(x, x) is an identity (congruent operands), and Min(MAX, x) == x
 * since the mode's maximum is Min's neutral element — this holds even
 * for NaN operands. NOTE(review): the return statements of the
 * individual cases are elided from this view.
 */
2572 static node_t *identity_Min(node_t *node) {
2573 ir_node *op = node->node;
2574 node_t *a = get_irn_node(get_binop_left(op));
2575 node_t *b = get_irn_node(get_binop_right(op));
2576 ir_mode *mode = get_irn_mode(op);
2579 if (a->part == b->part) {
2580 /* leader of multiple predecessors */
2584 /* works even with NaN */
2585 tv_max = get_mode_max(mode);
2586 if (a->type.tv == tv_max)
2588 if (b->type.tv == tv_max)
2591 } /* identity_Min */
2594 * Calculates the Identity for Max nodes.
/*
 * Max(x, x) is an identity (congruent operands), and Max(MIN, x) == x
 * since the mode's minimum is Max's neutral element — this holds even
 * for NaN operands. NOTE(review): the return statements of the
 * individual cases are elided from this view.
 */
2596 static node_t *identity_Max(node_t *node) {
2597 ir_node *op = node->node;
2598 node_t *a = get_irn_node(get_binop_left(op));
2599 node_t *b = get_irn_node(get_binop_right(op));
2600 ir_mode *mode = get_irn_mode(op);
2603 if (a->part == b->part) {
2604 /* leader of multiple predecessors */
2608 /* works even with NaN */
2609 tv_min = get_mode_min(mode);
2610 if (a->type.tv == tv_min)
2612 if (b->type.tv == tv_min)
2615 } /* identity_Max */
2618 * Calculates the Identity for nodes.
/*
 * Dispatcher: routes to the opcode-specific identity_* helper; opcodes
 * without one are never identities (the default "return node" is elided
 * from this view, as are the case labels — gaps in embedded numbering).
 */
2620 static node_t *identity(node_t *node) {
2621 ir_node *irn = node->node;
2623 switch (get_irn_opcode(irn)) {
2625 return identity_Phi(node);
2627 return identity_Mul(node);
/* zero-neutral commutative ops (e.g. Or/Add-family) share one handler */
2631 return identity_comm_zero_binop(node);
2636 return identity_shift(node);
2638 return identity_And(node);
2640 return identity_Sub(node);
2642 return identity_Confirm(node);
2644 return identity_Mux(node);
2646 return identity_Min(node);
2648 return identity_Max(node);
2655 * Node follower is a (new) follower of leader, segregate Leader
/*
 * Moves the out-edge of `leader` that points to `follower` from the
 * leader-edge region (indices > n_followers) into the follower-edge
 * region at the front of the out array, preserving the order of the
 * remaining leader edges (stable rotate-right of out[n_followers+1..i]).
 */
2658 static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader) {
2659 ir_node *l = leader->node;
2660 int j, i, n = get_irn_n_outs(l);
2662 DB((dbg, LEVEL_2, "%+F is a follower of %+F\n", follower, leader->node));
2663 /* The leader edges must remain sorted, but follower edges can
/* search the leader-edge region (starts after the follower edges) */
2665 for (i = leader->n_followers + 1; i <= n; ++i) {
2666 if (l->out[i].use == follower) {
2667 ir_def_use_edge t = l->out[i];
/* shift the intervening leader edges up one slot to keep them sorted */
2669 for (j = i - 1; j >= leader->n_followers + 1; --j)
2670 l->out[j + 1] = l->out[j];
2671 ++leader->n_followers;
2672 l->out[leader->n_followers] = t;
2676 } /* segregate_def_use_chain_1 */
2679 * Node follower is a (new) follower segregate its Leader
2682 * @param follower the follower IR node
/* Applies segregate_def_use_chain_1 to every predecessor (leader) of the
 * new follower, so all of its incoming edges move to follower regions. */
2684 static void segregate_def_use_chain(const ir_node *follower) {
2687 for (i = get_irn_arity(follower) - 1; i >= 0; --i) {
2688 node_t *pred = get_irn_node(get_irn_n(follower, i));
2690 segregate_def_use_chain_1(follower, pred);
2692 } /* segregate_def_use_chain */
2695 * Propagate constant evaluation.
2697 * @param env the environment
2699 static void propagate(environment_t *env) {
/* Constant-propagation worklist loop of the combo algorithm:
 * takes partitions from env->cprop, recomputes node types, collects
 * nodes whose type changed ("fallen") and splits them into a new
 * partition, then re-evaluates leader/follower status.
 * NOTE(review): many lines of this function fall into gaps of this
 * view (declarations of X, Y, x, fallen, i, tmp; fallen-list pushes;
 * the compute() call; brace structure) — comments below describe only
 * what the visible lines establish. */
2702 lattice_elem_t old_type;
2704 unsigned n_fallen, old_type_was_T_or_C;
/* Process partitions until the cprop list is empty. */
2707 while (env->cprop != NULL) {
2708 void *oldopcode = NULL;
2710 /* remove the first partition X from cprop */
2713 env->cprop = X->cprop_next;
2715 old_type_was_T_or_C = X->type_is_T_or_C;
2717 DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
/* Drain X's local cprop list of nodes whose type may have changed. */
2720 while (! list_empty(&X->cprop)) {
2721 /* remove the first Node x from X.cprop */
2722 x = list_entry(X->cprop.next, node_t, cprop_list);
2723 //assert(x->part == X);
2724 list_del(&x->cprop_list);
/* A follower whose identity is now itself must become a leader again. */
2727 if (x->is_follower && identity(x) == x) {
2728 /* check the opcode first */
2729 if (oldopcode == NULL) {
2730 oldopcode = lambda_opcode(get_first_node(X), env);
2732 if (oldopcode != lambda_opcode(x, env)) {
2733 if (x->on_fallen == 0) {
2734 /* different opcode -> x falls out of this partition */
2739 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
2743 /* x will make the follower -> leader transition */
2744 follower_to_leader(x);
2747 /* compute a new type for x */
2749 DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
/* Type changed: record x as fallen (once) and push all users on cprop. */
2751 if (x->type.tv != old_type.tv) {
2752 verify_type(old_type, x->type);
2753 DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
2755 if (x->on_fallen == 0) {
2756 /* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
2757 not already on the list. */
2762 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
/* Re-schedule every user of x, since its input type changed. */
2764 for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
2765 ir_node *succ = get_irn_out(x->node, i);
2766 node_t *y = get_irn_node(succ);
2768 /* Add y to y.partition.cprop. */
2769 add_to_cprop(y, env);
/* Split the fallen nodes into their own partition Y unless ALL leaders fell. */
2774 if (n_fallen > 0 && n_fallen != X->n_leader) {
2775 DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
2776 Y = split(&X, fallen, env);
2778 * We have split out fallen node. The type of the result
2779 * partition is NOT set yet.
2781 Y->type_is_T_or_C = 0;
2785 /* remove the flags from the fallen list */
2786 for (x = fallen; x != NULL; x = x->next)
2789 if (old_type_was_T_or_C) {
2792 /* check if some nodes will make the leader -> follower transition */
2793 list_for_each_entry_safe(node_t, y, tmp, &Y->Leader, node_list) {
/* Only non-Top, non-constant leaders can become followers. */
2794 if (y->type.tv != tarval_top && ! is_con(y->type)) {
2795 node_t *eq_node = identity(y);
2797 if (eq_node != y && eq_node->part == y->part) {
2798 DB((dbg, LEVEL_2, "Node %+F is a follower of %+F\n", y->node, eq_node->node));
2799 /* move to Follower */
2801 list_del(&y->node_list);
2802 list_add_tail(&y->node_list, &Y->Follower);
2805 segregate_def_use_chain(y->node);
2815 * Get the leader for a given node from its congruence class.
2817 * @param irn the node
2819 static ir_node *get_leader(node_t *node) {
/* Returns the leader node of `node`'s congruence class: the first node of
 * its partition when the class has more than one leader or `node` is a
 * follower; otherwise (not visible here) presumably `node`'s own irn. */
2820 partition_t *part = node->part;
2822 if (part->n_leader > 1 || node->is_follower) {
2823 if (node->is_follower) {
2824 DB((dbg, LEVEL_2, "Replacing follower %+F\n", node->node));
2827 DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
2829 return get_first_node(part)->node;
2835 * Return non-zero if the control flow predecessor node pred
2836 * is the only reachable control flow exit of its block.
2838 * @param pred the control flow exit
2840 static int can_exchange(ir_node *pred) {
/* Returns non-zero iff `pred` is the only reachable control-flow exit of
 * its block, i.e. the block can safely be merged with its successor.
 * NOTE(review): the first guard(s) and the return statements fall into
 * gaps of this view — a Jmp is presumably always exchangeable. */
2843 else if (is_Jmp(pred))
/* mode_T exit (e.g. a Cond): count its reachable mode_X Projs. */
2845 else if (get_irn_mode(pred) == mode_T) {
2848 /* if the predecessor block has more than one
2849 reachable outputs we cannot remove the block */
2851 for (i = get_irn_n_outs(pred) - 1; i >= 0; --i) {
2852 ir_node *proj = get_irn_out(pred, i);
2855 /* skip non-control flow Proj's */
2856 if (get_irn_mode(proj) != mode_X)
/* Only Projs the analysis proved reachable count as live exits. */
2859 node = get_irn_node(proj);
2860 if (node->type.tv == tarval_reachable) {
2868 } /* can_exchange */
2871 * Block Post-Walker, apply the analysis results on control flow by
2872 * shortening Phi's and Block inputs.
2874 static void apply_cf(ir_node *block, void *ctx) {
/* Block post-walker applying the analysis results to control flow:
 * kills unreachable blocks, removes dead block/Phi inputs, replaces
 * constant Phis, and fuses blocks with a single live predecessor.
 * NOTE(review): several declarations (n, i, j, k) and brace closings
 * fall into gaps of this view; comments describe visible lines only. */
2875 environment_t *env = ctx;
2876 node_t *node = get_irn_node(block);
2878 ir_node **ins, **in_X;
2879 ir_node *phi, *next;
2881 n = get_Block_n_cfgpreds(block);
/* Case 1: the whole block is unreachable. */
2883 if (node->type.tv == tarval_unreachable) {
2886 for (i = n - 1; i >= 0; --i) {
2887 ir_node *pred = get_Block_cfgpred(block, i);
2889 if (! is_Bad(pred)) {
2890 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
/* flag each predecessor block only once */
2892 if (pred_bl->flagged == 0) {
2893 pred_bl->flagged = 3;
2895 if (pred_bl->type.tv == tarval_reachable) {
2897 * We will remove an edge from block to its pred.
2898 * This might leave the pred block as an endless loop
2900 if (! is_backedge(block, i))
2901 keep_alive(pred_bl->node);
2907 /* the EndBlock is always reachable even if the analysis
2908 finds out the opposite :-) */
2909 if (block != get_irg_end_block(current_ir_graph)) {
2910 /* mark dead blocks */
2911 set_Block_dead(block);
2912 DB((dbg, LEVEL_1, "Removing dead %+F\n", block));
2914 /* the endblock is unreachable */
2915 set_irn_in(block, 0, NULL);
/* Case 2: exactly one predecessor — try to fuse with its block. */
2921 /* only one predecessor combine */
2922 ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));
2924 if (can_exchange(pred)) {
2925 ir_node *new_block = get_nodes_block(pred);
2926 DB((dbg, LEVEL_1, "Fuse %+F with %+F\n", block, new_block));
2927 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
2928 exchange(block, new_block);
/* keep the node_t mapping valid after the exchange */
2929 node->node = new_block;
/* Case 3: multiple predecessors — collect the reachable ones in in_X. */
2935 NEW_ARR_A(ir_node *, in_X, n);
2937 for (i = 0; i < n; ++i) {
2938 ir_node *pred = get_Block_cfgpred(block, i);
2939 node_t *node = get_irn_node(pred);
2941 if (node->type.tv == tarval_reachable) {
2944 DB((dbg, LEVEL_1, "Removing dead input %d from %+F (%+F)\n", i, block, pred));
2945 if (! is_Bad(pred)) {
2946 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
2948 if (pred_bl->flagged == 0) {
2949 pred_bl->flagged = 3;
2951 if (pred_bl->type.tv == tarval_reachable) {
2953 * We will remove an edge from block to its pred.
2954 * This might leave the pred block as an endless loop
2956 if (! is_backedge(block, i))
2957 keep_alive(pred_bl->node);
/* Shorten all Phis of this block to the surviving inputs. */
2966 NEW_ARR_A(ir_node *, ins, n);
2967 for (phi = get_Block_phis(block); phi != NULL; phi = next) {
2968 node_t *node = get_irn_node(phi);
2970 next = get_Phi_next(phi);
2971 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
2972 /* this Phi is replaced by a constant */
2973 tarval *tv = node->type.tv;
2974 ir_node *c = new_r_Const(current_ir_graph, block, get_tarval_mode(tv), tv);
2976 set_irn_node(c, node);
2978 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
2979 DBG_OPT_COMBO(phi, c, FS_OPT_COMBO_CONST);
/* keep only Phi inputs coming from reachable control-flow preds */
2984 for (i = 0; i < n; ++i) {
2985 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
2987 if (pred->type.tv == tarval_reachable) {
2988 ins[j++] = get_Phi_pred(phi, i);
2992 /* this Phi is replaced by a single predecessor */
2993 ir_node *s = ins[0];
2994 node_t *phi_node = get_irn_node(phi);
2997 DB((dbg, LEVEL_1, "%+F is replaced by %+F because of cf change\n", phi, s));
2998 DBG_OPT_COMBO(phi, s, FS_OPT_COMBO_FOLLOWER);
3003 set_irn_in(phi, j, ins);
/* One live predecessor left after filtering: fuse block if possible. */
3010 /* this Block has only one live predecessor */
3011 ir_node *pred = skip_Proj(in_X[0]);
3013 if (can_exchange(pred)) {
3014 ir_node *new_block = get_nodes_block(pred);
3015 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
3016 exchange(block, new_block);
3017 node->node = new_block;
/* Otherwise shrink the block's in-array to the live predecessors. */
3021 set_irn_in(block, k, in_X);
3027 * Exchange a node by its leader.
3028 * Beware: in rare cases the mode might be wrong here, for instance
3029 * AddP(x, NULL) is a follower of x, but with different mode.
3032 static void exchange_leader(ir_node *irn, ir_node *leader) {
/* Replaces `irn` by its congruence-class leader. If the modes differ
 * (e.g. AddP(x, NULL) is a follower of x but has a different mode),
 * a Conv to irn's mode is inserted in the leader's block first. */
3033 ir_mode *mode = get_irn_mode(irn);
3034 if (mode != get_irn_mode(leader)) {
3035 /* The conv is a no-op, so we are free to place it in
3036 * either the block of the leader OR in irn's block.
3037 * Probably placing it into the leader's block might reduce
3038 * the number of Conv due to CSE. */
3039 ir_node *block = get_nodes_block(leader);
3040 dbg_info *dbg = get_irn_dbg_info(irn);
3042 leader = new_rd_Conv(dbg, current_ir_graph, block, leader, mode);
3044 exchange(irn, leader);
3048 * Post-Walker, apply the analysis results;
3050 static void apply_result(ir_node *irn, void *ctx) {
/* Node post-walker applying the analysis results to data nodes:
 * redirects unreachable nodes to Bad, turns decided Conds into Jmps,
 * replaces constant-typed nodes by Const/SymConst, and exchanges the
 * remaining nodes by their congruence-class leader.
 * NOTE(review): some closing braces and else-branches fall into gaps
 * of this view; comments describe only the visible lines. */
3051 environment_t *env = ctx;
3052 node_t *node = get_irn_node(irn);
3054 if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
3055 /* blocks already handled, do not touch the End node */
3057 node_t *block = get_irn_node(get_nodes_block(irn));
/* Node lives in an unreachable block: map it onto Bad. */
3059 if (block->type.tv == tarval_unreachable) {
3060 ir_node *bad = get_irg_bad(current_ir_graph);
3062 /* here, bad might already have a node, but this can be safely ignored
3063 as long as bad has at least ONE valid node */
3064 set_irn_node(bad, node);
3066 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
/* Node itself is proven unreachable by the analysis. */
3070 else if (node->type.tv == tarval_unreachable) {
3071 /* don't kick away Unknown */
3072 if (! is_Unknown(irn)) {
3073 ir_node *bad = get_irg_bad(current_ir_graph);
3075 /* see comment above */
3076 set_irn_node(bad, node);
3078 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
/* Control-flow Proj: if the Cond's selector is a known constant, the
 * reachable Proj can be replaced by an unconditional Jmp. */
3083 else if (get_irn_mode(irn) == mode_X) {
3086 ir_node *cond = get_Proj_pred(irn);
3088 if (is_Cond(cond)) {
3089 node_t *sel = get_irn_node(get_Cond_selector(cond));
3091 if (is_tarval(sel->type.tv) && tarval_is_constant(sel->type.tv)) {
3092 /* Cond selector is a constant and the Proj is reachable, make a Jmp */
3093 ir_node *jmp = new_r_Jmp(current_ir_graph, block->node);
3094 set_irn_node(jmp, node);
3096 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
3097 DBG_OPT_COMBO(irn, jmp, FS_OPT_COMBO_CF);
3104 /* normal data node */
3105 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
3106 tarval *tv = node->type.tv;
3109 * Beware: never replace mode_T nodes by constants. Currently we must mark
3110 * mode_T nodes with constants, but do NOT replace them.
3112 if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
3113 /* can be replaced by a constant */
3114 ir_node *c = new_r_Const(current_ir_graph, block->node, get_tarval_mode(tv), tv);
3115 set_irn_node(c, node);
3117 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
3118 DBG_OPT_COMBO(irn, c, FS_OPT_COMBO_CONST);
3119 exchange_leader(irn, c);
/* Click "type" is an entity: the node computes a known address. */
3122 } else if (is_entity(node->type.sym.entity_p)) {
3123 if (! is_SymConst(irn)) {
3124 /* can be replaced by a SymConst */
3125 ir_node *symc = new_r_SymConst(current_ir_graph, block->node, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
3126 set_irn_node(symc, node);
3129 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
3130 DBG_OPT_COMBO(irn, symc, FS_OPT_COMBO_CONST);
3131 exchange_leader(irn, symc);
3134 } else if (is_Confirm(irn)) {
3135 /* Confirms are always follower, but do not kill them here */
/* Default: replace the node by its congruence-class leader. */
3137 ir_node *leader = get_leader(node);
3139 if (leader != irn) {
3140 DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
3141 if (node->is_follower)
3142 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_FOLLOWER);
3144 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_CONGRUENT);
3145 exchange_leader(irn, leader);
3151 } /* apply_result */
3154 * Fix the keep-alives by deleting unreachable ones.
3156 static void apply_end(ir_node *end, environment_t *env) {
/* Rebuilds the End node's keep-alive list, dropping keep-alives that are
 * unreachable or Bad. Non-block keep-alives are judged by the
 * reachability of their containing block. */
3157 int i, j, n = get_End_n_keepalives(end);
3161 NEW_ARR_A(ir_node *, in, n);
3163 /* fix the keep alive */
3164 for (i = j = 0; i < n; i++) {
3165 ir_node *ka = get_End_keepalive(end, i);
3166 node_t *node = get_irn_node(ka);
/* for non-block keep-alives, use the containing block's type */
3169 node = get_irn_node(get_nodes_block(ka));
/* keep only reachable, non-Bad entries (copy into in[] happens in a
 * line not visible here) */
3171 if (node->type.tv != tarval_unreachable && !is_Bad(ka))
3175 set_End_keepalives(end, j, in);
/* Helper macro: bind the opcode-specific compute function to its ir_op. */
3180 #define SET(code) op_##code->ops.generic = (op_func)compute_##code
3183 * sets the generic functions to compute.
3185 static void set_compute_functions(void) {
/* Installs the type-computation callbacks: default_compute for every
 * opcode first, then opcode-specific overrides via SET() (the SET()
 * invocations lie in lines not visible here). */
3188 /* set the default compute function */
3189 for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
3190 ir_op *op = get_irp_opcode(i);
3191 op->ops.generic = (op_func)default_compute;
3194 /* set specific functions */
3214 } /* set_compute_functions */
3216 static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
/* VCG dumper hook: annotates each dumped node with its partition number
 * and its lattice type (debug builds only). */
3217 #ifdef DEBUG_libfirm
3218 ir_node *irn = local != NULL ? local : n;
3219 node_t *node = get_irn_node(irn);
3221 ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
3226 void combo(ir_graph *irg) {
3228 ir_node *initial_bl;
3230 ir_graph *rem = current_ir_graph;
3232 current_ir_graph = irg;
3234 /* register a debug mask */
3235 FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
3237 DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
3239 obstack_init(&env.obst);
3240 env.worklist = NULL;
3244 #ifdef DEBUG_libfirm
3245 env.dbg_list = NULL;
3247 env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
3248 env.type2id_map = pmap_create();
3249 env.end_idx = get_opt_global_cse() ? 0 : -1;
3250 env.lambda_input = 0;
3251 env.nonstd_cond = 0;
3252 env.commutative = 1;
3255 assure_irg_outs(irg);
3256 assure_cf_loop(irg);
3258 /* we have our own value_of function */
3259 set_value_of_func(get_node_tarval);
3261 set_compute_functions();
3262 DEBUG_ONLY(part_nr = 0);
3264 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
3266 /* create the initial partition and place it on the work list */
3267 env.initial = new_partition(&env);
3268 add_to_worklist(env.initial, &env);
3269 irg_walk_graph(irg, init_block_phis, create_initial_partitions, &env);
3272 tarval_UNKNOWN = env.nonstd_cond ? tarval_bad : tarval_top;
3274 tarval_UNKNOWN = tarval_bad;
3277 /* all nodes on the initial partition have type Top */
3278 env.initial->type_is_T_or_C = 1;
3280 /* Place the START Node's partition on cprop.
3281 Place the START Node on its local worklist. */
3282 initial_bl = get_irg_start_block(irg);
3283 start = get_irn_node(initial_bl);
3284 add_to_cprop(start, &env);
3288 if (env.worklist != NULL)
3290 } while (env.cprop != NULL || env.worklist != NULL);
3292 dump_all_partitions(&env);
3293 check_all_partitions(&env);
3296 set_dump_node_vcgattr_hook(dump_partition_hook);
3297 dump_ir_block_graph(irg, "-partition");
3298 set_dump_node_vcgattr_hook(NULL);
3300 (void)dump_partition_hook;
3303 /* apply the result */
3304 irg_block_walk_graph(irg, NULL, apply_cf, &env);
3305 irg_walk_graph(irg, NULL, apply_result, &env);
3306 apply_end(get_irg_end(irg), &env);
3309 /* control flow might changed */
3310 set_irg_outs_inconsistent(irg);
3311 set_irg_extblk_inconsistent(irg);
3312 set_irg_doms_inconsistent(irg);
3313 set_irg_loopinfo_inconsistent(irg);
3316 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
3318 pmap_destroy(env.type2id_map);
3319 del_set(env.opcode2id_map);
3320 obstack_free(&env.obst, NULL);
3322 /* restore value_of() default behavior */
3323 set_value_of_func(NULL);
3324 current_ir_graph = rem;