2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Cliff Click's Combined Analysis/Optimization
23 * @author Michael Beck
26 * This is a slightly enhanced version of Cliff Clicks combo algorithm
27 * - support for commutative nodes is added, Add(a,b) and Add(b,a) ARE congruent
28 * - supports all Firm direct (by a data edge) identities except Mux
29 * (Mux can be a 2-input or 1-input identity, only 2-input is implemented yet)
30 * - supports Confirm nodes (handle them like Copies but do NOT remove them)
 31 * - let Cmp nodes calculate Top like all other data nodes: this would let
 32 * Mux nodes calculate Unknown instead of taking the true result
 33 * - let Cond(Top) always select FALSE/default: This is tricky. Nodes are only re-evaluated
 34 * IFF the predecessor changed its type. Because nodes are initialized with Top
 35 * this never happens, let all Proj(Cond) be unreachable.
36 * We avoid this condition by the same way we work around Phi: whenever a Block
37 * node is placed on the list, place its Cond nodes (and because they are Tuple
38 * all its Proj-nodes either on the cprop list)
39 * Especially, this changes the meaning of Click's example:
 54 * using Click's version, while it is silent with ours.
55 * - support for global congruences is implemented but not tested yet
57 * Note further that we use the terminology from Click's work here, which is different
58 * in some cases from Firm terminology. Especially, Click's type is a
59 * Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
65 #include "iroptimize.h"
73 #include "irgraph_t.h"
80 #include "iropt_dbg.h"
90 /* define this to check that all type translations are monotone */
91 #define VERIFY_MONOTONE
93 /* define this to check the consistency of partitions */
94 #define CHECK_PARTITIONS
96 typedef struct node_t node_t;
97 typedef struct partition_t partition_t;
98 typedef struct opcode_key_t opcode_key_t;
99 typedef struct listmap_entry_t listmap_entry_t;
101 /** The type of the compute function. */
102 typedef void (*compute_func)(node_t *node);
107 struct opcode_key_t {
108 ir_opcode code; /**< The Firm opcode. */
109 ir_mode *mode; /**< The mode of all nodes in the partition. */
110 int arity; /**< The arity of this opcode (needed for Phi etc. */
112 long proj; /**< For Proj nodes, its proj number */
113 ir_entity *ent; /**< For Sel Nodes, its entity */
118 * An entry in the list_map.
120 struct listmap_entry_t {
121 void *id; /**< The id. */
122 node_t *list; /**< The associated list for this id. */
123 listmap_entry_t *next; /**< Link to the next entry in the map. */
126 /** We must map id's to lists. */
127 typedef struct listmap_t {
128 set *map; /**< Map id's to listmap_entry_t's */
129 listmap_entry_t *values; /**< List of all values in the map. */
133 * A lattice element. Because we handle constants and symbolic constants different, we
134 * have to use this union.
145 ir_node *node; /**< The IR-node itself. */
146 list_head node_list; /**< Double-linked list of leader/follower entries. */
147 list_head cprop_list; /**< Double-linked partition.cprop list. */
148 partition_t *part; /**< points to the partition this node belongs to */
149 node_t *next; /**< Next node on local list (partition.touched, fallen). */
150 node_t *race_next; /**< Next node on race list. */
151 lattice_elem_t type; /**< The associated lattice element "type". */
152 int max_user_input; /**< Maximum input number of Def-Use edges. */
153 int next_edge; /**< Index of the next Def-Use edge to use. */
154 int n_followers; /**< Number of Follower in the outs set. */
155 unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
156 unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
157 unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
158 unsigned is_follower:1; /**< Set, if this node is a follower. */
159 unsigned flagged:2; /**< 2 Bits, set if this node was visited by race 1 or 2. */
163 * A partition containing congruent nodes.
166 list_head Leader; /**< The head of partition Leader node list. */
167 list_head Follower; /**< The head of partition Follower node list. */
168 list_head cprop; /**< The head of partition.cprop list. */
169 list_head cprop_X; /**< The head of partition.cprop (Cond nodes and its Projs) list. */
170 partition_t *wl_next; /**< Next entry in the work list if any. */
171 partition_t *touched_next; /**< Points to the next partition in the touched set. */
172 partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
173 partition_t *split_next; /**< Points to the next partition in the list that must be split by split_by(). */
174 node_t *touched; /**< The partition.touched set of this partition. */
175 unsigned n_leader; /**< Number of entries in this partition.Leader. */
176 unsigned n_touched; /**< Number of entries in the partition.touched. */
177 int max_user_inputs; /**< Maximum number of user inputs of all entries. */
178 unsigned on_worklist:1; /**< Set, if this partition is in the work list. */
179 unsigned on_touched:1; /**< Set, if this partition is on the touched set. */
180 unsigned on_cprop:1; /**< Set, if this partition is on the cprop list. */
181 unsigned type_is_T_or_C:1;/**< Set, if all nodes in this partition have type Top or Constant. */
183 partition_t *dbg_next; /**< Link all partitions for debugging */
184 unsigned nr; /**< A unique number for (what-)mapping, >0. */
188 typedef struct environment_t {
189 struct obstack obst; /**< obstack to allocate data structures. */
190 partition_t *worklist; /**< The work list. */
191 partition_t *cprop; /**< The constant propagation list. */
192 partition_t *touched; /**< the touched set. */
193 partition_t *initial; /**< The initial partition. */
194 set *opcode2id_map; /**< The opcodeMode->id map. */
195 pmap *type2id_map; /**< The type->id map. */
196 int end_idx; /**< -1 for local and 0 for global congruences. */
197 int lambda_input; /**< Captured argument for lambda_partition(). */
198 unsigned modified:1; /**< Set, if the graph was modified. */
199 unsigned unopt_cf:1; /**< If set, control flow is not optimized due to Unknown. */
200 /* options driving the optimization */
201 unsigned commutative:1; /**< Set, if commutation nodes should be handled specially. */
202 unsigned opt_unknown:1; /**< Set, if non-strict programs should be optimized. */
204 partition_t *dbg_list; /**< List of all partitions. */
208 /** Type of the what function. */
209 typedef void *(*what_func)(const node_t *node, environment_t *env);
211 #define get_irn_node(follower) ((node_t *)get_irn_link(follower))
212 #define set_irn_node(follower, node) set_irn_link(follower, node)
214 /* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
215 #undef tarval_unreachable
216 #define tarval_unreachable tarval_top
219 /** The debug module handle. */
220 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
222 /** The what reason. */
223 DEBUG_ONLY(static const char *what_reason;)
225 /** Next partition number. */
226 DEBUG_ONLY(static unsigned part_nr = 0);
228 /** The tarval returned by Unknown nodes: set to either tarval_bad OR tarval_top. */
229 static tarval *tarval_UNKNOWN;
232 static node_t *identity(node_t *node);
234 #ifdef CHECK_PARTITIONS
238 static void check_partition(const partition_t *T) {
242 list_for_each_entry(node_t, node, &T->Leader, node_list) {
243 assert(node->is_follower == 0);
244 assert(node->flagged == 0);
245 assert(node->part == T);
248 assert(n == T->n_leader);
250 list_for_each_entry(node_t, node, &T->Follower, node_list) {
251 assert(node->is_follower == 1);
252 assert(node->flagged == 0);
253 assert(node->part == T);
255 } /* check_partition */
258 * check that all leader nodes in the partition have the same opcode.
260 static void check_opcode(const partition_t *Z) {
265 list_for_each_entry(node_t, node, &Z->Leader, node_list) {
266 ir_node *irn = node->node;
269 key.code = get_irn_opcode(irn);
270 key.mode = get_irn_mode(irn);
271 key.arity = get_irn_arity(irn);
275 switch (get_irn_opcode(irn)) {
277 key.u.proj = get_Proj_proj(irn);
280 key.u.ent = get_Sel_entity(irn);
287 assert(key.code == get_irn_opcode(irn));
288 assert(key.mode == get_irn_mode(irn));
289 assert(key.arity == get_irn_arity(irn));
291 switch (get_irn_opcode(irn)) {
293 assert(key.u.proj == get_Proj_proj(irn));
296 assert(key.u.ent == get_Sel_entity(irn));
305 static void check_all_partitions(environment_t *env) {
310 for (P = env->dbg_list; P != NULL; P = P->dbg_next) {
312 if (! P->type_is_T_or_C)
314 list_for_each_entry(node_t, node, &P->Follower, node_list) {
315 node_t *leader = identity(node);
317 assert(leader != node && leader->part == node->part);
326 static void do_check_list(const node_t *list, int ofs, const partition_t *Z) {
329 #define NEXT(e) *((const node_t **)((char *)(e) + (ofs)))
330 for (e = list; e != NULL; e = NEXT(e)) {
331 assert(e->part == Z);
334 } /* ido_check_list */
337 * Check a local list.
339 static void check_list(const node_t *list, const partition_t *Z) {
340 do_check_list(list, offsetof(node_t, next), Z);
344 #define check_partition(T)
345 #define check_list(list, Z)
346 #define check_all_partitions(env)
347 #endif /* CHECK_PARTITIONS */
350 static inline lattice_elem_t get_partition_type(const partition_t *X);
353 * Dump partition to output.
355 static void dump_partition(const char *msg, const partition_t *part) {
358 lattice_elem_t type = get_partition_type(part);
360 DB((dbg, LEVEL_2, "%s part%u%s (%u, %+F) {\n ",
361 msg, part->nr, part->type_is_T_or_C ? "*" : "",
362 part->n_leader, type));
363 list_for_each_entry(node_t, node, &part->Leader, node_list) {
364 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
367 if (! list_empty(&part->Follower)) {
368 DB((dbg, LEVEL_2, "\n---\n "));
370 list_for_each_entry(node_t, node, &part->Follower, node_list) {
371 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
375 DB((dbg, LEVEL_2, "\n}\n"));
376 } /* dump_partition */
381 static void do_dump_list(const char *msg, const node_t *node, int ofs) {
385 #define GET_LINK(p, ofs) *((const node_t **)((char *)(p) + (ofs)))
387 DB((dbg, LEVEL_3, "%s = {\n ", msg));
388 for (p = node; p != NULL; p = GET_LINK(p, ofs)) {
389 DB((dbg, LEVEL_3, "%s%+F", first ? "" : ", ", p->node));
392 DB((dbg, LEVEL_3, "\n}\n"));
400 static void dump_race_list(const char *msg, const node_t *list) {
401 do_dump_list(msg, list, offsetof(node_t, race_next));
402 } /* dump_race_list */
405 * Dumps a local list.
407 static void dump_list(const char *msg, const node_t *list) {
408 do_dump_list(msg, list, offsetof(node_t, next));
412 * Dump all partitions.
414 static void dump_all_partitions(const environment_t *env) {
415 const partition_t *P;
417 DB((dbg, LEVEL_2, "All partitions\n===============\n"));
418 for (P = env->dbg_list; P != NULL; P = P->dbg_next)
419 dump_partition("", P);
420 } /* dump_all_partitions */
425 static void dump_split_list(const partition_t *list) {
426 const partition_t *p;
428 DB((dbg, LEVEL_2, "Split by %s produced = {\n", what_reason));
429 for (p = list; p != NULL; p = p->split_next)
430 DB((dbg, LEVEL_2, "part%u, ", p->nr));
431 DB((dbg, LEVEL_2, "\n}\n"));
432 } /* dump_split_list */
435 * Dump partition and type for a node.
437 static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
438 ir_node *irn = local != NULL ? local : n;
439 node_t *node = get_irn_node(irn);
441 ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
443 } /* dump_partition_hook */
446 #define dump_partition(msg, part)
447 #define dump_race_list(msg, list)
448 #define dump_list(msg, list)
449 #define dump_all_partitions(env)
450 #define dump_split_list(list)
453 #if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
455 * Verify that a type transition is monotone
457 static void verify_type(const lattice_elem_t old_type, node_t *node) {
458 if (old_type.tv == node->type.tv) {
462 if (old_type.tv == tarval_top) {
463 /* from Top down-to is always allowed */
466 if (node->type.tv == tarval_bottom || node->type.tv == tarval_reachable) {
470 panic("combo: wrong translation from %+F to %+F on node %+F", old_type, node->type, node->node);
474 #define verify_type(old_type, node)
478 * Compare two pointer values of a listmap.
480 static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
481 const listmap_entry_t *e1 = elt;
482 const listmap_entry_t *e2 = key;
485 return e1->id != e2->id;
486 } /* listmap_cmp_ptr */
489 * Initializes a listmap.
491 * @param map the listmap
493 static void listmap_init(listmap_t *map) {
494 map->map = new_set(listmap_cmp_ptr, 16);
499 * Terminates a listmap.
501 * @param map the listmap
503 static void listmap_term(listmap_t *map) {
508 * Return the associated listmap entry for a given id.
510 * @param map the listmap
511 * @param id the id to search for
513 * @return the associated listmap entry for the given id
515 static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
516 listmap_entry_t key, *entry;
521 entry = set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
523 if (entry->list == NULL) {
524 /* a new entry, put into the list */
525 entry->next = map->values;
532 * Calculate the hash value for an opcode map entry.
534 * @param entry an opcode map entry
536 * @return a hash value for the given opcode map entry
538 static unsigned opcode_hash(const opcode_key_t *entry) {
539 return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent) + entry->arity;
543 * Compare two entries in the opcode map.
545 static int cmp_opcode(const void *elt, const void *key, size_t size) {
546 const opcode_key_t *o1 = elt;
547 const opcode_key_t *o2 = key;
550 return o1->code != o2->code || o1->mode != o2->mode ||
551 o1->arity != o2->arity ||
552 o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent;
556 * Compare two Def-Use edges for input position.
558 static int cmp_def_use_edge(const void *a, const void *b) {
559 const ir_def_use_edge *ea = a;
560 const ir_def_use_edge *eb = b;
562 /* no overrun, because range is [-1, MAXINT] */
563 return ea->pos - eb->pos;
564 } /* cmp_def_use_edge */
567 * We need the Def-Use edges sorted.
569 static void sort_irn_outs(node_t *node) {
570 ir_node *irn = node->node;
571 int n_outs = get_irn_n_outs(irn);
574 qsort(&irn->out[1], n_outs, sizeof(irn->out[0]), cmp_def_use_edge);
576 node->max_user_input = irn->out[n_outs].pos;
577 } /* sort_irn_outs */
580 * Return the type of a node.
582 * @param irn an IR-node
584 * @return the associated type of this node
586 static inline lattice_elem_t get_node_type(const ir_node *irn) {
587 return get_irn_node(irn)->type;
588 } /* get_node_type */
591 * Return the tarval of a node.
593 * @param irn an IR-node
595 * @return the associated type of this node
597 static inline tarval *get_node_tarval(const ir_node *irn) {
598 lattice_elem_t type = get_node_type(irn);
600 if (is_tarval(type.tv))
602 return tarval_bottom;
603 } /* get_node_type */
606 * Add a partition to the worklist.
608 static inline void add_to_worklist(partition_t *X, environment_t *env) {
609 assert(X->on_worklist == 0);
610 DB((dbg, LEVEL_2, "Adding part%d to worklist\n", X->nr));
611 X->wl_next = env->worklist;
614 } /* add_to_worklist */
617 * Create a new empty partition.
619 * @param env the environment
621 * @return a newly allocated partition
623 static inline partition_t *new_partition(environment_t *env) {
624 partition_t *part = obstack_alloc(&env->obst, sizeof(*part));
626 INIT_LIST_HEAD(&part->Leader);
627 INIT_LIST_HEAD(&part->Follower);
628 INIT_LIST_HEAD(&part->cprop);
629 INIT_LIST_HEAD(&part->cprop_X);
630 part->wl_next = NULL;
631 part->touched_next = NULL;
632 part->cprop_next = NULL;
633 part->split_next = NULL;
634 part->touched = NULL;
637 part->max_user_inputs = 0;
638 part->on_worklist = 0;
639 part->on_touched = 0;
641 part->type_is_T_or_C = 0;
643 part->dbg_next = env->dbg_list;
644 env->dbg_list = part;
645 part->nr = part_nr++;
649 } /* new_partition */
652 * Get the first node from a partition.
654 static inline node_t *get_first_node(const partition_t *X) {
655 return list_entry(X->Leader.next, node_t, node_list);
656 } /* get_first_node */
659 * Return the type of a partition (assuming partition is non-empty and
660 * all elements have the same type).
662 * @param X a partition
664 * @return the type of the first element of the partition
666 static inline lattice_elem_t get_partition_type(const partition_t *X) {
667 const node_t *first = get_first_node(X);
669 } /* get_partition_type */
672 * Creates a partition node for the given IR-node and place it
673 * into the given partition.
675 * @param irn an IR-node
676 * @param part a partition to place the node in
677 * @param env the environment
679 * @return the created node
681 static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env) {
682 /* create a partition node and place it in the partition */
683 node_t *node = obstack_alloc(&env->obst, sizeof(*node));
685 INIT_LIST_HEAD(&node->node_list);
686 INIT_LIST_HEAD(&node->cprop_list);
690 node->race_next = NULL;
691 node->type.tv = tarval_top;
692 node->max_user_input = 0;
694 node->n_followers = 0;
695 node->on_touched = 0;
698 node->is_follower = 0;
700 set_irn_node(irn, node);
702 list_add_tail(&node->node_list, &part->Leader);
706 } /* create_partition_node */
709 * Pre-Walker, initialize all Nodes' type to U or top and place
710 * all nodes into the TOP partition.
712 static void create_initial_partitions(ir_node *irn, void *ctx) {
713 environment_t *env = ctx;
714 partition_t *part = env->initial;
717 node = create_partition_node(irn, part, env);
719 if (node->max_user_input > part->max_user_inputs)
720 part->max_user_inputs = node->max_user_input;
723 set_Block_phis(irn, NULL);
725 } /* create_initial_partitions */
728 * Post-Walker, collect all Block-Phi lists, set Cond.
730 static void init_block_phis(ir_node *irn, void *ctx) {
734 add_Block_phi(get_nodes_block(irn), irn);
736 } /* init_block_phis */
739 * Add a node to the entry.partition.touched set and
740 * node->partition to the touched set if not already there.
743 * @param env the environment
745 static inline void add_to_touched(node_t *y, environment_t *env) {
746 if (y->on_touched == 0) {
747 partition_t *part = y->part;
749 y->next = part->touched;
754 if (part->on_touched == 0) {
755 part->touched_next = env->touched;
757 part->on_touched = 1;
760 check_list(part->touched, part);
762 } /* add_to_touched */
765 * Place a node on the cprop list.
768 * @param env the environment
770 static void add_to_cprop(node_t *y, environment_t *env) {
773 /* Add y to y.partition.cprop. */
774 if (y->on_cprop == 0) {
775 partition_t *Y = y->part;
776 ir_node *irn = y->node;
778 /* place Conds and all its Projs on the cprop_X list */
779 if (is_Cond(skip_Proj(irn)))
780 list_add_tail(&y->cprop_list, &Y->cprop_X);
782 list_add_tail(&y->cprop_list, &Y->cprop);
785 DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));
787 /* place its partition on the cprop list */
788 if (Y->on_cprop == 0) {
789 Y->cprop_next = env->cprop;
795 if (get_irn_mode(irn) == mode_T) {
796 /* mode_T nodes always produce tarval_bottom, so we must explicitly
797 add it's Proj's to get constant evaluation to work */
800 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
801 node_t *proj = get_irn_node(get_irn_out(irn, i));
803 add_to_cprop(proj, env);
805 } else if (is_Block(irn)) {
806 /* Due to the way we handle Phi's, we must place all Phis of a block on the list
807 * if someone placed the block. The Block is only placed if the reachability
808 * changes, and this must be re-evaluated in compute_Phi(). */
810 for (phi = get_Block_phis(irn); phi != NULL; phi = get_Phi_next(phi)) {
811 node_t *p = get_irn_node(phi);
812 add_to_cprop(p, env);
818 * Update the worklist: If Z is on worklist then add Z' to worklist.
819 * Else add the smaller of Z and Z' to worklist.
821 * @param Z the Z partition
822 * @param Z_prime the Z' partition, a previous part of Z
823 * @param env the environment
825 static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env) {
826 if (Z->on_worklist || Z_prime->n_leader < Z->n_leader) {
827 add_to_worklist(Z_prime, env);
829 add_to_worklist(Z, env);
831 } /* update_worklist */
834 * Make all inputs to x no longer be F.def_use edges.
838 static void move_edges_to_leader(node_t *x) {
839 ir_node *irn = x->node;
842 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
843 node_t *pred = get_irn_node(get_irn_n(irn, i));
848 n = get_irn_n_outs(p);
849 for (j = 1; j <= pred->n_followers; ++j) {
850 if (p->out[j].pos == i && p->out[j].use == irn) {
851 /* found a follower edge to x, move it to the Leader */
852 ir_def_use_edge edge = p->out[j];
854 /* remove this edge from the Follower set */
855 p->out[j] = p->out[pred->n_followers];
858 /* sort it into the leader set */
859 for (k = pred->n_followers + 2; k <= n; ++k) {
860 if (p->out[k].pos >= edge.pos)
862 p->out[k - 1] = p->out[k];
864 /* place the new edge here */
865 p->out[k - 1] = edge;
867 /* edge found and moved */
872 } /* move_edges_to_leader */
875 * Split a partition that has NO followers by a local list.
877 * @param Z partition to split
878 * @param g a (non-empty) node list
879 * @param env the environment
881 * @return a new partition containing the nodes of g
883 static partition_t *split_no_followers(partition_t *Z, node_t *g, environment_t *env) {
884 partition_t *Z_prime;
889 dump_partition("Splitting ", Z);
890 dump_list("by list ", g);
894 /* Remove g from Z. */
895 for (node = g; node != NULL; node = node->next) {
896 assert(node->part == Z);
897 list_del(&node->node_list);
900 assert(n < Z->n_leader);
903 /* Move g to a new partition, Z'. */
904 Z_prime = new_partition(env);
906 for (node = g; node != NULL; node = node->next) {
907 list_add_tail(&node->node_list, &Z_prime->Leader);
908 node->part = Z_prime;
909 if (node->max_user_input > max_input)
910 max_input = node->max_user_input;
912 Z_prime->max_user_inputs = max_input;
913 Z_prime->n_leader = n;
916 check_partition(Z_prime);
918 /* for now, copy the type info tag, it will be adjusted in split_by(). */
919 Z_prime->type_is_T_or_C = Z->type_is_T_or_C;
921 update_worklist(Z, Z_prime, env);
923 dump_partition("Now ", Z);
924 dump_partition("Created new ", Z_prime);
926 } /* split_no_followers */
929 * Make the Follower -> Leader transition for a node.
933 static void follower_to_leader(node_t *n) {
934 assert(n->is_follower == 1);
936 DB((dbg, LEVEL_2, "%+F make the follower -> leader transition\n", n->node));
938 move_edges_to_leader(n);
939 list_del(&n->node_list);
940 list_add_tail(&n->node_list, &n->part->Leader);
942 } /* follower_to_leader */
945 * The environment for one race step.
947 typedef struct step_env {
948 node_t *initial; /**< The initial node list. */
949 node_t *unwalked; /**< The unwalked node list. */
950 node_t *walked; /**< The walked node list. */
951 int index; /**< Next index of Follower use_def edge. */
952 unsigned side; /**< side number. */
956 * Return non-zero, if a input is a real follower
958 * @param irn the node to check
959 * @param input number of the input
961 static int is_real_follower(const ir_node *irn, int input) {
964 switch (get_irn_opcode(irn)) {
967 /* ignore the Confirm bound input */
973 /* ignore the Mux sel input */
978 /* dead inputs are not follower edges */
979 ir_node *block = get_nodes_block(irn);
980 node_t *pred = get_irn_node(get_Block_cfgpred(block, input));
982 if (pred->type.tv == tarval_unreachable)
992 /* only a Sub x,0 / Shift x,0 might be a follower */
999 pred = get_irn_node(get_irn_n(irn, input));
1000 if (is_tarval(pred->type.tv) && tarval_is_null(pred->type.tv))
1004 pred = get_irn_node(get_irn_n(irn, input));
1005 if (is_tarval(pred->type.tv) && tarval_is_one(pred->type.tv))
1009 pred = get_irn_node(get_irn_n(irn, input));
1010 if (is_tarval(pred->type.tv) && tarval_is_all_one(pred->type.tv))
1015 /* all inputs are followers */
1018 assert(!"opcode not implemented yet");
1022 } /* is_real_follower */
1025 * Do one step in the race.
1027 static int step(step_env *env) {
1030 if (env->initial != NULL) {
1031 /* Move node from initial to unwalked */
1033 env->initial = n->race_next;
1035 n->race_next = env->unwalked;
1041 while (env->unwalked != NULL) {
1042 /* let n be the first node in unwalked */
1044 while (env->index < n->n_followers) {
1045 const ir_def_use_edge *edge = &n->node->out[1 + env->index];
1047 /* let m be n.F.def_use[index] */
1048 node_t *m = get_irn_node(edge->use);
1050 assert(m->is_follower);
1052 * Some inputs, like the get_Confirm_bound are NOT
1053 * real followers, sort them out.
1055 if (! is_real_follower(m->node, edge->pos)) {
1061 /* only followers from our partition */
1062 if (m->part != n->part)
1065 if ((m->flagged & env->side) == 0) {
1066 m->flagged |= env->side;
1068 if (m->flagged != 3) {
1069 /* visited the first time */
1070 /* add m to unwalked not as first node (we might still need to
1071 check for more follower node */
1072 m->race_next = n->race_next;
1076 /* else already visited by the other side and on the other list */
1079 /* move n to walked */
1080 env->unwalked = n->race_next;
1081 n->race_next = env->walked;
1089 * Clear the flags from a list and check for
1090 * nodes that where touched from both sides.
1092 * @param list the list
1094 static int clear_flags(node_t *list) {
1098 for (n = list; n != NULL; n = n->race_next) {
1099 if (n->flagged == 3) {
1100 /* we reach a follower from both sides, this will split congruent
1101 * inputs and make it a leader. */
1102 follower_to_leader(n);
1111 * Split a partition by a local list using the race.
1113 * @param pX pointer to the partition to split, might be changed!
1114 * @param gg a (non-empty) node list
1115 * @param env the environment
1117 * @return a new partition containing the nodes of gg
1119 static partition_t *split(partition_t **pX, node_t *gg, environment_t *env) {
1120 partition_t *X = *pX;
1121 partition_t *X_prime;
1124 node_t *g, *h, *node, *t;
1125 int max_input, transitions, winner, shf;
1127 DEBUG_ONLY(static int run = 0;)
1129 DB((dbg, LEVEL_2, "Run %d ", run++));
1130 if (list_empty(&X->Follower)) {
1131 /* if the partition has NO follower, we can use the fast
1132 splitting algorithm. */
1133 return split_no_followers(X, gg, env);
1135 /* else do the race */
1137 dump_partition("Splitting ", X);
1138 dump_list("by list ", gg);
1140 INIT_LIST_HEAD(&tmp);
1142 /* Remove gg from X.Leader and put into g */
1144 for (node = gg; node != NULL; node = node->next) {
1145 assert(node->part == X);
1146 assert(node->is_follower == 0);
1148 list_del(&node->node_list);
1149 list_add_tail(&node->node_list, &tmp);
1150 node->race_next = g;
1155 list_for_each_entry(node_t, node, &X->Leader, node_list) {
1156 node->race_next = h;
1159 /* restore X.Leader */
1160 list_splice(&tmp, &X->Leader);
1162 senv[0].initial = g;
1163 senv[0].unwalked = NULL;
1164 senv[0].walked = NULL;
1168 senv[1].initial = h;
1169 senv[1].unwalked = NULL;
1170 senv[1].walked = NULL;
1175 * Some informations on the race that are not stated clearly in Click's
1177 * 1) A follower stays on the side that reach him first.
1178 * 2) If the other side reches a follower, if will be converted to
1179 * a leader. /This must be done after the race is over, else the
1180 * edges we are iterating on are renumbered./
1181 * 3) /New leader might end up on both sides./
1182 * 4) /If one side ends up with new Leaders, we must ensure that
1183 * they can split out by opcode, hence we have to put _every_
1184 * partition with new Leader nodes on the cprop list, as
1185 * opcode splitting is done by split_by() at the end of
1186 * constant propagation./
1189 if (step(&senv[0])) {
1193 if (step(&senv[1])) {
1198 assert(senv[winner].initial == NULL);
1199 assert(senv[winner].unwalked == NULL);
1201 /* clear flags from walked/unwalked */
1203 transitions = clear_flags(senv[0].unwalked) << shf;
1204 transitions |= clear_flags(senv[0].walked) << shf;
1206 transitions |= clear_flags(senv[1].unwalked) << shf;
1207 transitions |= clear_flags(senv[1].walked) << shf;
1209 dump_race_list("winner ", senv[winner].walked);
1211 /* Move walked_{winner} to a new partition, X'. */
1212 X_prime = new_partition(env);
1215 for (node = senv[winner].walked; node != NULL; node = node->race_next) {
1216 list_del(&node->node_list);
1217 node->part = X_prime;
1218 if (node->is_follower) {
1219 list_add_tail(&node->node_list, &X_prime->Follower);
1221 list_add_tail(&node->node_list, &X_prime->Leader);
1224 if (node->max_user_input > max_input)
1225 max_input = node->max_user_input;
1227 X_prime->n_leader = n;
1228 X_prime->max_user_inputs = max_input;
1229 X->n_leader -= X_prime->n_leader;
1231 /* for now, copy the type info tag, it will be adjusted in split_by(). */
1232 X_prime->type_is_T_or_C = X->type_is_T_or_C;
1235 * Even if a follower was not checked by both sides, it might have
1236 * loose its congruence, so we need to check this case for all follower.
1238 list_for_each_entry_safe(node_t, node, t, &X_prime->Follower, node_list) {
1239 if (identity(node) == node) {
1240 follower_to_leader(node);
1246 check_partition(X_prime);
1248 /* X' is the smaller part */
1249 add_to_worklist(X_prime, env);
1252 * If there where follower to leader transitions, ensure that the nodes
1253 * can be split out if necessary.
1255 if (transitions & 1) {
1256 /* place winner partition on the cprop list */
1257 if (X_prime->on_cprop == 0) {
1258 X_prime->cprop_next = env->cprop;
1259 env->cprop = X_prime;
1260 X_prime->on_cprop = 1;
1263 if (transitions & 2) {
1264 /* place other partition on the cprop list */
1265 if (X->on_cprop == 0) {
1266 X->cprop_next = env->cprop;
1272 dump_partition("Now ", X);
1273 dump_partition("Created new ", X_prime);
1275 /* we have to ensure that the partition containing g is returned */
1285 * Returns non-zero if the i'th input of a Phi node is live.
1287 * @param phi a Phi-node
1288 * @param i an input number
1290 * @return non-zero if the i'th input of the given Phi node is live
1292 static int is_live_input(ir_node *phi, int i) {
1294 ir_node *block = get_nodes_block(phi);
1295 ir_node *pred = get_Block_cfgpred(block, i);
1296 lattice_elem_t type = get_node_type(pred);
1298 return type.tv != tarval_unreachable;
1300 /* else it's the control input, always live */
1302 } /* is_live_input */
1305 * Return non-zero if a type is a constant.
1307 static int is_constant_type(lattice_elem_t type) {
1308 if (type.tv != tarval_bottom && type.tv != tarval_top)
1311 } /* is_constant_type */
1314 * Check whether a type is neither Top or a constant.
1315 * Note: U is handled like Top here, R is a constant.
1317 * @param type the type to check
1319 static int type_is_neither_top_nor_const(const lattice_elem_t type) {
1320 if (is_tarval(type.tv)) {
1321 if (type.tv == tarval_top)
1323 if (tarval_is_constant(type.tv))
1330 } /* type_is_neither_top_nor_const */
1333 * Collect nodes to the touched list.
1335 * @param list the list which contains the nodes that must be evaluated
1336 * @param idx the index of the def_use edge to evaluate
1337 * @param env the environment
1339 static void collect_touched(list_head *list, int idx, environment_t *env) {
1341 int end_idx = env->end_idx;
1343 list_for_each_entry(node_t, x, list, node_list) {
1347 /* leader edges start AFTER follower edges */
1348 x->next_edge = x->n_followers + 1;
1350 num_edges = get_irn_n_outs(x->node);
1352 /* for all edges in x.L.def_use_{idx} */
1353 while (x->next_edge <= num_edges) {
1354 const ir_def_use_edge *edge = &x->node->out[x->next_edge];
1357 /* check if we have necessary edges */
1358 if (edge->pos > idx)
1365 /* only non-commutative nodes */
1366 if (env->commutative &&
1367 (idx == 0 || idx == 1) && is_op_commutative(get_irn_op(succ)))
1370 /* ignore the "control input" for non-pinned nodes
1371 if we are running in GCSE mode */
1372 if (idx < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
1375 y = get_irn_node(succ);
1376 assert(get_irn_n(succ, idx) == x->node);
1378 /* ignore block edges touching followers */
1379 if (idx == -1 && y->is_follower)
1382 if (is_constant_type(y->type)) {
1383 ir_opcode code = get_irn_opcode(succ);
1384 if (code == iro_Sub || code == iro_Cmp)
1385 add_to_cprop(y, env);
1388 /* Partitions of constants should not be split simply because their Nodes have unequal
1389 functions or incongruent inputs. */
1390 if (type_is_neither_top_nor_const(y->type) &&
1391 (! is_Phi(y->node) || is_live_input(y->node, idx))) {
1392 add_to_touched(y, env);
1396 } /* collect_touched */
1399 * Collect commutative nodes to the touched list.
1401 * @param X the partition of the list
1402 * @param list the list which contains the nodes that must be evaluated
1403 * @param env the environment
1405 static void collect_commutative_touched(partition_t *X, list_head *list, environment_t *env) {
1410 list_for_each_entry(node_t, x, list, node_list) {
1413 num_edges = get_irn_n_outs(x->node);
1415 x->next_edge = x->n_followers + 1;
1417 /* for all edges in x.L.def_use_{idx} */
1418 while (x->next_edge <= num_edges) {
1419 const ir_def_use_edge *edge = &x->node->out[x->next_edge];
1422 /* check if we have necessary edges */
1432 /* only commutative nodes */
1433 if (!is_op_commutative(get_irn_op(succ)))
1436 y = get_irn_node(succ);
1437 if (is_constant_type(y->type)) {
1438 ir_opcode code = get_irn_opcode(succ);
1439 if (code == iro_Eor)
1440 add_to_cprop(y, env);
1443 /* Partitions of constants should not be split simply because their Nodes have unequal
1444 functions or incongruent inputs. */
1445 if (type_is_neither_top_nor_const(y->type)) {
1446 int other_idx = edge->pos ^ 1;
1447 node_t *other = get_irn_node(get_irn_n(succ, other_idx));
1448 int equal = X == other->part;
1451 * Note: op(a, a) is NOT congruent to op(a, b).
1452 * So, either all touch nodes must have both inputs congruent,
1453 * or not. We decide this by the first occurred node.
1459 if (both_input == equal)
1460 add_to_touched(y, env);
1464 } /* collect_commutative_touched */
1467 * Split the partitions if caused by the first entry on the worklist.
1469 * @param env the environment
1471 static void cause_splits(environment_t *env) {
1472 partition_t *X, *Z, *N;
1475 /* remove the first partition from the worklist */
1477 env->worklist = X->wl_next;
1480 dump_partition("Cause_split: ", X);
1482 if (env->commutative) {
1483 /* handle commutative nodes first */
1485 /* empty the touched set: already done, just clear the list */
1486 env->touched = NULL;
1488 collect_commutative_touched(X, &X->Leader, env);
1489 collect_commutative_touched(X, &X->Follower, env);
1491 for (Z = env->touched; Z != NULL; Z = N) {
1493 node_t *touched = Z->touched;
1494 unsigned n_touched = Z->n_touched;
1496 assert(Z->touched != NULL);
1498 /* beware, split might change Z */
1499 N = Z->touched_next;
1501 /* remove it from the touched set */
1504 /* Empty local Z.touched. */
1505 for (e = touched; e != NULL; e = e->next) {
1506 assert(e->is_follower == 0);
1512 if (0 < n_touched && n_touched < Z->n_leader) {
1513 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
1514 split(&Z, touched, env);
1516 assert(n_touched <= Z->n_leader);
1520 /* combine temporary leader and follower list */
1521 for (idx = -1; idx <= X->max_user_inputs; ++idx) {
1522 /* empty the touched set: already done, just clear the list */
1523 env->touched = NULL;
1525 collect_touched(&X->Leader, idx, env);
1526 collect_touched(&X->Follower, idx, env);
1528 for (Z = env->touched; Z != NULL; Z = N) {
1530 node_t *touched = Z->touched;
1531 unsigned n_touched = Z->n_touched;
1533 assert(Z->touched != NULL);
1535 /* beware, split might change Z */
1536 N = Z->touched_next;
1538 /* remove it from the touched set */
1541 /* Empty local Z.touched. */
1542 for (e = touched; e != NULL; e = e->next) {
1543 assert(e->is_follower == 0);
1549 if (0 < n_touched && n_touched < Z->n_leader) {
1550 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
1551 split(&Z, touched, env);
1553 assert(n_touched <= Z->n_leader);
1556 } /* cause_splits */
1559 * Implements split_by_what(): Split a partition by characteristics given
1560 * by the what function.
1562 * @param X the partition to split
1563 * @param What a function returning an Id for every node of the partition X
1564 * @param P a list to store the result partitions
1565 * @param env the environment
1569 static partition_t *split_by_what(partition_t *X, what_func What,
1570 partition_t **P, environment_t *env) {
1573 listmap_entry_t *iter;
1576 /* Let map be an empty mapping from the range of What to (local) list of Nodes. */
1578 list_for_each_entry(node_t, x, &X->Leader, node_list) {
1579 void *id = What(x, env);
1580 listmap_entry_t *entry;
1583 /* input not allowed, ignore */
1586 /* Add x to map[What(x)]. */
1587 entry = listmap_find(&map, id);
1588 x->next = entry->list;
1591 /* Let P be a set of Partitions. */
1593 /* for all sets S except one in the range of map do */
1594 for (iter = map.values; iter != NULL; iter = iter->next) {
1595 if (iter->next == NULL) {
1596 /* this is the last entry, ignore */
1601 /* Add SPLIT( X, S ) to P. */
1602 DB((dbg, LEVEL_2, "Split part%d by WHAT = %s\n", X->nr, what_reason));
1603 R = split(&X, S, env);
1613 } /* split_by_what */
1615 /** lambda n.(n.type) */
1616 static void *lambda_type(const node_t *node, environment_t *env) {
1618 return node->type.tv;
1621 /** lambda n.(n.opcode) */
1622 static void *lambda_opcode(const node_t *node, environment_t *env) {
1623 opcode_key_t key, *entry;
1624 ir_node *irn = node->node;
1626 key.code = get_irn_opcode(irn);
1627 key.mode = get_irn_mode(irn);
1628 key.arity = get_irn_arity(irn);
1632 switch (get_irn_opcode(irn)) {
1634 key.u.proj = get_Proj_proj(irn);
1637 key.u.ent = get_Sel_entity(irn);
1643 entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
1645 } /* lambda_opcode */
1647 /** lambda n.(n[i].partition) */
1648 static void *lambda_partition(const node_t *node, environment_t *env) {
1649 ir_node *skipped = skip_Proj(node->node);
1652 int i = env->lambda_input;
1654 if (i >= get_irn_arity(node->node)) {
1656 * We are outside the allowed range: This can happen even
1657 * if we have split by opcode first: doing so might move Followers
1658 * to Leaders and those will have a different opcode!
1659 * Note that in this case the partition is on the cprop list and will be
1665 /* ignore the "control input" for non-pinned nodes
1666 if we are running in GCSE mode */
1667 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
1670 pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
1671 p = get_irn_node(pred);
1674 } /* lambda_partition */
1676 /** lambda n.(n[i].partition) for commutative nodes */
1677 static void *lambda_commutative_partition(const node_t *node, environment_t *env) {
1678 ir_node *irn = node->node;
1679 ir_node *skipped = skip_Proj(irn);
1680 ir_node *pred, *left, *right;
1682 partition_t *pl, *pr;
1683 int i = env->lambda_input;
1685 if (i >= get_irn_arity(node->node)) {
1687 * We are outside the allowed range: This can happen even
1688 * if we have split by opcode first: doing so might move Followers
1689 * to Leaders and those will have a different opcode!
1690 * Note that in this case the partition is on the cprop list and will be
1696 /* ignore the "control input" for non-pinned nodes
1697 if we are running in GCSE mode */
1698 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
1702 pred = get_irn_n(skipped, i);
1703 p = get_irn_node(pred);
1707 if (is_op_commutative(get_irn_op(irn))) {
1708 /* normalize partition order by returning the "smaller" on input 0,
1709 the "bigger" on input 1. */
1710 left = get_binop_left(irn);
1711 pl = get_irn_node(left)->part;
1712 right = get_binop_right(irn);
1713 pr = get_irn_node(right)->part;
1716 return pl < pr ? pl : pr;
1718 return pl > pr ? pl : pr;
1720 /* a not split out Follower */
1721 pred = get_irn_n(irn, i);
1722 p = get_irn_node(pred);
1726 } /* lambda_commutative_partition */
1729 * Returns true if a type is a constant (and NOT Top
1732 static int is_con(const lattice_elem_t type) {
1733 /* be conservative */
1734 if (is_tarval(type.tv))
1735 return tarval_is_constant(type.tv);
1736 return is_entity(type.sym.entity_p);
1740 * Implements split_by().
1742 * @param X the partition to split
1743 * @param env the environment
1745 static void split_by(partition_t *X, environment_t *env) {
1746 partition_t *I, *P = NULL;
1749 dump_partition("split_by", X);
1751 if (X->n_leader == 1) {
1752 /* we have only one leader, no need to split, just check it's type */
1753 node_t *x = get_first_node(X);
1754 X->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1758 DEBUG_ONLY(what_reason = "lambda n.(n.type)";)
1759 P = split_by_what(X, lambda_type, &P, env);
1762 /* adjust the type tags, we have split partitions by type */
1763 for (I = P; I != NULL; I = I->split_next) {
1764 node_t *x = get_first_node(I);
1765 I->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1772 if (Y->n_leader > 1) {
1773 /* we do not want split the TOP or constant partitions */
1774 if (! Y->type_is_T_or_C) {
1775 partition_t *Q = NULL;
1777 DEBUG_ONLY(what_reason = "lambda n.(n.opcode)";)
1778 Q = split_by_what(Y, lambda_opcode, &Q, env);
1785 if (Z->n_leader > 1) {
1786 const node_t *first = get_first_node(Z);
1787 int arity = get_irn_arity(first->node);
1789 what_func what = lambda_partition;
1790 DEBUG_ONLY(char buf[64];)
1792 if (env->commutative && is_op_commutative(get_irn_op(first->node)))
1793 what = lambda_commutative_partition;
1796 * BEWARE: during splitting by input 2 for instance we might
1797 * create new partitions which are different by input 1, so collect
1798 * them and split further.
1800 Z->split_next = NULL;
1803 for (input = arity - 1; input >= -1; --input) {
1805 partition_t *Z_prime = R;
1808 if (Z_prime->n_leader > 1) {
1809 env->lambda_input = input;
1810 DEBUG_ONLY(snprintf(buf, sizeof(buf), "lambda n.(n[%d].partition)", input);)
1811 DEBUG_ONLY(what_reason = buf;)
1812 S = split_by_what(Z_prime, what, &S, env);
1815 Z_prime->split_next = S;
1818 } while (R != NULL);
1823 } while (Q != NULL);
1826 } while (P != NULL);
1830 * (Re-)compute the type for a given node.
1832 * @param node the node
1834 static void default_compute(node_t *node) {
1836 ir_node *irn = node->node;
1838 /* if any of the data inputs have type top, the result is type top */
1839 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
1840 ir_node *pred = get_irn_n(irn, i);
1841 node_t *p = get_irn_node(pred);
1843 if (p->type.tv == tarval_top) {
1844 node->type.tv = tarval_top;
1849 if (get_irn_mode(node->node) == mode_X)
1850 node->type.tv = tarval_reachable;
1852 node->type.tv = computed_value(irn);
1853 } /* default_compute */
1856 * (Re-)compute the type for a Block node.
1858 * @param node the node
1860 static void compute_Block(node_t *node) {
1862 ir_node *block = node->node;
1864 if (block == get_irg_start_block(current_ir_graph)) {
1865 /* start block is always reachable */
1866 node->type.tv = tarval_reachable;
1870 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1871 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1873 if (pred->type.tv == tarval_reachable) {
1874 /* A block is reachable, if at least of predecessor is reachable. */
1875 node->type.tv = tarval_reachable;
1879 node->type.tv = tarval_top;
1880 } /* compute_Block */
1883 * (Re-)compute the type for a Bad node.
1885 * @param node the node
1887 static void compute_Bad(node_t *node) {
1888 /* Bad nodes ALWAYS compute Top */
1889 node->type.tv = tarval_top;
1893 * (Re-)compute the type for an Unknown node.
1895 * @param node the node
1897 static void compute_Unknown(node_t *node) {
1898 /* While Unknown nodes should compute Top this is dangerous:
1899 * a Top input to a Cond would lead to BOTH control flows unreachable.
1900 * While this is correct in the given semantics, it would destroy the Firm
1903 * It would be safe to compute Top IF it can be assured, that only Cmp
1904 * nodes are inputs to Conds. We check that first.
1905 * This is the way Frontends typically build Firm, but some optimizations
1906 * (cond_eval for instance) might replace them by Phib's...
1908 node->type.tv = tarval_UNKNOWN;
1909 } /* compute_Unknown */
1912 * (Re-)compute the type for a Jmp node.
1914 * @param node the node
1916 static void compute_Jmp(node_t *node) {
1917 node_t *block = get_irn_node(get_nodes_block(node->node));
1919 node->type = block->type;
1923 * (Re-)compute the type for the Return node.
1925 * @param node the node
1927 static void compute_Return(node_t *node) {
1928 /* The Return node is NOT dead if it is in a reachable block.
1929 * This is already checked in compute(). so we can return
1930 * Reachable here. */
1931 node->type.tv = tarval_reachable;
1932 } /* compute_Return */
1935 * (Re-)compute the type for the End node.
1937 * @param node the node
1939 static void compute_End(node_t *node) {
1940 /* the End node is NOT dead of course */
1941 node->type.tv = tarval_reachable;
1945 * (Re-)compute the type for a Call.
1947 * @param node the node
1949 static void compute_Call(node_t *node) {
1951 * A Call computes always bottom, even if it has Unknown
1954 node->type.tv = tarval_bottom;
1955 } /* compute_Call */
1958 * (Re-)compute the type for a SymConst node.
1960 * @param node the node
1962 static void compute_SymConst(node_t *node) {
1963 ir_node *irn = node->node;
1964 node_t *block = get_irn_node(get_nodes_block(irn));
1966 if (block->type.tv == tarval_unreachable) {
1967 node->type.tv = tarval_top;
1970 switch (get_SymConst_kind(irn)) {
1971 case symconst_addr_ent:
1972 /* case symconst_addr_name: cannot handle this yet */
1973 node->type.sym = get_SymConst_symbol(irn);
1976 node->type.tv = computed_value(irn);
1978 } /* compute_SymConst */
1981 * (Re-)compute the type for a Phi node.
1983 * @param node the node
1985 static void compute_Phi(node_t *node) {
1987 ir_node *phi = node->node;
1988 lattice_elem_t type;
1990 /* if a Phi is in a unreachable block, its type is TOP */
1991 node_t *block = get_irn_node(get_nodes_block(phi));
1993 if (block->type.tv == tarval_unreachable) {
1994 node->type.tv = tarval_top;
1998 /* Phi implements the Meet operation */
1999 type.tv = tarval_top;
2000 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
2001 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
2002 node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
2004 if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
2005 /* ignore TOP inputs: We must check here for unreachable blocks,
2006 because Firm constants live in the Start Block are NEVER Top.
2007 Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
2008 comes from a unreachable input. */
2011 if (pred->type.tv == tarval_bottom) {
2012 node->type.tv = tarval_bottom;
2014 } else if (type.tv == tarval_top) {
2015 /* first constant found */
2017 } else if (type.tv != pred->type.tv) {
2018 /* different constants or tarval_bottom */
2019 node->type.tv = tarval_bottom;
2022 /* else nothing, constants are the same */
2028 * (Re-)compute the type for an Add. Special case: one nodes is a Zero Const.
2030 * @param node the node
2032 static void compute_Add(node_t *node) {
2033 ir_node *sub = node->node;
2034 node_t *l = get_irn_node(get_Add_left(sub));
2035 node_t *r = get_irn_node(get_Add_right(sub));
2036 lattice_elem_t a = l->type;
2037 lattice_elem_t b = r->type;
2040 if (a.tv == tarval_top || b.tv == tarval_top) {
2041 node->type.tv = tarval_top;
2042 } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
2043 node->type.tv = tarval_bottom;
2045 /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
2046 must call tarval_add() first to handle this case! */
2047 if (is_tarval(a.tv)) {
2048 if (is_tarval(b.tv)) {
2049 node->type.tv = tarval_add(a.tv, b.tv);
2052 mode = get_tarval_mode(a.tv);
2053 if (a.tv == get_mode_null(mode)) {
2057 } else if (is_tarval(b.tv)) {
2058 mode = get_tarval_mode(b.tv);
2059 if (b.tv == get_mode_null(mode)) {
2064 node->type.tv = tarval_bottom;
2069 * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
2071 * @param node the node
2073 static void compute_Sub(node_t *node) {
2074 ir_node *sub = node->node;
2075 node_t *l = get_irn_node(get_Sub_left(sub));
2076 node_t *r = get_irn_node(get_Sub_right(sub));
2077 lattice_elem_t a = l->type;
2078 lattice_elem_t b = r->type;
2081 if (a.tv == tarval_top || b.tv == tarval_top) {
2082 node->type.tv = tarval_top;
2083 } else if (is_con(a) && is_con(b)) {
2084 if (is_tarval(a.tv) && is_tarval(b.tv)) {
2085 node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
2086 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
2088 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
2091 node->type.tv = tarval_bottom;
2093 } else if (r->part == l->part &&
2094 (!mode_is_float(get_irn_mode(l->node)))) {
2096 * BEWARE: a - a is NOT always 0 for floating Point values, as
2097 * NaN op NaN = NaN, so we must check this here.
2099 ir_mode *mode = get_irn_mode(sub);
2100 tv = get_mode_null(mode);
2102 /* if the node was ONCE evaluated by all constants, but now
2103 this breaks AND we get from the argument partitions a different
2104 result, switch to bottom.
2105 This happens because initially all nodes are in the same partition ... */
2106 if (node->type.tv != tv)
2110 node->type.tv = tarval_bottom;
2115 * (Re-)compute the type for an Eor. Special case: both nodes are congruent.
2117 * @param node the node
2119 static void compute_Eor(node_t *node) {
2120 ir_node *eor = node->node;
2121 node_t *l = get_irn_node(get_Eor_left(eor));
2122 node_t *r = get_irn_node(get_Eor_right(eor));
2123 lattice_elem_t a = l->type;
2124 lattice_elem_t b = r->type;
2127 if (a.tv == tarval_top || b.tv == tarval_top) {
2128 node->type.tv = tarval_top;
2129 } else if (is_con(a) && is_con(b)) {
2130 if (is_tarval(a.tv) && is_tarval(b.tv)) {
2131 node->type.tv = tarval_eor(a.tv, b.tv);
2132 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
2134 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
2137 node->type.tv = tarval_bottom;
2139 } else if (r->part == l->part) {
2140 ir_mode *mode = get_irn_mode(eor);
2141 tv = get_mode_null(mode);
2143 /* if the node was ONCE evaluated by all constants, but now
2144 this breaks AND we get from the argument partitions a different
2145 result, switch to bottom.
2146 This happens because initially all nodes are in the same partition ... */
2147 if (node->type.tv != tv)
2151 node->type.tv = tarval_bottom;
2156 * (Re-)compute the type for Cmp.
2158 * @param node the node
2160 static void compute_Cmp(node_t *node) {
2161 ir_node *cmp = node->node;
2162 node_t *l = get_irn_node(get_Cmp_left(cmp));
2163 node_t *r = get_irn_node(get_Cmp_right(cmp));
2164 lattice_elem_t a = l->type;
2165 lattice_elem_t b = r->type;
2167 if (a.tv == tarval_top || b.tv == tarval_top) {
2168 node->type.tv = tarval_top;
2169 } else if (r->part == l->part) {
2170 /* both nodes congruent, we can probably do something */
2171 node->type.tv = tarval_b_true;
2172 } else if (is_con(a) && is_con(b)) {
2173 /* both nodes are constants, we can probably do something */
2174 node->type.tv = tarval_b_true;
2176 node->type.tv = tarval_bottom;
2181 * (Re-)compute the type for a Proj(Cmp).
2183 * @param node the node
2184 * @param cond the predecessor Cmp node
2186 static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
2187 ir_node *proj = node->node;
2188 node_t *l = get_irn_node(get_Cmp_left(cmp));
2189 node_t *r = get_irn_node(get_Cmp_right(cmp));
2190 lattice_elem_t a = l->type;
2191 lattice_elem_t b = r->type;
2192 pn_Cmp pnc = get_Proj_proj(proj);
2195 if (a.tv == tarval_top || b.tv == tarval_top) {
2196 node->type.tv = tarval_undefined;
2197 } else if (is_con(a) && is_con(b)) {
2198 default_compute(node);
2199 } else if (r->part == l->part &&
2200 (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
2202 * BEWARE: a == a is NOT always True for floating Point values, as
2203 * NaN != NaN is defined, so we must check this here.
2205 tv = pnc & pn_Cmp_Eq ? tarval_b_true: tarval_b_false;
2207 /* if the node was ONCE evaluated by all constants, but now
2208 this breaks AND we get from the argument partitions a different
2209 result, switch to bottom.
2210 This happens because initially all nodes are in the same partition ... */
2211 if (node->type.tv != tv)
2215 node->type.tv = tarval_bottom;
2217 } /* compute_Proj_Cmp */
2220 * (Re-)compute the type for a Proj(Cond).
2222 * @param node the node
2223 * @param cond the predecessor Cond node
2225 static void compute_Proj_Cond(node_t *node, ir_node *cond) {
2226 ir_node *proj = node->node;
2227 long pnc = get_Proj_proj(proj);
2228 ir_node *sel = get_Cond_selector(cond);
2229 node_t *selector = get_irn_node(sel);
2232 * Note: it is crucial for the monotony that the Proj(Cond)
2233 * are evaluates after all predecessors of the Cond selector are
2239 * Due to the fact that 0 is a const, the Cmp gets immediately
2240 * on the cprop list. It will be evaluated before x is evaluated,
2241 * might leaving x as Top. When later x is evaluated, the Cmp
2242 * might change its value.
2243 * BUT if the Cond is evaluated before this happens, Proj(Cond, FALSE)
2244 * gets R, and later changed to F if Cmp is evaluated to True!
2246 * We prevent this by putting Conds in an extra cprop_X queue, which
2247 * gets evaluated after the cprop queue is empty.
2249 * Note that this even happens with Click's original algorithm, if
2250 * Cmp(x, 0) is evaluated to True first and later changed to False
2251 * if x was Top first and later changed to a Const ...
2252 * It is unclear how Click solved that problem ...
2254 * However, in rare cases even this does not help, if a Top reaches
2255 * a compare through a Phi, than Proj(Cond) is evaluated changing
2256 * the type of the Phi to something other.
2257 * So, we take the last resort and bind the type to R once
2260 * (This might be even the way Click works around the whole problem).
2262 * Finally, we may miss some optimization possibilities due to this:
2267 * If Top reaches the if first, than we decide for != here.
2268 * If y later is evaluated to 0, we cannot revert this decision
2269 * and must live with both outputs enabled. If this happens,
2270 * we get an unresolved if (true) in the code ...
2272 * In Click's version where this decision is done at the Cmp,
2273 * the Cmp is NOT optimized away than (if y evaluated to 1
2274 * for instance) and we get a if (1 == 0) here ...
2276 * Both solutions are suboptimal.
2277 * At least, we could easily detect this problem and run
2278 * cf_opt() (or even combo) again :-(
2280 if (node->type.tv == tarval_reachable)
2283 if (get_irn_mode(sel) == mode_b) {
2285 if (pnc == pn_Cond_true) {
2286 if (selector->type.tv == tarval_b_false) {
2287 node->type.tv = tarval_unreachable;
2288 } else if (selector->type.tv == tarval_b_true) {
2289 node->type.tv = tarval_reachable;
2290 } else if (selector->type.tv == tarval_bottom) {
2291 node->type.tv = tarval_reachable;
2293 assert(selector->type.tv == tarval_top);
2294 if (tarval_UNKNOWN == tarval_top) {
2295 /* any condition based on Top is "!=" */
2296 node->type.tv = tarval_unreachable;
2298 node->type.tv = tarval_unreachable;
2302 assert(pnc == pn_Cond_false);
2304 if (selector->type.tv == tarval_b_false) {
2305 node->type.tv = tarval_reachable;
2306 } else if (selector->type.tv == tarval_b_true) {
2307 node->type.tv = tarval_unreachable;
2308 } else if (selector->type.tv == tarval_bottom) {
2309 node->type.tv = tarval_reachable;
2311 assert(selector->type.tv == tarval_top);
2312 if (tarval_UNKNOWN == tarval_top) {
2313 /* any condition based on Top is "!=" */
2314 node->type.tv = tarval_reachable;
2316 node->type.tv = tarval_unreachable;
2322 if (selector->type.tv == tarval_bottom) {
2323 node->type.tv = tarval_reachable;
2324 } else if (selector->type.tv == tarval_top) {
2325 if (tarval_UNKNOWN == tarval_top &&
2326 pnc == get_Cond_defaultProj(cond)) {
2327 /* a switch based of Top is always "default" */
2328 node->type.tv = tarval_reachable;
2330 node->type.tv = tarval_unreachable;
2333 long value = get_tarval_long(selector->type.tv);
2334 if (pnc == get_Cond_defaultProj(cond)) {
2335 /* default switch, have to check ALL other cases */
2338 for (i = get_irn_n_outs(cond) - 1; i >= 0; --i) {
2339 ir_node *succ = get_irn_out(cond, i);
2343 if (value == get_Proj_proj(succ)) {
2344 /* we found a match, will NOT take the default case */
2345 node->type.tv = tarval_unreachable;
2349 /* all cases checked, no match, will take default case */
2350 node->type.tv = tarval_reachable;
2353 node->type.tv = value == pnc ? tarval_reachable : tarval_unreachable;
2357 } /* compute_Proj_Cond */
2360 * (Re-)compute the type for a Proj-Node.
2362 * @param node the node
2364 static void compute_Proj(node_t *node) {
2365 ir_node *proj = node->node;
2366 ir_mode *mode = get_irn_mode(proj);
2367 node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
2368 ir_node *pred = get_Proj_pred(proj);
2370 if (block->type.tv == tarval_unreachable) {
2371 /* a Proj in a unreachable Block stay Top */
2372 node->type.tv = tarval_top;
2375 if (get_irn_node(pred)->type.tv == tarval_top && !is_Cond(pred)) {
2376 /* if the predecessor is Top, its Proj follow */
2377 node->type.tv = tarval_top;
2381 if (mode == mode_M) {
2382 /* mode M is always bottom */
2383 node->type.tv = tarval_bottom;
2386 if (mode != mode_X) {
2388 compute_Proj_Cmp(node, pred);
2390 default_compute(node);
2393 /* handle mode_X nodes */
2395 switch (get_irn_opcode(pred)) {
2397 /* the Proj_X from the Start is always reachable.
2398 However this is already handled at the top. */
2399 node->type.tv = tarval_reachable;
2402 compute_Proj_Cond(node, pred);
2405 default_compute(node);
2407 } /* compute_Proj */
2410 * (Re-)compute the type for a Confirm.
2412 * @param node the node
2414 static void compute_Confirm(node_t *node) {
2415 ir_node *confirm = node->node;
2416 node_t *pred = get_irn_node(get_Confirm_value(confirm));
2418 if (get_Confirm_cmp(confirm) == pn_Cmp_Eq) {
2419 node_t *bound = get_irn_node(get_Confirm_bound(confirm));
2421 if (is_con(bound->type)) {
2422 /* is equal to a constant */
2423 node->type = bound->type;
2427 /* a Confirm is a copy OR a Const */
2428 node->type = pred->type;
2429 } /* compute_Confirm */
2432 * (Re-)compute the type for a Max.
2434 * @param node the node
2436 static void compute_Max(node_t *node) {
2437 ir_node *op = node->node;
2438 node_t *l = get_irn_node(get_binop_left(op));
2439 node_t *r = get_irn_node(get_binop_right(op));
2440 lattice_elem_t a = l->type;
2441 lattice_elem_t b = r->type;
2443 if (a.tv == tarval_top || b.tv == tarval_top) {
2444 node->type.tv = tarval_top;
2445 } else if (is_con(a) && is_con(b)) {
2446 /* both nodes are constants, we can probably do something */
2448 /* this case handles SymConsts as well */
2451 ir_mode *mode = get_irn_mode(op);
2452 tarval *tv_min = get_mode_min(mode);
2456 else if (b.tv == tv_min)
2458 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2459 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2460 node->type.tv = a.tv;
2462 node->type.tv = b.tv;
2464 node->type.tv = tarval_bad;
2467 } else if (r->part == l->part) {
2468 /* both nodes congruent, we can probably do something */
2471 node->type.tv = tarval_bottom;
2476 * (Re-)compute the type for a Min.
2478 * @param node the node
2480 static void compute_Min(node_t *node) {
2481 ir_node *op = node->node;
2482 node_t *l = get_irn_node(get_binop_left(op));
2483 node_t *r = get_irn_node(get_binop_right(op));
2484 lattice_elem_t a = l->type;
2485 lattice_elem_t b = r->type;
2487 if (a.tv == tarval_top || b.tv == tarval_top) {
2488 node->type.tv = tarval_top;
2489 } else if (is_con(a) && is_con(b)) {
2490 /* both nodes are constants, we can probably do something */
2492 /* this case handles SymConsts as well */
2495 ir_mode *mode = get_irn_mode(op);
2496 tarval *tv_max = get_mode_max(mode);
2500 else if (b.tv == tv_max)
2502 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2503 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2504 node->type.tv = a.tv;
2506 node->type.tv = b.tv;
2508 node->type.tv = tarval_bad;
2511 } else if (r->part == l->part) {
2512 /* both nodes congruent, we can probably do something */
2515 node->type.tv = tarval_bottom;
2520 * (Re-)compute the type for a given node.
2522 * @param node the node
2524 static void compute(node_t *node) {
2525 ir_node *irn = node->node;
2528 #ifndef VERIFY_MONOTONE
2530 * Once a node reaches bottom, the type cannot fall further
2531 * in the lattice and we can stop computation.
2532 * Do not take this exit if the monotony verifier is
2533 * enabled to catch errors.
2535 if (node->type.tv == tarval_bottom)
2539 if (is_no_Block(irn)) {
2540 /* for pinned nodes, check its control input */
2541 if (get_irn_pinned(skip_Proj(irn)) == op_pin_state_pinned) {
2542 node_t *block = get_irn_node(get_nodes_block(irn));
2544 if (block->type.tv == tarval_unreachable) {
2545 node->type.tv = tarval_top;
2551 func = (compute_func)node->node->op->ops.generic;
2557 * Identity functions: Note that one might thing that identity() is just a
2558 * synonym for equivalent_node(). While this is true, we cannot use it for the algorithm
2559 * here, because it expects that the identity node is one of the inputs, which is NOT
2560 * always true for equivalent_node() which can handle (and does sometimes) DAGs.
2561 * So, we have our own implementation, which copies some parts of equivalent_node()
2565 * Calculates the Identity for Phi nodes
2567 static node_t *identity_Phi(node_t *node) {
2568 ir_node *phi = node->node;
2569 ir_node *block = get_nodes_block(phi);
2570 node_t *n_part = NULL;
2573 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
2574 node_t *pred_X = get_irn_node(get_Block_cfgpred(block, i));
2576 if (pred_X->type.tv == tarval_reachable) {
2577 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
2581 else if (n_part->part != pred->part) {
2582 /* incongruent inputs, not a follower */
2587 /* if n_part is NULL here, all inputs path are dead, the Phi computes
2588 * tarval_top, is in the TOP partition and should NOT being split! */
2589 assert(n_part != NULL);
2591 } /* identity_Phi */
2594 * Calculates the Identity for commutative 0 neutral nodes.
2596 static node_t *identity_comm_zero_binop(node_t *node) {
2597 ir_node *op = node->node;
2598 node_t *a = get_irn_node(get_binop_left(op));
2599 node_t *b = get_irn_node(get_binop_right(op));
2600 ir_mode *mode = get_irn_mode(op);
2603 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2604 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2607 /* node: no input should be tarval_top, else the binop would be also
2608 * Top and not being split. */
2609 zero = get_mode_null(mode);
2610 if (a->type.tv == zero)
2612 if (b->type.tv == zero)
2615 } /* identity_comm_zero_binop */
2618 * Calculates the Identity for Shift nodes.
2620 static node_t *identity_shift(node_t *node) {
2621 ir_node *op = node->node;
2622 node_t *b = get_irn_node(get_binop_right(op));
2623 ir_mode *mode = get_irn_mode(b->node);
2626 /* node: no input should be tarval_top, else the binop would be also
2627 * Top and not being split. */
2628 zero = get_mode_null(mode);
2629 if (b->type.tv == zero)
2630 return get_irn_node(get_binop_left(op));
2632 } /* identity_shift */
2635 * Calculates the Identity for Mul nodes.
2637 static node_t *identity_Mul(node_t *node) {
2638 ir_node *op = node->node;
2639 node_t *a = get_irn_node(get_Mul_left(op));
2640 node_t *b = get_irn_node(get_Mul_right(op));
2641 ir_mode *mode = get_irn_mode(op);
2644 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2645 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2648 /* node: no input should be tarval_top, else the binop would be also
2649 * Top and not being split. */
2650 one = get_mode_one(mode);
2651 if (a->type.tv == one)
2653 if (b->type.tv == one)
2656 } /* identity_Mul */
2659 * Calculates the Identity for Sub nodes.
2661 static node_t *identity_Sub(node_t *node) {
2662 ir_node *sub = node->node;
2663 node_t *b = get_irn_node(get_Sub_right(sub));
2664 ir_mode *mode = get_irn_mode(sub);
2666 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2667 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2670 /* node: no input should be tarval_top, else the binop would be also
2671 * Top and not being split. */
2672 if (b->type.tv == get_mode_null(mode))
2673 return get_irn_node(get_Sub_left(sub));
2675 } /* identity_Sub */
2678 * Calculates the Identity for And nodes.
2680 static node_t *identity_And(node_t *node) {
2681 ir_node *and = node->node;
2682 node_t *a = get_irn_node(get_And_left(and));
2683 node_t *b = get_irn_node(get_And_right(and));
2684 tarval *neutral = get_mode_all_one(get_irn_mode(and));
2686 /* node: no input should be tarval_top, else the And would be also
2687 * Top and not being split. */
2688 if (a->type.tv == neutral)
2690 if (b->type.tv == neutral)
2693 } /* identity_And */
2696 * Calculates the Identity for Confirm nodes.
2698 static node_t *identity_Confirm(node_t *node) {
2699 ir_node *confirm = node->node;
2701 /* a Confirm is always a Copy */
2702 return get_irn_node(get_Confirm_value(confirm));
2703 } /* identity_Confirm */
2706 * Calculates the Identity for Mux nodes.
2708 static node_t *identity_Mux(node_t *node) {
2709 ir_node *mux = node->node;
2710 node_t *t = get_irn_node(get_Mux_true(mux));
2711 node_t *f = get_irn_node(get_Mux_false(mux));
2714 if (t->part == f->part)
2717 /* for now, the 1-input identity is not supported */
2719 sel = get_irn_node(get_Mux_sel(mux));
2721 /* Mux sel input is mode_b, so it is always a tarval */
2722 if (sel->type.tv == tarval_b_true)
2724 if (sel->type.tv == tarval_b_false)
2728 } /* identity_Mux */
2731 * Calculates the Identity for Min nodes.
2733 static node_t *identity_Min(node_t *node) {
2734 ir_node *op = node->node;
2735 node_t *a = get_irn_node(get_binop_left(op));
2736 node_t *b = get_irn_node(get_binop_right(op));
2737 ir_mode *mode = get_irn_mode(op);
2740 if (a->part == b->part) {
2741 /* leader of multiple predecessors */
2745 /* works even with NaN */
2746 tv_max = get_mode_max(mode);
2747 if (a->type.tv == tv_max)
2749 if (b->type.tv == tv_max)
2752 } /* identity_Min */
2755 * Calculates the Identity for Max nodes.
2757 static node_t *identity_Max(node_t *node) {
2758 ir_node *op = node->node;
2759 node_t *a = get_irn_node(get_binop_left(op));
2760 node_t *b = get_irn_node(get_binop_right(op));
2761 ir_mode *mode = get_irn_mode(op);
2764 if (a->part == b->part) {
2765 /* leader of multiple predecessors */
2769 /* works even with NaN */
2770 tv_min = get_mode_min(mode);
2771 if (a->type.tv == tv_min)
2773 if (b->type.tv == tv_min)
2776 } /* identity_Max */
2779 * Calculates the Identity for nodes.
2781 static node_t *identity(node_t *node) {
2782 ir_node *irn = node->node;
2784 switch (get_irn_opcode(irn)) {
2786 return identity_Phi(node);
2788 return identity_Mul(node);
2792 return identity_comm_zero_binop(node);
2797 return identity_shift(node);
2799 return identity_And(node);
2801 return identity_Sub(node);
2803 return identity_Confirm(node);
2805 return identity_Mux(node);
2807 return identity_Min(node);
2809 return identity_Max(node);
2816 * Node follower is a (new) follower of leader, segregate Leader
2819 static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader) {
2820 ir_node *l = leader->node;
2821 int j, i, n = get_irn_n_outs(l);
2823 DB((dbg, LEVEL_2, "%+F is a follower of %+F\n", follower, leader->node));
2824 /* The leader edges must remain sorted, but follower edges can
2826 for (i = leader->n_followers + 1; i <= n; ++i) {
2827 if (l->out[i].use == follower) {
2828 ir_def_use_edge t = l->out[i];
2830 for (j = i - 1; j >= leader->n_followers + 1; --j)
2831 l->out[j + 1] = l->out[j];
2832 ++leader->n_followers;
2833 l->out[leader->n_followers] = t;
2837 } /* segregate_def_use_chain_1 */
2840 * Node follower is a (new) follower; segregate its Leader
2843 * @param follower the follower IR node
2845 static void segregate_def_use_chain(const ir_node *follower) {
2848 for (i = get_irn_arity(follower) - 1; i >= 0; --i) {
2849 node_t *pred = get_irn_node(get_irn_n(follower, i));
2851 segregate_def_use_chain_1(follower, pred);
2853 } /* segregate_def_use_chain */
2856 * Propagate constant evaluation.
2858 * @param env the environment
2860 static void propagate(environment_t *env) {
2863 lattice_elem_t old_type;
2865 unsigned n_fallen, old_type_was_T_or_C;
2868 while (env->cprop != NULL) {
2869 void *oldopcode = NULL;
2871 /* remove the first partition X from cprop */
2874 env->cprop = X->cprop_next;
2876 old_type_was_T_or_C = X->type_is_T_or_C;
2878 DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
2882 int cprop_empty = list_empty(&X->cprop);
2883 int cprop_X_empty = list_empty(&X->cprop_X);
2885 if (cprop_empty && cprop_X_empty) {
2886 /* both cprop lists are empty */
2890 /* remove the first Node x from X.cprop */
2892 /* Get a node from the cprop_X list only if
2893 * all data nodes are processed.
2894 * This ensures, that all inputs of the Cond
2895 * predecessor are processed if its type is still Top.
2897 x = list_entry(X->cprop_X.next, node_t, cprop_list);
2899 x = list_entry(X->cprop.next, node_t, cprop_list);
2902 //assert(x->part == X);
2903 list_del(&x->cprop_list);
2906 if (x->is_follower && identity(x) == x) {
2907 /* check the opcode first */
2908 if (oldopcode == NULL) {
2909 oldopcode = lambda_opcode(get_first_node(X), env);
2911 if (oldopcode != lambda_opcode(x, env)) {
2912 if (x->on_fallen == 0) {
2913 /* different opcode -> x falls out of this partition */
2918 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
2922 /* x will make the follower -> leader transition */
2923 follower_to_leader(x);
2926 /* compute a new type for x */
2928 DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
2930 if (x->type.tv != old_type.tv) {
2931 DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
2932 verify_type(old_type, x);
2934 if (x->on_fallen == 0) {
2935 /* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
2936 not already on the list. */
2941 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
2943 for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
2944 ir_node *succ = get_irn_out(x->node, i);
2945 node_t *y = get_irn_node(succ);
2947 /* Add y to y.partition.cprop. */
2948 add_to_cprop(y, env);
2953 if (n_fallen > 0 && n_fallen != X->n_leader) {
2954 DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
2955 Y = split(&X, fallen, env);
2957 * We have split out fallen node. The type of the result
2958 * partition is NOT set yet.
2960 Y->type_is_T_or_C = 0;
2964 /* remove the flags from the fallen list */
2965 for (x = fallen; x != NULL; x = x->next)
2968 if (old_type_was_T_or_C) {
2971 /* check if some nodes will make the leader -> follower transition */
2972 list_for_each_entry_safe(node_t, y, tmp, &Y->Leader, node_list) {
2973 if (y->type.tv != tarval_top && ! is_con(y->type)) {
2974 node_t *eq_node = identity(y);
2976 if (eq_node != y && eq_node->part == y->part) {
2977 DB((dbg, LEVEL_2, "Node %+F is a follower of %+F\n", y->node, eq_node->node));
2978 /* move to Follower */
2980 list_del(&y->node_list);
2981 list_add_tail(&y->node_list, &Y->Follower);
2984 segregate_def_use_chain(y->node);
2994 * Get the leader for a given node from its congruence class.
2996 * @param irn the node
2998 static ir_node *get_leader(node_t *node) {
2999 partition_t *part = node->part;
3001 if (part->n_leader > 1 || node->is_follower) {
3002 if (node->is_follower) {
3003 DB((dbg, LEVEL_2, "Replacing follower %+F\n", node->node));
3006 DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
3008 return get_first_node(part)->node;
3014 * Returns non-zero if a mode_T node has only one reachable output.
3016 static int only_one_reachable_proj(ir_node *n) {
3019 for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
3020 ir_node *proj = get_irn_out(n, i);
3023 /* skip non-control flow Proj's */
3024 if (get_irn_mode(proj) != mode_X)
3027 node = get_irn_node(proj);
3028 if (node->type.tv == tarval_reachable) {
3034 } /* only_one_reachable_proj */
3037 * Return non-zero if the control flow predecessor node pred
3038 * is the only reachable control flow exit of its block.
3040 * @param pred the control flow exit
3042 static int can_exchange(ir_node *pred) {
3045 else if (is_Jmp(pred))
3047 else if (get_irn_mode(pred) == mode_T) {
3048 /* if the predecessor block has more than one
3049 reachable outputs we cannot remove the block */
3050 return only_one_reachable_proj(pred);
3053 } /* can_exchange */
3056 * Block Post-Walker, apply the analysis results on control flow by
3057 * shortening Phi's and Block inputs.
3059 static void apply_cf(ir_node *block, void *ctx) {
3060 environment_t *env = ctx;
3061 node_t *node = get_irn_node(block);
3063 ir_node **ins, **in_X;
3064 ir_node *phi, *next;
3066 n = get_Block_n_cfgpreds(block);
3068 if (node->type.tv == tarval_unreachable) {
3071 for (i = n - 1; i >= 0; --i) {
3072 ir_node *pred = get_Block_cfgpred(block, i);
3074 if (! is_Bad(pred)) {
3075 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
3077 if (pred_bl->flagged == 0) {
3078 pred_bl->flagged = 3;
3080 if (pred_bl->type.tv == tarval_reachable) {
3082 * We will remove an edge from block to its pred.
3083 * This might leave the pred block as an endless loop
3085 if (! is_backedge(block, i))
3086 keep_alive(pred_bl->node);
3092 /* the EndBlock is always reachable even if the analysis
3093 finds out the opposite :-) */
3094 if (block != get_irg_end_block(current_ir_graph)) {
3095 /* mark dead blocks */
3096 set_Block_dead(block);
3097 DB((dbg, LEVEL_1, "Removing dead %+F\n", block));
3099 /* the endblock is unreachable */
3100 set_irn_in(block, 0, NULL);
3106 /* only one predecessor combine */
3107 ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));
3109 if (can_exchange(pred)) {
3110 ir_node *new_block = get_nodes_block(pred);
3111 DB((dbg, LEVEL_1, "Fuse %+F with %+F\n", block, new_block));
3112 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
3113 exchange(block, new_block);
3114 node->node = new_block;
3120 NEW_ARR_A(ir_node *, in_X, n);
3122 for (i = 0; i < n; ++i) {
3123 ir_node *pred = get_Block_cfgpred(block, i);
3124 node_t *node = get_irn_node(pred);
3126 if (node->type.tv == tarval_reachable) {
3129 DB((dbg, LEVEL_1, "Removing dead input %d from %+F (%+F)\n", i, block, pred));
3130 if (! is_Bad(pred)) {
3131 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
3133 if (pred_bl->flagged == 0) {
3134 pred_bl->flagged = 3;
3136 if (pred_bl->type.tv == tarval_reachable) {
3138 * We will remove an edge from block to its pred.
3139 * This might leave the pred block as an endless loop
3141 if (! is_backedge(block, i))
3142 keep_alive(pred_bl->node);
3152 NEW_ARR_A(ir_node *, ins, n);
3153 for (phi = get_Block_phis(block); phi != NULL; phi = next) {
3154 node_t *node = get_irn_node(phi);
3156 next = get_Phi_next(phi);
3157 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
3158 /* this Phi is replaced by a constant */
3159 tarval *tv = node->type.tv;
3160 ir_node *c = new_r_Const(current_ir_graph, block, get_tarval_mode(tv), tv);
3162 set_irn_node(c, node);
3164 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
3165 DBG_OPT_COMBO(phi, c, FS_OPT_COMBO_CONST);
3170 for (i = 0; i < n; ++i) {
3171 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
3173 if (pred->type.tv == tarval_reachable) {
3174 ins[j++] = get_Phi_pred(phi, i);
3178 /* this Phi is replaced by a single predecessor */
3179 ir_node *s = ins[0];
3180 node_t *phi_node = get_irn_node(phi);
3183 DB((dbg, LEVEL_1, "%+F is replaced by %+F because of cf change\n", phi, s));
3184 DBG_OPT_COMBO(phi, s, FS_OPT_COMBO_FOLLOWER);
3189 set_irn_in(phi, j, ins);
3197 /* this Block has only one live predecessor */
3198 ir_node *pred = skip_Proj(in_X[0]);
3200 if (can_exchange(pred)) {
3201 ir_node *new_block = get_nodes_block(pred);
3202 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
3203 exchange(block, new_block);
3204 node->node = new_block;
3209 set_irn_in(block, k, in_X);
3214 * Exchange a node by its leader.
3215 * Beware: in rare cases the mode might be wrong here, for instance
3216 * AddP(x, NULL) is a follower of x, but with different mode.
3219 static void exchange_leader(ir_node *irn, ir_node *leader) {
3220 ir_mode *mode = get_irn_mode(irn);
3221 if (mode != get_irn_mode(leader)) {
3222 /* The conv is a no-op, so we are fre to place in
3223 * either in the block of the leader OR in irn's block.
3224 * Probably placing it into leaders block might reduce
3225 * the number of Conv due to CSE. */
3226 ir_node *block = get_nodes_block(leader);
3227 dbg_info *dbg = get_irn_dbg_info(irn);
3229 leader = new_rd_Conv(dbg, current_ir_graph, block, leader, mode);
3231 exchange(irn, leader);
3235 * Post-Walker, apply the analysis results;
3237 static void apply_result(ir_node *irn, void *ctx) {
3238 environment_t *env = ctx;
3239 node_t *node = get_irn_node(irn);
3241 if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
3242 /* blocks already handled, do not touch the End node */
3244 node_t *block = get_irn_node(get_nodes_block(irn));
3246 if (block->type.tv == tarval_unreachable) {
3247 ir_node *bad = get_irg_bad(current_ir_graph);
3249 /* here, bad might already have a node, but this can be safely ignored
3250 as long as bad has at least ONE valid node */
3251 set_irn_node(bad, node);
3253 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
3256 } else if (node->type.tv == tarval_top) {
3257 ir_mode *mode = get_irn_mode(irn);
3259 if (mode == mode_M) {
3260 /* never kill a mode_M node */
3262 ir_node *pred = get_Proj_pred(irn);
3263 node_t *pnode = get_irn_node(pred);
3265 if (pnode->type.tv == tarval_top) {
3266 /* skip the predecessor */
3267 ir_node *mem = get_memop_mem(pred);
3269 DB((dbg, LEVEL_1, "%+F computes Top, replaced by %+F\n", irn, mem));
3274 /* leave other nodes, especially PhiM */
3275 } else if (mode == mode_T) {
3276 /* Do not kill mode_T nodes, kill their Projs */
3277 } else if (! is_Unknown(irn)) {
3278 /* don't kick away Unknown's, they might be still needed */
3279 ir_node *unk = new_r_Unknown(current_ir_graph, mode);
3281 /* control flow should already be handled at apply_cf() */
3282 assert(mode != mode_X);
3284 /* see comment above */
3285 set_irn_node(unk, node);
3287 DB((dbg, LEVEL_1, "%+F computes Top\n", irn));
3292 else if (get_irn_mode(irn) == mode_X) {
3295 ir_node *cond = get_Proj_pred(irn);
3297 if (is_Cond(cond)) {
3298 if (only_one_reachable_proj(cond)) {
3299 ir_node *jmp = new_r_Jmp(current_ir_graph, block->node);
3300 set_irn_node(jmp, node);
3302 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
3303 DBG_OPT_COMBO(irn, jmp, FS_OPT_COMBO_CF);
3307 node_t *sel = get_irn_node(get_Cond_selector(cond));
3308 tarval *tv = sel->type.tv;
3310 if (is_tarval(tv) && tarval_is_constant(tv)) {
3311 /* The selector is a constant, but more
3312 * than one output is active: An unoptimized
3320 /* normal data node */
3321 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
3322 tarval *tv = node->type.tv;
3325 * Beware: never replace mode_T nodes by constants. Currently we must mark
3326 * mode_T nodes with constants, but do NOT replace them.
3328 if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
3329 /* can be replaced by a constant */
3330 ir_node *c = new_r_Const(current_ir_graph, block->node, get_tarval_mode(tv), tv);
3331 set_irn_node(c, node);
3333 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
3334 DBG_OPT_COMBO(irn, c, FS_OPT_COMBO_CONST);
3335 exchange_leader(irn, c);
3338 } else if (is_entity(node->type.sym.entity_p)) {
3339 if (! is_SymConst(irn)) {
3340 /* can be replaced by a SymConst */
3341 ir_node *symc = new_r_SymConst(current_ir_graph, block->node, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
3342 set_irn_node(symc, node);
3345 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
3346 DBG_OPT_COMBO(irn, symc, FS_OPT_COMBO_CONST);
3347 exchange_leader(irn, symc);
3350 } else if (is_Confirm(irn)) {
3351 /* Confirms are always follower, but do not kill them here */
3353 ir_node *leader = get_leader(node);
3355 if (leader != irn) {
3356 int non_strict_phi = 0;
3359 * Beware: Do not remove Phi(Unknown, ..., x, ..., Unknown)
3360 * as this might create non-strict programs.
3362 if (node->is_follower && is_Phi(irn) && !is_Unknown(leader)) {
3365 for (i = get_Phi_n_preds(irn) - 1; i >= 0; --i) {
3366 ir_node *pred = get_Phi_pred(irn, i);
3368 if (is_Unknown(pred)) {
3374 if (! non_strict_phi) {
3375 DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
3376 if (node->is_follower)
3377 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_FOLLOWER);
3379 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_CONGRUENT);
3380 exchange_leader(irn, leader);
3387 } /* apply_result */
3390 * Fix the keep-alives by deleting unreachable ones.
3392 static void apply_end(ir_node *end, environment_t *env) {
3393 int i, j, n = get_End_n_keepalives(end);
3397 NEW_ARR_A(ir_node *, in, n);
3399 /* fix the keep alive */
3400 for (i = j = 0; i < n; i++) {
3401 ir_node *ka = get_End_keepalive(end, i);
3402 node_t *node = get_irn_node(ka);
3405 node = get_irn_node(get_nodes_block(ka));
3407 if (node->type.tv != tarval_unreachable && !is_Bad(ka))
3411 set_End_keepalives(end, j, in);
3416 #define SET(code) op_##code->ops.generic = (op_func)compute_##code
3419 * sets the generic functions to compute.
3421 static void set_compute_functions(void) {
3424 /* set the default compute function */
3425 for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
3426 ir_op *op = get_irp_opcode(i);
3427 op->ops.generic = (op_func)default_compute;
3430 /* set specific functions */
3452 } /* set_compute_functions */
3454 void combo(ir_graph *irg) {
3456 ir_node *initial_bl;
3458 ir_graph *rem = current_ir_graph;
3460 current_ir_graph = irg;
3462 /* register a debug mask */
3463 FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
3465 DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
3467 obstack_init(&env.obst);
3468 env.worklist = NULL;
3472 #ifdef DEBUG_libfirm
3473 env.dbg_list = NULL;
3475 env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
3476 env.type2id_map = pmap_create();
3477 env.end_idx = get_opt_global_cse() ? 0 : -1;
3478 env.lambda_input = 0;
3481 /* options driving the optimization */
3482 env.commutative = 1;
3483 env.opt_unknown = 1;
3485 assure_irg_outs(irg);
3486 assure_cf_loop(irg);
3488 /* we have our own value_of function */
3489 set_value_of_func(get_node_tarval);
3491 set_compute_functions();
3492 DEBUG_ONLY(part_nr = 0);
3494 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
3496 if (env.opt_unknown)
3497 tarval_UNKNOWN = tarval_top;
3499 tarval_UNKNOWN = tarval_bad;
3501 /* create the initial partition and place it on the work list */
3502 env.initial = new_partition(&env);
3503 add_to_worklist(env.initial, &env);
3504 irg_walk_graph(irg, create_initial_partitions, init_block_phis, &env);
3506 /* set the hook: from now, every node has a partition and a type */
3507 DEBUG_ONLY(set_dump_node_vcgattr_hook(dump_partition_hook));
3509 /* all nodes on the initial partition have type Top */
3510 env.initial->type_is_T_or_C = 1;
3512 /* Place the START Node's partition on cprop.
3513 Place the START Node on its local worklist. */
3514 initial_bl = get_irg_start_block(irg);
3515 start = get_irn_node(initial_bl);
3516 add_to_cprop(start, &env);
3520 if (env.worklist != NULL)
3522 } while (env.cprop != NULL || env.worklist != NULL);
3524 dump_all_partitions(&env);
3525 check_all_partitions(&env);
3528 dump_ir_block_graph(irg, "-partition");
3531 /* apply the result */
3532 irg_block_walk_graph(irg, NULL, apply_cf, &env);
3533 /* Kill keep-alives of dead blocks: this speeds up apply_result()
3534 * and fixes assertion because dead cf to dead blocks is NOT removed by
3536 apply_end(get_irg_end(irg), &env);
3537 irg_walk_graph(irg, NULL, apply_result, &env);
3540 DB((dbg, LEVEL_1, "Unoptimized Control Flow left"));
3544 /* control flow might changed */
3545 set_irg_outs_inconsistent(irg);
3546 set_irg_extblk_inconsistent(irg);
3547 set_irg_doms_inconsistent(irg);
3548 set_irg_loopinfo_inconsistent(irg);
3551 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
3553 /* remove the partition hook */
3554 DEBUG_ONLY(set_dump_node_vcgattr_hook(NULL));
3556 pmap_destroy(env.type2id_map);
3557 del_set(env.opcode2id_map);
3558 obstack_free(&env.obst, NULL);
3560 /* restore value_of() default behavior */
3561 set_value_of_func(NULL);
3562 current_ir_graph = rem;