2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Cliff Click's Combined Analysis/Optimization
23 * @author Michael Beck
26 * This is a slightly enhanced version of Cliff Clicks combo algorithm
27 * - support for commutative nodes is added, Add(a,b) and Add(b,a) ARE congruent
28 * - supports all Firm direct (by a data edge) identities except Mux
29 * (Mux can be a 2-input or 1-input identity, only 2-input is implemented yet)
30 * - supports Confirm nodes (handle them like Copies but do NOT remove them)
31 * - let Cmp nodes calculate Top like all othe data nodes: this would let
32 * Mux nodes to calculate Unknown instead of taking the true result
33 * - let Cond(Top) always select FALSE/default: This is tricky. Nodes are only reavaluated
34 * IFF the predecessor changed its type. Because nodes are initialized with Top
35 * this never happens, let all Proj(Cond) be unreachable.
36 * We avoid this condition by the same way we work around Phi: whenever a Block
37 * node is placed on the list, place its Cond nodes (and because they are Tuple
38 * all its Proj-nodes either on the cprop list)
39 * Especially, this changes the meaning of Click's example:
54 * using Click's version while is silent with our.
55 * - support for global congruences is implemented but not tested yet
57 * Note further that we use the terminology from Click's work here, which is different
58 * in some cases from Firm terminology. Especially, Click's type is a
59 * Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
65 #include "iroptimize.h"
73 #include "irgraph_t.h"
80 #include "iropt_dbg.h"
90 /* define this to check that all type translations are monotone */
91 #undef VERIFY_MONOTONE
93 /* define this to check the consistency of partitions */
94 #define CHECK_PARTITIONS
97 /* allow optimization of non-strict programs */
/* Forward typedefs for the algorithm's core data structures (defined below). */
100 typedef struct node_t node_t;
101 typedef struct partition_t partition_t;
102 typedef struct opcode_key_t opcode_key_t;
103 typedef struct listmap_entry_t listmap_entry_t;
105 /** The type of the compute function. */
/* Called to (re-)evaluate the lattice type of a node during propagation. */
106 typedef void (*compute_func)(node_t *node);
/*
 * Key used to split partitions by "same operation": two leaders are only
 * congruent if all of these fields agree (see check_opcode()/opcode_hash()).
 * NOTE(review): check_opcode() accesses key.u.proj / key.u.ent, so proj and
 * ent presumably live in an anonymous union 'u' — the full declaration is not
 * visible in this fragment; confirm against the original file.
 */
111 struct opcode_key_t {
112 ir_opcode code; /**< The Firm opcode. */
113 ir_mode *mode; /**< The mode of all nodes in the partition. */
114 int arity; /**< The arity of this opcode (needed for Phi etc. */
116 long proj; /**< For Proj nodes, its proj number */
117 ir_entity *ent; /**< For Sel Nodes, its entity */
122 * An entry in the list_map.
124 struct listmap_entry_t {
125 void *id; /**< The id. */
126 node_t *list; /**< The associated list for this id. */
127 listmap_entry_t *next; /**< Link to the next entry in the map. */
130 /** We must map id's to lists. */
/* A small map from opaque pointer ids to node lists, used by split_by(). */
131 typedef struct listmap_t {
132 set *map; /**< Map id's to listmap_entry_t's */
133 listmap_entry_t *values; /**< List of all values in the map. */
137 * A lattice element. Because we handle constants and symbolic constants different, we
138 * have to use this union.
/*
 * Per-IR-node bookkeeping record (struct node_t). One node_t is attached to
 * every ir_node via its link field (see get_irn_node()/set_irn_node()).
 */
149 ir_node *node; /**< The IR-node itself. */
150 list_head node_list; /**< Double-linked list of leader/follower entries. */
151 list_head cprop_list; /**< Double-linked partition.cprop list. */
152 partition_t *part; /**< points to the partition this node belongs to */
153 node_t *next; /**< Next node on local list (partition.touched, fallen). */
154 node_t *race_next; /**< Next node on race list. */
155 lattice_elem_t type; /**< The associated lattice element "type". */
156 int max_user_input; /**< Maximum input number of Def-Use edges. */
157 int next_edge; /**< Index of the next Def-Use edge to use. */
158 int n_followers; /**< Number of Follower in the outs set. */
159 unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
160 unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
161 unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
162 unsigned is_follower:1; /**< Set, if this node is a follower. */
163 unsigned by_all_const:1; /**< Set, if this node was once evaluated by all constants. */
164 unsigned flagged:2; /**< 2 Bits, set if this node was visited by race 1 or 2. */
165 node_t *cond; /**< if this is a Block node, points to its Cond if any */
169 * A partition containing congruent nodes.
/* struct partition_t: one congruence class; Leader nodes define the class,
 * Follower nodes are identities (e.g. Add(x,0)) hanging off a leader. */
172 list_head Leader; /**< The head of partition Leader node list. */
173 list_head Follower; /**< The head of partition Follower node list. */
174 list_head cprop; /**< The head of partition.cprop list. */
175 partition_t *wl_next; /**< Next entry in the work list if any. */
176 partition_t *touched_next; /**< Points to the next partition in the touched set. */
177 partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
178 partition_t *split_next; /**< Points to the next partition in the list that must be split by split_by(). */
179 node_t *touched; /**< The partition.touched set of this partition. */
180 unsigned n_leader; /**< Number of entries in this partition.Leader. */
181 unsigned n_touched; /**< Number of entries in the partition.touched. */
182 int max_user_inputs; /**< Maximum number of user inputs of all entries. */
183 unsigned on_worklist:1; /**< Set, if this partition is in the work list. */
184 unsigned on_touched:1; /**< Set, if this partition is on the touched set. */
185 unsigned on_cprop:1; /**< Set, if this partition is on the cprop list. */
186 unsigned type_is_T_or_C:1;/**< Set, if all nodes in this partition have type Top or Constant. */
188 partition_t *dbg_next; /**< Link all partitions for debugging */
189 unsigned nr; /**< A unique number for (what-)mapping, >0. */
/* Global state of one combo run, threaded through all helpers. */
193 typedef struct environment_t {
194 struct obstack obst; /**< obstack to allocate data structures. */
195 partition_t *worklist; /**< The work list. */
196 partition_t *cprop; /**< The constant propagation list. */
197 partition_t *touched; /**< the touched set. */
198 partition_t *initial; /**< The initial partition. */
199 set *opcode2id_map; /**< The opcodeMode->id map. */
200 pmap *type2id_map; /**< The type->id map. */
201 int end_idx; /**< -1 for local and 0 for global congruences. */
202 int lambda_input; /**< Captured argument for lambda_partition(). */
203 char modified; /**< Set, if the graph was modified. */
204 char commutative; /**< Set, if commutation nodes should be handled specially. */
206 partition_t *dbg_list; /**< List of all partitions. */
210 /** Type of the what function. */
/* Maps a node to an opaque "split id"; nodes with equal ids stay together. */
211 typedef void *(*what_func)(const node_t *node, environment_t *env);
/* Access the node_t record stored in an ir_node's link field. */
213 #define get_irn_node(follower) ((node_t *)get_irn_link(follower))
214 #define set_irn_node(follower, node) set_irn_link(follower, node)
216 /* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
217 #undef tarval_unreachable
218 #define tarval_unreachable tarval_top
221 /** The debug module handle. */
222 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
224 /** The what reason. */
225 DEBUG_ONLY(static const char *what_reason;)
227 /** Next partition number. */
228 DEBUG_ONLY(static unsigned part_nr = 0);
230 /** The tarval returned by Unknown nodes. */
231 static tarval *tarval_UNKNOWN;
/* Forward declaration: returns the leader a follower node is congruent to. */
234 static node_t *identity(node_t *node);
236 #ifdef CHECK_PARTITIONS
/*
 * Consistency check (only compiled with CHECK_PARTITIONS): every Leader must
 * be a non-follower with cleared race flags and point back to partition T;
 * the leader count must match T->n_leader; every Follower is symmetric.
 */
240 static void check_partition(const partition_t *T) {
244 list_for_each_entry(node_t, node, &T->Leader, node_list) {
245 assert(node->is_follower == 0);
246 assert(node->flagged == 0);
247 assert(node->part == T);
250 assert(n == T->n_leader);
252 list_for_each_entry(node_t, node, &T->Follower, node_list) {
253 assert(node->is_follower == 1);
254 assert(node->flagged == 0);
255 assert(node->part == T);
257 } /* check_partition */
260 * check that all leader nodes in the partition have the same opcode.
/* Builds an opcode_key_t from the first leader, then asserts every other
 * leader in Z matches it field by field (incl. Proj number / Sel entity). */
262 static void check_opcode(const partition_t *Z) {
267 list_for_each_entry(node_t, node, &Z->Leader, node_list) {
268 ir_node *irn = node->node;
271 key.code = get_irn_opcode(irn);
272 key.mode = get_irn_mode(irn);
273 key.arity = get_irn_arity(irn);
277 switch (get_irn_opcode(irn)) {
279 key.u.proj = get_Proj_proj(irn);
282 key.u.ent = get_Sel_entity(irn);
289 assert(key.code == get_irn_opcode(irn));
290 assert(key.mode == get_irn_mode(irn));
291 assert(key.arity == get_irn_arity(irn));
293 switch (get_irn_opcode(irn)) {
295 assert(key.u.proj == get_Proj_proj(irn));
298 assert(key.u.ent == get_Sel_entity(irn));
/* Runs check_partition()/check_opcode() over the debug list of all
 * partitions and verifies each follower resolves to a leader in its
 * own partition. */
307 static void check_all_partitions(environment_t *env) {
312 for (P = env->dbg_list; P != NULL; P = P->dbg_next) {
314 if (! P->type_is_T_or_C)
316 list_for_each_entry(node_t, node, &P->Follower, node_list) {
317 node_t *leader = identity(node);
319 assert(leader != node && leader->part == node->part);
/* Walks a singly linked node list whose next pointer sits at byte offset
 * 'ofs' inside node_t, asserting all entries belong to partition Z. */
328 static void do_check_list(const node_t *list, int ofs, const partition_t *Z) {
331 #define NEXT(e) *((const node_t **)((char *)(e) + (ofs)))
332 for (e = list; e != NULL; e = NEXT(e)) {
333 assert(e->part == Z);
336 } /* do_check_list */
339 * Check a local list.
341 static void check_list(const node_t *list, const partition_t *Z) {
342 do_check_list(list, offsetof(node_t, next), Z);
/* Without CHECK_PARTITIONS all checks compile to nothing. */
346 #define check_partition(T)
347 #define check_list(list, Z)
348 #define check_all_partitions(env)
349 #endif /* CHECK_PARTITIONS */
352 static inline lattice_elem_t get_partition_type(const partition_t *X);
355 * Dump partition to output.
/* Debug-only: prints "partX (n_leader, type) { leaders --- followers }". */
357 static void dump_partition(const char *msg, const partition_t *part) {
360 lattice_elem_t type = get_partition_type(part);
362 DB((dbg, LEVEL_2, "%s part%u%s (%u, %+F) {\n ",
363 msg, part->nr, part->type_is_T_or_C ? "*" : "",
364 part->n_leader, type));
365 list_for_each_entry(node_t, node, &part->Leader, node_list) {
366 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
369 if (! list_empty(&part->Follower)) {
370 DB((dbg, LEVEL_2, "\n---\n "));
372 list_for_each_entry(node_t, node, &part->Follower, node_list) {
373 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
377 DB((dbg, LEVEL_2, "\n}\n"));
378 } /* dump_partition */
/* Generic list dumper: follows the next pointer at byte offset 'ofs'
 * inside node_t (same trick as do_check_list()). */
383 static void do_dump_list(const char *msg, const node_t *node, int ofs) {
387 #define GET_LINK(p, ofs) *((const node_t **)((char *)(p) + (ofs)))
389 DB((dbg, LEVEL_3, "%s = {\n ", msg));
390 for (p = node; p != NULL; p = GET_LINK(p, ofs)) {
391 DB((dbg, LEVEL_3, "%s%+F", first ? "" : ", ", p->node));
394 DB((dbg, LEVEL_3, "\n}\n"));
/* Dumps a list linked through node_t.race_next. */
402 static void dump_race_list(const char *msg, const node_t *list) {
403 do_dump_list(msg, list, offsetof(node_t, race_next));
404 } /* dump_race_list */
407 * Dumps a local list.
/* Dumps a list linked through node_t.next (touched/fallen lists). */
409 static void dump_list(const char *msg, const node_t *list) {
410 do_dump_list(msg, list, offsetof(node_t, next));
414 * Dump all partitions.
416 static void dump_all_partitions(const environment_t *env) {
417 const partition_t *P;
419 DB((dbg, LEVEL_2, "All partitions\n===============\n"));
420 for (P = env->dbg_list; P != NULL; P = P->dbg_next)
421 dump_partition("", P);
422 } /* dump_all_partitions */
/* Dumps the partitions produced by the current split_by() pass. */
427 static void dump_split_list(const partition_t *list) {
428 const partition_t *p;
430 DB((dbg, LEVEL_2, "Split by %s produced = {\n", what_reason));
431 for (p = list; p != NULL; p = p->split_next)
432 DB((dbg, LEVEL_2, "part%u, ", p->nr));
433 DB((dbg, LEVEL_2, "\n}\n"));
434 } /* dump_split_list */
437 * Dump partition and type for a node.
/* VCG dumper hook: annotates node n (or local) with partition nr and type. */
439 static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
440 ir_node *irn = local != NULL ? local : n;
441 node_t *node = get_irn_node(irn);
443 ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
445 } /* dump_partition_hook */
/* Non-debug builds: all dumpers compile to nothing. */
448 #define dump_partition(msg, part)
449 #define dump_race_list(msg, list)
450 #define dump_list(msg, list)
451 #define dump_all_partitions(env)
452 #define dump_split_list(list)
455 #if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
457 * Verify that a type transition is monotone
/* Panics if a node's lattice type moved upward (non-monotone transition);
 * staying equal or descending from Top is always allowed. */
459 static void verify_type(const lattice_elem_t old_type, const lattice_elem_t new_type) {
460 if (old_type.tv == new_type.tv) {
464 if (old_type.tv == tarval_top) {
465 /* from Top down-to is always allowed */
468 if (old_type.tv == tarval_reachable) {
469 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
471 if (new_type.tv == tarval_bottom || new_type.tv == tarval_reachable) {
475 panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
/* Disabled: monotonicity verification compiles to nothing. */
478 #define verify_type(old_type, new_type)
482 * Compare two pointer values of a listmap.
/* set-compatible comparator: entries are equal iff their id pointers match
 * (returns 0 on equality, following the set API convention). */
484 static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
485 const listmap_entry_t *e1 = elt;
486 const listmap_entry_t *e2 = key;
489 return e1->id != e2->id;
490 } /* listmap_cmp_ptr */
493 * Initializes a listmap.
495 * @param map the listmap
497 static void listmap_init(listmap_t *map) {
498 map->map = new_set(listmap_cmp_ptr, 16);
503 * Terminates a listmap.
505 * @param map the listmap
507 static void listmap_term(listmap_t *map) {
512 * Return the associated listmap entry for a given id.
514 * @param map the listmap
515 * @param id the id to search for
517 * @return the associated listmap entry for the given id
/* Find-or-insert: set_insert() creates the entry on a miss; a freshly
 * created entry (list == NULL) is additionally linked into map->values. */
519 static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
520 listmap_entry_t key, *entry;
525 entry = set_insert(map->map, &key, sizeof(key), HASH_PTR(id));
527 if (entry->list == NULL) {
528 /* a new entry, put into the list */
529 entry->next = map->values;
536 * Calculate the hash value for an opcode map entry.
538 * @param entry an opcode map entry
540 * @return a hash value for the given opcode map entry
/* Mixes mode pointer, opcode, proj number, entity pointer and arity so that
 * keys differing in any field hash differently with high probability. */
542 static unsigned opcode_hash(const opcode_key_t *entry) {
543 return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent) + entry->arity;
547 * Compare two entries in the opcode map.
/* set-compatible comparator: 0 iff all key fields are equal. */
549 static int cmp_opcode(const void *elt, const void *key, size_t size) {
550 const opcode_key_t *o1 = elt;
551 const opcode_key_t *o2 = key;
554 return o1->code != o2->code || o1->mode != o2->mode ||
555 o1->arity != o2->arity ||
556 o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent;
560 * Compare two Def-Use edges for input position.
562 static int cmp_def_use_edge(const void *a, const void *b) {
563 const ir_def_use_edge *ea = a;
564 const ir_def_use_edge *eb = b;
566 /* no overrun, because range is [-1, MAXINT] */
567 return ea->pos - eb->pos;
568 } /* cmp_def_use_edge */
571 * We need the Def-Use edges sorted.
/* Sorts a node's out (Def-Use) edges by input position; edges start at
 * out[1] (out[0] presumably holds the edge count — TODO confirm against
 * the irouts representation). Also caches the largest user input number. */
573 static void sort_irn_outs(node_t *node) {
574 ir_node *irn = node->node;
575 int n_outs = get_irn_n_outs(irn);
578 qsort(&irn->out[1], n_outs, sizeof(irn->out[0]), cmp_def_use_edge);
/* after sorting, the last edge carries the maximum input position */
580 node->max_user_input = irn->out[n_outs].pos;
581 } /* sort_irn_outs */
584 * Return the type of a node.
586 * @param irn an IR-node
588 * @return the associated type of this node
590 static inline lattice_elem_t get_node_type(const ir_node *irn) {
591 return get_irn_node(irn)->type;
592 } /* get_node_type */
595 * Return the tarval of a node.
597 * @param irn an IR-node
599 * @return the associated type of this node
/* Returns the node's tarval if its lattice element is one, else bottom. */
601 static inline tarval *get_node_tarval(const ir_node *irn) {
602 lattice_elem_t type = get_node_type(irn);
604 if (is_tarval(type.tv))
606 return tarval_bottom;
607 } /* get_node_tarval */
610 * Add a partition to the worklist.
/* Pushes X on the front of env->worklist; X must not already be queued. */
612 static inline void add_to_worklist(partition_t *X, environment_t *env) {
613 assert(X->on_worklist == 0);
614 DB((dbg, LEVEL_2, "Adding part%d to worklist\n", X->nr));
615 X->wl_next = env->worklist;
618 } /* add_to_worklist */
621 * Create a new empty partition.
623 * @param env the environment
625 * @return a newly allocated partition
/* Obstack-allocates a partition with empty lists, cleared flags and a fresh
 * debug number; also links it into env->dbg_list (debug builds). */
627 static inline partition_t *new_partition(environment_t *env) {
628 partition_t *part = obstack_alloc(&env->obst, sizeof(*part));
630 INIT_LIST_HEAD(&part->Leader);
631 INIT_LIST_HEAD(&part->Follower);
632 INIT_LIST_HEAD(&part->cprop);
633 part->wl_next = NULL;
634 part->touched_next = NULL;
635 part->cprop_next = NULL;
636 part->split_next = NULL;
637 part->touched = NULL;
640 part->max_user_inputs = 0;
641 part->on_worklist = 0;
642 part->on_touched = 0;
644 part->type_is_T_or_C = 0;
646 part->dbg_next = env->dbg_list;
647 env->dbg_list = part;
648 part->nr = part_nr++;
652 } /* new_partition */
655 * Get the first node from a partition.
/* First entry of the Leader list; partition must be non-empty. */
657 static inline node_t *get_first_node(const partition_t *X) {
658 return list_entry(X->Leader.next, node_t, node_list);
659 } /* get_first_node */
662 * Return the type of a partition (assuming partition is non-empty and
663 * all elements have the same type).
665 * @param X a partition
667 * @return the type of the first element of the partition
669 static inline lattice_elem_t get_partition_type(const partition_t *X) {
670 const node_t *first = get_first_node(X);
672 } /* get_partition_type */
675 * Creates a partition node for the given IR-node and place it
676 * into the given partition.
678 * @param irn an IR-node
679 * @param part a partition to place the node in
680 * @param env the environment
682 * @return the created node
/* Allocates the node_t, initializes its type to Top, attaches it to the
 * ir_node via the link field and appends it to part's Leader list. */
684 static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env) {
685 /* create a partition node and place it in the partition */
686 node_t *node = obstack_alloc(&env->obst, sizeof(*node));
688 INIT_LIST_HEAD(&node->node_list);
689 INIT_LIST_HEAD(&node->cprop_list);
693 node->race_next = NULL;
694 node->type.tv = tarval_top;
695 node->max_user_input = 0;
697 node->n_followers = 0;
698 node->on_touched = 0;
701 node->is_follower = 0;
702 node->by_all_const = 0;
705 set_irn_node(irn, node);
707 list_add_tail(&node->node_list, &part->Leader);
711 } /* create_partition_node */
714 * Pre-Walker, init all Block-Phi lists.
716 static void init_block_phis(ir_node *irn, void *env) {
720 set_Block_phis(irn, NULL);
722 } /* init_block_phis */
725 * Post-Walker, initialize all Nodes' type to U or top and place
726 * all nodes into the TOP partition.
/* Also links Phis to their Block's Phi list and records a Block's Cond
 * (needed for the special Cond re-evaluation described in the file header). */
728 static void create_initial_partitions(ir_node *irn, void *ctx) {
729 environment_t *env = ctx;
730 partition_t *part = env->initial;
733 node = create_partition_node(irn, part, env);
735 if (node->max_user_input > part->max_user_inputs)
736 part->max_user_inputs = node->max_user_input;
739 add_Block_phi(get_nodes_block(irn), irn);
740 } else if (is_Cond(irn)) {
741 node_t *block = get_irn_node(get_nodes_block(irn));
743 /* link every block with its Cond node if any */
746 } /* create_initial_partitions */
749 * Add a node to the entry.partition.touched set and
750 * node->partition to the touched set if not already there.
753 * @param env the environment
/* Both node and partition are guarded by on_touched flags, so repeated
 * calls are cheap no-ops. */
755 static inline void add_to_touched(node_t *y, environment_t *env) {
756 if (y->on_touched == 0) {
757 partition_t *part = y->part;
759 y->next = part->touched;
764 if (part->on_touched == 0) {
765 part->touched_next = env->touched;
767 part->on_touched = 1;
770 check_list(part->touched, part);
772 } /* add_to_touched */
775 * Place a node on the cprop list.
778 * @param env the environment
/* Queues y for constant-propagation re-evaluation. Recursively also queues:
 * Proj users of mode_T nodes (mode_T itself always yields bottom), all Phis
 * of a Block, and a Block's Cond — see the file-header discussion of how
 * Top-initialized Conds must be forced onto the cprop list. */
780 static void add_to_cprop(node_t *y, environment_t *env) {
781 /* Add y to y.partition.cprop. */
782 if (y->on_cprop == 0) {
783 partition_t *Y = y->part;
785 list_add_tail(&y->cprop_list, &Y->cprop);
788 DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));
790 /* place its partition on the cprop list */
791 if (Y->on_cprop == 0) {
792 Y->cprop_next = env->cprop;
797 if (get_irn_mode(y->node) == mode_T) {
798 /* mode_T nodes always produce tarval_bottom, so we must explicitly
799 add it's Proj's to get constant evaluation to work */
802 for (i = get_irn_n_outs(y->node) - 1; i >= 0; --i) {
803 node_t *proj = get_irn_node(get_irn_out(y->node, i));
805 add_to_cprop(proj, env);
807 } else if (is_Block(y->node)) {
808 /* Due to the way we handle Phi's, we must place all Phis of a block on the list
809 * if someone placed the block. The Block is only placed if the reachability
810 * changes, and this must be re-evaluated in compute_Phi(). */
812 for (phi = get_Block_phis(y->node); phi != NULL; phi = get_Phi_next(phi)) {
813 node_t *p = get_irn_node(phi);
814 add_to_cprop(p, env);
816 /* same for Conds: they must be re-evaluated due to the way we handle Top */
818 add_to_cprop(y->cond, env);
823 * Update the worklist: If Z is on worklist then add Z' to worklist.
824 * Else add the smaller of Z and Z' to worklist.
826 * @param Z the Z partition
827 * @param Z_prime the Z' partition, a previous part of Z
828 * @param env the environment
/* Hopcroft's "process the smaller half" rule, keeping the overall
 * algorithm in the expected complexity bound. */
830 static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env) {
831 if (Z->on_worklist || Z_prime->n_leader < Z->n_leader) {
832 add_to_worklist(Z_prime, env);
834 add_to_worklist(Z, env);
836 } /* update_worklist */
839 * Make all inputs to x no longer be F.def_use edges.
/* For each predecessor of x: locate the follower edge pointing at x inside
 * the leading out[1..n_followers] segment, remove it there, and re-insert
 * it position-sorted into the leader segment that follows. */
843 static void move_edges_to_leader(node_t *x) {
844 ir_node *irn = x->node;
847 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
848 node_t *pred = get_irn_node(get_irn_n(irn, i));
853 n = get_irn_n_outs(p);
854 for (j = 1; j <= pred->n_followers; ++j) {
855 if (p->out[j].pos == i && p->out[j].use == irn) {
856 /* found a follower edge to x, move it to the Leader */
857 ir_def_use_edge edge = p->out[j];
859 /* remove this edge from the Follower set */
860 p->out[j] = p->out[pred->n_followers];
863 /* sort it into the leader set */
864 for (k = pred->n_followers + 2; k <= n; ++k) {
865 if (p->out[k].pos >= edge.pos)
867 p->out[k - 1] = p->out[k];
869 /* place the new edge here */
870 p->out[k - 1] = edge;
872 /* edge found and moved */
877 } /* move_edges_to_leader */
880 * Split a partition that has NO followers by a local list.
882 * @param Z partition to split
883 * @param g a (non-empty) node list
884 * @param env the environment
886 * @return a new partition containing the nodes of g
/* Fast path used by split(): since there are no followers, no race is
 * needed — simply detach the g nodes from Z and move them into Z'. */
888 static partition_t *split_no_followers(partition_t *Z, node_t *g, environment_t *env) {
889 partition_t *Z_prime;
894 dump_partition("Splitting ", Z);
895 dump_list("by list ", g);
899 /* Remove g from Z. */
900 for (node = g; node != NULL; node = node->next) {
901 assert(node->part == Z);
902 list_del(&node->node_list);
/* g must be a proper subset: some leaders stay in Z */
905 assert(n < Z->n_leader);
908 /* Move g to a new partition, Z'. */
909 Z_prime = new_partition(env);
911 for (node = g; node != NULL; node = node->next) {
912 list_add_tail(&node->node_list, &Z_prime->Leader);
913 node->part = Z_prime;
914 if (node->max_user_input > max_input)
915 max_input = node->max_user_input;
917 Z_prime->max_user_inputs = max_input;
918 Z_prime->n_leader = n;
921 check_partition(Z_prime);
923 /* for now, copy the type info tag, it will be adjusted in split_by(). */
924 Z_prime->type_is_T_or_C = Z->type_is_T_or_C;
926 update_worklist(Z, Z_prime, env);
928 dump_partition("Now ", Z);
929 dump_partition("Created new ", Z_prime);
931 } /* split_no_followers */
934 * Make the Follower -> Leader transition for a node.
/* Moves n's def-use edges into the leader segment of its predecessors and
 * relocates n from the Follower list to the Leader list of its partition. */
938 static void follower_to_leader(node_t *n) {
939 assert(n->is_follower == 1);
941 DB((dbg, LEVEL_2, "%+F make the follower -> leader transition\n", n->node));
943 move_edges_to_leader(n);
944 list_del(&n->node_list);
945 list_add_tail(&n->node_list, &n->part->Leader);
947 } /* follower_to_leader */
950 * The environment for one race step.
/* One side of the two-sided race run by split(): three disjoint node lists
 * (initial -> unwalked -> walked) plus the side's flag bit. */
952 typedef struct step_env {
953 node_t *initial; /**< The initial node list. */
954 node_t *unwalked; /**< The unwalked node list. */
955 node_t *walked; /**< The walked node list. */
956 int index; /**< Next index of Follower use_def edge. */
957 unsigned side; /**< side number. */
961 * Return non-zero, if a input is a real follower
963 * @param irn the node to check
964 * @param input number of the input
/* Filters out edges that merely look like follower edges: Confirm bound,
 * Mux selector, dead Phi inputs, and the non-identity operand of
 * Sub/Shift x,0, Mul x,1, And x,~0 style identities. */
966 static int is_real_follower(const ir_node *irn, int input) {
969 switch (get_irn_opcode(irn)) {
972 /* ignore the Confirm bound input */
978 /* ignore the Mux sel input */
983 /* dead inputs are not follower edges */
984 ir_node *block = get_nodes_block(irn);
985 node_t *pred = get_irn_node(get_Block_cfgpred(block, input));
987 if (pred->type.tv == tarval_unreachable)
997 /* only a Sub x,0 / Shift x,0 might be a follower */
1004 pred = get_irn_node(get_irn_n(irn, input));
1005 if (is_tarval(pred->type.tv) && tarval_is_null(pred->type.tv))
1009 pred = get_irn_node(get_irn_n(irn, input));
1010 if (is_tarval(pred->type.tv) && tarval_is_one(pred->type.tv))
1014 pred = get_irn_node(get_irn_n(irn, input));
1015 if (is_tarval(pred->type.tv) && tarval_is_all_one(pred->type.tv))
1020 /* all inputs are followers */
1023 assert(!"opcode not implemented yet");
1027 } /* is_real_follower */
1030 * Do one step in the race.
/* Advances one side of the race: moves a node from initial to unwalked,
 * then explores follower def-use edges, claiming unflagged followers of the
 * same partition for this side; returns when the side ran out of work. */
1032 static int step(step_env *env) {
1035 if (env->initial != NULL) {
1036 /* Move node from initial to unwalked */
1038 env->initial = n->race_next;
1040 n->race_next = env->unwalked;
1046 while (env->unwalked != NULL) {
1047 /* let n be the first node in unwalked */
1049 while (env->index < n->n_followers) {
1050 const ir_def_use_edge *edge = &n->node->out[1 + env->index];
1052 /* let m be n.F.def_use[index] */
1053 node_t *m = get_irn_node(edge->use);
1055 assert(m->is_follower);
1057 * Some inputs, like the get_Confirm_bound are NOT
1058 * real followers, sort them out.
1060 if (! is_real_follower(m->node, edge->pos)) {
1066 /* only followers from our partition */
1067 if (m->part != n->part)
1070 if ((m->flagged & env->side) == 0) {
1071 m->flagged |= env->side;
1073 if (m->flagged != 3) {
1074 /* visited the first time */
1075 /* add m to unwalked not as first node (we might still need to
1076 check for more follower node */
1077 m->race_next = n->race_next;
1081 /* else already visited by the other side and on the other list */
1084 /* move n to walked */
1085 env->unwalked = n->race_next;
1086 n->race_next = env->walked;
1094 * Clear the flags from a list and check for
1095 * nodes that where touched from both sides.
1097 * @param list the list
/* A follower reached from both sides (flagged == 3) loses its congruence
 * and is promoted to leader; returns whether any promotion happened. */
1099 static int clear_flags(node_t *list) {
1103 for (n = list; n != NULL; n = n->race_next) {
1104 if (n->flagged == 3) {
1105 /* we reach a follower from both sides, this will split congruent
1106 * inputs and make it a leader. */
1107 follower_to_leader(n);
1116 * Split a partition by a local list using the race.
1118 * @param pX pointer to the partition to split, might be changed!
1119 * @param gg a (non-empty) node list
1120 * @param env the environment
1122 * @return a new partition containing the nodes of gg
/* Full splitting algorithm: races side g (the gg list) against side h (the
 * remaining leaders) through the follower edges; the side finishing first
 * (the smaller reachable set) is moved into a new partition X'. */
1124 static partition_t *split(partition_t **pX, node_t *gg, environment_t *env) {
1125 partition_t *X = *pX;
1126 partition_t *X_prime;
1129 node_t *g, *h, *node, *t;
1130 int max_input, transitions, winner, shf;
1132 DEBUG_ONLY(static int run = 0;)
1134 DB((dbg, LEVEL_2, "Run %d ", run++));
1135 if (list_empty(&X->Follower)) {
1136 /* if the partition has NO follower, we can use the fast
1137 splitting algorithm. */
1138 return split_no_followers(X, gg, env);
1140 /* else do the race */
1142 dump_partition("Splitting ", X);
1143 dump_list("by list ", gg);
1145 INIT_LIST_HEAD(&tmp);
1147 /* Remove gg from X.Leader and put into g */
1149 for (node = gg; node != NULL; node = node->next) {
1150 assert(node->part == X);
1151 assert(node->is_follower == 0);
1153 list_del(&node->node_list);
1154 list_add_tail(&node->node_list, &tmp);
1155 node->race_next = g;
/* the remaining leaders form the h side */
1160 list_for_each_entry(node_t, node, &X->Leader, node_list) {
1161 node->race_next = h;
1164 /* restore X.Leader */
1165 list_splice(&tmp, &X->Leader);
1167 senv[0].initial = g;
1168 senv[0].unwalked = NULL;
1169 senv[0].walked = NULL;
1173 senv[1].initial = h;
1174 senv[1].unwalked = NULL;
1175 senv[1].walked = NULL;
1180 * Some informations on the race that are not stated clearly in Click's
1182 * 1) A follower stays on the side that reach him first.
1183 * 2) If the other side reches a follower, if will be converted to
1184 * a leader. /This must be done after the race is over, else the
1185 * edges we are iterating on are renumbered./
1186 * 3) /New leader might end up on both sides./
1187 * 4) /If one side ends up with new Leaders, we must ensure that
1188 * they can split out by opcode, hence we have to put _every_
1189 * partition with new Leader nodes on the cprop list, as
1190 * opcode splitting is done by split_by() at the end of
1191 * constant propagation./
/* alternate single steps until one side finishes; that side wins */
1194 if (step(&senv[0])) {
1198 if (step(&senv[1])) {
1203 assert(senv[winner].initial == NULL);
1204 assert(senv[winner].unwalked == NULL);
1206 /* clear flags from walked/unwalked */
1208 transitions = clear_flags(senv[0].unwalked) << shf;
1209 transitions |= clear_flags(senv[0].walked) << shf;
1211 transitions |= clear_flags(senv[1].unwalked) << shf;
1212 transitions |= clear_flags(senv[1].walked) << shf;
1214 dump_race_list("winner ", senv[winner].walked);
1216 /* Move walked_{winner} to a new partition, X'. */
1217 X_prime = new_partition(env);
1220 for (node = senv[winner].walked; node != NULL; node = node->race_next) {
1221 list_del(&node->node_list);
1222 node->part = X_prime;
1223 if (node->is_follower) {
1224 list_add_tail(&node->node_list, &X_prime->Follower);
1226 list_add_tail(&node->node_list, &X_prime->Leader);
1229 if (node->max_user_input > max_input)
1230 max_input = node->max_user_input;
1232 X_prime->n_leader = n;
1233 X_prime->max_user_inputs = max_input;
1234 X->n_leader -= X_prime->n_leader;
1236 /* for now, copy the type info tag, it will be adjusted in split_by(). */
1237 X_prime->type_is_T_or_C = X->type_is_T_or_C;
1240 * Even if a follower was not checked by both sides, it might have
1241 * loose its congruence, so we need to check this case for all follower.
1243 list_for_each_entry_safe(node_t, node, t, &X_prime->Follower, node_list) {
1244 if (identity(node) == node) {
1245 follower_to_leader(node);
1251 check_partition(X_prime);
1253 /* X' is the smaller part */
1254 add_to_worklist(X_prime, env);
1257 * If there where follower to leader transitions, ensure that the nodes
1258 * can be split out if necessary.
1260 if (transitions & 1) {
1261 /* place winner partition on the cprop list */
1262 if (X_prime->on_cprop == 0) {
1263 X_prime->cprop_next = env->cprop;
1264 env->cprop = X_prime;
1265 X_prime->on_cprop = 1;
1268 if (transitions & 2) {
1269 /* place other partition on the cprop list */
1270 if (X->on_cprop == 0) {
1271 X->cprop_next = env->cprop;
1277 dump_partition("Now ", X);
1278 dump_partition("Created new ", X_prime);
1280 /* we have to ensure that the partition containing g is returned */
1290 * Returns non-zero if the i'th input of a Phi node is live.
1292 * @param phi a Phi-node
1293 * @param i an input number
1295 * @return non-zero if the i'th input of the given Phi node is live
/* A Phi input is live iff the matching control-flow predecessor of its
 * block is reachable; i == -1 (the control input) is always live. */
1297 static int is_live_input(ir_node *phi, int i) {
1299 ir_node *block = get_nodes_block(phi);
1300 ir_node *pred = get_Block_cfgpred(block, i);
1301 lattice_elem_t type = get_node_type(pred);
1303 return type.tv != tarval_unreachable;
1305 /* else it's the control input, always live */
1307 } /* is_live_input */
1310 * Return non-zero if a type is a constant.
/* Constant means: strictly between Top and Bottom in the lattice. */
1312 static int is_constant_type(lattice_elem_t type) {
1313 if (type.tv != tarval_bottom && type.tv != tarval_top)
1316 } /* is_constant_type */
1319 * Check whether a type is neither Top or a constant.
1320 * Note: U is handled like Top here, R is a constant.
1322 * @param type the type to check
1324 static int type_is_neither_top_nor_const(const lattice_elem_t type) {
1325 if (is_tarval(type.tv)) {
1326 if (type.tv == tarval_top)
1328 if (tarval_is_constant(type.tv))
/* everything else (Bottom, symbolic entities, ...) qualifies */
1335 } /* type_is_neither_top_nor_const */
1338 * Collect nodes to the touched list.
1340 * @param list the list which contains the nodes that must be evaluated
1341 * @param idx the index of the def_use edge to evaluate
1342 * @param env the environment
/* For every node x in the list, walks the leader segment of x's sorted
 * def-use edges looking for users that consume x at input position idx,
 * and marks those users as touched (or queues Sub/Cmp users with constant
 * type on the cprop list for re-evaluation). */
1344 static void collect_touched(list_head *list, int idx, environment_t *env) {
1346 int end_idx = env->end_idx;
1348 list_for_each_entry(node_t, x, list, node_list) {
1352 /* leader edges start AFTER follower edges */
1353 x->next_edge = x->n_followers + 1;
1355 num_edges = get_irn_n_outs(x->node);
1357 /* for all edges in x.L.def_use_{idx} */
1358 while (x->next_edge <= num_edges) {
1359 const ir_def_use_edge *edge = &x->node->out[x->next_edge];
1362 /* check if we have necessary edges */
/* edges are sorted by pos, so anything beyond idx can be skipped */
1363 if (edge->pos > idx)
1370 /* only non-commutative nodes */
1371 if (env->commutative &&
1372 (idx == 0 || idx == 1) && is_op_commutative(get_irn_op(succ)))
1375 /* ignore the "control input" for non-pinned nodes
1376 if we are running in GCSE mode */
1377 if (idx < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
1380 y = get_irn_node(succ);
1381 assert(get_irn_n(succ, idx) == x->node);
1383 /* ignore block edges touching followers */
1384 if (idx == -1 && y->is_follower)
1387 if (is_constant_type(y->type)) {
1388 ir_opcode code = get_irn_opcode(succ);
1389 if (code == iro_Sub || code == iro_Cmp)
1390 add_to_cprop(y, env);
1393 /* Partitions of constants should not be split simply because their Nodes have unequal
1394 functions or incongruent inputs. */
1395 if (type_is_neither_top_nor_const(y->type) &&
1396 (! is_Phi(y->node) || is_live_input(y->node, idx))) {
1397 add_to_touched(y, env);
1401 } /* collect_touched */
1404 * Collect commutative nodes to the touched list.
1406 * @param list the list which contains the nodes that must be evaluated
1407 * @param env the environment
1409 static void collect_commutative_touched(list_head *list, environment_t *env) {
1412 list_for_each_entry(node_t, x, list, node_list) {
1415 num_edges = get_irn_n_outs(x->node);
1417 x->next_edge = x->n_followers + 1;
1419 /* for all edges in x.L.def_use_{idx} */
1420 while (x->next_edge <= num_edges) {
1421 const ir_def_use_edge *edge = &x->node->out[x->next_edge];
1424 /* check if we have necessary edges */
1434 /* only commutative nodes */
1435 if (!is_op_commutative(get_irn_op(succ)))
1438 y = get_irn_node(succ);
1439 if (is_constant_type(y->type)) {
1440 ir_opcode code = get_irn_opcode(succ);
1441 if (code == iro_Eor)
1442 add_to_cprop(y, env);
1445 /* Partitions of constants should not be split simply because their Nodes have unequal
1446 functions or incongruent inputs. */
1447 if (type_is_neither_top_nor_const(y->type)) {
1448 add_to_touched(y, env);
1452 } /* collect_commutative_touched */
/* NOTE(review): the two inner for-loops below are structurally identical —
 * the first handles commutative edges, the second iterates every input index
 * idx from -1 (block edge) to max_user_inputs. Several brace/declaration
 * lines are missing from this extract. */
1455 * Split the partitions if caused by the first entry on the worklist.
1457 * @param env the environment
1459 static void cause_splits(environment_t *env) {
1460 partition_t *X, *Z, *N;
1463 /* remove the first partition from the worklist */
1465 env->worklist = X->wl_next;
1468 dump_partition("Cause_split: ", X);
1470 if (env->commutative) {
1471 /* handle commutative nodes first */
1473 /* empty the touched set: already done, just clear the list */
1474 env->touched = NULL;
1476 collect_commutative_touched(&X->Leader, env);
1477 collect_commutative_touched(&X->Follower, env);
1479 for (Z = env->touched; Z != NULL; Z = N) {
1481 node_t *touched = Z->touched;
1482 unsigned n_touched = Z->n_touched;
1484 assert(Z->touched != NULL);
1486 /* beware, split might change Z */
1487 N = Z->touched_next;
1489 /* remove it from the touched set */
1492 /* Empty local Z.touched. */
1493 for (e = touched; e != NULL; e = e->next) {
1494 assert(e->is_follower == 0);
/* split Z only when the touched subset is a proper, non-empty subset of its leaders */
1500 if (0 < n_touched && n_touched < Z->n_leader) {
1501 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
1502 split(&Z, touched, env);
1504 assert(n_touched <= Z->n_leader);
1508 /* combine temporary leader and follower list */
1509 for (idx = -1; idx <= X->max_user_inputs; ++idx) {
1510 /* empty the touched set: already done, just clear the list */
1511 env->touched = NULL;
1513 collect_touched(&X->Leader, idx, env);
1514 collect_touched(&X->Follower, idx, env);
1516 for (Z = env->touched; Z != NULL; Z = N) {
1518 node_t *touched = Z->touched;
1519 unsigned n_touched = Z->n_touched;
1521 assert(Z->touched != NULL);
1523 /* beware, split might change Z */
1524 N = Z->touched_next;
1526 /* remove it from the touched set */
1529 /* Empty local Z.touched. */
1530 for (e = touched; e != NULL; e = e->next) {
1531 assert(e->is_follower == 0);
1537 if (0 < n_touched && n_touched < Z->n_leader) {
1538 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
1539 split(&Z, touched, env);
1541 assert(n_touched <= Z->n_leader);
1544 } /* cause_splits */
/* NOTE(review): numbered listing with gaps — the listmap initialization,
 * the split-result collection into *P and the return statement are missing. */
1547 * Implements split_by_what(): Split a partition by characteristics given
1548 * by the what function.
1550 * @param X the partition to split
1551 * @param What a function returning an Id for every node of the partition X
1552 * @param P a list to store the result partitions
1553 * @param env the environment
1557 static partition_t *split_by_what(partition_t *X, what_func What,
1558 partition_t **P, environment_t *env) {
1561 listmap_entry_t *iter;
1564 /* Let map be an empty mapping from the range of What to (local) list of Nodes. */
1566 list_for_each_entry(node_t, x, &X->Leader, node_list) {
1567 void *id = What(x, env);
1568 listmap_entry_t *entry;
1571 /* input not allowed, ignore */
1574 /* Add x to map[What(x)]. */
1575 entry = listmap_find(&map, id);
/* prepend x onto the bucket list for this id */
1576 x->next = entry->list;
1579 /* Let P be a set of Partitions. */
1581 /* for all sets S except one in the range of map do */
1582 for (iter = map.values; iter != NULL; iter = iter->next) {
1583 if (iter->next == NULL) {
1584 /* this is the last entry, ignore */
1589 /* Add SPLIT( X, S ) to P. */
1590 DB((dbg, LEVEL_2, "Split part%d by WHAT = %s\n", X->nr, what_reason));
1591 R = split(&X, S, env);
1601 } /* split_by_what */
1603 /** lambda n.(n.type) */
1604 static void *lambda_type(const node_t *node, environment_t *env) {
/* the node's lattice value itself serves as the split id */
1606 return node->type.tv;
1609 /** lambda n.(n.opcode) */
1610 static void *lambda_opcode(const node_t *node, environment_t *env) {
1611 opcode_key_t key, *entry;
1612 ir_node *irn = node->node;
1614 key.code = get_irn_opcode(irn);
1615 key.mode = get_irn_mode(irn);
1616 key.arity = get_irn_arity(irn);
/* opcode-specific payload: Proj number / Sel entity distinguish otherwise-equal keys */
1620 switch (get_irn_opcode(irn)) {
1622 key.u.proj = get_Proj_proj(irn);
1625 key.u.ent = get_Sel_entity(irn);
1631 entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
1633 } /* lambda_opcode */
/* NOTE(review): numbered listing with gaps — early-return lines and the
 * final "return p->part;"-style returns are missing from this extract. */
1635 /** lambda n.(n[i].partition) */
1636 static void *lambda_partition(const node_t *node, environment_t *env) {
1637 ir_node *skipped = skip_Proj(node->node);
1640 int i = env->lambda_input;
1642 if (i >= get_irn_arity(node->node)) {
1644 * We are outside the allowed range: This can happen even
1645 * if we have split by opcode first: doing so might move Followers
1646 * to Leaders and those will have a different opcode!
1647 * Note that in this case the partition is on the cprop list and will be
1653 /* ignore the "control input" for non-pinned nodes
1654 if we are running in GCSE mode */
1655 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
/* i == -1 selects the block edge, taken from the Proj-skipped node */
1658 pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
1659 p = get_irn_node(pred);
1662 } /* lambda_partition */
1664 /** lambda n.(n[i].partition) for commutative nodes */
1665 static void *lambda_commutative_partition(const node_t *node, environment_t *env) {
1666 ir_node *irn = node->node;
1667 ir_node *skipped = skip_Proj(irn);
1668 ir_node *pred, *left, *right;
1670 partition_t *pl, *pr;
1671 int i = env->lambda_input;
1673 if (i >= get_irn_arity(node->node)) {
1675 * We are outside the allowed range: This can happen even
1676 * if we have split by opcode first: doing so might move Followers
1677 * to Leaders and those will have a different opcode!
1678 * Note that in this case the partition is on the cprop list and will be
1684 /* ignore the "control input" for non-pinned nodes
1685 if we are running in GCSE mode */
1686 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
1690 pred = get_irn_n(skipped, i);
1691 p = get_irn_node(pred);
1695 if (is_op_commutative(get_irn_op(irn))) {
1696 /* normalize partition order by returning the "smaller" on input 0,
1697 the "bigger" on input 1. */
1698 left = get_binop_left(irn);
1699 pl = get_irn_node(left)->part;
1700 right = get_binop_right(irn);
1701 pr = get_irn_node(right)->part;
/* pointer comparison is used only as an arbitrary but stable total order */
1704 return pl < pr ? pl : pr;
1706 return pl > pr ? pl : pr;
1708 /* a not split out Follower */
1709 pred = get_irn_n(irn, i);
1710 p = get_irn_node(pred);
1714 } /* lambda_commutative_partition */
1717 * Returns true if a type is a constant (and NOT Top
1720 static int is_con(const lattice_elem_t type) {
1721 /* be conservative */
1722 if (is_tarval(type.tv))
1723 return tarval_is_constant(type.tv);
/* non-tarval lattice elements are symbolic: constant iff they carry an entity */
1724 return is_entity(type.sym.entity_p);
/* NOTE(review): implements the three-stage split of Click's algorithm:
 * first by lattice type, then by opcode, then by input partitions.
 * Numbered listing with gaps — the do/while loop heads that pop P/Q/R
 * and several closing braces are missing from this extract. */
1728 * Implements split_by().
1730 * @param X the partition to split
1731 * @param env the environment
1733 static void split_by(partition_t *X, environment_t *env) {
1734 partition_t *I, *P = NULL;
1737 dump_partition("split_by", X);
1739 if (X->n_leader == 1) {
1740 /* we have only one leader, no need to split, just check it's type */
1741 node_t *x = get_first_node(X);
1742 X->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1746 DEBUG_ONLY(what_reason = "lambda n.(n.type)";)
1747 P = split_by_what(X, lambda_type, &P, env);
1750 /* adjust the type tags, we have split partitions by type */
1751 for (I = P; I != NULL; I = I->split_next) {
1752 node_t *x = get_first_node(I);
1753 I->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1760 if (Y->n_leader > 1) {
1761 /* we do not want split the TOP or constant partitions */
1762 if (! Y->type_is_T_or_C) {
1763 partition_t *Q = NULL;
1765 DEBUG_ONLY(what_reason = "lambda n.(n.opcode)";)
1766 Q = split_by_what(Y, lambda_opcode, &Q, env);
1773 if (Z->n_leader > 1) {
1774 const node_t *first = get_first_node(Z);
1775 int arity = get_irn_arity(first->node);
1777 what_func what = lambda_partition;
1778 DEBUG_ONLY(char buf[64];)
1780 if (env->commutative && is_op_commutative(get_irn_op(first->node)))
1781 what = lambda_commutative_partition;
1784 * BEWARE: during splitting by input 2 for instance we might
1785 * create new partitions which are different by input 1, so collect
1786 * them and split further.
1788 Z->split_next = NULL;
/* iterate inputs from last data input down to -1 (the block edge) */
1791 for (input = arity - 1; input >= -1; --input) {
1793 partition_t *Z_prime = R;
1796 if (Z_prime->n_leader > 1) {
1797 env->lambda_input = input;
1798 DEBUG_ONLY(snprintf(buf, sizeof(buf), "lambda n.(n[%d].partition)", input);)
1799 DEBUG_ONLY(what_reason = buf;)
1800 S = split_by_what(Z_prime, what, &S, env);
1803 Z_prime->split_next = S;
1806 } while (R != NULL);
1811 } while (Q != NULL);
1814 } while (P != NULL);
/* NOTE(review): numbered listing with gaps — the early "return"s after the
 * Top assignments and the else-branch keyword lines are missing. */
1818 * (Re-)compute the type for a given node.
1820 * @param node the node
1822 static void default_compute(node_t *node) {
1824 ir_node *irn = node->node;
1825 node_t *block = get_irn_node(get_nodes_block(irn));
/* a node in an unreachable block stays Top */
1827 if (block->type.tv == tarval_unreachable) {
1828 node->type.tv = tarval_top;
1832 /* if any of the data inputs have type top, the result is type top */
1833 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
1834 ir_node *pred = get_irn_n(irn, i);
1835 node_t *p = get_irn_node(pred);
1837 if (p->type.tv == tarval_top) {
1838 node->type.tv = tarval_top;
/* mode_X (control flow) nodes evaluate to reachable; data nodes use
 * the generic constant folder computed_value() */
1843 if (get_irn_mode(node->node) == mode_X)
1844 node->type.tv = tarval_reachable;
1846 node->type.tv = computed_value(irn);
1847 } /* default_compute */
1850 * (Re-)compute the type for a Block node.
1852 * @param node the node
1854 static void compute_Block(node_t *node) {
1856 ir_node *block = node->node;
1858 if (block == get_irg_start_block(current_ir_graph)) {
1859 /* start block is always reachable */
1860 node->type.tv = tarval_reachable;
1864 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1865 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1867 if (pred->type.tv == tarval_reachable) {
1868 /* A block is reachable, if at least of predecessor is reachable. */
1869 node->type.tv = tarval_reachable;
/* no reachable predecessor found: the block is (still) Top */
1873 node->type.tv = tarval_top;
1874 } /* compute_Block */
1877 * (Re-)compute the type for a Bad node.
1879 * @param node the node
1881 static void compute_Bad(node_t *node) {
1882 /* Bad nodes ALWAYS compute Top */
1883 node->type.tv = tarval_top;
/* NOTE(review): closing brace of compute_Bad is missing from the extract */
1887 * (Re-)compute the type for an Unknown node.
1889 * @param node the node
1891 static void compute_Unknown(node_t *node) {
1892 /* While Unknown nodes should compute Top this is dangerous:
1893 * a Top input to a Cond would lead to BOTH control flows unreachable.
1894 * While this is correct in the given semantics, it would destroy the Firm
1897 * It would be safe to compute Top IF it can be assured, that only Cmp
1898 * nodes are inputs to Conds. We check that first.
1899 * This is the way Frontends typically build Firm, but some optimizations
1900 * (cond_eval for instance) might replace them by Phib's...
1902 node->type.tv = tarval_UNKNOWN;
1903 } /* compute_Unknown */
1906 * (Re-)compute the type for a Jmp node.
1908 * @param node the node
1910 static void compute_Jmp(node_t *node) {
/* a Jmp simply propagates the reachability of its block */
1911 node_t *block = get_irn_node(get_nodes_block(node->node));
1913 node->type = block->type;
1917 * (Re-)compute the type for the Return node.
1919 * @param node the node
1921 static void compute_Return(node_t *node) {
1922 /* The Return node is NOT dead if it is in a reachable block.
1923 * This is already checked in compute(). so we can return
1924 * Reachable here. */
1925 node->type.tv = tarval_reachable;
1926 } /* compute_Return */
1929 * (Re-)compute the type for the End node.
1931 * @param node the node
1933 static void compute_End(node_t *node) {
1934 /* the End node is NOT dead of course */
1935 node->type.tv = tarval_reachable;
/* NOTE(review): closing brace of compute_End is missing from the extract */
1939 * (Re-)compute the type for a SymConst node.
1941 * @param node the node
1943 static void compute_SymConst(node_t *node) {
1944 ir_node *irn = node->node;
1945 node_t *block = get_irn_node(get_nodes_block(irn));
1947 if (block->type.tv == tarval_unreachable) {
1948 node->type.tv = tarval_top;
/* address-of-entity SymConsts carry a symbolic (entity) lattice value;
 * everything else falls back to the generic constant folder */
1951 switch (get_SymConst_kind(irn)) {
1952 case symconst_addr_ent:
1953 /* case symconst_addr_name: cannot handle this yet */
1954 node->type.sym = get_SymConst_symbol(irn);
1957 node->type.tv = computed_value(irn);
1959 } /* compute_SymConst */
/* NOTE(review): Phi implements the lattice meet over its live inputs.
 * Numbered listing with gaps — the "continue" after the Top/unreachable
 * test, the "type = pred->type;" for the first constant and the final
 * "node->type = type;" store are missing from this extract. */
1962 * (Re-)compute the type for a Phi node.
1964 * @param node the node
1966 static void compute_Phi(node_t *node) {
1968 ir_node *phi = node->node;
1969 lattice_elem_t type;
1971 /* if a Phi is in a unreachable block, its type is TOP */
1972 node_t *block = get_irn_node(get_nodes_block(phi));
1974 if (block->type.tv == tarval_unreachable) {
1975 node->type.tv = tarval_top;
1979 /* Phi implements the Meet operation */
1980 type.tv = tarval_top;
1981 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
1982 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
1983 node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
1985 if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
1986 /* ignore TOP inputs: We must check here for unreachable blocks,
1987 because Firm constants live in the Start Block are NEVER Top.
1988 Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
1989 comes from a unreachable input. */
1992 if (pred->type.tv == tarval_bottom) {
1993 node->type.tv = tarval_bottom;
1995 } else if (type.tv == tarval_top) {
1996 /* first constant found */
1998 } else if (type.tv != pred->type.tv) {
1999 /* different constants or tarval_bottom */
2000 node->type.tv = tarval_bottom;
2003 /* else nothing, constants are the same */
/* NOTE(review): numbered listing with gaps — the "node->type = ..." stores
 * for the x+0 / 0+x identity branches and several else/brace lines are
 * missing from this extract. */
2009 * (Re-)compute the type for an Add. Special case: one nodes is a Zero Const.
2011 * @param node the node
2013 static void compute_Add(node_t *node) {
2014 ir_node *sub = node->node;
2015 node_t *l = get_irn_node(get_Add_left(sub));
2016 node_t *r = get_irn_node(get_Add_right(sub));
2017 lattice_elem_t a = l->type;
2018 lattice_elem_t b = r->type;
2021 if (a.tv == tarval_top || b.tv == tarval_top) {
2022 node->type.tv = tarval_top;
2023 } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
2024 node->type.tv = tarval_bottom;
2026 /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
2027 must call tarval_add() first to handle this case! */
2028 if (is_tarval(a.tv)) {
2029 if (is_tarval(b.tv)) {
2030 node->type.tv = tarval_add(a.tv, b.tv);
2033 mode = get_tarval_mode(a.tv);
2034 if (a.tv == get_mode_null(mode)) {
2038 } else if (is_tarval(b.tv)) {
2039 mode = get_tarval_mode(b.tv);
2040 if (b.tv == get_mode_null(mode)) {
2045 node->type.tv = tarval_bottom;
/* NOTE(review): numbered listing with gaps — the "return tv" stores of the
 * null-operand branches and the trailing closing braces are missing. */
2050 * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
2052 * @param node the node
2054 static void compute_Sub(node_t *node) {
2055 ir_node *sub = node->node;
2056 node_t *l = get_irn_node(get_Sub_left(sub));
2057 node_t *r = get_irn_node(get_Sub_right(sub));
2058 lattice_elem_t a = l->type;
2059 lattice_elem_t b = r->type;
2062 if (a.tv == tarval_top || b.tv == tarval_top) {
2063 node->type.tv = tarval_top;
2064 } else if (is_con(a) && is_con(b)) {
2065 if (is_tarval(a.tv) && is_tarval(b.tv)) {
2066 node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
2067 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
2069 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
2072 node->type.tv = tarval_bottom;
2074 node->by_all_const = 1;
2075 } else if (r->part == l->part &&
2076 (!mode_is_float(get_irn_mode(l->node)))) {
2078 * BEWARE: a - a is NOT always 0 for floating Point values, as
2079 * NaN op NaN = NaN, so we must check this here.
2081 ir_mode *mode = get_irn_mode(sub);
2082 tv = get_mode_null(mode);
2084 /* if the node was ONCE evaluated by all constants, but now
2085 this breaks AND we get from the argument partitions a different
2086 result, switch to bottom.
2087 This happens because initially all nodes are in the same partition ... */
2088 if (node->by_all_const && node->type.tv != tv)
2092 node->type.tv = tarval_bottom;
/* NOTE(review): compute_Eor mirrors compute_Sub (x ^ x == 0 when both
 * operands are congruent). Numbered listing with gaps — identity-branch
 * stores and closing braces are missing from this extract. */
2097 * (Re-)compute the type for an Eor. Special case: both nodes are congruent.
2099 * @param node the node
2101 static void compute_Eor(node_t *node) {
2102 ir_node *eor = node->node;
2103 node_t *l = get_irn_node(get_Eor_left(eor));
2104 node_t *r = get_irn_node(get_Eor_right(eor));
2105 lattice_elem_t a = l->type;
2106 lattice_elem_t b = r->type;
2109 if (a.tv == tarval_top || b.tv == tarval_top) {
2110 node->type.tv = tarval_top;
2111 } else if (is_con(a) && is_con(b)) {
2112 if (is_tarval(a.tv) && is_tarval(b.tv)) {
2113 node->type.tv = tarval_eor(a.tv, b.tv);
2114 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
2116 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
2119 node->type.tv = tarval_bottom;
2121 node->by_all_const = 1;
2122 } else if (r->part == l->part) {
2123 ir_mode *mode = get_irn_mode(eor);
2124 tv = get_mode_null(mode);
2126 /* if the node was ONCE evaluated by all constants, but now
2127 this breaks AND we get from the argument partitions a different
2128 result, switch to bottom.
2129 This happens because initially all nodes are in the same partition ... */
2130 if (node->by_all_const && node->type.tv != tv)
2134 node->type.tv = tarval_bottom;
/* NOTE(review): Cmp itself computes only a boolean "something can be said"
 * flag; the concrete relation is evaluated per Proj in compute_Proj_Cmp(). */
2139 * (Re-)compute the type for Cmp.
2141 * @param node the node
2143 static void compute_Cmp(node_t *node) {
2144 ir_node *cmp = node->node;
2145 node_t *l = get_irn_node(get_Cmp_left(cmp));
2146 node_t *r = get_irn_node(get_Cmp_right(cmp));
2147 lattice_elem_t a = l->type;
2148 lattice_elem_t b = r->type;
2150 if (a.tv == tarval_top || b.tv == tarval_top) {
2151 node->type.tv = tarval_top;
2152 } else if (r->part == l->part) {
2153 /* both nodes congruent, we can probably do something */
2154 node->type.tv = tarval_b_true;
2155 } else if (is_con(a) && is_con(b)) {
2156 /* both nodes are constants, we can probably do something */
2157 node->type.tv = tarval_b_true;
2159 node->type.tv = tarval_bottom;
/* NOTE(review): numbered listing with gaps — the "node->type.tv = tv;"
 * store after the by_all_const check and several brace lines are missing. */
2164 * (Re-)compute the type for a Proj(Cmp).
2166 * @param node the node
2167 * @param cond the predecessor Cmp node
2169 static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
2170 ir_node *proj = node->node;
2171 node_t *l = get_irn_node(get_Cmp_left(cmp));
2172 node_t *r = get_irn_node(get_Cmp_right(cmp));
2173 lattice_elem_t a = l->type;
2174 lattice_elem_t b = r->type;
2175 pn_Cmp pnc = get_Proj_proj(proj);
2178 if (a.tv == tarval_top || b.tv == tarval_top) {
2179 node->type.tv = tarval_undefined;
2180 } else if (is_con(a) && is_con(b)) {
2181 default_compute(node);
2182 node->by_all_const = 1;
2183 } else if (r->part == l->part &&
2184 (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
2186 * BEWARE: a == a is NOT always True for floating Point values, as
2187 * NaN != NaN is defined, so we must check this here.
/* congruent operands: the Proj is true exactly when the relation includes Eq */
2189 tv = pnc & pn_Cmp_Eq ? tarval_b_true: tarval_b_false;
2191 /* if the node was ONCE evaluated by all constants, but now
2192 this breaks AND we get from the argument partitions a different
2193 result, switch to bottom.
2194 This happens because initially all nodes are in the same partition ... */
2195 if (node->by_all_const && node->type.tv != tv)
2199 node->type.tv = tarval_bottom;
2201 } /* compute_Proj_Cmp */
2204 * (Re-)compute the type for a Proj(Cond).
2206 * @param node the node
2207 * @param cond the predecessor Cond node
2209 static void compute_Proj_Cond(node_t *node, ir_node *cond) {
2210 ir_node *proj = node->node;
2211 long pnc = get_Proj_proj(proj);
2212 ir_node *sel = get_Cond_selector(cond);
2213 node_t *selector = get_irn_node(sel);
/* boolean Cond: the true/false Projs mirror the selector's lattice value;
 * a Top selector makes the Proj unreachable/reachable per the policy noted
 * in the file header ("any condition based on Top is '!='") */
2215 if (get_irn_mode(sel) == mode_b) {
2217 if (pnc == pn_Cond_true) {
2218 if (selector->type.tv == tarval_b_false) {
2219 node->type.tv = tarval_unreachable;
2220 } else if (selector->type.tv == tarval_b_true) {
2221 node->type.tv = tarval_reachable;
2222 } else if (selector->type.tv == tarval_bottom) {
2223 node->type.tv = tarval_reachable;
2225 assert(selector->type.tv == tarval_top);
2226 /* any condition based on Top is "!=" */
2227 node->type.tv = tarval_unreachable;
2230 assert(pnc == pn_Cond_false);
2232 if (selector->type.tv == tarval_b_false) {
2233 node->type.tv = tarval_reachable;
2234 } else if (selector->type.tv == tarval_b_true) {
2235 node->type.tv = tarval_unreachable;
2236 } else if (selector->type.tv == tarval_bottom) {
2237 node->type.tv = tarval_reachable;
2239 assert(selector->type.tv == tarval_top);
2240 /* any condition based on Top is "!=" */
2241 node->type.tv = tarval_reachable;
/* switch Cond: compare the selector's constant value against the Proj number */
2246 if (selector->type.tv == tarval_bottom) {
2247 node->type.tv = tarval_reachable;
2248 } else if (selector->type.tv == tarval_top) {
2249 if (pnc == get_Cond_defaultProj(cond)) {
2250 /* a switch based of Top is always "default" */
2251 node->type.tv = tarval_reachable;
2253 node->type.tv = tarval_unreachable;
2255 long value = get_tarval_long(selector->type.tv);
2256 if (pnc == get_Cond_defaultProj(cond)) {
2257 /* default switch, have to check ALL other cases */
2260 for (i = get_irn_n_outs(cond) - 1; i >= 0; --i) {
2261 ir_node *succ = get_irn_out(cond, i);
2265 if (value == get_Proj_proj(succ)) {
2266 /* we found a match, will NOT take the default case */
2267 node->type.tv = tarval_unreachable;
2271 /* all cases checked, no match, will take default case */
2272 node->type.tv = tarval_reachable;
2275 node->type.tv = value == pnc ? tarval_reachable : tarval_unreachable;
2279 } /* compute_Proj_Cond */
/* NOTE(review): dispatcher for all Proj nodes — ordered guards: unreachable
 * block, Top predecessor, mode_M, non-mode_X (Cmp vs. default), then the
 * mode_X switch. Several return/case/brace lines are missing from the extract. */
2282 * (Re-)compute the type for a Proj-Node.
2284 * @param node the node
2286 static void compute_Proj(node_t *node) {
2287 ir_node *proj = node->node;
2288 ir_mode *mode = get_irn_mode(proj);
2289 node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
2290 ir_node *pred = get_Proj_pred(proj);
2292 if (block->type.tv == tarval_unreachable) {
2293 /* a Proj in a unreachable Block stay Top */
2294 node->type.tv = tarval_top;
2297 if (get_irn_node(pred)->type.tv == tarval_top && !is_Cond(pred)) {
2298 /* if the predecessor is Top, its Proj follow */
2299 node->type.tv = tarval_top;
2303 if (mode == mode_M) {
2304 /* mode M is always bottom */
2305 node->type.tv = tarval_bottom;
2308 if (mode != mode_X) {
2310 compute_Proj_Cmp(node, pred);
2312 default_compute(node);
2315 /* handle mode_X nodes */
2317 switch (get_irn_opcode(pred)) {
2319 /* the Proj_X from the Start is always reachable.
2320 However this is already handled at the top. */
2321 node->type.tv = tarval_reachable;
2324 compute_Proj_Cond(node, pred);
2327 default_compute(node);
2329 } /* compute_Proj */
2332 * (Re-)compute the type for a Confirm.
2334 * @param node the node
2336 static void compute_Confirm(node_t *node) {
2337 ir_node *confirm = node->node;
2338 node_t *pred = get_irn_node(get_Confirm_value(confirm));
/* Confirm(x == c): the node's type collapses to the constant bound */
2340 if (get_Confirm_cmp(confirm) == pn_Cmp_Eq) {
2341 node_t *bound = get_irn_node(get_Confirm_bound(confirm));
2343 if (is_con(bound->type)) {
2344 /* is equal to a constant */
2345 node->type = bound->type;
2349 /* a Confirm is a copy OR a Const */
2350 node->type = pred->type;
2351 } /* compute_Confirm */
/* NOTE(review): Max/Min are near-duplicates; both have missing lines here
 * (the SymConst-handling stores and the congruent-operands branch bodies). */
2354 * (Re-)compute the type for a Max.
2356 * @param node the node
2358 static void compute_Max(node_t *node) {
2359 ir_node *op = node->node;
2360 node_t *l = get_irn_node(get_binop_left(op));
2361 node_t *r = get_irn_node(get_binop_right(op));
2362 lattice_elem_t a = l->type;
2363 lattice_elem_t b = r->type;
2365 if (a.tv == tarval_top || b.tv == tarval_top) {
2366 node->type.tv = tarval_top;
2367 } else if (is_con(a) && is_con(b)) {
2368 /* both nodes are constants, we can probably do something */
2370 /* this case handles SymConsts as well */
2373 ir_mode *mode = get_irn_mode(op);
2374 tarval *tv_min = get_mode_min(mode);
/* Max(x, MIN) == x — the minimum value is the neutral element of Max */
2378 else if (b.tv == tv_min)
2380 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2381 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2382 node->type.tv = a.tv;
2384 node->type.tv = b.tv;
2386 node->type.tv = tarval_bad;
2389 } else if (r->part == l->part) {
2390 /* both nodes congruent, we can probably do something */
2393 node->type.tv = tarval_bottom;
2398 * (Re-)compute the type for a Min.
2400 * @param node the node
2402 static void compute_Min(node_t *node) {
2403 ir_node *op = node->node;
2404 node_t *l = get_irn_node(get_binop_left(op));
2405 node_t *r = get_irn_node(get_binop_right(op));
2406 lattice_elem_t a = l->type;
2407 lattice_elem_t b = r->type;
2409 if (a.tv == tarval_top || b.tv == tarval_top) {
2410 node->type.tv = tarval_top;
2411 } else if (is_con(a) && is_con(b)) {
2412 /* both nodes are constants, we can probably do something */
2414 /* this case handles SymConsts as well */
2417 ir_mode *mode = get_irn_mode(op);
2418 tarval *tv_max = get_mode_max(mode);
/* Min(x, MAX) == x — the maximum value is the neutral element of Min */
2422 else if (b.tv == tv_max)
2424 else if (is_tarval(a.tv) && is_tarval(b.tv)) {
2425 if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
2426 node->type.tv = a.tv;
2428 node->type.tv = b.tv;
2430 node->type.tv = tarval_bad;
2433 } else if (r->part == l->part) {
2434 /* both nodes congruent, we can probably do something */
2437 node->type.tv = tarval_bottom;
/* NOTE(review): generic dispatcher — looks up the per-op compute function
 * stored in op->ops.generic; the actual call site (original lines past 2462)
 * is missing from this extract. */
2442 * (Re-)compute the type for a given node.
2444 * @param node the node
2446 static void compute(node_t *node) {
2447 ir_node *irn = node->node;
2450 if (is_no_Block(irn)) {
2451 /* for pinned nodes, check its control input */
2452 if (get_irn_pinned(skip_Proj(irn)) == op_pin_state_pinned) {
2453 node_t *block = get_irn_node(get_nodes_block(irn));
2455 if (block->type.tv == tarval_unreachable) {
2456 node->type.tv = tarval_top;
2462 func = (compute_func)node->node->op->ops.generic;
2468 * Identity functions: Note that one might thing that identity() is just a
2469 * synonym for equivalent_node(). While this is true, we cannot use it for the algorithm
2470 * here, because it expects that the identity node is one of the inputs, which is NOT
2471 * always true for equivalent_node() which can handle (and does sometimes) DAGs.
2472 * So, we have our own implementation, which copies some parts of equivalent_node()
2476 * Calculates the Identity for Phi nodes
/* NOTE(review): a Phi is the identity of its single live, congruent input;
 * incongruent live inputs disqualify it. Lines assigning/returning n_part
 * (originally around 2489-2491, 2495-2497, 2501) are missing from this extract. */
2478 static node_t *identity_Phi(node_t *node) {
2479 ir_node *phi = node->node;
2480 ir_node *block = get_nodes_block(phi);
2481 node_t *n_part = NULL;
2484 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
2485 node_t *pred_X = get_irn_node(get_Block_cfgpred(block, i));
2487 if (pred_X->type.tv == tarval_reachable) {
2488 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
2492 else if (n_part->part != pred->part) {
2493 /* incongruent inputs, not a follower */
2498 /* if n_part is NULL here, all inputs path are dead, the Phi computes
2499 * tarval_top, is in the TOP partition and should NOT being split! */
2500 assert(n_part != NULL);
2502 } /* identity_Phi */
2505 * Calculates the Identity for commutative 0 neutral nodes.
2507 static node_t *identity_comm_zero_binop(node_t *node) {
2508 ir_node *op = node->node;
2509 node_t *a = get_irn_node(get_binop_left(op));
2510 node_t *b = get_irn_node(get_binop_right(op));
2511 ir_mode *mode = get_irn_mode(op);
2514 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2515 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2518 /* node: no input should be tarval_top, else the binop would be also
2519 * Top and not being split. */
/* x op 0 == 0 op x == x for 0-neutral commutative ops (Add, Or, ...) */
2520 zero = get_mode_null(mode);
2521 if (a->type.tv == zero)
2523 if (b->type.tv == zero)
2526 } /* identity_comm_zero_binop */
2529 * Calculates the Identity for Shift nodes.
2531 static node_t *identity_shift(node_t *node) {
2532 ir_node *op = node->node;
2533 node_t *b = get_irn_node(get_binop_right(op));
/* note: the shift COUNT's mode is checked, not the shifted value's mode */
2534 ir_mode *mode = get_irn_mode(b->node);
2537 /* node: no input should be tarval_top, else the binop would be also
2538 * Top and not being split. */
2539 zero = get_mode_null(mode);
2540 if (b->type.tv == zero)
2541 return get_irn_node(get_binop_left(op));
2543 } /* identity_shift */
2546 * Calculates the Identity for Mul nodes.
2548 static node_t *identity_Mul(node_t *node) {
2549 ir_node *op = node->node;
2550 node_t *a = get_irn_node(get_Mul_left(op));
2551 node_t *b = get_irn_node(get_Mul_right(op));
2552 ir_mode *mode = get_irn_mode(op);
2555 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2556 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2559 /* node: no input should be tarval_top, else the binop would be also
2560 * Top and not being split. */
/* x * 1 == 1 * x == x */
2561 one = get_mode_one(mode);
2562 if (a->type.tv == one)
2564 if (b->type.tv == one)
2567 } /* identity_Mul */
/* NOTE(review): the "return node;" fall-through lines (no identity found)
 * are missing from this extract in each of the functions below. */
2570 * Calculates the Identity for Sub nodes.
2572 static node_t *identity_Sub(node_t *node) {
2573 ir_node *sub = node->node;
2574 node_t *b = get_irn_node(get_Sub_right(sub));
2575 ir_mode *mode = get_irn_mode(sub);
2577 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2578 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2581 /* node: no input should be tarval_top, else the binop would be also
2582 * Top and not being split. */
/* x - 0 == x */
2583 if (b->type.tv == get_mode_null(mode))
2584 return get_irn_node(get_Sub_left(sub));
2586 } /* identity_Sub */
2589 * Calculates the Identity for And nodes.
2591 static node_t *identity_And(node_t *node) {
2592 ir_node *and = node->node;
2593 node_t *a = get_irn_node(get_And_left(and));
2594 node_t *b = get_irn_node(get_And_right(and));
/* the neutral element of And is the all-ones pattern, not zero */
2595 tarval *neutral = get_mode_all_one(get_irn_mode(and));
2597 /* node: no input should be tarval_top, else the And would be also
2598 * Top and not being split. */
2599 if (a->type.tv == neutral)
2601 if (b->type.tv == neutral)
2604 } /* identity_And */
2607 * Calculates the Identity for Confirm nodes.
2609 static node_t *identity_Confirm(node_t *node) {
2610 ir_node *confirm = node->node;
2612 /* a Confirm is always a Copy */
2613 return get_irn_node(get_Confirm_value(confirm));
2614 } /* identity_Confirm */
2617 * Calculates the Identity for Mux nodes.
2619 static node_t *identity_Mux(node_t *node) {
2620 ir_node *mux = node->node;
2621 node_t *t = get_irn_node(get_Mux_true(mux));
2622 node_t *f = get_irn_node(get_Mux_false(mux));
/* Mux(sel, x, x): both arms congruent — either arm is the identity */
2625 if (t->part == f->part)
2628 /* for now, the 1-input identity is not supported */
2630 sel = get_irn_node(get_Mux_sel(mux));
2632 /* Mux sel input is mode_b, so it is always a tarval */
2633 if (sel->type.tv == tarval_b_true)
2635 if (sel->type.tv == tarval_b_false)
2639 } /* identity_Mux */
2642 * Calculates the Identity for Min nodes.
2644 static node_t *identity_Min(node_t *node) {
2645 ir_node *op = node->node;
2646 node_t *a = get_irn_node(get_binop_left(op));
2647 node_t *b = get_irn_node(get_binop_right(op));
2648 ir_mode *mode = get_irn_mode(op);
2651 if (a->part == b->part) {
2652 /* leader of multiple predecessors */
2656 /* works even with NaN */
2657 tv_max = get_mode_max(mode);
2658 if (a->type.tv == tv_max)
2660 if (b->type.tv == tv_max)
2663 } /* identity_Min */
2666 * Calculates the Identity for Max nodes.
2668 static node_t *identity_Max(node_t *node) {
2669 ir_node *op = node->node;
2670 node_t *a = get_irn_node(get_binop_left(op));
2671 node_t *b = get_irn_node(get_binop_right(op));
2672 ir_mode *mode = get_irn_mode(op);
2675 if (a->part == b->part) {
2676 /* leader of multiple predecessors */
2680 /* works even with NaN */
2681 tv_min = get_mode_min(mode);
2682 if (a->type.tv == tv_min)
2684 if (b->type.tv == tv_min)
2687 } /* identity_Max */
2690 * Calculates the Identity for nodes.
2692 static node_t *identity(node_t *node) {
2693 ir_node *irn = node->node;
2695 switch (get_irn_opcode(irn)) {
2697 return identity_Phi(node);
2699 return identity_Mul(node);
2703 return identity_comm_zero_binop(node);
2708 return identity_shift(node);
2710 return identity_And(node);
2712 return identity_Sub(node);
2714 return identity_Confirm(node);
2716 return identity_Mux(node);
2718 return identity_Min(node);
2720 return identity_Max(node);
2727 * Node follower is a (new) follower of leader, segregate Leader
2730 static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader) {
2731 ir_node *l = leader->node;
2732 int j, i, n = get_irn_n_outs(l);
2734 DB((dbg, LEVEL_2, "%+F is a follower of %+F\n", follower, leader->node));
2735 /* The leader edges must remain sorted, but follower edges can
2737 for (i = leader->n_followers + 1; i <= n; ++i) {
2738 if (l->out[i].use == follower) {
2739 ir_def_use_edge t = l->out[i];
2741 for (j = i - 1; j >= leader->n_followers + 1; --j)
2742 l->out[j + 1] = l->out[j];
2743 ++leader->n_followers;
2744 l->out[leader->n_followers] = t;
2748 } /* segregate_def_use_chain_1 */
2751 * Node follower is a (new) follower; segregate the def-use chain of each of its leaders.
2754 * @param follower the follower IR node
2756 static void segregate_def_use_chain(const ir_node *follower) {
2759 for (i = get_irn_arity(follower) - 1; i >= 0; --i) {
2760 node_t *pred = get_irn_node(get_irn_n(follower, i));
2762 segregate_def_use_chain_1(follower, pred);
2764 } /* segregate_def_use_chain */
2767 * Propagate constant evaluation.
2769 * @param env the environment
/*
 * Propagate constant evaluation: the main fixpoint worklist loop.
 *
 * NOTE(review): this listing is elided — the declarations of X, Y, x, i,
 * fallen and several closing braces are not visible here. The comments
 * below describe only what the visible lines demonstrably do; confirm
 * the elided parts against the full source.
 */
2771 static void propagate(environment_t *env) {
2774 lattice_elem_t old_type;
2776 unsigned n_fallen, old_type_was_T_or_C;
/* process partitions queued on env->cprop until the queue is empty */
2779 while (env->cprop != NULL) {
2780 void *oldopcode = NULL;
2782 /* remove the first partition X from cprop */
2785 env->cprop = X->cprop_next;
2787 old_type_was_T_or_C = X->type_is_T_or_C;
2789 DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
/* drain X's local cprop list of nodes whose type may have changed */
2792 while (! list_empty(&X->cprop)) {
2793 /* remove the first Node x from X.cprop */
2794 x = list_entry(X->cprop.next, node_t, cprop_list);
2795 //assert(x->part == X);
2796 list_del(&x->cprop_list);
/* a follower whose identity is now itself must become a leader again */
2799 if (x->is_follower && identity(x) == x) {
2800 /* check the opcode first */
2801 if (oldopcode == NULL) {
2802 oldopcode = lambda_opcode(get_first_node(X), env);
2804 if (oldopcode != lambda_opcode(x, env)) {
2805 if (x->on_fallen == 0) {
2806 /* different opcode -> x falls out of this partition */
2811 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
2815 /* x will make the follower -> leader transition */
2816 follower_to_leader(x);
2819 /* compute a new type for x */
2821 DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
/* only react when the lattice type actually changed */
2823 if (x->type.tv != old_type.tv) {
2824 verify_type(old_type, x->type);
2825 DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
2827 if (x->on_fallen == 0) {
2828 /* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
2829 not already on the list. */
2834 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
/* re-queue all users of x, since their types may change too */
2836 for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
2837 ir_node *succ = get_irn_out(x->node, i);
2838 node_t *y = get_irn_node(succ);
2840 /* Add y to y.partition.cprop. */
2841 add_to_cprop(y, env);
/* if only SOME nodes of X changed type, X must be split */
2846 if (n_fallen > 0 && n_fallen != X->n_leader) {
2847 DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
2848 Y = split(&X, fallen, env);
2850 * We have split out fallen node. The type of the result
2851 * partition is NOT set yet.
2853 Y->type_is_T_or_C = 0;
2857 /* remove the flags from the fallen list */
2858 for (x = fallen; x != NULL; x = x->next)
2861 if (old_type_was_T_or_C) {
2864 /* check if some nodes will make the leader -> follower transition */
2865 list_for_each_entry_safe(node_t, y, tmp, &Y->Leader, node_list) {
2866 if (y->type.tv != tarval_top && ! is_con(y->type)) {
2867 node_t *eq_node = identity(y);
2869 if (eq_node != y && eq_node->part == y->part) {
2870 DB((dbg, LEVEL_2, "Node %+F is a follower of %+F\n", y->node, eq_node->node));
2871 /* move to Follower */
2873 list_del(&y->node_list);
2874 list_add_tail(&y->node_list, &Y->Follower);
2877 segregate_def_use_chain(y->node);
2887 * Get the leader for a given node from its congruence class.
2889 * @param irn the node
2891 static ir_node *get_leader(node_t *node) {
2892 partition_t *part = node->part;
2894 if (part->n_leader > 1 || node->is_follower) {
2895 if (node->is_follower) {
2896 DB((dbg, LEVEL_2, "Replacing follower %+F\n", node->node));
2899 DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
2901 return get_first_node(part)->node;
2907 * Return non-zero if the control flow predecessor node pred
2908 * is the only reachable control flow exit of its block.
2910 * @param pred the control flow exit
/*
 * Return non-zero if the control flow predecessor pred is the only
 * reachable control flow exit of its block (so the block can be fused).
 *
 * NOTE(review): this listing is elided — the initial condition before the
 * "else if" chain (presumably rejecting Start or otherwise pinned nodes)
 * and the counting/return statements inside the loop are not visible
 * here; confirm against the full source.
 */
2912 static int can_exchange(ir_node *pred) {
2915 else if (is_Jmp(pred))
/* a mode_T exit (e.g. Cond/Proj) needs its reachable X-Projs counted */
2917 else if (get_irn_mode(pred) == mode_T) {
2920 /* if the predecessor block has more than one
2921 reachable outputs we cannot remove the block */
2923 for (i = get_irn_n_outs(pred) - 1; i >= 0; --i) {
2924 ir_node *proj = get_irn_out(pred, i);
2927 /* skip non-control flow Proj's */
2928 if (get_irn_mode(proj) != mode_X)
2931 node = get_irn_node(proj);
2932 if (node->type.tv == tarval_reachable) {
2940 } /* can_exchange */
2943 * Block Post-Walker, apply the analysis results on control flow by
2944 * shortening Phi's and Block inputs.
/*
 * Block Post-Walker: apply the analysis results on control flow by
 * shortening Phi's and Block inputs.
 *
 * NOTE(review): this listing is elided — declarations of i, j, k, n and
 * many braces/else branches are not visible; the comments describe only
 * what the visible lines demonstrably do.
 */
2946 static void apply_cf(ir_node *block, void *ctx) {
2947 environment_t *env = ctx;
2948 node_t *node = get_irn_node(block);
2950 ir_node **ins, **in_X;
2951 ir_node *phi, *next;
2953 n = get_Block_n_cfgpreds(block);
/* case 1: the whole block is unreachable -> kill it */
2955 if (node->type.tv == tarval_unreachable) {
2958 for (i = n - 1; i >= 0; --i) {
2959 ir_node *pred = get_Block_cfgpred(block, i);
2961 if (! is_Bad(pred)) {
2962 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
/* flagged is used here to visit each predecessor block only once */
2964 if (pred_bl->flagged == 0) {
2965 pred_bl->flagged = 3;
2967 if (pred_bl->type.tv == tarval_reachable) {
2969 * We will remove an edge from block to its pred.
2970 * This might leave the pred block as an endless loop
2972 if (! is_backedge(block, i))
2973 keep_alive(pred_bl->node);
2979 /* the EndBlock is always reachable even if the analysis
2980 finds out the opposite :-) */
2981 if (block != get_irg_end_block(current_ir_graph)) {
2982 /* mark dead blocks */
2983 set_Block_dead(block);
2984 DB((dbg, LEVEL_1, "Removing dead %+F\n", block));
2986 /* the endblock is unreachable */
2987 set_irn_in(block, 0, NULL);
/* case 2: exactly one predecessor -> try to fuse with the pred block */
2993 /* only one predecessor combine */
2994 ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));
2996 if (can_exchange(pred)) {
2997 ir_node *new_block = get_nodes_block(pred);
2998 DB((dbg, LEVEL_1, "Fuse %+F with %+F\n", block, new_block));
2999 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
3000 exchange(block, new_block);
3001 node->node = new_block;
/* case 3: several predecessors -> collect the reachable ones in in_X */
3007 NEW_ARR_A(ir_node *, in_X, n);
3009 for (i = 0; i < n; ++i) {
3010 ir_node *pred = get_Block_cfgpred(block, i);
3011 node_t *node = get_irn_node(pred);
3013 if (node->type.tv == tarval_reachable) {
3016 DB((dbg, LEVEL_1, "Removing dead input %d from %+F (%+F)\n", i, block, pred));
3017 if (! is_Bad(pred)) {
3018 node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
3020 if (pred_bl->flagged == 0) {
3021 pred_bl->flagged = 3;
3023 if (pred_bl->type.tv == tarval_reachable) {
3025 * We will remove an edge from block to its pred.
3026 * This might leave the pred block as an endless loop
3028 if (! is_backedge(block, i))
3029 keep_alive(pred_bl->node);
/* shorten all Phi's of this block in step with the block inputs */
3038 NEW_ARR_A(ir_node *, ins, n);
3039 for (phi = get_Block_phis(block); phi != NULL; phi = next) {
3040 node_t *node = get_irn_node(phi);
3042 next = get_Phi_next(phi);
3043 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
3044 /* this Phi is replaced by a constant */
3045 tarval *tv = node->type.tv;
3046 ir_node *c = new_r_Const(current_ir_graph, block, get_tarval_mode(tv), tv);
3048 set_irn_node(c, node);
3050 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
3051 DBG_OPT_COMBO(phi, c, FS_OPT_COMBO_CONST);
/* keep only the Phi inputs whose control predecessor is reachable */
3056 for (i = 0; i < n; ++i) {
3057 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
3059 if (pred->type.tv == tarval_reachable) {
3060 ins[j++] = get_Phi_pred(phi, i);
3064 /* this Phi is replaced by a single predecessor */
3065 ir_node *s = ins[0];
3066 node_t *phi_node = get_irn_node(phi);
3069 DB((dbg, LEVEL_1, "%+F is replaced by %+F because of cf change\n", phi, s));
3070 DBG_OPT_COMBO(phi, s, FS_OPT_COMBO_FOLLOWER);
3075 set_irn_in(phi, j, ins);
/* if only one live predecessor remains, try to fuse the blocks */
3082 /* this Block has only one live predecessor */
3083 ir_node *pred = skip_Proj(in_X[0]);
3085 if (can_exchange(pred)) {
3086 ir_node *new_block = get_nodes_block(pred);
3087 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
3088 exchange(block, new_block);
3089 node->node = new_block;
3093 set_irn_in(block, k, in_X);
3099 * Exchange a node by its leader.
3100 * Beware: in rare cases the mode might be wrong here, for instance
3101 * AddP(x, NULL) is a follower of x, but with different mode.
3104 static void exchange_leader(ir_node *irn, ir_node *leader) {
3105 ir_mode *mode = get_irn_mode(irn);
3106 if (mode != get_irn_mode(leader)) {
3107 /* The conv is a no-op, so we are fre to place in
3108 * either in the block of the leader OR in irn's block.
3109 * Probably placing it into leaders block might reduce
3110 * the number of Conv due to CSE. */
3111 ir_node *block = get_nodes_block(leader);
3112 dbg_info *dbg = get_irn_dbg_info(irn);
3114 leader = new_rd_Conv(dbg, current_ir_graph, block, leader, mode);
3116 exchange(irn, leader);
3120 * Post-Walker, apply the analysis results;
/*
 * Post-Walker: apply the analysis results to each (non-block) node.
 *
 * NOTE(review): this listing is elided — several else branches, loop
 * bodies and closing braces are not visible; comments describe only
 * what the visible lines demonstrably do.
 */
3122 static void apply_result(ir_node *irn, void *ctx) {
3123 environment_t *env = ctx;
3124 node_t *node = get_irn_node(irn);
3126 if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
3127 /* blocks already handled, do not touch the End node */
/* nodes in unreachable blocks are replaced by Bad */
3129 node_t *block = get_irn_node(get_nodes_block(irn));
3131 if (block->type.tv == tarval_unreachable) {
3132 ir_node *bad = get_irg_bad(current_ir_graph);
3134 /* here, bad might already have a node, but this can be safely ignored
3135 as long as bad has at least ONE valid node */
3136 set_irn_node(bad, node);
3138 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
3142 else if (node->type.tv == tarval_unreachable) {
3143 /* don't kick away Unknown */
3144 if (! is_Unknown(irn)) {
3145 ir_node *bad = get_irg_bad(current_ir_graph);
3147 /* see comment above */
3148 set_irn_node(bad, node);
3150 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
/* a reachable X-Proj of a Cond with constant selector becomes a Jmp */
3155 else if (get_irn_mode(irn) == mode_X) {
3158 ir_node *cond = get_Proj_pred(irn);
3160 if (is_Cond(cond)) {
3161 node_t *sel = get_irn_node(get_Cond_selector(cond));
3163 if (is_tarval(sel->type.tv) && tarval_is_constant(sel->type.tv)) {
3164 /* Cond selector is a constant and the Proj is reachable, make a Jmp */
3165 ir_node *jmp = new_r_Jmp(current_ir_graph, block->node);
3166 set_irn_node(jmp, node);
3168 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
3169 DBG_OPT_COMBO(irn, jmp, FS_OPT_COMBO_CF);
3176 /* normal data node */
3177 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
3178 tarval *tv = node->type.tv;
3181 * Beware: never replace mode_T nodes by constants. Currently we must mark
3182 * mode_T nodes with constants, but do NOT replace them.
3184 if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
3185 /* can be replaced by a constant */
3186 ir_node *c = new_r_Const(current_ir_graph, block->node, get_tarval_mode(tv), tv);
3187 set_irn_node(c, node);
3189 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
3190 DBG_OPT_COMBO(irn, c, FS_OPT_COMBO_CONST);
3191 exchange_leader(irn, c);
/* a type that is a symbolic address becomes a SymConst */
3194 } else if (is_entity(node->type.sym.entity_p)) {
3195 if (! is_SymConst(irn)) {
3196 /* can be replaced by a SymConst */
3197 ir_node *symc = new_r_SymConst(current_ir_graph, block->node, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
3198 set_irn_node(symc, node);
3201 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
3202 DBG_OPT_COMBO(irn, symc, FS_OPT_COMBO_CONST);
3203 exchange_leader(irn, symc);
3206 } else if (is_Confirm(irn)) {
3207 /* Confirms are always follower, but do not kill them here */
/* otherwise replace the node by its congruence class leader */
3209 ir_node *leader = get_leader(node);
3211 if (leader != irn) {
3212 int non_strict_phi = 0;
3215 * Beware: Do not remove Phi(Unknown, ..., x, ..., Unknown)
3216 * as this might create non-strict programs.
3218 if (node->is_follower && is_Phi(irn) && !is_Unknown(leader)) {
3221 for (i = get_Phi_n_preds(irn) - 1; i >= 0; --i) {
3222 ir_node *pred = get_Phi_pred(irn, i);
3224 if (is_Unknown(pred)) {
3230 if (! non_strict_phi) {
3231 DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
3232 if (node->is_follower)
3233 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_FOLLOWER);
3235 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_CONGRUENT);
3236 exchange_leader(irn, leader);
3243 } /* apply_result */
3246 * Fix the keep-alives by deleting unreachable ones.
/*
 * Fix the keep-alive edges of the End node by deleting unreachable ones.
 *
 * NOTE(review): this listing is elided — the declaration of in, the
 * guard before line 3261 (presumably mapping non-block keep-alives to
 * their block) and the line storing surviving entries into in[] are not
 * visible; confirm against the full source.
 */
3248 static void apply_end(ir_node *end, environment_t *env) {
3249 int i, j, n = get_End_n_keepalives(end);
3253 NEW_ARR_A(ir_node *, in, n);
3255 /* fix the keep alive */
3256 for (i = j = 0; i < n; i++) {
3257 ir_node *ka = get_End_keepalive(end, i);
3258 node_t *node = get_irn_node(ka);
3261 node = get_irn_node(get_nodes_block(ka));
/* keep only reachable, non-Bad keep-alives */
3263 if (node->type.tv != tarval_unreachable && !is_Bad(ka))
3267 set_End_keepalives(end, j, in);
/* helper: register compute_<code> as the generic op function of op_<code> */
3272 #define SET(code) op_##code->ops.generic = (op_func)compute_##code
3275 * sets the generic functions to compute.
/*
 * NOTE(review): the individual SET(...) registration lines between the
 * default loop and the closing brace are elided in this listing.
 */
3277 static void set_compute_functions(void) {
3280 /* set the default compute function */
3281 for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
3282 ir_op *op = get_irp_opcode(i);
3283 op->ops.generic = (op_func)default_compute;
3286 /* set specific functions */
3307 } /* set_compute_functions */
3309 void combo(ir_graph *irg) {
3311 ir_node *initial_bl;
3313 ir_graph *rem = current_ir_graph;
3315 current_ir_graph = irg;
3317 /* register a debug mask */
3318 FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
3320 DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
3322 obstack_init(&env.obst);
3323 env.worklist = NULL;
3327 #ifdef DEBUG_libfirm
3328 env.dbg_list = NULL;
3330 env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
3331 env.type2id_map = pmap_create();
3332 env.end_idx = get_opt_global_cse() ? 0 : -1;
3333 env.lambda_input = 0;
3334 env.commutative = 1;
3337 assure_irg_outs(irg);
3338 assure_cf_loop(irg);
3340 /* we have our own value_of function */
3341 set_value_of_func(get_node_tarval);
3343 set_compute_functions();
3344 DEBUG_ONLY(part_nr = 0);
3346 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
3348 /* create the initial partition and place it on the work list */
3349 env.initial = new_partition(&env);
3350 add_to_worklist(env.initial, &env);
3351 irg_walk_graph(irg, init_block_phis, create_initial_partitions, &env);
3353 /* set the hook: from now, every node has a partition and a type */
3354 DEBUG_ONLY(set_dump_node_vcgattr_hook(dump_partition_hook));
3357 tarval_UNKNOWN = tarval_top;
3359 tarval_UNKNOWN = tarval_bad;
3362 /* all nodes on the initial partition have type Top */
3363 env.initial->type_is_T_or_C = 1;
3365 /* Place the START Node's partition on cprop.
3366 Place the START Node on its local worklist. */
3367 initial_bl = get_irg_start_block(irg);
3368 start = get_irn_node(initial_bl);
3369 add_to_cprop(start, &env);
3373 if (env.worklist != NULL)
3375 } while (env.cprop != NULL || env.worklist != NULL);
3377 dump_all_partitions(&env);
3378 check_all_partitions(&env);
3381 dump_ir_block_graph(irg, "-partition");
3384 /* apply the result */
3385 irg_block_walk_graph(irg, NULL, apply_cf, &env);
3386 irg_walk_graph(irg, NULL, apply_result, &env);
3387 apply_end(get_irg_end(irg), &env);
3390 /* control flow might changed */
3391 set_irg_outs_inconsistent(irg);
3392 set_irg_extblk_inconsistent(irg);
3393 set_irg_doms_inconsistent(irg);
3394 set_irg_loopinfo_inconsistent(irg);
3397 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
3399 /* remove the partition hook */
3400 DEBUG_ONLY(set_dump_node_vcgattr_hook(NULL));
3402 pmap_destroy(env.type2id_map);
3403 del_set(env.opcode2id_map);
3404 obstack_free(&env.obst, NULL);
3406 /* restore value_of() default behavior */
3407 set_value_of_func(NULL);
3408 current_ir_graph = rem;