2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Cliff Click's Combined Analysis/Optimization
11 * This is a slightly enhanced version of Cliff Clicks combo algorithm
12 * - support for commutative nodes is added, Add(a,b) and Add(b,a) ARE congruent
13 * - supports all Firm direct (by a data edge) identities except Mux
14 * (Mux can be a 2-input or 1-input identity, only 2-input is implemented yet)
15 * - supports Confirm nodes (handle them like Copies but do NOT remove them)
16 * - let Cmp nodes calculate Top like all other data nodes: this would let
17 * Mux nodes calculate Unknown instead of taking the true result
18 * - let Cond(Top) always select FALSE/default: This is tricky. Nodes are only reevaluated
19 * IFF the predecessor changed its type. Because nodes are initialized with Top
20 * this never happens, let all Proj(Cond) be unreachable.
21 * We avoid this condition by the same way we work around Phi: whenever a Block
22 * node is placed on the list, place its Cond nodes (and because they are Tuple
23 * all its Proj-nodes either on the cprop list)
24 * Especially, this changes the meaning of Click's example:
39 * using Click's version while it is silent with ours.
40 * - support for global congruences is implemented but not tested yet
42 * Note further that we use the terminology from Click's work here, which is different
43 * in some cases from Firm terminology. Especially, Click's type is a
44 * Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
50 #include "iroptimize.h"
57 #include "irgraph_t.h"
64 #include "iropt_dbg.h"
68 #include "irnodeset.h"
72 #include "firmstat_t.h"
77 /* define this to check that all type translations are monotone */
78 #define VERIFY_MONOTONE
80 /* define this to check the consistency of partitions */
81 #define CHECK_PARTITIONS
83 typedef struct node_t node_t;
84 typedef struct partition_t partition_t;
85 typedef struct opcode_key_t opcode_key_t;
86 typedef struct listmap_entry_t listmap_entry_t;
88 /** The type of the compute function. */
89 typedef void (*compute_func)(node_t *node);
95 ir_node *irn; /**< An IR node representing this opcode. */
99 * An entry in the list_map.
101 struct listmap_entry_t {
102 void *id; /**< The id. */
103 node_t *list; /**< The associated list for this id. */
104 listmap_entry_t *next; /**< Link to the next entry in the map. */
107 /** We must map id's to lists. */
108 typedef struct listmap_t {
109 set *map; /**< Map id's to listmap_entry_t's */
110 listmap_entry_t *values; /**< List of all values in the map. */
114 * A lattice element. Because we handle constants and symbolic constants different, we
115 * have to use this union.
126 ir_node *node; /**< The IR-node itself. */
127 list_head node_list; /**< Double-linked list of leader/follower entries. */
128 list_head cprop_list; /**< Double-linked partition.cprop list. */
129 partition_t *part; /**< points to the partition this node belongs to */
130 node_t *next; /**< Next node on local list (partition.touched, fallen). */
131 node_t *race_next; /**< Next node on race list. */
132 lattice_elem_t type; /**< The associated lattice element "type". */
133 int max_user_input; /**< Maximum input number of Def-Use edges. */
134 unsigned next_edge; /**< Index of the next Def-Use edge to use. */
135 unsigned n_followers; /**< Number of Follower in the outs set. */
136 unsigned on_touched:1; /**< Set, if this node is on the partition.touched set. */
137 unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
138 unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
139 unsigned is_follower:1; /**< Set, if this node is a follower. */
140 unsigned flagged:2; /**< 2 Bits, set if this node was visited by race 1 or 2. */
144 * A partition containing congruent nodes.
147 list_head Leader; /**< The head of partition Leader node list. */
148 list_head Follower; /**< The head of partition Follower node list. */
149 list_head cprop; /**< The head of partition.cprop list. */
150 list_head cprop_X; /**< The head of partition.cprop (Cond nodes and its Projs) list. */
151 partition_t *wl_next; /**< Next entry in the work list if any. */
152 partition_t *touched_next; /**< Points to the next partition in the touched set. */
153 partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
154 partition_t *split_next; /**< Points to the next partition in the list that must be split by split_by(). */
155 node_t *touched; /**< The partition.touched set of this partition. */
156 unsigned n_leader; /**< Number of entries in this partition.Leader. */
157 unsigned n_touched; /**< Number of entries in the partition.touched. */
158 int max_user_inputs; /**< Maximum number of user inputs of all entries. */
159 unsigned on_worklist:1; /**< Set, if this partition is in the work list. */
160 unsigned on_touched:1; /**< Set, if this partition is on the touched set. */
161 unsigned on_cprop:1; /**< Set, if this partition is on the cprop list. */
162 unsigned type_is_T_or_C:1;/**< Set, if all nodes in this partition have type Top or Constant. */
164 partition_t *dbg_next; /**< Link all partitions for debugging */
165 unsigned nr; /**< A unique number for (what-)mapping, >0. */
169 typedef struct environment_t {
170 struct obstack obst; /**< obstack to allocate data structures. */
171 partition_t *worklist; /**< The work list. */
172 partition_t *cprop; /**< The constant propagation list. */
173 partition_t *touched; /**< the touched set. */
174 partition_t *initial; /**< The initial partition. */
175 set *opcode2id_map; /**< The opcodeMode->id map. */
176 ir_node **kept_memory; /**< Array of memory nodes that must be kept. */
177 int end_idx; /**< -1 for local and 0 for global congruences. */
178 int lambda_input; /**< Captured argument for lambda_partition(). */
179 unsigned modified:1; /**< Set, if the graph was modified. */
180 unsigned unopt_cf:1; /**< If set, control flow is not optimized due to Unknown. */
181 /* options driving the optimization */
182 unsigned commutative:1; /**< Set, if commutation nodes should be handled specially. */
183 unsigned opt_unknown:1; /**< Set, if non-strict programs should be optimized. */
185 partition_t *dbg_list; /**< List of all partitions. */
189 /** Type of the what function. */
190 typedef void *(*what_func)(const node_t *node, environment_t *env);
192 #define get_irn_node(irn) ((node_t *)get_irn_link(irn))
193 #define set_irn_node(irn, node) set_irn_link(irn, node)
195 /* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
196 #undef tarval_unreachable
197 #define tarval_unreachable tarval_top
200 /** The debug module handle. */
201 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
203 /** The what reason. */
204 DEBUG_ONLY(static const char *what_reason;)
206 /** Next partition number. */
207 DEBUG_ONLY(static unsigned part_nr = 0;)
209 /** The tarval returned by Unknown nodes: set to either tarval_bad OR tarval_top. */
210 static ir_tarval *tarval_UNKNOWN;
213 static node_t *identity(node_t *node);
216 * Compare two opcode representatives.
/**
 * Compare two opcode representatives for congruence.
 * Two nodes are congruence candidates only if op, mode, arity and the
 * op-specific attributes all match; returns 0 iff they do.
 */
218 static int cmp_irn_opcode(const ir_node *a, const ir_node *b)
/* cheap rejects first: different op or different mode can never be congruent */
222 if ((get_irn_op(a) != get_irn_op(b)) ||
223 (get_irn_mode(a) != get_irn_mode(b)))
226 /* compare if a's in and b's in are of equal length */
227 arity = get_irn_arity(a);
228 if (arity != get_irn_arity(b))
233 * Some ugliness here: Two Blocks having the same
234 * IJmp predecessor would be congruent, which of course is wrong.
235 * We fix it by never letting blocks be congruent
236 * which cannot be detected by combo either.
242 * here, we already know that the nodes are identical except their
/* delegate the attribute comparison to the op if it provides one */
245 if (a->op->ops.node_cmp_attr)
246 return a->op->ops.node_cmp_attr(a, b);
251 #ifdef CHECK_PARTITIONS
/**
 * Consistency check (CHECK_PARTITIONS only): verify the invariants of one
 * partition -- every Leader is no follower, unflagged and points back to T,
 * the leader count matches n_leader, and every Follower is marked as such.
 */
255 static void check_partition(const partition_t *T)
259 list_for_each_entry(node_t, node, &T->Leader, node_list) {
260 assert(node->is_follower == 0);
261 assert(node->flagged == 0);
262 assert(node->part == T);
/* the counted leaders must equal the cached n_leader */
265 assert(n == T->n_leader);
267 list_for_each_entry(node_t, node, &T->Follower, node_list) {
268 assert(node->is_follower == 1);
269 assert(node->flagged == 0);
270 assert(node->part == T);
275 * check that all leader nodes in the partition have the same opcode.
277 static void check_opcode(const partition_t *Z)
279 const ir_node *repr = NULL;
281 list_for_each_entry(node_t, node, &Z->Leader, node_list) {
282 ir_node *irn = node->node;
/* every leader must be opcode-congruent to the first representative */
287 assert(cmp_irn_opcode(repr, irn) == 0);
/** Run check_partition()/check_opcode() over all partitions on the debug list. */
292 static void check_all_partitions(environment_t *env)
297 for (P = env->dbg_list; P != NULL; P = P->dbg_next) {
299 if (! P->type_is_T_or_C)
301 list_for_each_entry(node_t, node, &P->Follower, node_list) {
302 node_t *leader = identity(node);
/* a follower must have a distinct leader inside the same partition */
304 assert(leader != node && leader->part == node->part);
/**
 * Check a local (singly linked) node list: every entry must belong to Z.
 * @param ofs  byte offset of the next-pointer field inside node_t
 */
315 static void do_check_list(const node_t *list, int ofs, const partition_t *Z)
320 #define NEXT(e) *((const node_t **)((char *)(e) + (ofs)))
321 for (e = list; e != NULL; e = NEXT(e)) {
322 assert(e->part == Z);
333 * Check a local list.
335 static void check_list(const node_t *list, const partition_t *Z)
337 do_check_list(list, offsetof(node_t, next), Z);
/* without CHECK_PARTITIONS all checks compile away to nothing */
341 #define check_partition(T)
342 #define check_list(list, Z)
343 #define check_all_partitions(env)
344 #endif /* CHECK_PARTITIONS */
/* forward declaration: needed by dump_partition() below */
347 static inline lattice_elem_t get_partition_type(const partition_t *X);
350 * Dump partition to output.
352 static void dump_partition(const char *msg, const partition_t *part)
355 lattice_elem_t type = get_partition_type(part);
357 DB((dbg, LEVEL_2, "%s part%u%s (%u, %+F) {\n  ",
358 msg, part->nr, part->type_is_T_or_C ? "*" : "",
359 part->n_leader, type));
360 list_for_each_entry(node_t, node, &part->Leader, node_list) {
361 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
/* followers, if any, are printed after a separator line */
364 if (! list_empty(&part->Follower)) {
365 DB((dbg, LEVEL_2, "\n---\n  "));
367 list_for_each_entry(node_t, node, &part->Follower, node_list) {
368 DB((dbg, LEVEL_2, "%s%+F", first ? "" : ", ", node->node));
372 DB((dbg, LEVEL_2, "\n}\n"));
/**
 * Dump a singly linked local list; ofs is the byte offset of the
 * next-pointer field inside node_t (next or race_next).
 */
378 static void do_dump_list(const char *msg, const node_t *node, int ofs)
383 #define GET_LINK(p, ofs) *((const node_t **)((char *)(p) + (ofs)))
385 DB((dbg, LEVEL_3, "%s = {\n  ", msg));
386 for (p = node; p != NULL; p = GET_LINK(p, ofs)) {
387 DB((dbg, LEVEL_3, "%s%+F", first ? "" : ", ", p->node));
390 DB((dbg, LEVEL_3, "\n}\n"));
/** Dump a race list (linked through race_next). */
398 static void dump_race_list(const char *msg, const node_t *list)
400 do_dump_list(msg, list, offsetof(node_t, race_next));
404 * Dumps a local list.
406 static void dump_list(const char *msg, const node_t *list)
408 do_dump_list(msg, list, offsetof(node_t, next));
412 * Dump all partitions.
414 static void dump_all_partitions(const environment_t *env)
416 const partition_t *P;
418 DB((dbg, LEVEL_2, "All partitions\n===============\n"));
419 for (P = env->dbg_list; P != NULL; P = P->dbg_next)
420 dump_partition("", P);
/** Dump the list of partitions produced by one split_by() run. */
426 static void dump_split_list(const partition_t *list)
428 const partition_t *p;
431 DB((dbg, LEVEL_2, "Split by %s produced = {\n", what_reason));
432 for (p = list; p != NULL; p = p->split_next) {
433 DB((dbg, LEVEL_2, "%c part%u", split, p->nr));
436 DB((dbg, LEVEL_2, "\n}\n"));
440 * Dump partition and type for a node.
442 static int dump_partition_hook(FILE *F, const ir_node *n, const ir_node *local)
444 const ir_node *irn = local != NULL ? local : n;
445 node_t *node = get_irn_node(irn);
447 ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
/* non-debug build: all dump helpers compile away */
452 #define dump_partition(msg, part)
453 #define dump_race_list(msg, list)
454 #define dump_list(msg, list)
455 #define dump_all_partitions(env)
456 #define dump_split_list(list)
459 #if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
461 * Verify that a type transition is monotone
/* In the lattice, types may only fall: Top -> constant -> Bottom.
 * Panics on any transition that would move upwards. */
463 static void verify_type(const lattice_elem_t old_type, node_t *node)
465 if (old_type.tv == node->type.tv) {
469 if (old_type.tv == tarval_top) {
470 /* from Top down-to is always allowed */
473 if (node->type.tv == tarval_bottom || node->type.tv == tarval_reachable) {
477 panic("wrong translation from %+F to %+F on node %+F", old_type, node->type, node->node);
481 #define verify_type(old_type, node)
485 * Compare two pointer values of a listmap.
487 static int listmap_cmp_ptr(const void *elt, const void *key, size_t size)
489 const listmap_entry_t *e1 = (listmap_entry_t*)elt;
490 const listmap_entry_t *e2 = (listmap_entry_t*)key;
/* set-compare convention: 0 means equal, non-zero means different */
493 return e1->id != e2->id;
497 * Initializes a listmap.
499 * @param map the listmap
501 static void listmap_init(listmap_t *map)
503 map->map = new_set(listmap_cmp_ptr, 16);
508 * Terminates a listmap.
510 * @param map the listmap
512 static void listmap_term(listmap_t *map)
518 * Return the associated listmap entry for a given id.
520 * @param map the listmap
521 * @param id the id to search for
523 * @return the associated listmap entry for the given id
525 static listmap_entry_t *listmap_find(listmap_t *map, void *id)
527 listmap_entry_t key, *entry;
/* set_insert() returns the existing entry or inserts the key as a new one */
532 entry = set_insert(listmap_entry_t, map->map, &key, sizeof(key), hash_ptr(id));
534 if (entry->list == NULL) {
535 /* a new entry, put into the list */
536 entry->next = map->values;
543 * Calculate the hash value for an opcode map entry.
545 * @param entry an opcode map entry
547 * @return a hash value for the given opcode map entry
549 static unsigned opcode_hash(const opcode_key_t *entry)
551 /* we cannot use the ir ops hash function here, because it hashes the
553 const ir_node *n = entry->irn;
554 ir_opcode code = (ir_opcode)get_irn_opcode(n);
555 ir_mode *mode = get_irn_mode(n);
556 unsigned hash = (unsigned)(PTR_TO_INT(mode) * 9 + code) + get_irn_arity(n);
/* mix in value-distinguishing attributes so unequal Consts/Projs hash apart */
558 if (code == iro_Const)
559 hash ^= (unsigned)hash_ptr(get_Const_tarval(n));
560 else if (code == iro_Proj)
561 hash += (unsigned)get_Proj_proj(n);
566 * Compare two entries in the opcode map.
568 static int cmp_opcode(const void *elt, const void *key, size_t size)
570 const opcode_key_t *o1 = (opcode_key_t*)elt;
571 const opcode_key_t *o2 = (opcode_key_t*)key;
/* full opcode comparison via the representative nodes */
575 return cmp_irn_opcode(o1->irn, o2->irn);
579 * Compare two Def-Use edges for input position.
581 static int cmp_def_use_edge(const void *a, const void *b)
583 const ir_def_use_edge *ea = (const ir_def_use_edge*)a;
584 const ir_def_use_edge *eb = (const ir_def_use_edge*)b;
586 /* no overrun, because range is [-1, MAXINT] */
587 return ea->pos - eb->pos;
591 * We need the Def-Use edges sorted.
/* Sorts the out edges of a node by input position and caches the maximum
 * user input position (-1 if the node has no users). */
593 static void sort_irn_outs(node_t *node)
595 ir_node *irn = node->node;
596 unsigned n_outs = get_irn_n_outs(irn);
597 qsort(irn->o.out->edges, n_outs, sizeof(irn->o.out->edges[0]),
599 node->max_user_input = n_outs > 0 ? irn->o.out->edges[n_outs-1].pos : -1;
603 * Return the type of a node.
605 * @param irn an IR-node
607 * @return the associated type of this node
609 static inline lattice_elem_t get_node_type(const ir_node *irn)
611 return get_irn_node(irn)->type;
615 * Return the tarval of a node.
617 * @param irn an IR-node
619 * @return the associated type of this node
621 static inline ir_tarval *get_node_tarval(const ir_node *irn)
623 lattice_elem_t type = get_node_type(irn);
/* non-tarval lattice elements (entities) are treated as Bottom here */
625 if (is_tarval(type.tv))
627 return tarval_bottom;
631 * Add a partition to the worklist.
/* Precondition: X is not on the worklist yet (asserted). */
633 static inline void add_to_worklist(partition_t *X, environment_t *env)
635 assert(X->on_worklist == 0);
636 DB((dbg, LEVEL_2, "Adding part%d to worklist\n", X->nr));
637 X->wl_next = env->worklist;
643 * Create a new empty partition.
645 * @param env the environment
647 * @return a newly allocated partition
649 static inline partition_t *new_partition(environment_t *env)
651 partition_t *part = OALLOC(&env->obst, partition_t);
/* all four node lists start empty */
653 INIT_LIST_HEAD(&part->Leader);
654 INIT_LIST_HEAD(&part->Follower);
655 INIT_LIST_HEAD(&part->cprop);
656 INIT_LIST_HEAD(&part->cprop_X);
657 part->wl_next = NULL;
658 part->touched_next = NULL;
659 part->cprop_next = NULL;
660 part->split_next = NULL;
661 part->touched = NULL;
664 part->max_user_inputs = 0;
665 part->on_worklist = 0;
666 part->on_touched = 0;
668 part->type_is_T_or_C = 0;
/* debug only: link the partition into the global debug list and number it */
670 part->dbg_next = env->dbg_list;
671 env->dbg_list = part;
672 part->nr = part_nr++;
679 * Get the first node from a partition.
/* Assumes the Leader list is non-empty. */
681 static inline node_t *get_first_node(const partition_t *X)
683 return list_entry(X->Leader.next, node_t, node_list);
687 * Return the type of a partition (assuming partition is non-empty and
688 * all elements have the same type).
690 * @param X a partition
692 * @return the type of the first element of the partition
694 static inline lattice_elem_t get_partition_type(const partition_t *X)
696 const node_t *first = get_first_node(X);
701 * Creates a partition node for the given IR-node and place it
702 * into the given partition.
704 * @param irn  an IR-node
705 * @param part a partition to place the node in
706 * @param env  the environment
708 * @return the created node
710 static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env)
712 /* create a partition node and place it in the partition */
713 node_t *node = OALLOC(&env->obst, node_t);
715 INIT_LIST_HEAD(&node->node_list);
716 INIT_LIST_HEAD(&node->cprop_list);
720 node->race_next = NULL;
/* optimistic initialization: every node starts at Top */
721 node->type.tv = tarval_top;
722 node->max_user_input = 0;
724 node->n_followers = 0;
725 node->on_touched = 0;
728 node->is_follower = 0;
/* link the node_t to its ir_node and enqueue it as a Leader */
730 set_irn_node(irn, node);
732 list_add_tail(&node->node_list, &part->Leader);
739 * Pre-Walker, initialize all Nodes' type to U or top and place
740 * all nodes into the TOP partition.
742 static void create_initial_partitions(ir_node *irn, void *ctx)
744 environment_t *env = (environment_t*)ctx;
745 partition_t *part = env->initial;
748 node = create_partition_node(irn, part, env);
/* track the largest user-input position over the whole partition */
750 if (node->max_user_input > part->max_user_inputs)
751 part->max_user_inputs = node->max_user_input;
/* Blocks get an (initially empty) Phi list, filled by init_block_phis() */
754 set_Block_phis(irn, NULL);
759 * Post-Walker, collect all Block-Phi lists, set Cond.
761 static void init_block_phis(ir_node *irn, void *ctx)
766 ir_node *block = get_nodes_block(irn);
767 add_Block_phi(block, irn);
772 * Add a node to the entry.partition.touched set and
773 * node->partition to the touched set if not already there.
776 * @param env the environment
778 static inline void add_to_touched(node_t *y, environment_t *env)
/* the on_touched flags make both insertions idempotent */
780 if (y->on_touched == 0) {
781 partition_t *part = y->part;
783 y->next = part->touched;
788 if (part->on_touched == 0) {
789 part->touched_next = env->touched;
791 part->on_touched = 1;
794 check_list(part->touched, part);
799 * Place a node on the cprop list.
802 * @param env the environment
804 static void add_to_cprop(node_t *y, environment_t *env)
808 /* Add y to y.partition.cprop. */
809 if (y->on_cprop == 0) {
810 partition_t *Y = y->part;
811 ir_node *irn = y->node;
812 ir_node *skipped = skip_Proj(irn);
814 /* place Conds and all its Projs on the cprop_X list */
815 if (is_Cond(skipped) || is_Switch(skipped))
816 list_add_tail(&y->cprop_list, &Y->cprop_X);
818 list_add_tail(&y->cprop_list, &Y->cprop);
821 DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));
823 /* place its partition on the cprop list */
824 if (Y->on_cprop == 0) {
825 Y->cprop_next = env->cprop;
831 if (get_irn_mode(irn) == mode_T) {
832 /* mode_T nodes always produce tarval_bottom, so we must explicitly
833 * add its Projs to get constant evaluation to work */
834 for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
835 node_t *proj = get_irn_node(get_irn_out(irn, i));
837 add_to_cprop(proj, env);
839 } else if (is_Block(irn)) {
840 /* Due to the way we handle Phi's, we must place all Phis of a block on the list
841 * if someone placed the block. The Block is only placed if the reachability
842 * changes, and this must be re-evaluated in compute_Phi(). */
844 for (phi = get_Block_phis(irn); phi != NULL; phi = get_Phi_next(phi)) {
845 node_t *p = get_irn_node(phi);
846 add_to_cprop(p, env);
852 * Update the worklist: If Z is on worklist then add Z' to worklist.
853 * Else add the smaller of Z and Z' to worklist.
855 * @param Z       the Z partition
856 * @param Z_prime the Z' partition, a previous part of Z
857 * @param env     the environment
859 static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env)
861 if (Z->on_worklist || Z_prime->n_leader < Z->n_leader) {
862 add_to_worklist(Z_prime, env);
864 add_to_worklist(Z, env);
869 * Make all inputs to x no longer be F.def_use edges.
/* For every predecessor p of x: the out array of p is partitioned into
 * [follower edges | leader edges]; an edge to x found in the follower part
 * is removed there and re-inserted, position-sorted, into the leader part. */
873 static void move_edges_to_leader(node_t *x)
875 ir_node *irn = x->node;
876 for (int i = get_irn_arity(irn) - 1; i >= 0; --i) {
877 node_t *pred = get_irn_node(get_irn_n(irn, i));
878 ir_node *p = pred->node;
879 unsigned n = get_irn_n_outs(p);
880 for (unsigned j = 0; j < pred->n_followers; ++j) {
881 ir_def_use_edge edge = p->o.out->edges[j];
882 if (edge.pos == i && edge.use == irn) {
883 /* found a follower edge to x, move it to the Leader */
884 /* remove this edge from the Follower set */
886 p->o.out->edges[j] = p->o.out->edges[pred->n_followers];
888 /* sort it into the leader set */
890 for (k = pred->n_followers+1; k < n; ++k) {
891 if (p->o.out->edges[k].pos >= edge.pos)
893 p->o.out->edges[k-1] = p->o.out->edges[k];
895 /* place the new edge here */
896 p->o.out->edges[k-1] = edge;
898 /* edge found and moved */
906 * Split a partition that has NO followers by a local list.
908 * @param Z   partition to split
909 * @param g   a (non-empty) node list
910 * @param env the environment
912 * @return a new partition containing the nodes of g
/* Fast path of split(): without followers no race is needed, the nodes of g
 * are simply moved from Z into a freshly created partition Z'. */
914 static partition_t *split_no_followers(partition_t *Z, node_t *g, environment_t *env)
916 partition_t *Z_prime;
921 dump_partition("Splitting ", Z);
922 dump_list("by list ", g);
926 /* Remove g from Z. */
927 for (node = g; node != NULL; node = node->next) {
928 assert(node->part == Z);
929 list_del(&node->node_list);
/* g must be a proper subset of Z's leaders */
932 assert(n < Z->n_leader);
935 /* Move g to a new partition, Z'. */
936 Z_prime = new_partition(env);
938 for (node = g; node != NULL; node = node->next) {
939 list_add_tail(&node->node_list, &Z_prime->Leader);
940 node->part = Z_prime;
941 if (node->max_user_input > max_input)
942 max_input = node->max_user_input;
944 Z_prime->max_user_inputs = max_input;
945 Z_prime->n_leader = n;
948 check_partition(Z_prime);
950 /* for now, copy the type info tag, it will be adjusted in split_by(). */
951 Z_prime->type_is_T_or_C = Z->type_is_T_or_C;
953 dump_partition("Now ", Z);
954 dump_partition("Created new ", Z_prime);
956 update_worklist(Z, Z_prime, env);
962 * Make the Follower -> Leader transition for a node.
966 static void follower_to_leader(node_t *n)
968 assert(n->is_follower == 1);
970 DB((dbg, LEVEL_2, "%+F make the follower -> leader transition\n", n->node));
/* re-classify its incoming def-use edges, then move it onto the Leader list */
972 move_edges_to_leader(n);
973 list_del(&n->node_list);
974 list_add_tail(&n->node_list, &n->part->Leader);
979 * The environment for one race step.
981 typedef struct step_env {
982 node_t *initial; /**< The initial node list. */
983 node_t *unwalked; /**< The unwalked node list. */
984 node_t *walked; /**< The walked node list. */
985 unsigned index; /**< Next index of Follower use_def edge. */
986 unsigned side; /**< side number. */
990 * Return non-zero, if a input is a real follower
992 * @param irn   the node to check
993 * @param input number of the input
/* Only some inputs make a node a follower of its predecessor, depending on
 * the opcode and (for arithmetic) on the predecessor's current constant. */
995 static int is_real_follower(const ir_node *irn, int input)
999 switch (get_irn_opcode(irn)) {
1002 /* ignore the Confirm bound input */
1008 /* ignore the Mux sel input */
1013 /* dead inputs are not follower edges */
1014 ir_node *block = get_nodes_block(irn);
1015 node_t *pred = get_irn_node(get_Block_cfgpred(block, input));
1017 if (pred->type.tv == tarval_unreachable)
1027 /* only a Sub x,0 / Shift x,0 might be a follower */
/* Sub/Shift: the second operand must currently be the constant 0 */
1034 pred = get_irn_node(get_irn_n(irn, input));
1035 if (is_tarval(pred->type.tv) && tarval_is_null(pred->type.tv))
/* Mul: operand must currently be the constant 1 */
1039 pred = get_irn_node(get_irn_n(irn, input));
1040 if (is_tarval(pred->type.tv) && tarval_is_one(pred->type.tv))
/* And: operand must currently be all-ones */
1044 pred = get_irn_node(get_irn_n(irn, input));
1045 if (is_tarval(pred->type.tv) && tarval_is_all_one(pred->type.tv))
1049 assert(!"opcode not implemented yet");
1056 * Do one step in the race.
/* Returns non-zero when this side has finished walking (won the race). */
1058 static int step(step_env *env)
1062 if (env->initial != NULL) {
1063 /* Move node from initial to unwalked */
1065 env->initial = n->race_next;
1067 n->race_next = env->unwalked;
1073 while (env->unwalked != NULL) {
1074 /* let n be the first node in unwalked */
1076 while (env->index < n->n_followers) {
1077 const ir_def_use_edge *edge = &n->node->o.out->edges[env->index];
1079 /* let m be n.F.def_use[index] */
1080 node_t *m = get_irn_node(edge->use);
1082 assert(m->is_follower);
1084 * Some inputs, like the get_Confirm_bound are NOT
1085 * real followers, sort them out.
1087 if (! is_real_follower(m->node, edge->pos)) {
1093 /* only followers from our partition */
1094 if (m->part != n->part)
/* flag m with this side's bit; 3 means both sides reached it */
1097 if ((m->flagged & env->side) == 0) {
1098 m->flagged |= env->side;
1100 if (m->flagged != 3) {
1101 /* visited the first time */
1102 /* add m to unwalked not as first node (we might still need to
1103    check for more follower nodes) */
1104 m->race_next = n->race_next;
1108 /* else already visited by the other side and on the other list */
1111 /* move n to walked */
1112 env->unwalked = n->race_next;
1113 n->race_next = env->walked;
1121 * Clear the flags from a list and check for
1122 * nodes that where touched from both sides.
1124 * @param list the list
/* Returns non-zero if at least one follower -> leader transition happened. */
1126 static int clear_flags(node_t *list)
1131 for (n = list; n != NULL; n = n->race_next) {
1132 if (n->flagged == 3) {
1133 /* we reach a follower from both sides, this will split congruent
1134 * inputs and make it a leader. */
1135 follower_to_leader(n);
1144 * Split a partition by a local list using the race.
1146 * @param pX  pointer to the partition to split, might be changed!
1147 * @param gg  a (non-empty) node list
1148 * @param env the environment
1150 * @return a new partition containing the nodes of gg
/* Click's "race": the subset gg and its complement walk their follower
 * closures in lock-step; the side that finishes first becomes the new,
 * smaller partition X'. */
1152 static partition_t *split(partition_t **pX, node_t *gg, environment_t *env)
1154 partition_t *X = *pX;
1155 partition_t *X_prime;
1159 int max_input, transitions, winner, shf;
1161 DEBUG_ONLY(static int run = 0;)
1163 DB((dbg, LEVEL_2, "Run %d ", run++));
1164 if (list_empty(&X->Follower)) {
1165 /* if the partition has NO follower, we can use the fast
1166    splitting algorithm. */
1167 return split_no_followers(X, gg, env);
1169 /* else do the race */
1171 dump_partition("Splitting ", X);
1172 dump_list("by list    ", gg);
1174 INIT_LIST_HEAD(&tmp);
1176 /* Remove gg from X.Leader and put into g */
1178 for (node_t *node = gg; node != NULL; node = node->next) {
1179 assert(node->part == X);
1180 assert(node->is_follower == 0);
1182 list_del(&node->node_list);
1183 list_add_tail(&node->node_list, &tmp);
1184 node->race_next = g;
/* the remaining leaders form the opposing race list h */
1189 list_for_each_entry(node_t, node, &X->Leader, node_list) {
1190 node->race_next = h;
1193 /* restore X.Leader */
1194 list_splice(&tmp, &X->Leader);
1196 senv[0].initial = g;
1197 senv[0].unwalked = NULL;
1198 senv[0].walked = NULL;
1202 senv[1].initial = h;
1203 senv[1].unwalked = NULL;
1204 senv[1].walked = NULL;
1209 * Some information on the race that is not stated clearly in Click's
1211 * 1) A follower stays on the side that reaches him first.
1212 * 2) If the other side reaches a follower, it will be converted to
1213 *    a leader. /This must be done after the race is over, else the
1214 *    edges we are iterating on are renumbered./
1215 * 3) /New leader might end up on both sides./
1216 * 4) /If one side ends up with new Leaders, we must ensure that
1217 *    they can split out by opcode, hence we have to put _every_
1218 *    partition with new Leader nodes on the cprop list, as
1219 *    opcode splitting is done by split_by() at the end of
1220 *    constant propagation./
/* alternate single steps until one side finishes */
1223 if (step(&senv[0])) {
1227 if (step(&senv[1])) {
1232 assert(senv[winner].initial == NULL);
1233 assert(senv[winner].unwalked == NULL);
1235 /* clear flags from walked/unwalked */
1237 transitions = clear_flags(senv[0].unwalked) << shf;
1238 transitions |= clear_flags(senv[0].walked) << shf;
1240 transitions |= clear_flags(senv[1].unwalked) << shf;
1241 transitions |= clear_flags(senv[1].walked) << shf;
1243 dump_race_list("winner ", senv[winner].walked);
1245 /* Move walked_{winner} to a new partition, X'. */
1246 X_prime = new_partition(env);
1249 for (node_t *node = senv[winner].walked; node != NULL; node = node->race_next) {
1250 list_del(&node->node_list);
1251 node->part = X_prime;
1252 if (node->is_follower) {
1253 list_add_tail(&node->node_list, &X_prime->Follower);
1255 list_add_tail(&node->node_list, &X_prime->Leader);
1258 if (node->max_user_input > max_input)
1259 max_input = node->max_user_input;
1261 X_prime->n_leader = n;
1262 X_prime->max_user_inputs = max_input;
1263 X->n_leader -= X_prime->n_leader;
1265 /* for now, copy the type info tag, it will be adjusted in split_by(). */
1266 X_prime->type_is_T_or_C = X->type_is_T_or_C;
1269 * Even if a follower was not checked by both sides, it might have
1270 * lost its congruence, so we need to check this case for all followers.
1272 list_for_each_entry_safe(node_t, node, t, &X_prime->Follower, node_list) {
1273 if (identity(node) == node) {
1274 follower_to_leader(node);
1280 check_partition(X_prime);
1282 dump_partition("Now     ", X);
1283 dump_partition("Created new ", X_prime);
1285 /* X' is the smaller part */
1286 add_to_worklist(X_prime, env);
1289 * If there where follower to leader transitions, ensure that the nodes
1290 * can be split out if necessary.
1292 if (transitions & 1) {
1293 /* place winner partition on the cprop list */
1294 if (X_prime->on_cprop == 0) {
1295 X_prime->cprop_next = env->cprop;
1296 env->cprop = X_prime;
1297 X_prime->on_cprop = 1;
1300 if (transitions & 2) {
1301 /* place other partition on the cprop list */
1302 if (X->on_cprop == 0) {
1303 X->cprop_next = env->cprop;
1309 /* we have to ensure that the partition containing g is returned */
1319 * Returns non-zero if the i'th input of a Phi node is live.
1321 * @param phi a Phi-node
1322 * @param i   an input number
1324 * @return non-zero if the i'th input of the given Phi node is live
1326 static int is_live_input(ir_node *phi, int i)
/* for real inputs (i >= 0) check reachability of the controlling cfg pred */
1329 ir_node *block = get_nodes_block(phi);
1330 ir_node *pred = get_Block_cfgpred(block, i);
1331 lattice_elem_t type = get_node_type(pred);
1333 return type.tv != tarval_unreachable;
1335 /* else it's the control input, always live */
1340 * Return non-zero if a type is a constant.
1342 static int is_constant_type(lattice_elem_t type)
/* constant == anything strictly between Top and Bottom in the lattice */
1344 if (type.tv != tarval_bottom && type.tv != tarval_top)
1350 * Check whether a type is neither Top or a constant.
1351 * Note: U is handled like Top here, R is a constant.
1353 * @param type the type to check
1355 static int type_is_neither_top_nor_const(const lattice_elem_t type)
1357 if (is_tarval(type.tv)) {
1358 if (type.tv == tarval_top)
1360 if (tarval_is_constant(type.tv))
1370 * Collect nodes to the touched list.
1372 * @param list the list which contains the nodes that must be evaluated
1373 * @param idx  the index of the def_use edge to evaluate
1374 * @param env  the environment
1376 static void collect_touched(list_head *list, int idx, environment_t *env)
1379 int end_idx = env->end_idx;
1381 list_for_each_entry(node_t, x, list, node_list) {
1383 /* leader edges start AFTER follower edges */
1384 x->next_edge = x->n_followers;
1386 unsigned num_edges = get_irn_n_outs(x->node);
1388 /* for all edges in x.L.def_use_{idx} */
1389 while (x->next_edge < num_edges) {
1390 const ir_def_use_edge *edge = &x->node->o.out->edges[x->next_edge];
1393 /* check if we have necessary edges */
/* edges are sorted by pos, so we can stop once we passed idx */
1394 if (edge->pos > idx)
1401 /* only non-commutative nodes */
1402 if (env->commutative &&
1403 (idx == 0 || idx == 1) && is_op_commutative(get_irn_op(succ)))
1406 /* ignore the "control input" for non-pinned nodes
1407    if we are running in GCSE mode */
1408 if (idx < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
1411 y = get_irn_node(succ);
1412 assert(get_irn_n(succ, idx) == x->node);
1414 /* ignore block edges touching followers */
1415 if (idx == -1 && y->is_follower)
/* constant users of Sub/Cmp must be re-evaluated, not split */
1418 if (is_constant_type(y->type)) {
1419 unsigned code = get_irn_opcode(succ);
1420 if (code == iro_Sub || code == iro_Cmp)
1421 add_to_cprop(y, env);
1424 /* Partitions of constants should not be split simply because their Nodes have unequal
1425    functions or incongruent inputs. */
1426 if (type_is_neither_top_nor_const(y->type) &&
1427 (! is_Phi(y->node) || is_live_input(y->node, idx))) {
1428 add_to_touched(y, env);
1435 * Collect commutative nodes to the touched list.
1437 * @param list the list which contains the nodes that must be evaluated
1438 * @param env the environment
/* counterpart of collect_touched() for commutative users: such users are
 * skipped there (for idx 0/1) and handled here instead */
1440 static void collect_commutative_touched(list_head *list, environment_t *env)
1444 list_for_each_entry(node_t, x, list, node_list) {
1445 unsigned num_edges = get_irn_n_outs(x->node);
/* leader edges start AFTER follower edges */
1447 x->next_edge = x->n_followers;
1449 /* for all edges in x.L.def_use_{idx} */
1450 while (x->next_edge < num_edges) {
1451 const ir_def_use_edge *edge = &x->node->o.out->edges[x->next_edge];
1454 /* check if we have necessary edges */
1464 /* only commutative nodes */
1465 if (!is_op_commutative(get_irn_op(succ)))
1468 y = get_irn_node(succ);
1469 if (is_constant_type(y->type)) {
1470 unsigned code = get_irn_opcode(succ);
/* Eor over congruent inputs may change value: re-evaluate it */
1471 if (code == iro_Eor)
1472 add_to_cprop(y, env);
1475 /* Partitions of constants should not be split simply because their Nodes have unequal
1476 functions or incongruent inputs. */
1477 if (type_is_neither_top_nor_const(y->type)) {
1478 add_to_touched(y, env);
1485 * Split the partitions if caused by the first entry on the worklist.
1487 * @param env the environment
/* Implements Click's cause-splits step: pop partition X from the
 * worklist, collect all users of X's nodes into "touched" sets per
 * partition, and split each touched partition that is only partially
 * touched. */
1489 static void cause_splits(environment_t *env)
1491 partition_t *X, *Z, *N;
1494 /* remove the first partition from the worklist */
1496 env->worklist = X->wl_next;
1499 dump_partition("Cause_split: ", X);
1501 if (env->commutative) {
1502 /* handle commutative nodes first */
1504 /* empty the touched set: already done, just clear the list */
1505 env->touched = NULL;
1507 collect_commutative_touched(&X->Leader, env);
1508 collect_commutative_touched(&X->Follower, env);
1510 for (Z = env->touched; Z != NULL; Z = N) {
1512 node_t *touched = Z->touched;
1513 node_t *touched_aa = NULL;
1514 node_t *touched_ab = NULL;
1515 unsigned n_touched_aa = 0;
1516 unsigned n_touched_ab = 0;
1518 assert(Z->touched != NULL);
1520 /* beware, split might change Z */
1521 N = Z->touched_next;
1523 /* remove it from the touched set */
1526 /* Empty local Z.touched. */
1527 for (e = touched; e != NULL; e = n) {
1528 node_t *left = get_irn_node(get_irn_n(e->node, 0));
1529 node_t *right = get_irn_node(get_irn_n(e->node, 1));
1531 assert(e->is_follower == 0);
1536 * Note: op(a, a) is NOT congruent to op(a, b).
1537 * So, we must split the touched list.
/* touched_aa collects op(a,a)-shaped users (both inputs in the same
 * partition), touched_ab the op(a,b)-shaped ones */
1539 if (left->part == right->part) {
1540 e->next = touched_aa;
1544 e->next = touched_ab;
1549 assert(n_touched_aa + n_touched_ab == Z->n_touched);
/* split only if the touched subset is a proper, non-empty subset of
 * Z's leaders */
1553 if (0 < n_touched_aa && n_touched_aa < Z->n_leader) {
1554 partition_t *Z_prime = Z;
1555 DB((dbg, LEVEL_2, "Split part%d by touched_aa\n", Z_prime->nr));
1556 split(&Z_prime, touched_aa, env);
1558 assert(n_touched_aa <= Z->n_leader);
1560 if (0 < n_touched_ab && n_touched_ab < Z->n_leader) {
1561 partition_t *Z_prime = Z;
1562 DB((dbg, LEVEL_2, "Split part%d by touched_ab\n", Z_prime->nr));
1563 split(&Z_prime, touched_ab, env);
1565 assert(n_touched_ab <= Z->n_leader);
1569 /* combine temporary leader and follower list */
/* idx == -1 covers the block input, 0..max_user_inputs the data inputs */
1570 for (idx = -1; idx <= X->max_user_inputs; ++idx) {
1571 /* empty the touched set: already done, just clear the list */
1572 env->touched = NULL;
1574 collect_touched(&X->Leader, idx, env);
1575 collect_touched(&X->Follower, idx, env);
1577 for (Z = env->touched; Z != NULL; Z = N) {
1579 node_t *touched = Z->touched;
1580 unsigned n_touched = Z->n_touched;
1582 assert(Z->touched != NULL);
1584 /* beware, split might change Z */
1585 N = Z->touched_next;
1587 /* remove it from the touched set */
1590 /* Empty local Z.touched. */
1591 for (e = touched; e != NULL; e = e->next) {
1592 assert(e->is_follower == 0);
1598 if (0 < n_touched && n_touched < Z->n_leader) {
1599 DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
1600 split(&Z, touched, env);
1602 assert(n_touched <= Z->n_leader);
1608 * Implements split_by_what(): Split a partition by characteristics given
1609 * by the what function.
1611 * @param X the partition to split
1612 * @param What a function returning an Id for every node of the partition X
1613 * @param P a list to store the result partitions
1614 * @param env the environment
/* groups X's leaders by What(x) via a listmap, then splits out every
 * group except one (the remainder stays in X) */
1618 static partition_t *split_by_what(partition_t *X, what_func What,
1619 partition_t **P, environment_t *env)
1623 listmap_entry_t *iter;
1626 /* Let map be an empty mapping from the range of What to (local) list of Nodes. */
1628 list_for_each_entry(node_t, x, &X->Leader, node_list) {
1629 void *id = What(x, env);
1630 listmap_entry_t *entry;
1633 /* input not allowed, ignore */
1636 /* Add x to map[What(x)]. */
1637 entry = listmap_find(&map, id);
1638 x->next = entry->list;
1641 /* Let P be a set of Partitions. */
1643 /* for all sets S except one in the range of map do */
1644 for (iter = map.values; iter != NULL; iter = iter->next) {
1645 if (iter->next == NULL) {
1646 /* this is the last entry, ignore */
1651 /* Add SPLIT( X, S ) to P. */
1652 DB((dbg, LEVEL_2, "Split part%d by WHAT = %s\n", X->nr, what_reason));
1653 R = split(&X, S, env);
1665 /** lambda n.(n.type) */
/* what_func: keys a node by its current lattice type */
1666 static void *lambda_type(const node_t *node, environment_t *env)
1669 return node->type.tv;
1672 /** lambda n.(n.opcode) */
/* what_func: keys a node by its opcode, canonicalized through the
 * env->opcode2id_map set so equal keys share one pointer identity */
1673 static void *lambda_opcode(const node_t *node, environment_t *env)
1675 opcode_key_t key, *entry;
1677 key.irn = node->node;
1679 entry = set_insert(opcode_key_t, env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
1683 /** lambda n.(n[i].partition) */
/* what_func: keys a node by the partition of its i'th input, where
 * i = env->lambda_input (i == -1 selects the block input) */
1684 static void *lambda_partition(const node_t *node, environment_t *env)
1686 ir_node *skipped = skip_Proj(node->node);
1689 int i = env->lambda_input;
1691 if (i >= get_irn_arity(node->node)) {
1693 * We are outside the allowed range: This can happen even
1694 * if we have split by opcode first: doing so might move Followers
1695 * to Leaders and those will have a different opcode!
1696 * Note that in this case the partition is on the cprop list and will be
1702 /* ignore the "control input" for non-pinned nodes
1703 if we are running in GCSE mode */
1704 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
/* the block input is taken from the Proj-skipped node */
1707 pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
1708 p = get_irn_node(pred);
1712 /** lambda n.(n[i].partition) for commutative nodes */
/* like lambda_partition(), but for commutative ops the two input
 * partitions are ordered by pointer value so that Add(a,b) and Add(b,a)
 * produce the same key */
1713 static void *lambda_commutative_partition(const node_t *node, environment_t *env)
1715 ir_node *irn = node->node;
1716 ir_node *skipped = skip_Proj(irn);
1717 ir_node *pred, *left, *right;
1719 partition_t *pl, *pr;
1720 int i = env->lambda_input;
1722 if (i >= get_irn_arity(node->node)) {
1724 * We are outside the allowed range: This can happen even
1725 * if we have split by opcode first: doing so might move Followers
1726 * to Leaders and those will have a different opcode!
1727 * Note that in this case the partition is on the cprop list and will be
1733 /* ignore the "control input" for non-pinned nodes
1734 if we are running in GCSE mode */
1735 if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
1739 pred = get_irn_n(skipped, i);
1740 p = get_irn_node(pred);
1744 if (is_op_commutative(get_irn_op(irn))) {
1745 /* normalize partition order by returning the "smaller" on input 0,
1746 the "bigger" on input 1. */
1747 left = get_binop_left(irn);
1748 pl = get_irn_node(left)->part;
1749 right = get_binop_right(irn);
1750 pr = get_irn_node(right)->part;
/* NOTE(review): comparing partition pointers for order — stable within
 * one run, which is all that is needed for keying */
1753 return pl < pr ? pl : pr;
1755 return pl > pr ? pl : pr;
1757 /* a not split out Follower */
1758 pred = get_irn_n(irn, i);
1759 p = get_irn_node(pred);
1766 * Returns true if a type is a constant (and NOT Top
1769 static int is_con(const lattice_elem_t type)
1771 /* be conservative */
1772 if (is_tarval(type.tv))
1773 return tarval_is_constant(type.tv);
/* non-tarval lattice elements are entity symbols, i.e. constants */
1774 return is_entity(type.sym.entity_p);
1778 * Implements split_by().
1780 * @param X the partition to split
1781 * @param env the environment
/* Splits X successively: first by type, then (for non-Top/const
 * partitions) by opcode, then by the partition of every input.
 * Splitting by input i may create partitions differing in input i-1,
 * hence the work-list style do/while loops. */
1783 static void split_by(partition_t *X, environment_t *env)
1785 partition_t *I, *P = NULL;
1788 dump_partition("split_by", X);
1790 if (X->n_leader == 1) {
1791 /* we have only one leader, no need to split, just check its type */
1792 node_t *x = get_first_node(X);
1793 X->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1797 DEBUG_ONLY(what_reason = "lambda n.(n.type)";)
1798 P = split_by_what(X, lambda_type, &P, env);
1801 /* adjust the type tags, we have split partitions by type */
1802 for (I = P; I != NULL; I = I->split_next) {
1803 node_t *x = get_first_node(I);
1804 I->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
1811 if (Y->n_leader > 1) {
1812 /* we do not want split the TOP or constant partitions */
1813 if (! Y->type_is_T_or_C) {
1814 partition_t *Q = NULL;
1816 DEBUG_ONLY(what_reason = "lambda n.(n.opcode)";)
1817 Q = split_by_what(Y, lambda_opcode, &Q, env);
1824 if (Z->n_leader > 1) {
1825 const node_t *first = get_first_node(Z);
1826 int arity = get_irn_arity(first->node);
1828 what_func what = lambda_partition;
1829 DEBUG_ONLY(char buf[64];)
/* commutative ops need the order-normalized input key */
1831 if (env->commutative && is_op_commutative(get_irn_op(first->node)))
1832 what = lambda_commutative_partition;
1835 * BEWARE: during splitting by input 2 for instance we might
1836 * create new partitions which are different by input 1, so collect
1837 * them and split further.
1839 Z->split_next = NULL;
/* input == -1 is the block input */
1842 for (input = arity - 1; input >= -1; --input) {
1844 partition_t *Z_prime = R;
1847 if (Z_prime->n_leader > 1) {
1848 env->lambda_input = input;
1849 DEBUG_ONLY(snprintf(buf, sizeof(buf), "lambda n.(n[%d].partition)", input);)
1850 DEBUG_ONLY(what_reason = buf;)
1851 S = split_by_what(Z_prime, what, &S, env);
1854 Z_prime->split_next = S;
1857 } while (R != NULL);
1862 } while (Q != NULL);
1865 } while (P != NULL);
1869 * (Re-)compute the type for a given node.
1871 * @param node the node
/* generic transfer function: Top if any input is Top, reachable for
 * mode_X nodes, otherwise the value from constant folding */
1873 static void default_compute(node_t *node)
1876 ir_node *irn = node->node;
1878 /* if any of the data inputs have type top, the result is type top */
1879 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
1880 ir_node *pred = get_irn_n(irn, i);
1881 node_t *p = get_irn_node(pred);
1883 if (p->type.tv == tarval_top) {
1884 node->type.tv = tarval_top;
1889 if (get_irn_mode(node->node) == mode_X)
1890 node->type.tv = tarval_reachable;
1892 node->type.tv = computed_value(irn);
1896 * (Re-)compute the type for a Block node.
1898 * @param node the node
/* a Block is reachable iff it is the start block, carries a label
 * entity, or has at least one reachable control predecessor */
1900 static void compute_Block(node_t *node)
1903 ir_node *block = node->node;
1905 ir_graph *const irg = get_Block_irg(block);
1906 if (block == get_irg_start_block(irg) || get_Block_entity(block) != NULL) {
1907 /* start block and labelled blocks are always reachable */
1908 node->type.tv = tarval_reachable;
1912 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1913 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
1915 if (pred->type.tv == tarval_reachable) {
1916 /* A block is reachable, if at least of predecessor is reachable. */
1917 node->type.tv = tarval_reachable;
/* no reachable predecessor found: stay/become Top */
1921 node->type.tv = tarval_top;
1925 * (Re-)compute the type for a Bad node.
1927 * @param node the node
1929 static void compute_Bad(node_t *node)
1931 /* Bad nodes ALWAYS compute Top */
1932 node->type.tv = tarval_top;
1936 * (Re-)compute the type for an Unknown node.
1938 * @param node the node
1940 static void compute_Unknown(node_t *node)
1942 /* While Unknown nodes should compute Top this is dangerous:
1943 * a Top input to a Cond would lead to BOTH control flows unreachable.
1944 * While this is correct in the given semantics, it would destroy the Firm
1947 * It would be safe to compute Top IF it can be assured, that only Cmp
1948 * nodes are inputs to Conds. We check that first.
1949 * This is the way Frontends typically build Firm, but some optimizations
1950 * (jump threading for instance) might replace them by Phib's...
/* tarval_UNKNOWN is the configured compromise value (Top or bottom) */
1952 node->type.tv = tarval_UNKNOWN;
1956 * (Re-)compute the type for a Jmp node.
1958 * @param node the node
/* a Jmp simply forwards the reachability of its block */
1960 static void compute_Jmp(node_t *node)
1962 node_t *block = get_irn_node(get_nodes_block(node->node));
1964 node->type = block->type;
1968 * (Re-)compute the type for the Return node.
1970 * @param node the node
1972 static void compute_Return(node_t *node)
1974 /* The Return node is NOT dead if it is in a reachable block.
1975 * This is already checked in compute(). so we can return
1976 * Reachable here. */
1977 node->type.tv = tarval_reachable;
1981 * (Re-)compute the type for the End node.
1983 * @param node the node
1985 static void compute_End(node_t *node)
1987 /* the End node is NOT dead of course */
1988 node->type.tv = tarval_reachable;
1992 * (Re-)compute the type for a Call.
1994 * @param node the node
1996 static void compute_Call(node_t *node)
1999 * A Call computes always bottom, even if it has Unknown
2002 node->type.tv = tarval_bottom;
2006 * (Re-)compute the type for a SymConst node.
2008 * @param node the node
/* Top in an unreachable block; the entity symbol for address SymConsts;
 * otherwise the folded value */
2010 static void compute_SymConst(node_t *node)
2012 ir_node *irn = node->node;
2013 node_t *block = get_irn_node(get_nodes_block(irn));
2015 if (block->type.tv == tarval_unreachable) {
2016 node->type.tv = tarval_top;
2019 switch (get_SymConst_kind(irn)) {
2020 case symconst_addr_ent:
2021 node->type.sym = get_SymConst_symbol(irn);
/* default case: fall back to constant folding */
2024 node->type.tv = computed_value(irn);
2029 * (Re-)compute the type for a Phi node.
2031 * @param node the node
/* Phi is the lattice Meet over its inputs from reachable predecessors */
2033 static void compute_Phi(node_t *node)
2036 ir_node *phi = node->node;
2037 lattice_elem_t type;
2039 /* if a Phi is in a unreachable block, its type is TOP */
2040 node_t *block = get_irn_node(get_nodes_block(phi));
2042 if (block->type.tv == tarval_unreachable) {
2043 node->type.tv = tarval_top;
2047 /* Phi implements the Meet operation */
2048 type.tv = tarval_top;
2049 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
2050 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
2051 node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
2053 if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
2054 /* ignore TOP inputs: We must check here for unreachable blocks,
2055 because Firm constants live in the Start Block are NEVER Top.
2056 Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
2057 comes from a unreachable input. */
2060 if (pred->type.tv == tarval_bottom) {
/* bottom is absorbing for Meet: no need to look further */
2061 node->type.tv = tarval_bottom;
2063 } else if (type.tv == tarval_top) {
2064 /* first constant found */
2066 } else if (type.tv != pred->type.tv) {
2067 /* different constants or tarval_bottom */
2068 node->type.tv = tarval_bottom;
2071 /* else nothing, constants are the same */
2077 * (Re-)compute the type for an Add. Special case: one nodes is a Zero Const.
2079 * @param node the node
2081 static void compute_Add(node_t *node)
2083 ir_node *sub = node->node;
2084 node_t *l = get_irn_node(get_Add_left(sub));
2085 node_t *r = get_irn_node(get_Add_right(sub));
2086 lattice_elem_t a = l->type;
2087 lattice_elem_t b = r->type;
2090 if (a.tv == tarval_top || b.tv == tarval_top) {
2091 node->type.tv = tarval_top;
2092 } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
2093 node->type.tv = tarval_bottom;
2095 /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
2096 must call tarval_add() first to handle this case! */
2097 if (is_tarval(a.tv)) {
2098 if (is_tarval(b.tv)) {
2099 node->type.tv = tarval_add(a.tv, b.tv);
/* a is tarval, b is a symbolic constant: x + 0 == x */
2102 mode = get_tarval_mode(a.tv);
2103 if (a.tv == get_mode_null(mode)) {
2107 } else if (is_tarval(b.tv)) {
2108 mode = get_tarval_mode(b.tv);
2109 if (b.tv == get_mode_null(mode)) {
/* neither side reducible: fall to bottom */
2114 node->type.tv = tarval_bottom;
2119 * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
2121 * @param node the node
2123 static void compute_Sub(node_t *node)
2125 ir_node *sub = node->node;
2126 node_t *l = get_irn_node(get_Sub_left(sub));
2127 node_t *r = get_irn_node(get_Sub_right(sub));
2128 lattice_elem_t a = l->type;
2129 lattice_elem_t b = r->type;
2132 if (a.tv == tarval_top || b.tv == tarval_top) {
2133 node->type.tv = tarval_top;
2134 } else if (is_con(a) && is_con(b)) {
2135 if (is_tarval(a.tv) && is_tarval(b.tv)) {
2136 node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
2137 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
2139 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
/* symbolic constants that cannot be folded: bottom */
2142 node->type.tv = tarval_bottom;
2144 } else if (r->part == l->part &&
2145 (!mode_is_float(get_irn_mode(l->node)))) {
2147 * BEWARE: a - a is NOT always 0 for floating Point values, as
2148 * NaN op NaN = NaN, so we must check this here.
2150 ir_mode *mode = get_irn_mode(sub);
2151 tv = get_mode_null(mode);
2153 /* if the node was ONCE evaluated by all constants, but now
2154 this breaks AND we get from the argument partitions a different
2155 result, switch to bottom.
2156 This happens because initially all nodes are in the same partition ... */
2157 if (node->type.tv != tv)
/* default: result unknown, bottom */
2161 node->type.tv = tarval_bottom;
2166 * (Re-)compute the type for an Eor. Special case: both nodes are congruent.
2168 * @param node the node
/* structured like compute_Sub(): x ^ x == 0 for congruent inputs,
 * no float caveat since Eor is an integer/bit operation */
2170 static void compute_Eor(node_t *node)
2172 ir_node *eor = node->node;
2173 node_t *l = get_irn_node(get_Eor_left(eor));
2174 node_t *r = get_irn_node(get_Eor_right(eor));
2175 lattice_elem_t a = l->type;
2176 lattice_elem_t b = r->type;
2179 if (a.tv == tarval_top || b.tv == tarval_top) {
2180 node->type.tv = tarval_top;
2181 } else if (is_con(a) && is_con(b)) {
2182 if (is_tarval(a.tv) && is_tarval(b.tv)) {
2183 node->type.tv = tarval_eor(a.tv, b.tv);
2184 } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
2186 } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
2189 node->type.tv = tarval_bottom;
2191 } else if (r->part == l->part) {
2192 ir_mode *mode = get_irn_mode(eor);
2193 tv = get_mode_null(mode);
2195 /* if the node was ONCE evaluated by all constants, but now
2196 this breaks AND we get from the argument partitions a different
2197 result, switch to bottom.
2198 This happens because initially all nodes are in the same partition ... */
2199 if (node->type.tv != tv)
2203 node->type.tv = tarval_bottom;
2208 * (Re-)compute the type for Cmp.
2210 * @param node the node
2212 static void compute_Cmp(node_t *node)
2214 ir_node *cmp = node->node;
2215 node_t *l = get_irn_node(get_Cmp_left(cmp));
2216 node_t *r = get_irn_node(get_Cmp_right(cmp));
2217 lattice_elem_t a = l->type;
2218 lattice_elem_t b = r->type;
2219 ir_relation relation = get_Cmp_relation(cmp);
2222 if (a.tv == tarval_top || b.tv == tarval_top) {
/* NOTE(review): Cmp yields "undefined" for Top inputs instead of Top,
 * see the head comment about letting Cmp calculate Top */
2223 node->type.tv = tarval_undefined;
2224 } else if (is_con(a) && is_con(b)) {
2225 default_compute(node);
2228 * BEWARE: a == a is NOT always True for floating Point values, as
2229 * NaN != NaN is defined, so we must check this here.
2230 * (while for some pnc we could still optimize we have to stay
2231 * consistent with compute_Cmp, so don't do anything for floats)
2233 } else if (r->part == l->part && !mode_is_float(get_irn_mode(l->node))) {
/* congruent non-float inputs: x rel x is true iff rel includes "==" */
2234 tv = relation & ir_relation_equal ? tarval_b_true : tarval_b_false;
2236 /* if the node was ONCE evaluated to a constant, but now
2237 this breaks AND we get from the argument partitions a different
2238 result, ensure monotony by fall to bottom.
2239 This happens because initially all nodes are in the same partition ... */
2240 if (node->type.tv == tarval_bottom)
2242 else if (node->type.tv != tv && is_constant_type(node->type))
2246 node->type.tv = tarval_bottom;
2251 * (Re-)compute the type for a Proj(Cond).
2253 * @param node the node
2254 * @param cond the predecessor Cond node
2256 static void compute_Proj_Cond(node_t *node, ir_node *cond)
2258 ir_node *proj = node->node;
2259 long pnc = get_Proj_proj(proj);
2260 ir_node *sel = get_Cond_selector(cond);
2261 node_t *selector = get_irn_node(sel);
2264 * Note: it is crucial for the monotony that the Proj(Cond)
2265 * are evaluates after all predecessors of the Cond selector are
2271 * Due to the fact that 0 is a const, the Cmp gets immediately
2272 * on the cprop list. It will be evaluated before x is evaluated,
2273 * might leaving x as Top. When later x is evaluated, the Cmp
2274 * might change its value.
2275 * BUT if the Cond is evaluated before this happens, Proj(Cond, FALSE)
2276 * gets R, and later changed to F if Cmp is evaluated to True!
2278 * We prevent this by putting Conds in an extra cprop_X queue, which
2279 * gets evaluated after the cprop queue is empty.
2281 * Note that this even happens with Click's original algorithm, if
2282 * Cmp(x, 0) is evaluated to True first and later changed to False
2283 * if x was Top first and later changed to a Const ...
2284 * It is unclear how Click solved that problem ...
2286 * However, in rare cases even this does not help, if a Top reaches
2287 * a compare through a Phi, than Proj(Cond) is evaluated changing
2288 * the type of the Phi to something other.
2289 * So, we take the last resort and bind the type to R once
2292 * (This might be even the way Click works around the whole problem).
2294 * Finally, we may miss some optimization possibilities due to this:
2299 * If Top reaches the if first, than we decide for != here.
2300 * If y later is evaluated to 0, we cannot revert this decision
2301 * and must live with both outputs enabled. If this happens,
2302 * we get an unresolved if (true) in the code ...
2304 * In Click's version where this decision is done at the Cmp,
2305 * the Cmp is NOT optimized away than (if y evaluated to 1
2306 * for instance) and we get a if (1 == 0) here ...
2308 * Both solutions are suboptimal.
2309 * At least, we could easily detect this problem and run
2310 * cf_opt() (or even combo) again :-(
/* once a Proj(Cond) became reachable it must stay reachable (monotony) */
2312 if (node->type.tv == tarval_reachable)
2315 if (pnc == pn_Cond_true) {
2316 if (selector->type.tv == tarval_b_false) {
2317 node->type.tv = tarval_unreachable;
2318 } else if (selector->type.tv == tarval_b_true) {
2319 node->type.tv = tarval_reachable;
2320 } else if (selector->type.tv == tarval_bottom) {
2321 node->type.tv = tarval_reachable;
2323 assert(selector->type.tv == tarval_top);
2324 if (tarval_UNKNOWN == tarval_top) {
2325 /* any condition based on Top is "!=" */
2326 node->type.tv = tarval_unreachable;
/* NOTE(review): both branches of this if assign tarval_unreachable for
 * the true-Proj — the distinction seems intentional documentation of the
 * two Top policies, confirm against full source */
2328 node->type.tv = tarval_unreachable;
2332 assert(pnc == pn_Cond_false);
2334 if (selector->type.tv == tarval_b_false) {
2335 node->type.tv = tarval_reachable;
2336 } else if (selector->type.tv == tarval_b_true) {
2337 node->type.tv = tarval_unreachable;
2338 } else if (selector->type.tv == tarval_bottom) {
2339 node->type.tv = tarval_reachable;
2341 assert(selector->type.tv == tarval_top);
2342 if (tarval_UNKNOWN == tarval_top) {
2343 /* any condition based on Top is "!=" */
2344 node->type.tv = tarval_reachable;
2346 node->type.tv = tarval_unreachable;
/* (Re-)compute the type for a Proj(Switch): decide reachability of the
 * out-projection pnc based on the selector's lattice value and the
 * switch table entries. */
2352 static void compute_Proj_Switch(node_t *node, ir_node *switchn)
2354 ir_node *proj = node->node;
2355 long pnc = get_Proj_proj(proj);
2356 ir_node *sel = get_Switch_selector(switchn);
2357 node_t *selector = get_irn_node(sel);
2359 /* see long comment in compute_Proj_Cond */
2360 if (node->type.tv == tarval_reachable)
2363 if (selector->type.tv == tarval_bottom) {
/* selector unknown at compile time: every out is reachable */
2364 node->type.tv = tarval_reachable;
2365 } else if (selector->type.tv == tarval_top) {
2366 if (tarval_UNKNOWN == tarval_top && pnc == pn_Switch_default) {
2367 /* a switch based of Top is always "default" */
2368 node->type.tv = tarval_reachable;
2370 node->type.tv = tarval_unreachable;
2373 long value = get_tarval_long(selector->type.tv);
2374 const ir_switch_table *table = get_Switch_table(switchn);
2375 size_t n_entries = ir_switch_table_get_n_entries(table);
2378 for (e = 0; e < n_entries; ++e) {
2379 const ir_switch_table_entry *entry
2380 = ir_switch_table_get_entry_const(table, e);
2381 ir_tarval *min = entry->min;
2382 ir_tarval *max = entry->max;
/* single-value entry: pointer compare suffices for tarvals */
2384 if (selector->type.tv == min) {
2385 node->type.tv = entry->pn == pnc
2386 ? tarval_reachable : tarval_unreachable;
/* range entry: compare numerically */
2390 long minval = get_tarval_long(min);
2391 long maxval = get_tarval_long(max);
2392 if (minval <= value && value <= maxval) {
2393 node->type.tv = entry->pn == pnc
2394 ? tarval_reachable : tarval_unreachable;
2400 /* no entry matched: default */
2402 = pnc == pn_Switch_default ? tarval_reachable : tarval_unreachable;
2407 * (Re-)compute the type for a Proj-Node.
2409 * @param node the node
/* dispatcher: handles the generic Proj cases (unreachable block, Top
 * predecessor, mode M/X) and delegates Cond/Switch projections */
2411 static void compute_Proj(node_t *node)
2413 ir_node *proj = node->node;
2414 ir_mode *mode = get_irn_mode(proj);
2415 node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
2416 ir_node *pred = get_Proj_pred(proj);
2418 if (block->type.tv == tarval_unreachable) {
2419 /* a Proj in a unreachable Block stay Top */
2420 node->type.tv = tarval_top;
2423 if (get_irn_node(pred)->type.tv == tarval_top && !is_Cond(pred) && !is_Switch(pred)) {
2424 /* if the predecessor is Top, its Proj follow */
2425 node->type.tv = tarval_top;
2429 if (mode == mode_M) {
2430 /* mode M is always bottom */
2431 node->type.tv = tarval_bottom;
2433 } else if (mode == mode_X) {
2434 /* handle mode_X nodes */
2435 switch (get_irn_opcode(pred)) {
2437 /* the Proj_X from the Start is always reachable.
2438 However this is already handled at the top. */
2439 node->type.tv = tarval_reachable;
2442 compute_Proj_Cond(node, pred);
2445 compute_Proj_Switch(node, pred);
/* all remaining Projs use the generic transfer function */
2452 default_compute(node);
2456 * (Re-)compute the type for a Confirm.
2458 * @param node the node
/* Confirm(x, ==, const) has the constant's type; otherwise it is a
 * plain copy of its value input (Confirms are kept, see head comment) */
2460 static void compute_Confirm(node_t *node)
2462 ir_node *confirm = node->node;
2463 node_t *pred = get_irn_node(get_Confirm_value(confirm));
2465 if (get_Confirm_relation(confirm) == ir_relation_equal) {
2466 node_t *bound = get_irn_node(get_Confirm_bound(confirm));
2468 if (is_con(bound->type)) {
2469 /* is equal to a constant */
2470 node->type = bound->type;
2474 /* a Confirm is a copy OR a Const */
2475 node->type = pred->type;
2479 * (Re-)compute the type for a given node.
2481 * @param node the node
/* top-level transfer function: handles the unreachable-block shortcut
 * and dispatches to the opcode-specific compute function stored in the
 * op's generic function pointer */
2483 static void compute(node_t *node)
2485 ir_node *irn = node->node;
2488 #ifndef VERIFY_MONOTONE
2490 * Once a node reaches bottom, the type cannot fall further
2491 * in the lattice and we can stop computation.
2492 * Do not take this exit if the monotony verifier is
2493 * enabled to catch errors.
2495 if (node->type.tv == tarval_bottom)
2499 if (!is_Block(irn)) {
2500 /* for pinned nodes, check its control input */
2501 if (get_irn_pinned(skip_Proj(irn)) == op_pin_state_pinned) {
2502 node_t *block = get_irn_node(get_nodes_block(irn));
2504 if (block->type.tv == tarval_unreachable) {
2505 node->type.tv = tarval_top;
/* dispatch through the per-opcode compute function */
2511 func = (compute_func)node->node->op->ops.generic;
2517 * Identity functions: Note that one might think that identity() is just a
2518 * synonym for equivalent_node(). While this is true, we cannot use it for the algorithm
2519 * here, because it expects that the identity node is one of the inputs, which is NOT
2520 * always true for equivalent_node() which can handle (and does sometimes) DAGs.
2521 * So, we have our own implementation, which copies some parts of equivalent_node()
2525 * Calculates the Identity for Phi nodes
/* a Phi is a copy of x iff all inputs from reachable predecessors lie
 * in the same partition as x */
2527 static node_t *identity_Phi(node_t *node)
2529 ir_node *phi = node->node;
2530 ir_node *block = get_nodes_block(phi);
2531 node_t *n_part = NULL;
2534 for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
2535 node_t *pred_X = get_irn_node(get_Block_cfgpred(block, i));
2537 if (pred_X->type.tv == tarval_reachable) {
2538 node_t *pred = get_irn_node(get_Phi_pred(phi, i));
2542 else if (n_part->part != pred->part) {
2543 /* incongruent inputs, not a follower */
2548 /* if n_part is NULL here, all inputs path are dead, the Phi computes
2549 * tarval_top, is in the TOP partition and should NOT being split! */
2550 assert(n_part != NULL);
2555 * Calculates the Identity for commutative 0 neutral nodes.
/* x op 0 == 0 op x == x for ops with a zero neutral element (Add, Or, ...) */
2557 static node_t *identity_comm_zero_binop(node_t *node)
2559 ir_node *op = node->node;
2560 node_t *a = get_irn_node(get_binop_left(op));
2561 node_t *b = get_irn_node(get_binop_right(op));
2562 ir_mode *mode = get_irn_mode(op);
2565 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2566 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2569 /* node: no input should be tarval_top, else the binop would be also
2570 * Top and not being split. */
2571 zero = get_mode_null(mode);
2572 if (a->type.tv == zero)
2574 if (b->type.tv == zero)
2580 * Calculates the Identity for Shift nodes.
/* x shift 0 == x; note the zero is taken in the mode of the shift
 * amount (right operand), which may differ from the result mode */
2582 static node_t *identity_shift(node_t *node)
2584 ir_node *op = node->node;
2585 node_t *b = get_irn_node(get_binop_right(op));
2586 ir_mode *mode = get_irn_mode(b->node);
2589 /* node: no input should be tarval_top, else the binop would be also
2590 * Top and not being split. */
2591 zero = get_mode_null(mode);
2592 if (b->type.tv == zero)
2593 return get_irn_node(get_binop_left(op));
2598 * Calculates the Identity for Mul nodes.
/* x * 1 == 1 * x == x */
2600 static node_t *identity_Mul(node_t *node)
2602 ir_node *op = node->node;
2603 node_t *a = get_irn_node(get_Mul_left(op));
2604 node_t *b = get_irn_node(get_Mul_right(op));
2605 ir_mode *mode = get_irn_mode(op);
2608 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2609 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2612 /* node: no input should be tarval_top, else the binop would be also
2613 * Top and not being split. */
2614 one = get_mode_one(mode);
2615 if (a->type.tv == one)
2617 if (b->type.tv == one)
2623 * Calculates the Identity for Sub nodes.
/* x - 0 == x; Sub is not commutative, so only the right operand counts */
2625 static node_t *identity_Sub(node_t *node)
2627 ir_node *sub = node->node;
2628 node_t *b = get_irn_node(get_Sub_right(sub));
2629 ir_mode *mode = get_irn_mode(sub);
2631 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2632 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2635 /* node: no input should be tarval_top, else the binop would be also
2636 * Top and not being split. */
2637 if (b->type.tv == get_mode_null(mode))
2638 return get_irn_node(get_Sub_left(sub));
2643 * Calculates the Identity for And nodes.
/* x & all_one == all_one & x == x */
2645 static node_t *identity_And(node_t *node)
2647 ir_node *andnode = node->node;
2648 node_t *a = get_irn_node(get_And_left(andnode));
2649 node_t *b = get_irn_node(get_And_right(andnode));
2650 ir_tarval *neutral = get_mode_all_one(get_irn_mode(andnode));
2652 /* node: no input should be tarval_top, else the And would be also
2653 * Top and not being split. */
2654 if (a->type.tv == neutral)
2656 if (b->type.tv == neutral)
2662 * Calculates the Identity for Confirm nodes.
2664 static node_t *identity_Confirm(node_t *node)
2666 ir_node *confirm = node->node;
2668 /* a Confirm is always a Copy */
2669 return get_irn_node(get_Confirm_value(confirm))
2673 * Calculates the Identity for Mux nodes.
/* Mux(c, x, x) is a copy of x (2-input identity, see head comment) */
2675 static node_t *identity_Mux(node_t *node)
2677 ir_node *mux = node->node;
2678 node_t *t = get_irn_node(get_Mux_true(mux));
2679 node_t *f = get_irn_node(get_Mux_false(mux));
2682 if (t->part == f->part)
2685 /* for now, the 1-input identity is not supported */
2690 * Calculates the Identity for nodes.
/* opcode dispatcher for the identity_* helpers above */
2692 static node_t *identity(node_t *node)
2694 ir_node *irn = node->node;
2696 switch (get_irn_opcode(irn)) {
2698 return identity_Phi(node);
2700 return identity_Mul(node);
2704 return identity_comm_zero_binop(node);
2709 return identity_shift(node);
2711 return identity_And(node);
2713 return identity_Sub(node);
2715 return identity_Confirm(node);
2717 return identity_Mux(node);
2724 * Node follower is a (new) follower of leader, segregate Leader
/* moves the def-use edge leader->follower from the leader section into
 * the follower section of the out-edge array (followers come first),
 * preserving the relative order of the remaining leader edges */
2727 static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader)
2729 DB((dbg, LEVEL_2, "%+F is a follower of %+F\n", follower, leader->node));
2730 /* The leader edges must remain sorted, but follower edges can
2732 ir_node *l = leader->node;
2733 unsigned n = get_irn_n_outs(l);
2734 for (unsigned i = leader->n_followers; i < n; ++i) {
2735 if (l->o.out->edges[i].use == follower) {
2736 ir_def_use_edge t = l->o.out->edges[i];
/* shift the leader edges up by one to keep them sorted */
2738 for (unsigned j = i; j-- > leader->n_followers; )
2739 l->o.out->edges[j+1] = l->o.out->edges[j];
2740 l->o.out->edges[leader->n_followers] = t;
2741 ++leader->n_followers;
2748 * Node follower is a (new) follower; segregate its Leader
2751 * @param follower the follower IR node
/*
 * Node 'follower' became a follower: segregate the def-use chain of
 * every one of its predecessors (its leaders), so each leader records
 * the edge to 'follower' in its follower section.
 */
2753 static void segregate_def_use_chain(const ir_node *follower)
2757 for (i = get_irn_arity(follower) - 1; i >= 0; --i) {
2758 node_t *pred = get_irn_node(get_irn_n(follower, i));
2760 segregate_def_use_chain_1(follower, pred);
2765 * Propagate constant evaluation.
2767 * @param env the environment
/*
 * Propagate constant evaluation (the cprop phase of the combo algorithm).
 *
 * Pops partitions from env->cprop; for each partition X, re-evaluates the
 * types of the nodes queued on X's cprop lists.  Nodes whose type changed
 * are collected on the 'fallen' list; if only part of X fell, X is split.
 * Followers whose identity no longer holds are promoted back to leaders;
 * after a split, leaders that became identities are demoted to followers.
 *
 * @param env  the environment
 *
 * NOTE(review): many statements (list management, fallen bookkeeping,
 * loop heads) are elided in this excerpt; comments describe only what
 * the visible lines establish.
 */
2769 static void propagate(environment_t *env)
2773 lattice_elem_t old_type;
2775 unsigned n_fallen, old_type_was_T_or_C;
/* process partitions until the cprop worklist is exhausted */
2777 while (env->cprop != NULL) {
2778 void *oldopcode = NULL;
2780 /* remove the first partition X from cprop */
2783 env->cprop = X->cprop_next;
/* remember whether X's type was Top-or-Constant before this round */
2785 old_type_was_T_or_C = X->type_is_T_or_C;
2787 DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
2791 int cprop_empty = list_empty(&X->cprop);
2792 int cprop_X_empty = list_empty(&X->cprop_X);
2794 if (cprop_empty && cprop_X_empty) {
2795 /* both cprop lists are empty */
2799 /* remove the first Node x from X.cprop */
2801 /* Get a node from the cprop_X list only if
2802 * all data nodes are processed.
2803 * This ensures, that all inputs of the Cond
2804 * predecessor are processed if its type is still Top.
2806 x = list_entry(X->cprop_X.next, node_t, cprop_list);
2808 x = list_entry(X->cprop.next, node_t, cprop_list);
2811 //assert(x->part == X);
2812 list_del(&x->cprop_list);
/* a follower whose identity is now itself must become a leader again */
2815 if (x->is_follower && identity(x) == x) {
2816 /* check the opcode first */
2817 if (oldopcode == NULL) {
2818 oldopcode = lambda_opcode(get_first_node(X), env);
2820 if (oldopcode != lambda_opcode(x, env)) {
2821 if (x->on_fallen == 0) {
2822 /* different opcode -> x falls out of this partition */
2827 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
2831 /* x will make the follower -> leader transition */
2832 follower_to_leader(x);
2834 /* In case of a follower -> leader transition of a Phi node
2835 * we have to ensure that the current partition will be split
2836 * by lambda n.(n[i].partition).
2838 * This split may already happened before when some predecessors
2839 * of the Phi's Block are unreachable. Thus, we have to put the
2840 * current partition in the worklist to repeat the check.
2842 if (is_Phi(x->node) && ! x->part->on_worklist)
2843 add_to_worklist(x->part, env);
2846 /* compute a new type for x */
2848 DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
2850 if (x->type.tv != old_type.tv) {
2851 DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
/* monotonicity check (active when VERIFY_MONOTONE is defined) */
2852 verify_type(old_type, x);
2854 if (x->on_fallen == 0) {
2855 /* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
2856 not already on the list. */
2861 DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
/* re-queue all users of x, since their types may change too */
2863 for (unsigned i = get_irn_n_outs(x->node); i-- > 0; ) {
2864 ir_node *succ = get_irn_out(x->node, i);
2865 node_t *y = get_irn_node(succ);
2867 /* Add y to y.partition.cprop. */
2868 add_to_cprop(y, env);
/* only split when SOME but not ALL leaders fell out of X */
2873 if (n_fallen > 0 && n_fallen != X->n_leader) {
2874 DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
2875 Y = split(&X, fallen, env);
2877 * We have split out fallen node. The type of the result
2878 * partition is NOT set yet.
2880 Y->type_is_T_or_C = 0;
2884 /* remove the flags from the fallen list */
2885 for (x = fallen; x != NULL; x = x->next)
2888 if (old_type_was_T_or_C) {
2889 /* check if some nodes will make the leader -> follower transition */
2890 list_for_each_entry_safe(node_t, y, tmp, &Y->Leader, node_list) {
/* only non-Top, non-constant nodes can have a usable identity */
2891 if (y->type.tv != tarval_top && ! is_con(y->type)) {
2892 node_t *eq_node = identity(y);
2894 if (eq_node != y && eq_node->part == y->part) {
2895 DB((dbg, LEVEL_2, "Node %+F is a follower of %+F\n", y->node, eq_node->node));
2896 /* move to Follower */
2898 list_del(&y->node_list);
2899 list_add_tail(&y->node_list, &Y->Follower);
2902 segregate_def_use_chain(y->node);
2912 * Get the leader for a given node from its congruence class.
2914 * @param irn the node
2916 static ir_node *get_leader(node_t *node)
2918 partition_t *part = node->part;
2920 if (part->n_leader > 1 || node->is_follower) {
2921 if (node->is_follower) {
2922 DB((dbg, LEVEL_2, "Replacing follower %+F\n", node->node));
2925 DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
2927 return get_first_node(part)->node;
2933 * Returns non-zero if a mode_T node has only one reachable output.
/*
 * Returns non-zero if the mode_T node n has at most one reachable
 * control-flow (mode_X) Proj output; counts Projs whose analysis type
 * is tarval_reachable.  (The counter and final return are elided.)
 */
2935 static int only_one_reachable_proj(ir_node *n)
2939 for (unsigned i = get_irn_n_outs(n); i-- > 0; ) {
2940 ir_node *proj = get_irn_out(n, i);
2943 /* skip non-control flow Proj's */
2944 if (get_irn_mode(proj) != mode_X)
2947 node = get_irn_node(proj);
2948 if (node->type.tv == tarval_reachable) {
2957 * Return non-zero if the control flow predecessor node pred
2958 * is the only reachable control flow exit of its block.
2960 * @param pred the control flow exit
2961 * @param block the destination block
/*
 * Return non-zero if the control-flow exit 'pred' can be merged with
 * destination 'block' (i.e. pred is the only reachable exit of its
 * block).  Start blocks and blocks with an entity must be preserved;
 * the per-case return values are elided in this excerpt.
 *
 * @param pred   the control flow exit
 * @param block  the destination block
 */
2963 static int can_exchange(ir_node *pred, ir_node *block)
2965 if (is_Start(pred) || get_Block_entity(block) != NULL)
2967 else if (is_Jmp(pred))
2969 else if (is_Raise(pred)) {
2970 /* Raise is a tuple and usually has only one reachable ProjX,
2971 * but it must not be eliminated like a Jmp */
2974 else if (get_irn_mode(pred) == mode_T) {
2975 /* if the predecessor block has more than one
2976 reachable outputs we cannot remove the block */
2977 return only_one_reachable_proj(pred);
2983 * Block Post-Walker, apply the analysis results on control flow by
2984 * shortening Phi's and Block inputs.
/*
 * Block post-walker: apply the analysis results to control flow.
 *
 * Unreachable blocks get their inputs removed (keeping predecessor
 * blocks alive where removing the edge could create an endless loop);
 * reachable blocks have their dead cfg-inputs and corresponding Phi
 * inputs stripped, constants substituted for constant Phis, and
 * single-predecessor blocks fused with their predecessor.
 *
 * @param block  the visited block
 * @param ctx    the walker environment (environment_t *)
 *
 * NOTE(review): several lines (counters j/k, else branches, returns)
 * are elided in this excerpt.
 */
2986 static void apply_cf(ir_node *block, void *ctx)
2988 environment_t *env = (environment_t*)ctx;
2989 node_t *node = get_irn_node(block);
2991 ir_node **ins, **in_X;
2992 ir_node *phi, *next;
2994 n = get_Block_n_cfgpreds(block);
/* --- case 1: the block itself is unreachable --- */
2996 if (node->type.tv == tarval_unreachable) {
2999 for (i = n - 1; i >= 0; --i) {
3000 ir_node *pred = get_Block_cfgpred(block, i);
3002 if (! is_Bad(pred)) {
3003 ir_node *pred_block = get_nodes_block(skip_Proj(pred));
3004 if (!is_Bad(pred_block)) {
3005 node_t *pred_bl = get_irn_node(pred_block);
/* flag each predecessor block only once */
3007 if (pred_bl->flagged == 0) {
3008 pred_bl->flagged = 3;
3010 if (pred_bl->type.tv == tarval_reachable) {
3012 * We will remove an edge from block to its pred.
3013 * This might leave the pred block as an endless loop
3015 if (! is_backedge(block, i))
3016 keep_alive(pred_bl->node);
3023 ir_graph *const irg = get_Block_irg(block);
3024 if (block == get_irg_end_block(irg)) {
3025 /* Analysis found out that the end block is unreachable,
3026 * hence we remove all its control flow predecessors. */
3027 set_irn_in(block, 0, NULL);
/* --- case 2: reachable block with exactly one predecessor --- */
3033 /* only one predecessor combine */
3034 ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));
3036 if (can_exchange(pred, block)) {
3037 ir_node *new_block = get_nodes_block(pred);
3038 DB((dbg, LEVEL_1, "Fuse %+F with %+F\n", block, new_block));
3039 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
3040 exchange(block, new_block);
/* keep the node_t mapping valid after the exchange */
3041 node->node = new_block;
/* --- case 3: reachable block, filter out dead cfg inputs --- */
3047 NEW_ARR_A(ir_node *, in_X, n);
3049 for (i = 0; i < n; ++i) {
3050 ir_node *pred = get_Block_cfgpred(block, i);
3051 node_t *node = get_irn_node(pred);
3053 if (node->type.tv == tarval_reachable) {
3056 DB((dbg, LEVEL_1, "Removing dead input %d from %+F (%+F)\n", i, block, pred));
3057 if (! is_Bad(pred)) {
3058 ir_node *pred_block = get_nodes_block(skip_Proj(pred));
3059 if (!is_Bad(pred_block)) {
3060 node_t *pred_bl = get_irn_node(pred_block);
3062 if (!is_Bad(pred_bl->node) && pred_bl->flagged == 0) {
3063 pred_bl->flagged = 3;
3065 if (pred_bl->type.tv == tarval_reachable) {
3067 * We will remove an edge from block to its pred.
3068 * This might leave the pred block as an endless loop
3070 if (! is_backedge(block, i))
3071 keep_alive(pred_bl->node);
/* shorten the Phis of this block to the surviving inputs */
3082 NEW_ARR_A(ir_node *, ins, n);
3083 for (phi = get_Block_phis(block); phi != NULL; phi = next) {
3084 node_t *node = get_irn_node(phi);
3086 next = get_Phi_next(phi);
3087 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
3088 /* this Phi is replaced by a constant */
3089 ir_tarval *tv = node->type.tv;
3090 ir_node *c = new_r_Const(current_ir_graph, tv);
3092 set_irn_node(c, node);
3094 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
3095 DBG_OPT_COMBO(phi, c, FS_OPT_COMBO_CONST);
3100 for (i = 0; i < n; ++i) {
3101 node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
/* keep only Phi inputs whose control predecessor is reachable */
3103 if (pred->type.tv == tarval_reachable) {
3104 ins[j++] = get_Phi_pred(phi, i);
3108 /* this Phi is replaced by a single predecessor */
3109 ir_node *s = ins[0];
3110 node_t *phi_node = get_irn_node(phi);
3113 DB((dbg, LEVEL_1, "%+F is replaced by %+F because of cf change\n", phi, s));
3114 DBG_OPT_COMBO(phi, s, FS_OPT_COMBO_FOLLOWER);
3119 set_irn_in(phi, j, ins);
3127 /* this Block has only one live predecessor */
3128 ir_node *pred = skip_Proj(in_X[0]);
3130 if (can_exchange(pred, block)) {
3131 ir_node *new_block = get_nodes_block(pred);
3132 DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
3133 exchange(block, new_block);
3134 node->node = new_block;
/* otherwise: install the filtered cfg inputs */
3139 set_irn_in(block, k, in_X);
3144 * Exchange a node by its leader.
3145 * Beware: in rare cases the mode might be wrong here, for instance
3146 * AddP(x, NULL) is a follower of x, but with different mode.
/*
 * Exchange irn by its congruence-class leader.
 * Beware: in rare cases the mode might differ (e.g. AddP(x, NULL) is a
 * follower of x but with a different mode); then a no-op Conv is
 * inserted in the leader's block to fix the mode before the exchange.
 *
 * @param irn     the node to replace
 * @param leader  its congruence class leader
 */
3149 static void exchange_leader(ir_node *irn, ir_node *leader)
3151 ir_mode *mode = get_irn_mode(irn);
3152 if (mode != get_irn_mode(leader)) {
3153 /* The conv is a no-op, so we are free to place it
3154 * either in the block of the leader OR in irn's block.
3155 * Probably placing it into leaders block might reduce
3156 * the number of Conv due to CSE. */
3157 ir_node *block = get_nodes_block(leader);
3158 dbg_info *dbg = get_irn_dbg_info(irn);
3159 ir_node *nlead = new_rd_Conv(dbg, block, leader, mode);
/* new_rd_Conv may fold to the leader itself; only a NEW node needs setup */
3161 if (nlead != leader) {
3162 /* Note: this newly create irn has no node info because
3163 * it is created after the analysis. However, this node
3164 * replaces the node irn and should not be visited again,
3165 * so set its visited count to the count of irn.
3166 * Otherwise we might visited this node more than once if
3167 * irn had more than one user.
3169 set_irn_node(nlead, NULL);
3170 set_irn_visited(nlead, get_irn_visited(irn));
/* the final replacement of irn by (possibly converted) leader */
3174 exchange(irn, leader);
3178 * Check, if all users of a mode_M node are dead. Use
3179 * the Def-Use edges for this purpose, as they still
3180 * reflect the situation.
/*
 * Check if all users of a mode_M node are dead: a user counts as dead
 * when its block is unreachable or its own type is still tarval_top.
 * Uses the def-use (out) edges, which still reflect the pre-rewrite
 * situation.  (Per-iteration continue/return lines are elided here.)
 *
 * @param irn  the memory node to check
 * @return non-zero iff no reachable user exists
 */
3182 static int all_users_are_dead(const ir_node *irn)
3184 unsigned n = get_irn_n_outs(irn);
3185 for (unsigned i = 0; i < n; ++i) {
3186 const ir_node *succ = get_irn_out(irn, i);
3187 const node_t *block = get_irn_node(get_nodes_block(succ));
3190 if (block->type.tv == tarval_unreachable) {
3191 /* block is unreachable */
3194 node = get_irn_node(succ);
3195 if (node->type.tv != tarval_top) {
3196 /* found a reachable user */
3200 /* all users are unreachable */
3205 * Walker: Find reachable mode_M nodes that have only
3206 * unreachable users. These nodes must be kept later.
/*
 * Walker: find reachable mode_M nodes whose users are all unreachable.
 * Such memory nodes would be dropped together with their dead users,
 * so they are collected in env->kept_memory to be kept alive later.
 *
 * @param irn  the visited node
 * @param ctx  the walker environment (environment_t *)
 */
3208 static void find_kept_memory(ir_node *irn, void *ctx)
3210 environment_t *env = (environment_t*)ctx;
3211 node_t *node, *block;
/* only memory nodes are of interest */
3213 if (get_irn_mode(irn) != mode_M)
/* skip nodes in unreachable blocks */
3216 block = get_irn_node(get_nodes_block(irn));
3217 if (block->type.tv == tarval_unreachable)
/* skip nodes that are themselves dead (type Top) */
3220 node = get_irn_node(irn);
3221 if (node->type.tv == tarval_top)
3224 /* ok, we found a live memory node. */
3225 if (all_users_are_dead(irn)) {
3226 DB((dbg, LEVEL_1, "%+F must be kept\n", irn));
3227 ARR_APP1(ir_node *, env->kept_memory, irn);
3232 * Post-Walker, apply the analysis results;
/*
 * Post-walker: apply the analysis result to one node.
 *
 * Order of cases (visible in this excerpt):
 *  - Blocks/End/Bad: skip (blocks already handled by apply_cf)
 *  - node in unreachable block: replace by Bad
 *  - node computes Top: route memory around it / replace by Unknown
 *  - mode_X Proj of Cond/Switch with one live exit: replace by Jmp
 *  - constant type: replace by Const / SymConst
 *  - otherwise: replace by its congruence-class leader (except Phis
 *    with Unknown inputs, to avoid creating non-strict programs)
 *
 * @param irn  the visited node
 * @param ctx  the walker environment (environment_t *)
 *
 * NOTE(review): many statements (returns, exchange calls, else arms)
 * are elided in this excerpt.
 */
3234 static void apply_result(ir_node *irn, void *ctx)
3236 environment_t *env = (environment_t*)ctx;
3237 node_t *node = get_irn_node(irn);
3239 if (is_Block(irn) || is_End(irn) || is_Bad(irn)) {
3240 /* blocks already handled, do not touch the End node */
3242 node_t *block = get_irn_node(get_nodes_block(irn));
3244 if (block->type.tv == tarval_unreachable) {
3245 ir_graph *irg = get_irn_irg(irn);
3246 ir_mode *mode = get_irn_mode(node->node);
3247 ir_node *bad = new_r_Bad(irg, mode);
3249 /* here, bad might already have a node, but this can be safely ignored
3250 as long as bad has at least ONE valid node */
3251 set_irn_node(bad, node);
3253 DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
3256 } else if (node->type.tv == tarval_top) {
3257 ir_mode *mode = get_irn_mode(irn);
3259 if (mode == mode_M) {
3260 /* never kill a mode_M node */
3262 ir_node *pred = get_Proj_pred(irn);
3263 node_t *pnode = get_irn_node(pred);
3265 if (pnode->type.tv == tarval_top) {
3266 /* skip the predecessor */
3267 ir_node *mem = get_memop_mem(pred);
3269 DB((dbg, LEVEL_1, "%+F computes Top, replaced by %+F\n", irn, mem));
3274 /* leave other nodes, especially PhiM */
3275 } else if (mode == mode_T) {
3276 /* Do not kill mode_T nodes, kill their Projs */
3277 } else if (! is_Unknown(irn)) {
3278 /* don't kick away Unknown's, they might be still needed */
3279 ir_node *unk = new_r_Unknown(current_ir_graph, mode);
3281 /* control flow should already be handled at apply_cf() */
3282 assert(mode != mode_X);
3284 /* see comment above */
3285 set_irn_node(unk, node);
3287 DB((dbg, LEVEL_1, "%+F computes Top\n", irn));
3292 else if (get_irn_mode(irn) == mode_X) {
3295 ir_node *cond = get_Proj_pred(irn);
3297 if (is_Cond(cond) || is_Switch(cond)) {
/* only one live exit -> the Proj degenerates to an unconditional Jmp */
3298 if (only_one_reachable_proj(cond)) {
3299 ir_node *jmp = new_r_Jmp(block->node);
3300 set_irn_node(jmp, node);
3302 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
3303 DBG_OPT_COMBO(irn, jmp, FS_OPT_COMBO_CF);
3307 if (is_Switch(cond)) {
3308 node_t *sel = get_irn_node(get_Switch_selector(cond));
3309 ir_tarval *tv = sel->type.tv;
3311 if (is_tarval(tv) && tarval_is_constant(tv)) {
3312 /* The selector is a constant, but more
3313 * than one output is active: An unoptimized
3322 /* normal data node */
3323 if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
3324 ir_tarval *tv = node->type.tv;
3327 * Beware: never replace mode_T nodes by constants. Currently we must mark
3328 * mode_T nodes with constants, but do NOT replace them.
3330 if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
3331 /* can be replaced by a constant */
3332 ir_node *c = new_r_Const(current_ir_graph, tv);
3333 set_irn_node(c, node);
3335 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
3336 DBG_OPT_COMBO(irn, c, FS_OPT_COMBO_CONST);
3337 exchange_leader(irn, c);
/* the type is an entity: an address constant */
3340 } else if (is_entity(node->type.sym.entity_p)) {
3341 if (! is_SymConst(irn)) {
3342 /* can be replaced by a SymConst */
3343 ir_node *symc = new_r_SymConst(current_ir_graph, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
3344 set_irn_node(symc, node);
3347 DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
3348 DBG_OPT_COMBO(irn, symc, FS_OPT_COMBO_CONST);
3349 exchange_leader(irn, symc);
3352 } else if (is_Confirm(irn)) {
3353 /* Confirms are always follower, but do not kill them here */
3355 ir_node *leader = get_leader(node);
3357 if (leader != irn) {
3358 int non_strict_phi = 0;
3361 * Beware: Do not remove Phi(Unknown, ..., x, ..., Unknown)
3362 * as this might create non-strict programs.
3364 if (node->is_follower && is_Phi(irn) && !is_Unknown(leader)) {
3367 for (i = get_Phi_n_preds(irn) - 1; i >= 0; --i) {
3368 ir_node *pred = get_Phi_pred(irn, i);
3370 if (is_Unknown(pred)) {
3376 if (! non_strict_phi) {
3377 DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
3378 if (node->is_follower)
3379 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_FOLLOWER);
3381 DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_CONGRUENT);
3382 exchange_leader(irn, leader);
3392 * Fix the keep-alives by deleting unreachable ones.
/*
 * Fix the End node's keep-alive list by deleting unreachable entries:
 * a keep-alive survives only if its (containing) block is reachable.
 * (The lines that append survivors to 'in' are elided in this excerpt.)
 *
 * @param end  the End node of the graph
 * @param env  the environment
 */
3394 static void apply_end(ir_node *end, environment_t *env)
3396 int i, j, n = get_End_n_keepalives(end);
3397 ir_node **in = NULL;
3400 NEW_ARR_A(ir_node *, in, n);
3402 /* fix the keep alive */
3403 for (i = j = 0; i < n; i++) {
3404 ir_node *ka = get_End_keepalive(end, i);
/* for non-block keep-alives, judge reachability via their block */
3410 if (!is_Block(ka)) {
3411 block = get_nodes_block(ka);
3418 node = get_irn_node(block);
3419 if (node->type.tv != tarval_unreachable)
/* install the filtered keep-alive list */
3423 set_End_keepalives(end, j, in);
3428 #define SET(code) op_##code->ops.generic = (op_func)compute_##code
3431 * sets the generic functions to compute.
/*
 * Install the compute callbacks: every opcode first gets
 * default_compute as its generic function; opcode-specific overrides
 * (set via the SET macro) follow after the visible loop.
 */
3433 static void set_compute_functions(void)
3437 /* set the default compute function */
3438 for (i = 0, n = ir_get_n_opcodes(); i < n; ++i) {
3439 ir_op *op = ir_get_opcode(i);
3440 op->ops.generic = (op_func)default_compute;
3443 /* set specific functions */
/*
 * Add the collected live-but-unused memory nodes as keep-alives of the
 * End node, skipping nodes that are already kept (a node set of the
 * existing keep-alives is used for the duplicate check).
 *
 * @param kept_memory  array of memory nodes to keep
 * @param len          number of entries in kept_memory
 */
3464 static void add_memory_keeps(ir_node **kept_memory, size_t len)
3466 ir_node *end = get_irg_end(current_ir_graph);
3471 ir_nodeset_init(&set);
3473 /* check, if those nodes are already kept */
3474 for (i = get_End_n_keepalives(end) - 1; i >= 0; --i)
3475 ir_nodeset_insert(&set, get_End_keepalive(end, i));
3477 for (idx = 0; idx < len; ++idx) {
3478 ir_node *ka = kept_memory[idx];
3480 if (! ir_nodeset_contains(&set, ka)) {
3481 add_End_keepalive(end, ka);
3484 ir_nodeset_destroy(&set);
/*
 * Run the combo (combined analysis/optimization) algorithm on irg:
 * set up the environment and compute callbacks, build the initial
 * partition, iterate propagate/cause-splits until both the cprop list
 * and the worklist are empty, then apply the results (control flow,
 * node replacement, keep-alives) and tear everything down.
 *
 * @param irg  the graph to optimize
 *
 * NOTE(review): several statements (env field inits, the do-loop body,
 * some cleanup) are elided in this excerpt.
 */
3487 void combo(ir_graph *irg)
3490 ir_node *initial_bl;
3492 ir_graph *rem = current_ir_graph;
/* required graph properties: no Bads, consistent outs and loop info */
3495 assure_irg_properties(irg,
3496 IR_GRAPH_PROPERTY_NO_BADS
3497 | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
3498 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
3500 current_ir_graph = irg;
3502 /* register a debug mask */
3503 FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
3505 DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
3507 obstack_init(&env.obst);
3508 env.worklist = NULL;
3512 #ifdef DEBUG_libfirm
3513 env.dbg_list = NULL;
3515 env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
3516 env.kept_memory = NEW_ARR_F(ir_node *, 0);
/* with global CSE the end index 0 enables global congruences */
3517 env.end_idx = get_opt_global_cse() ? 0 : -1;
3518 env.lambda_input = 0;
3521 /* options driving the optimization */
3522 env.commutative = 1;
3523 env.opt_unknown = 1;
3525 /* we have our own value_of function */
3526 set_value_of_func(get_node_tarval);
3528 set_compute_functions();
3529 DEBUG_ONLY(part_nr = 0;)
3531 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
/* choose how Unknown nodes participate in the lattice */
3533 if (env.opt_unknown)
3534 tarval_UNKNOWN = tarval_top;
3536 tarval_UNKNOWN = tarval_bad;
3538 /* create the initial partition and place it on the work list */
3539 env.initial = new_partition(&env);
3540 add_to_worklist(env.initial, &env);
3541 irg_walk_graph(irg, create_initial_partitions, init_block_phis, &env);
3543 /* set the hook: from now, every node has a partition and a type */
3544 DEBUG_ONLY(set_dump_node_vcgattr_hook(dump_partition_hook);)
3546 /* all nodes on the initial partition have type Top */
3547 env.initial->type_is_T_or_C = 1;
3549 /* Place the START Node's partition on cprop.
3550 Place the START Node on its local worklist. */
3551 initial_bl = get_irg_start_block(irg);
3552 start = get_irn_node(initial_bl);
3553 add_to_cprop(start, &env);
/* fixpoint loop: alternate propagation and partition splitting */
3557 if (env.worklist != NULL)
3559 } while (env.cprop != NULL || env.worklist != NULL);
3561 dump_all_partitions(&env);
3562 check_all_partitions(&env);
3564 /* apply the result */
3566 /* check, which nodes must be kept */
3567 irg_walk_graph(irg, NULL, find_kept_memory, &env);
3569 /* kill unreachable control flow */
3570 irg_block_walk_graph(irg, NULL, apply_cf, &env);
3571 /* Kill keep-alives of dead blocks: this speeds up apply_result()
3572 * and fixes assertion because dead cf to dead blocks is NOT removed by
3574 apply_end(get_irg_end(irg), &env);
3575 irg_walk_graph(irg, NULL, apply_result, &env);
3577 len = ARR_LEN(env.kept_memory);
3579 add_memory_keeps(env.kept_memory, len);
3582 DB((dbg, LEVEL_1, "Unoptimized Control Flow left"));
3585 ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
3587 /* remove the partition hook */
3588 DEBUG_ONLY(set_dump_node_vcgattr_hook(NULL);)
3590 DEL_ARR_F(env.kept_memory);
3591 del_set(env.opcode2id_map);
3592 obstack_free(&env.obst, NULL);
3594 /* restore value_of() default behavior */
3595 set_value_of_func(NULL);
3596 current_ir_graph = rem;
/* all properties may be invalidated by the rewrites */
3598 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
3601 /* Creates an ir_graph pass for combo. */
3602 ir_graph_pass_t *combo_pass(const char *name)
3604 return def_graph_pass(name ? name : "combo", combo);