* @author Michael Beck
* @version $Id$
*
+ * This is a slightly enhanced version of Cliff Click's combo algorithm
+ * - support for commutative nodes is added, Add(a,b) and Add(b,a) ARE congruent
+ * - supports all Firm direct (by a data edge) identities except Mux
+ * (Mux can be a 2-input or 1-input identity, only 2-input is implemented yet)
+ * - supports Confirm nodes (handle them like Copies but do NOT remove them)
+ * - let Cmp nodes calculate Top like all other data nodes: this would let
+ * Mux nodes calculate Unknown instead of taking the true result
+ * - let Cond(Top) always select FALSE/default: This is tricky. Nodes are only re-evaluated
+ * IFF the predecessor changed its type. Because nodes are initialized with Top
+ * this never happens, let all Proj(Cond) be unreachable.
+ * We avoid this condition by the same way we work around Phi: whenever a Block
+ * node is placed on the list, place its Cond nodes (and, because they are Tuple,
+ * all their Proj-nodes as well) on the cprop list
+ * Especially, this changes the meaning of Click's example:
+ *
+ * int main() {
+ * int x;
+ *
+ * if (x == 2)
+ * printf("x == 2\n");
+ * if (x == 3)
+ * printf("x == 3\n");
+ * }
+ *
+ * Would print:
+ * x == 2
+ * x == 3
+ *
+ * using Click's version, while it is silent with ours.
+ * - support for global congruences is implemented but not tested yet
+ *
* Note further that we use the terminology from Click's work here, which is different
* in some cases from Firm terminology. Especially, Click's type is a
* Firm tarval/entity, nevertheless we call it type here for "maximum compatibility".
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include <assert.h>
#include "iroptimize.h"
-#include "archop.h"
#include "irflag.h"
#include "ircons.h"
#include "list.h"
#include "irop.h"
#include "irouts.h"
#include "irgmod.h"
+#include "iropt_dbg.h"
#include "debug.h"
+#include "array_t.h"
#include "error.h"
+#include "irnodeset.h"
#include "tv_t.h"
/* define this to check the consistency of partitions */
#define CHECK_PARTITIONS
-/* define this to disable followers (may be buggy) */
-#undef NO_FOLLOWER
-
typedef struct node_t node_t;
typedef struct partition_t partition_t;
typedef struct opcode_key_t opcode_key_t;
union {
long proj; /**< For Proj nodes, its proj number */
ir_entity *ent; /**< For Sel Nodes, its entity */
+ int intVal; /**< For Conv/Div Nodes: strict/remainderless */
} u;
};
unsigned on_cprop:1; /**< Set, if this node is on the partition.cprop list. */
unsigned on_fallen:1; /**< Set, if this node is on the fallen list. */
unsigned is_follower:1; /**< Set, if this node is a follower. */
- unsigned is_flagged:1; /**< Set, if this node was visited by the race. */
- unsigned by_all_const:1; /**< Set, if this node was once evaluated by all constants. */
+ unsigned flagged:2; /**< 2 Bits, set if this node was visited by race 1 or 2. */
};
/**
list_head Leader; /**< The head of partition Leader node list. */
list_head Follower; /**< The head of partition Follower node list. */
list_head cprop; /**< The head of partition.cprop list. */
+ list_head cprop_X; /**< The head of partition.cprop (Cond nodes and its Projs) list. */
partition_t *wl_next; /**< Next entry in the work list if any. */
partition_t *touched_next; /**< Points to the next partition in the touched set. */
partition_t *cprop_next; /**< Points to the next partition in the cprop list. */
partition_t *initial; /**< The initial partition. */
set *opcode2id_map; /**< The opcodeMode->id map. */
pmap *type2id_map; /**< The type->id map. */
+ ir_node **kept_memory; /**< Array of memory nodes that must be kept. */
int end_idx; /**< -1 for local and 0 for global congruences. */
int lambda_input; /**< Captured argument for lambda_partition(). */
- int modified; /**< Set, if the graph was modified. */
+ unsigned modified:1; /**< Set, if the graph was modified. */
+ unsigned unopt_cf:1; /**< If set, control flow is not optimized due to Unknown. */
+ /* options driving the optimization */
+ unsigned commutative:1; /**< Set, if commutative nodes should be handled specially. */
+ unsigned opt_unknown:1; /**< Set, if non-strict programs should be optimized. */
#ifdef DEBUG_libfirm
partition_t *dbg_list; /**< List of all partitions. */
#endif
/** Type of the what function. */
typedef void *(*what_func)(const node_t *node, environment_t *env);
-#define get_irn_node(follower) ((node_t *)get_irn_link(follower))
-#define set_irn_node(follower, node) set_irn_link(follower, node)
+#define get_irn_node(irn) ((node_t *)get_irn_link(irn))
+#define set_irn_node(irn, node) set_irn_link(irn, node)
/* we do NOT use tarval_unreachable here, instead we use Top for this purpose */
#undef tarval_unreachable
/** The debug module handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
+/** The what reason. */
+DEBUG_ONLY(static const char *what_reason;)
+
/** Next partition number. */
DEBUG_ONLY(static unsigned part_nr = 0);
+/** The tarval returned by Unknown nodes: set to either tarval_bad OR tarval_top. */
+static tarval *tarval_UNKNOWN;
+
/* forward */
static node_t *identity(node_t *node);
list_for_each_entry(node_t, node, &T->Leader, node_list) {
assert(node->is_follower == 0);
- assert(node->is_flagged == 0);
+ assert(node->flagged == 0);
assert(node->part == T);
++n;
}
list_for_each_entry(node_t, node, &T->Follower, node_list) {
assert(node->is_follower == 1);
- assert(node->is_flagged == 0);
+ assert(node->flagged == 0);
assert(node->part == T);
}
} /* check_partition */
+/**
+ * check that all leader nodes in the partition have the same opcode.
+ */
+static void check_opcode(const partition_t *Z) {
+ node_t *node;
+ opcode_key_t key;
+ int first = 1;
+
+ list_for_each_entry(node_t, node, &Z->Leader, node_list) {
+ ir_node *irn = node->node;
+
+ if (first) {
+ key.code = get_irn_opcode(irn);
+ key.mode = get_irn_mode(irn);
+ key.arity = get_irn_arity(irn);
+ key.u.proj = 0;
+ key.u.ent = NULL;
+
+ switch (get_irn_opcode(irn)) {
+ case iro_Proj:
+ key.u.proj = get_Proj_proj(irn);
+ break;
+ case iro_Sel:
+ key.u.ent = get_Sel_entity(irn);
+ break;
+ case iro_Conv:
+ key.u.intVal = get_Conv_strict(irn);
+ break;
+ case iro_Div:
+ key.u.intVal = is_Div_remainderless(irn);
+ break;
+ default:
+ break;
+ }
+ first = 0;
+ } else {
+ assert(key.code == get_irn_opcode(irn));
+ assert(key.mode == get_irn_mode(irn));
+ assert(key.arity == get_irn_arity(irn));
+
+ switch (get_irn_opcode(irn)) {
+ case iro_Proj:
+ assert(key.u.proj == get_Proj_proj(irn));
+ break;
+ case iro_Sel:
+ assert(key.u.ent == get_Sel_entity(irn));
+ break;
+ case iro_Conv:
+ assert(key.u.intVal == get_Conv_strict(irn));
+ break;
+ case iro_Div:
+ assert(key.u.intVal == is_Div_remainderless(irn));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+} /* check_opcode */
+
+static void check_all_partitions(environment_t *env) {
+#ifdef DEBUG_libfirm
+ partition_t *P;
+ node_t *node;
+
+ for (P = env->dbg_list; P != NULL; P = P->dbg_next) {
+ check_partition(P);
+ if (! P->type_is_T_or_C)
+ check_opcode(P);
+ list_for_each_entry(node_t, node, &P->Follower, node_list) {
+ node_t *leader = identity(node);
+
+ assert(leader != node && leader->part == node->part);
+ }
+ }
+#endif
+}
+
/**
* Check list.
*/
#else
#define check_partition(T)
#define check_list(list, Z)
+#define check_all_partitions(env)
#endif /* CHECK_PARTITIONS */
#ifdef DEBUG_libfirm
-static INLINE lattice_elem_t get_partition_type(const partition_t *X);
+static inline lattice_elem_t get_partition_type(const partition_t *X);
/**
* Dump partition to output.
DB((dbg, LEVEL_3, "\n}\n"));
#undef GET_LINK
-}
+} /* do_dump_list */
/**
* Dumps a race list.
*/
static void dump_race_list(const char *msg, const node_t *list) {
do_dump_list(msg, list, offsetof(node_t, race_next));
-}
+} /* dump_race_list */
/**
* Dumps a local list.
*/
static void dump_list(const char *msg, const node_t *list) {
do_dump_list(msg, list, offsetof(node_t, next));
-}
+} /* dump_list */
/**
* Dump all partitions.
DB((dbg, LEVEL_2, "All partitions\n===============\n"));
for (P = env->dbg_list; P != NULL; P = P->dbg_next)
dump_partition("", P);
-}
+} /* dump_all_partitions */
+
+/**
+ * Dump a split list.
+ */
+static void dump_split_list(const partition_t *list) {
+ const partition_t *p;
+
+ DB((dbg, LEVEL_2, "Split by %s produced = {\n", what_reason));
+ for (p = list; p != NULL; p = p->split_next)
+ DB((dbg, LEVEL_2, "part%u, ", p->nr));
+ DB((dbg, LEVEL_2, "\n}\n"));
+} /* dump_split_list */
+
+/**
+ * Dump partition and type for a node.
+ */
+static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
+ ir_node *irn = local != NULL ? local : n;
+ node_t *node = get_irn_node(irn);
+
+ ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
+ return 1;
+} /* dump_partition_hook */
#else
#define dump_partition(msg, part)
#define dump_race_list(msg, list)
#define dump_list(msg, list)
#define dump_all_partitions(env)
+#define dump_split_list(list)
#endif
#if defined(VERIFY_MONOTONE) && defined (DEBUG_libfirm)
/**
* Verify that a type transition is monotone
*/
-static void verify_type(const lattice_elem_t old_type, const lattice_elem_t new_type) {
- if (old_type.tv == new_type.tv) {
+static void verify_type(const lattice_elem_t old_type, node_t *node) {
+ if (old_type.tv == node->type.tv) {
/* no change */
return;
}
/* from Top down-to is always allowed */
return;
}
- if (old_type.tv == tarval_reachable) {
- panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
- }
- if (new_type.tv == tarval_bottom || new_type.tv == tarval_reachable) {
+ if (node->type.tv == tarval_bottom || node->type.tv == tarval_reachable) {
/* bottom reached */
return;
}
- panic("verify_type(): wrong translation from %+F to %+F", old_type, new_type);
-}
+ panic("combo: wrong translation from %+F to %+F on node %+F", old_type, node->type, node->node);
+} /* verify_type */
+
#else
-#define verify_type(old_type, new_type)
+#define verify_type(old_type, node)
#endif
/**
* @param map the listmap
* @param id the id to search for
*
- * @return the asociated listmap entry for the given id
+ * @return the associated listmap entry for the given id
*/
static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
listmap_entry_t key, *entry;
* @return a hash value for the given opcode map entry
*/
static unsigned opcode_hash(const opcode_key_t *entry) {
- return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent);
+ return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ent) + entry->arity;
} /* opcode_hash */
/**
(void) size;
return o1->code != o2->code || o1->mode != o2->mode ||
o1->arity != o2->arity ||
- o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent;
+ o1->u.proj != o2->u.proj || o1->u.ent != o2->u.ent ||
+ o1->u.intVal != o2->u.intVal;
} /* cmp_opcode */
/**
*
* @return the associated type of this node
*/
-static INLINE lattice_elem_t get_node_type(const ir_node *irn) {
+static inline lattice_elem_t get_node_type(const ir_node *irn) {
return get_irn_node(irn)->type;
} /* get_node_type */
*
* @return the associated type of this node
*/
-static INLINE tarval *get_node_tarval(const ir_node *irn) {
+static inline tarval *get_node_tarval(const ir_node *irn) {
lattice_elem_t type = get_node_type(irn);
if (is_tarval(type.tv))
/**
* Add a partition to the worklist.
*/
-static INLINE void add_to_worklist(partition_t *X, environment_t *env) {
+static inline void add_to_worklist(partition_t *X, environment_t *env) {
assert(X->on_worklist == 0);
+ DB((dbg, LEVEL_2, "Adding part%d to worklist\n", X->nr));
X->wl_next = env->worklist;
X->on_worklist = 1;
env->worklist = X;
*
* @return a newly allocated partition
*/
-static INLINE partition_t *new_partition(environment_t *env) {
+static inline partition_t *new_partition(environment_t *env) {
partition_t *part = obstack_alloc(&env->obst, sizeof(*part));
INIT_LIST_HEAD(&part->Leader);
INIT_LIST_HEAD(&part->Follower);
INIT_LIST_HEAD(&part->cprop);
+ INIT_LIST_HEAD(&part->cprop_X);
part->wl_next = NULL;
part->touched_next = NULL;
part->cprop_next = NULL;
/**
* Get the first node from a partition.
*/
-static INLINE node_t *get_first_node(const partition_t *X) {
+static inline node_t *get_first_node(const partition_t *X) {
return list_entry(X->Leader.next, node_t, node_list);
} /* get_first_node */
*
* @return the type of the first element of the partition
*/
-static INLINE lattice_elem_t get_partition_type(const partition_t *X) {
+static inline lattice_elem_t get_partition_type(const partition_t *X) {
const node_t *first = get_first_node(X);
return first->type;
} /* get_partition_type */
node->on_cprop = 0;
node->on_fallen = 0;
node->is_follower = 0;
- node->by_all_const = 0;
- node->is_flagged = 0;
+ node->flagged = 0;
set_irn_node(irn, node);
list_add_tail(&node->node_list, &part->Leader);
} /* create_partition_node */
/**
- * Pre-Walker, init all Block-Phi lists.
- */
-static void init_block_phis(ir_node *irn, void *env) {
- (void) env;
-
- if (is_Block(irn)) {
- set_Block_phis(irn, NULL);
- }
-} /* init_block_phis */
-
-/**
- * Post-Walker, initialize all Nodes' type to U or top and place
+ * Pre-Walker, initialize all Nodes' type to U or top and place
* all nodes into the TOP partition.
*/
static void create_initial_partitions(ir_node *irn, void *ctx) {
if (node->max_user_input > part->max_user_inputs)
part->max_user_inputs = node->max_user_input;
+ if (is_Block(irn)) {
+ set_Block_phis(irn, NULL);
+ }
+} /* create_initial_partitions */
+
+/**
+ * Post-Walker, collect all Block-Phi lists, set Cond.
+ */
+static void init_block_phis(ir_node *irn, void *ctx) {
+ (void) ctx;
+
if (is_Phi(irn)) {
add_Block_phi(get_nodes_block(irn), irn);
}
-} /* create_initial_partitions */
+} /* init_block_phis */
/**
* Add a node to the entry.partition.touched set and
* @param y a node
* @param env the environment
*/
-static INLINE void add_to_touched(node_t *y, environment_t *env) {
+static inline void add_to_touched(node_t *y, environment_t *env) {
if (y->on_touched == 0) {
partition_t *part = y->part;
* @param env the environment
*/
static void add_to_cprop(node_t *y, environment_t *env) {
+ ir_node *irn;
+
/* Add y to y.partition.cprop. */
if (y->on_cprop == 0) {
partition_t *Y = y->part;
+ ir_node *irn = y->node;
- list_add_tail(&y->cprop_list, &Y->cprop);
+ /* place Conds and all its Projs on the cprop_X list */
+ if (is_Cond(skip_Proj(irn)))
+ list_add_tail(&y->cprop_list, &Y->cprop_X);
+ else
+ list_add_tail(&y->cprop_list, &Y->cprop);
y->on_cprop = 1;
DB((dbg, LEVEL_3, "Add %+F to part%u.cprop\n", y->node, Y->nr));
Y->on_cprop = 1;
}
}
- if (get_irn_mode(y->node) == mode_T) {
+ irn = y->node;
+ if (get_irn_mode(irn) == mode_T) {
/* mode_T nodes always produce tarval_bottom, so we must explicitly
add it's Proj's to get constant evaluation to work */
int i;
- for (i = get_irn_n_outs(y->node) - 1; i >= 0; --i) {
- node_t *proj = get_irn_node(get_irn_out(y->node, i));
+ for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
+ node_t *proj = get_irn_node(get_irn_out(irn, i));
add_to_cprop(proj, env);
}
- } else if (is_Block(y->node)) {
+ } else if (is_Block(irn)) {
/* Due to the way we handle Phi's, we must place all Phis of a block on the list
* if someone placed the block. The Block is only placed if the reachability
* changes, and this must be re-evaluated in compute_Phi(). */
ir_node *phi;
- for (phi = get_Block_phis(y->node); phi != NULL; phi = get_Phi_next(phi)) {
+ for (phi = get_Block_phis(irn); phi != NULL; phi = get_Phi_next(phi)) {
node_t *p = get_irn_node(phi);
add_to_cprop(p, env);
}
return Z_prime;
} /* split_no_followers */
-#ifdef NO_FOLLOWER
-
-#define split(Z, g, env) split_no_followers(*(Z), g, env)
-
-#else
-
/**
* Make the Follower -> Leader transition for a node.
*
node_t *unwalked; /**< The unwalked node list. */
node_t *walked; /**< The walked node list. */
int index; /**< Next index of Follower use_def edge. */
+ unsigned side; /**< side number. */
} step_env;
/**
* @param input number of the input
*/
static int is_real_follower(const ir_node *irn, int input) {
- if (input == 1 && is_Confirm(irn)) {
- /* return the Confirm bound input */
- return 0;
+ node_t *pred;
+
+ switch (get_irn_opcode(irn)) {
+ case iro_Confirm:
+ if (input == 1) {
+ /* ignore the Confirm bound input */
+ return 0;
+ }
+ break;
+ case iro_Mux:
+ if (input == 0) {
+ /* ignore the Mux sel input */
+ return 0;
+ }
+ break;
+ case iro_Phi: {
+ /* dead inputs are not follower edges */
+ ir_node *block = get_nodes_block(irn);
+ node_t *pred = get_irn_node(get_Block_cfgpred(block, input));
+
+ if (pred->type.tv == tarval_unreachable)
+ return 0;
+ break;
}
- if (input == 0 && is_Mux(irn)) {
- /* ignore the Mux sel input */
+ case iro_Sub:
+ case iro_Shr:
+ case iro_Shl:
+ case iro_Shrs:
+ case iro_Rotl:
+ if (input == 1) {
+ /* only a Sub x,0 / Shift x,0 might be a follower */
+ return 0;
+ }
+ break;
+ case iro_Add:
+ case iro_Or:
+ case iro_Eor:
+ pred = get_irn_node(get_irn_n(irn, input));
+ if (is_tarval(pred->type.tv) && tarval_is_null(pred->type.tv))
+ return 0;
+ break;
+ case iro_Mul:
+ pred = get_irn_node(get_irn_n(irn, input));
+ if (is_tarval(pred->type.tv) && tarval_is_one(pred->type.tv))
+ return 0;
+ break;
+ case iro_And:
+ pred = get_irn_node(get_irn_n(irn, input));
+ if (is_tarval(pred->type.tv) && tarval_is_all_one(pred->type.tv))
+ return 0;
+ break;
+ default:
+ assert(!"opcode not implemented yet");
+ break;
}
return 1;
-}
+} /* is_real_follower */
/**
* Do one step in the race.
if (m->part != n->part)
continue;
- if (m->is_flagged == 0) {
- m->is_flagged = 1;
+ if ((m->flagged & env->side) == 0) {
+ m->flagged |= env->side;
- /* visited the first time */
- /* add m to unwalked not as first node (we might still need to
- check for more follower node */
- m->race_next = n->race_next;
- n->race_next = m;
- return 0;
+ if (m->flagged != 3) {
+ /* visited the first time */
+ /* add m to unwalked not as first node (we might still need to
+ check for more follower nodes) */
+ m->race_next = n->race_next;
+ n->race_next = m;
+ return 0;
+ }
+ /* else already visited by the other side and on the other list */
}
}
/* move n to walked */
*
* @param list the list
*/
-static void clear_flags(node_t *list) {
+static int clear_flags(node_t *list) {
+ int res = 0;
node_t *n;
- for (n = list; n != NULL; n = n->race_next)
- n->is_flagged = 0;
+ for (n = list; n != NULL; n = n->race_next) {
+ if (n->flagged == 3) {
+ /* we reach a follower from both sides, this will split congruent
+ * inputs and make it a leader. */
+ follower_to_leader(n);
+ res = 1;
+ }
+ n->flagged = 0;
+ }
+ return res;
} /* clear_flags */
/**
partition_t *X = *pX;
partition_t *X_prime;
list_head tmp;
- step_env env1, env2, *winner;
+ step_env senv[2];
node_t *g, *h, *node, *t;
- int max_input;
+ int max_input, transitions, winner, shf;
unsigned n;
DEBUG_ONLY(static int run = 0;)
/* restore X.Leader */
list_splice(&tmp, &X->Leader);
- env1.initial = g;
- env1.unwalked = NULL;
- env1.walked = NULL;
- env1.index = 0;
+ senv[0].initial = g;
+ senv[0].unwalked = NULL;
+ senv[0].walked = NULL;
+ senv[0].index = 0;
+ senv[0].side = 1;
- env2.initial = h;
- env2.unwalked = NULL;
- env2.walked = NULL;
- env2.index = 0;
+ senv[1].initial = h;
+ senv[1].unwalked = NULL;
+ senv[1].walked = NULL;
+ senv[1].index = 0;
+ senv[1].side = 2;
+ /*
+ * Some information on the race that is not stated clearly in Click's
+ * thesis.
+ * 1) A follower stays on the side that reaches it first.
+ * 2) If the other side reaches a follower, it will be converted to
+ * a leader. /This must be done after the race is over, else the
+ * edges we are iterating on are renumbered./
+ * 3) /New leader might end up on both sides./
+ * 4) /If one side ends up with new Leaders, we must ensure that
+ * they can split out by opcode, hence we have to put _every_
+ * partition with new Leader nodes on the cprop list, as
+ * opcode splitting is done by split_by() at the end of
+ * constant propagation./
+ */
for (;;) {
- if (step(&env1)) {
- winner = &env1;
+ if (step(&senv[0])) {
+ winner = 0;
break;
}
- if (step(&env2)) {
- winner = &env2;
+ if (step(&senv[1])) {
+ winner = 1;
break;
}
}
- assert(winner->initial == NULL);
- assert(winner->unwalked == NULL);
+ assert(senv[winner].initial == NULL);
+ assert(senv[winner].unwalked == NULL);
/* clear flags from walked/unwalked */
- clear_flags(env1.unwalked);
- clear_flags(env1.walked);
- clear_flags(env2.unwalked);
- clear_flags(env2.walked);
+ shf = winner;
+ transitions = clear_flags(senv[0].unwalked) << shf;
+ transitions |= clear_flags(senv[0].walked) << shf;
+ shf ^= 1;
+ transitions |= clear_flags(senv[1].unwalked) << shf;
+ transitions |= clear_flags(senv[1].walked) << shf;
- dump_race_list("winner ", winner->walked);
+ dump_race_list("winner ", senv[winner].walked);
/* Move walked_{winner} to a new partition, X'. */
X_prime = new_partition(env);
max_input = 0;
n = 0;
- for (node = winner->walked; node != NULL; node = node->race_next) {
+ for (node = senv[winner].walked; node != NULL; node = node->race_next) {
list_del(&node->node_list);
node->part = X_prime;
if (node->is_follower) {
* loose its congruence, so we need to check this case for all follower.
*/
list_for_each_entry_safe(node_t, node, t, &X_prime->Follower, node_list) {
- if (identity(node) == node)
+ if (identity(node) == node) {
follower_to_leader(node);
+ transitions |= 1;
+ }
}
check_partition(X);
/* X' is the smaller part */
add_to_worklist(X_prime, env);
+ /*
+ * If there were follower to leader transitions, ensure that the nodes
+ * can be split out if necessary.
+ */
+ if (transitions & 1) {
+ /* place winner partition on the cprop list */
+ if (X_prime->on_cprop == 0) {
+ X_prime->cprop_next = env->cprop;
+ env->cprop = X_prime;
+ X_prime->on_cprop = 1;
+ }
+ }
+ if (transitions & 2) {
+ /* place other partition on the cprop list */
+ if (X->on_cprop == 0) {
+ X->cprop_next = env->cprop;
+ env->cprop = X;
+ X->on_cprop = 1;
+ }
+ }
+
dump_partition("Now ", X);
dump_partition("Created new ", X_prime);
/* we have to ensure that the partition containing g is returned */
- if (winner == &env2) {
+ if (winner != 0) {
*pX = X_prime;
return X;
}
return X_prime;
} /* split */
-#endif /* NO_FOLLOWER */
/**
* Returns non-zero if the i'th input of a Phi node is live.
return 0;
}
return 1;
-}
+} /* type_is_neither_top_nor_const */
/**
* Collect nodes to the touched list.
succ = edge->use;
+ /* only non-commutative nodes */
+ if (env->commutative &&
+ (idx == 0 || idx == 1) && is_op_commutative(get_irn_op(succ)))
+ continue;
+
/* ignore the "control input" for non-pinned nodes
if we are running in GCSE mode */
if (idx < end_idx && get_irn_pinned(succ) != op_pin_state_pinned)
}
} /* collect_touched */
+/**
+ * Collect commutative nodes to the touched list.
+ *
+ * @param X the partition of the list
+ * @param list the list which contains the nodes that must be evaluated
+ * @param env the environment
+ */
+static void collect_commutative_touched(partition_t *X, list_head *list, environment_t *env) {
+ int first = 1;
+ int both_input = 0;
+ node_t *x, *y;
+
+ list_for_each_entry(node_t, x, list, node_list) {
+ int num_edges;
+
+ num_edges = get_irn_n_outs(x->node);
+
+ x->next_edge = x->n_followers + 1;
+
+ /* for all edges in x.L.def_use_{idx} */
+ while (x->next_edge <= num_edges) {
+ const ir_def_use_edge *edge = &x->node->out[x->next_edge];
+ ir_node *succ;
+
+ /* check if we have necessary edges */
+ if (edge->pos > 1)
+ break;
+
+ ++x->next_edge;
+ if (edge->pos < 0)
+ continue;
+
+ succ = edge->use;
+
+ /* only commutative nodes */
+ if (!is_op_commutative(get_irn_op(succ)))
+ continue;
+
+ y = get_irn_node(succ);
+ if (is_constant_type(y->type)) {
+ ir_opcode code = get_irn_opcode(succ);
+ if (code == iro_Eor)
+ add_to_cprop(y, env);
+ }
+
+ /* Partitions of constants should not be split simply because their Nodes have unequal
+ functions or incongruent inputs. */
+ if (type_is_neither_top_nor_const(y->type)) {
+ int other_idx = edge->pos ^ 1;
+ node_t *other = get_irn_node(get_irn_n(succ, other_idx));
+ int equal = X == other->part;
+
+ /*
+ * Note: op(a, a) is NOT congruent to op(a, b).
+ * So, either all touch nodes must have both inputs congruent,
+ * or not. We decide this by the first node that occurs.
+ */
+ if (first) {
+ first = 0;
+ both_input = equal;
+ }
+ if (both_input == equal)
+ add_to_touched(y, env);
+ }
+ }
+ }
+} /* collect_commutative_touched */
+
/**
* Split the partitions if caused by the first entry on the worklist.
*
dump_partition("Cause_split: ", X);
+ if (env->commutative) {
+ /* handle commutative nodes first */
+
+ /* empty the touched set: already done, just clear the list */
+ env->touched = NULL;
+
+ collect_commutative_touched(X, &X->Leader, env);
+ collect_commutative_touched(X, &X->Follower, env);
+
+ for (Z = env->touched; Z != NULL; Z = N) {
+ node_t *e;
+ node_t *touched = Z->touched;
+ unsigned n_touched = Z->n_touched;
+
+ assert(Z->touched != NULL);
+
+ /* beware, split might change Z */
+ N = Z->touched_next;
+
+ /* remove it from the touched set */
+ Z->on_touched = 0;
+
+ /* Empty local Z.touched. */
+ for (e = touched; e != NULL; e = e->next) {
+ assert(e->is_follower == 0);
+ e->on_touched = 0;
+ }
+ Z->touched = NULL;
+ Z->n_touched = 0;
+
+ if (0 < n_touched && n_touched < Z->n_leader) {
+ DB((dbg, LEVEL_2, "Split part%d by touched\n", Z->nr));
+ split(&Z, touched, env);
+ } else
+ assert(n_touched <= Z->n_leader);
+ }
+ }
+
/* combine temporary leader and follower list */
for (idx = -1; idx <= X->max_user_inputs; ++idx) {
/* empty the touched set: already done, just clear the list */
S = iter->list;
/* Add SPLIT( X, S ) to P. */
- DB((dbg, LEVEL_2, "Split part%d by what\n", X->nr));
+ DB((dbg, LEVEL_2, "Split part%d by WHAT = %s\n", X->nr, what_reason));
R = split(&X, S, env);
R->split_next = *P;
*P = R;
case iro_Sel:
key.u.ent = get_Sel_entity(irn);
break;
+ case iro_Conv:
+ key.u.intVal = get_Conv_strict(irn);
+ break;
+ case iro_Div:
+ key.u.intVal = is_Div_remainderless(irn);
+ break;
default:
break;
}
int i = env->lambda_input;
if (i >= get_irn_arity(node->node)) {
- /* we are outside the allowed range */
+ /*
+ * We are outside the allowed range: This can happen even
+ * if we have split by opcode first: doing so might move Followers
+ * to Leaders and those will have a different opcode!
+ * Note that in this case the partition is on the cprop list and will be
+ * split again.
+ */
return NULL;
}
return p->part;
} /* lambda_partition */
+/** lambda n.(n[i].partition) for commutative nodes */
+static void *lambda_commutative_partition(const node_t *node, environment_t *env) {
+ ir_node *irn = node->node;
+ ir_node *skipped = skip_Proj(irn);
+ ir_node *pred, *left, *right;
+ node_t *p;
+ partition_t *pl, *pr;
+ int i = env->lambda_input;
+
+ if (i >= get_irn_arity(node->node)) {
+ /*
+ * We are outside the allowed range: This can happen even
+ * if we have split by opcode first: doing so might move Followers
+ * to Leaders and those will have a different opcode!
+ * Note that in this case the partition is on the cprop list and will be
+ * split again.
+ */
+ return NULL;
+ }
+
+ /* ignore the "control input" for non-pinned nodes
+ if we are running in GCSE mode */
+ if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
+ return NULL;
+
+ if (i == -1) {
+ pred = get_irn_n(skipped, i);
+ p = get_irn_node(pred);
+ return p->part;
+ }
+
+ if (is_op_commutative(get_irn_op(irn))) {
+ /* normalize partition order by returning the "smaller" on input 0,
+ the "bigger" on input 1. */
+ left = get_binop_left(irn);
+ pl = get_irn_node(left)->part;
+ right = get_binop_right(irn);
+ pr = get_irn_node(right)->part;
+
+ if (i == 0)
+ return pl < pr ? pl : pr;
+ else
+ return pl > pr ? pl : pr;
+ } else {
+ /* a not split out Follower */
+ pred = get_irn_n(irn, i);
+ p = get_irn_node(pred);
+
+ return p->part;
+ }
+} /* lambda_commutative_partition */
+
/**
- * Returns true if a type is a constant.
+ * Returns true if a type is a constant (and NOT Top
+ * or Bottom).
*/
static int is_con(const lattice_elem_t type) {
/* be conservative */
return;
}
- DB((dbg, LEVEL_2, "WHAT = lambda n.(n.type) on part%d\n", X->nr));
+ DEBUG_ONLY(what_reason = "lambda n.(n.type)";)
P = split_by_what(X, lambda_type, &P, env);
+ dump_split_list(P);
/* adjust the type tags, we have split partitions by type */
for (I = P; I != NULL; I = I->split_next) {
if (! Y->type_is_T_or_C) {
partition_t *Q = NULL;
- DB((dbg, LEVEL_2, "WHAT = lambda n.(n.opcode) on part%d\n", Y->nr));
+ DEBUG_ONLY(what_reason = "lambda n.(n.opcode)";)
Q = split_by_what(Y, lambda_opcode, &Q, env);
+ dump_split_list(Q);
do {
partition_t *Z = Q;
const node_t *first = get_first_node(Z);
int arity = get_irn_arity(first->node);
partition_t *R, *S;
+ what_func what = lambda_partition;
+ DEBUG_ONLY(char buf[64];)
+
+ if (env->commutative && is_op_commutative(get_irn_op(first->node)))
+ what = lambda_commutative_partition;
/*
* BEWARE: during splitting by input 2 for instance we might
R = R->split_next;
if (Z_prime->n_leader > 1) {
env->lambda_input = input;
- DB((dbg, LEVEL_2, "WHAT = lambda n.(n[%d].partition) on part%d\n", input, Z_prime->nr));
- S = split_by_what(Z_prime, lambda_partition, &S, env);
+ DEBUG_ONLY(snprintf(buf, sizeof(buf), "lambda n.(n[%d].partition)", input);)
+ DEBUG_ONLY(what_reason = buf;)
+ S = split_by_what(Z_prime, what, &S, env);
+ dump_split_list(S);
} else {
Z_prime->split_next = S;
S = Z_prime;
static void default_compute(node_t *node) {
int i;
ir_node *irn = node->node;
- node_t *block = get_irn_node(get_nodes_block(irn));
-
- if (block->type.tv == tarval_unreachable) {
- node->type.tv = tarval_top;
- return;
- }
/* if any of the data inputs have type top, the result is type top */
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
int i;
ir_node *block = node->node;
- if (block == get_irg_start_block(current_ir_graph)) {
- /* start block is always reachable */
+ if (block == get_irg_start_block(current_ir_graph) || has_Block_label(block)) {
+ /* start block and labelled blocks are always reachable */
node->type.tv = tarval_reachable;
return;
}
* nodes are inputs to Conds. We check that first.
* This is the way Frontends typically build Firm, but some optimizations
* (cond_eval for instance) might replace them by Phib's...
- *
- * For now, we compute bottom here.
*/
- node->type.tv = tarval_bottom;
+ node->type.tv = tarval_UNKNOWN;
} /* compute_Unknown */
/**
} /* compute_Jmp */
/**
- * (Re-)compute the type for the End node.
+ * (Re-)compute the type for the Return node.
*
* @param node the node
*/
-static void compute_End(node_t *node) {
- /* the End node is NOT dead of course */
+static void compute_Return(node_t *node) {
+ /* The Return node is NOT dead if it is in a reachable block.
+ * This is already checked in compute(), so we can return
+ * Reachable here. */
node->type.tv = tarval_reachable;
-}
+} /* compute_Return */
/**
- * (Re-)compute the type for a SymConst node.
+ * (Re-)compute the type for the End node.
*
* @param node the node
*/
-static void compute_SymConst(node_t *node) {
+static void compute_End(node_t *node) {
+ /* the End node is NOT dead of course */
+ node->type.tv = tarval_reachable;
+} /* compute_End */
+
+/**
+ * (Re-)compute the type for a Call.
+ *
+ * @param node the node
+ */
+static void compute_Call(node_t *node) {
+ /*
+ * A Call computes always bottom, even if it has Unknown
+ * predecessors.
+ */
+ node->type.tv = tarval_bottom;
+} /* compute_Call */
+
+/**
+ * (Re-)compute the type for a SymConst node.
+ *
+ * @param node the node
+ */
+static void compute_SymConst(node_t *node) {
ir_node *irn = node->node;
node_t *block = get_irn_node(get_nodes_block(irn));
} else {
node->type.tv = tarval_bottom;
}
- node->by_all_const = 1;
} else if (r->part == l->part &&
(!mode_is_float(get_irn_mode(l->node)))) {
/*
tv = get_mode_null(mode);
/* if the node was ONCE evaluated by all constants, but now
- this breakes AND we cat by partition a different result, switch to bottom.
+ this breaks AND we get from the argument partitions a different
+ result, switch to bottom.
This happens because initially all nodes are in the same partition ... */
- if (node->by_all_const && node->type.tv != tv)
+ if (node->type.tv != tv)
tv = tarval_bottom;
node->type.tv = tv;
} else {
}
} /* compute_Sub */
+/**
+ * (Re-)compute the type for an Eor. Special case: both nodes are congruent.
+ *
+ * @param node the node
+ */
+static void compute_Eor(node_t *node) {
+ ir_node *eor = node->node;
+ node_t *l = get_irn_node(get_Eor_left(eor));
+ node_t *r = get_irn_node(get_Eor_right(eor));
+ lattice_elem_t a = l->type;
+ lattice_elem_t b = r->type;
+ tarval *tv;
+
+ if (a.tv == tarval_top || b.tv == tarval_top) {
+ node->type.tv = tarval_top;
+ } else if (is_con(a) && is_con(b)) {
+ if (is_tarval(a.tv) && is_tarval(b.tv)) {
+ node->type.tv = tarval_eor(a.tv, b.tv);
+ } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
+ node->type = b;
+ } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
+ node->type = a;
+ } else {
+ node->type.tv = tarval_bottom;
+ }
+ } else if (r->part == l->part) {
+ ir_mode *mode = get_irn_mode(eor);
+ tv = get_mode_null(mode);
+
+ /* if the node was ONCE evaluated by all constants, but now
+ this breaks AND we get from the argument partitions a different
+ result, switch to bottom.
+ This happens because initially all nodes are in the same partition ... */
+ if (node->type.tv != tv)
+ tv = tarval_bottom;
+ node->type.tv = tv;
+ } else {
+ node->type.tv = tarval_bottom;
+ }
+} /* compute_Eor */
+
/**
* (Re-)compute the type for Cmp.
*
if (a.tv == tarval_top || b.tv == tarval_top) {
node->type.tv = tarval_top;
- } else if (is_con(a) && is_con(b)) {
- /* both nodes are constants, we can probably do something */
- node->type.tv = tarval_b_true;
} else if (r->part == l->part) {
/* both nodes congruent, we can probably do something */
node->type.tv = tarval_b_true;
+ } else if (is_con(a) && is_con(b)) {
+ /* both nodes are constants, we can probably do something */
+ node->type.tv = tarval_b_true;
} else {
node->type.tv = tarval_bottom;
}
-} /* compute_Proj_Cmp */
+} /* compute_Cmp */
/**
* (Re-)compute the type for a Proj(Cmp).
tarval *tv;
if (a.tv == tarval_top || b.tv == tarval_top) {
- node->type.tv = tarval_top;
+ node->type.tv = tarval_undefined;
} else if (is_con(a) && is_con(b)) {
default_compute(node);
- node->by_all_const = 1;
} else if (r->part == l->part &&
(!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
/*
* BEWARE: a == a is NOT always True for floating Point values, as
* NaN != NaN is defined, so we must check this here.
*/
- tv = new_tarval_from_long(pnc & pn_Cmp_Eq, mode_b);
+ tv = pnc & pn_Cmp_Eq ? tarval_b_true: tarval_b_false;
/* if the node was ONCE evaluated by all constants, but now
- this breakes AND we cat by partition a different result, switch to bottom.
+ this breaks AND we get from the argument partitions a different
+ result, switch to bottom.
This happens because initially all nodes are in the same partition ... */
- if (node->by_all_const && node->type.tv != tv)
+ if (node->type.tv != tv)
tv = tarval_bottom;
node->type.tv = tv;
} else {
ir_node *sel = get_Cond_selector(cond);
node_t *selector = get_irn_node(sel);
+ /*
+ * Note: it is crucial for the monotony that the Proj(Cond)
+ * are evaluated after all predecessors of the Cond selector are
+ * processed.
+ * Example
+ *
+ * if (x != 0)
+ *
+ * Due to the fact that 0 is a const, the Cmp gets immediately
+ * on the cprop list. It will be evaluated before x is evaluated,
+ * possibly leaving x as Top. When x is evaluated later, the Cmp
+ * might change its value.
+ * BUT if the Cond is evaluated before this happens, Proj(Cond, FALSE)
+ * gets R, and later changed to F if Cmp is evaluated to True!
+ *
+ * We prevent this by putting Conds in an extra cprop_X queue, which
+ * gets evaluated after the cprop queue is empty.
+ *
+ * Note that this even happens with Click's original algorithm, if
+ * Cmp(x, 0) is evaluated to True first and later changed to False
+ * if x was Top first and later changed to a Const ...
+ * It is unclear how Click solved that problem ...
+ *
+ * However, in rare cases even this does not help, if a Top reaches
+ * a compare through a Phi, then Proj(Cond) is evaluated, changing
+ * the type of the Phi to something other.
+ * So, we take the last resort and bind the type to R once
+ * it is calculated.
+ *
+ * (This might be even the way Click works around the whole problem).
+ *
+ * Finally, we may miss some optimization possibilities due to this:
+ *
+ * x = phi(Top, y)
+ * if (x == 0)
+ *
+ * If Top reaches the if first, then we decide for != here.
+ * If y later is evaluated to 0, we cannot revert this decision
+ * and must live with both outputs enabled. If this happens,
+ * we get an unresolved if (true) in the code ...
+ *
+ * In Click's version where this decision is done at the Cmp,
+ * the Cmp is NOT optimized away then (if y is evaluated to 1
+ * for instance) and we get a if (1 == 0) here ...
+ *
+ * Both solutions are suboptimal.
+ * At least, we could easily detect this problem and run
+ * cf_opt() (or even combo) again :-(
+ */
+ if (node->type.tv == tarval_reachable)
+ return;
+
if (get_irn_mode(sel) == mode_b) {
/* an IF */
if (pnc == pn_Cond_true) {
node->type.tv = tarval_reachable;
} else {
assert(selector->type.tv == tarval_top);
- node->type.tv = tarval_unreachable;
+ if (tarval_UNKNOWN == tarval_top) {
+ /* any condition based on Top is "!=" */
+ node->type.tv = tarval_unreachable;
+ } else {
+ node->type.tv = tarval_unreachable;
+ }
}
} else {
assert(pnc == pn_Cond_false);
node->type.tv = tarval_reachable;
} else {
assert(selector->type.tv == tarval_top);
- node->type.tv = tarval_unreachable;
+ if (tarval_UNKNOWN == tarval_top) {
+ /* any condition based on Top is "!=" */
+ node->type.tv = tarval_reachable;
+ } else {
+ node->type.tv = tarval_unreachable;
+ }
}
}
} else {
if (selector->type.tv == tarval_bottom) {
node->type.tv = tarval_reachable;
} else if (selector->type.tv == tarval_top) {
- node->type.tv = tarval_unreachable;
+ if (tarval_UNKNOWN == tarval_top &&
+ pnc == get_Cond_defaultProj(cond)) {
+ /* a switch based on Top is always "default" */
+ node->type.tv = tarval_reachable;
+ } else {
+ node->type.tv = tarval_unreachable;
+ }
} else {
long value = get_tarval_long(selector->type.tv);
if (pnc == get_Cond_defaultProj(cond)) {
node->type.tv = tarval_top;
return;
}
- if (get_irn_node(pred)->type.tv == tarval_top) {
+ if (get_irn_node(pred)->type.tv == tarval_top && !is_Cond(pred)) {
/* if the predecessor is Top, its Proj follow */
node->type.tv = tarval_top;
return;
node->type = pred->type;
} /* compute_Confirm */
-/**
- * (Re-)compute the type for a Max.
- *
- * @param node the node
- */
-static void compute_Max(node_t *node) {
- ir_node *op = node->node;
- node_t *l = get_irn_node(get_binop_left(op));
- node_t *r = get_irn_node(get_binop_right(op));
- lattice_elem_t a = l->type;
- lattice_elem_t b = r->type;
-
- if (a.tv == tarval_top || b.tv == tarval_top) {
- node->type.tv = tarval_top;
- } else if (is_con(a) && is_con(b)) {
- /* both nodes are constants, we can probably do something */
- if (a.tv == b.tv) {
- /* this case handles symconsts as well */
- node->type = a;
- } else {
- ir_mode *mode = get_irn_mode(op);
- tarval *tv_min = get_mode_min(mode);
-
- if (a.tv == tv_min)
- node->type = b;
- else if (b.tv == tv_min)
- node->type = a;
- else if (is_tarval(a.tv) && is_tarval(b.tv)) {
- if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
- node->type.tv = a.tv;
- else
- node->type.tv = b.tv;
- } else {
- node->type.tv = tarval_bad;
- }
- }
- } else if (r->part == l->part) {
- /* both nodes congruent, we can probably do something */
- node->type = a;
- } else {
- node->type.tv = tarval_bottom;
- }
-} /* compute_Max */
-
-/**
- * (Re-)compute the type for a Min.
- *
- * @param node the node
- */
-static void compute_Min(node_t *node) {
- ir_node *op = node->node;
- node_t *l = get_irn_node(get_binop_left(op));
- node_t *r = get_irn_node(get_binop_right(op));
- lattice_elem_t a = l->type;
- lattice_elem_t b = r->type;
-
- if (a.tv == tarval_top || b.tv == tarval_top) {
- node->type.tv = tarval_top;
- } else if (is_con(a) && is_con(b)) {
- /* both nodes are constants, we can probably do something */
- if (a.tv == b.tv) {
- /* this case handles symconsts as well */
- node->type = a;
- } else {
- ir_mode *mode = get_irn_mode(op);
- tarval *tv_max = get_mode_max(mode);
-
- if (a.tv == tv_max)
- node->type = b;
- else if (b.tv == tv_max)
- node->type = a;
- else if (is_tarval(a.tv) && is_tarval(b.tv)) {
- if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
- node->type.tv = a.tv;
- else
- node->type.tv = b.tv;
- } else {
- node->type.tv = tarval_bad;
- }
- }
- } else if (r->part == l->part) {
- /* both nodes congruent, we can probably do something */
- node->type = a;
- } else {
- node->type.tv = tarval_bottom;
- }
-} /* compute_Min */
-
/**
* (Re-)compute the type for a given node.
*
* @param node the node
*/
static void compute(node_t *node) {
+ ir_node *irn = node->node;
compute_func func;
- if (is_no_Block(node->node)) {
- node_t *block = get_irn_node(get_nodes_block(node->node));
+#ifndef VERIFY_MONOTONE
+ /*
+ * Once a node reaches bottom, the type cannot fall further
+ * in the lattice and we can stop computation.
+ * Do not take this exit if the monotony verifier is
+ * enabled to catch errors.
+ */
+ if (node->type.tv == tarval_bottom)
+ return;
+#endif
- if (block->type.tv == tarval_unreachable) {
- node->type.tv = tarval_top;
- return;
+ if (is_no_Block(irn)) {
+ /* for pinned nodes, check its control input */
+ if (get_irn_pinned(skip_Proj(irn)) == op_pin_state_pinned) {
+ node_t *block = get_irn_node(get_nodes_block(irn));
+
+ if (block->type.tv == tarval_unreachable) {
+ node->type.tv = tarval_top;
+ return;
+ }
}
}
return node;
} /* identity_comm_zero_binop */
-#define identity_Add identity_comm_zero_binop
-#define identity_Or identity_comm_zero_binop
+/**
+ * Calculates the Identity for Shift nodes.
+ */
+static node_t *identity_shift(node_t *node) {
+ ir_node *op = node->node;
+ node_t *b = get_irn_node(get_binop_right(op));
+ ir_mode *mode = get_irn_mode(b->node);
+ tarval *zero;
+
+ /* note: no input should be tarval_top, else the binop would also be
+ * Top and would not be split. */
+ zero = get_mode_null(mode);
+ if (b->type.tv == zero)
+ return get_irn_node(get_binop_left(op));
+ return node;
+} /* identity_shift */
/**
* Calculates the Identity for Mul nodes.
if (b->type.tv == get_mode_null(mode))
return get_irn_node(get_Sub_left(sub));
return node;
-} /* identity_Mul */
+} /* identity_Sub */
/**
* Calculates the Identity for And nodes.
ir_node *mux = node->node;
node_t *t = get_irn_node(get_Mux_true(mux));
node_t *f = get_irn_node(get_Mux_false(mux));
- node_t *sel;
+ /*node_t *sel; */
if (t->part == f->part)
return t;
return node;
} /* identity_Mux */
-/**
- * Calculates the Identity for Min nodes.
- */
-static node_t *identity_Min(node_t *node) {
- ir_node *op = node->node;
- node_t *a = get_irn_node(get_binop_left(op));
- node_t *b = get_irn_node(get_binop_right(op));
- ir_mode *mode = get_irn_mode(op);
- tarval *tv_max;
-
- if (a->part == b->part) {
- /* leader of multiple predecessors */
- return a;
- }
-
- /* works even with NaN */
- tv_max = get_mode_max(mode);
- if (a->type.tv == tv_max)
- return b;
- if (b->type.tv == tv_max)
- return a;
- return node;
-} /* identity_Min */
-
-/**
- * Calculates the Identity for Max nodes.
- */
-static node_t *identity_Max(node_t *node) {
- ir_node *op = node->node;
- node_t *a = get_irn_node(get_binop_left(op));
- node_t *b = get_irn_node(get_binop_right(op));
- ir_mode *mode = get_irn_mode(op);
- tarval *tv_min;
-
- if (a->part == b->part) {
- /* leader of multiple predecessors */
- return a;
- }
-
- /* works even with NaN */
- tv_min = get_mode_min(mode);
- if (a->type.tv == tv_min)
- return b;
- if (b->type.tv == tv_min)
- return a;
- return node;
-} /* identity_Max */
-
/**
* Calculates the Identity for nodes.
*/
switch (get_irn_opcode(irn)) {
case iro_Phi:
return identity_Phi(node);
- case iro_Add:
- return identity_Add(node);
case iro_Mul:
return identity_Mul(node);
+ case iro_Add:
case iro_Or:
- return identity_Or(node);
+ case iro_Eor:
+ return identity_comm_zero_binop(node);
+ case iro_Shr:
+ case iro_Shl:
+ case iro_Shrs:
+ case iro_Rotl:
+ return identity_shift(node);
case iro_And:
return identity_And(node);
case iro_Sub:
return identity_Confirm(node);
case iro_Mux:
return identity_Mux(node);
- case iro_Min:
- return identity_Min(node);
- case iro_Max:
- return identity_Max(node);
default:
return node;
}
} /* segregate_def_use_chain_1 */
/**
- * Node follower is a (new) follower of leader, segregate Leader
- * out edges. If follower is a n-congruent Input identity, all follower
- * inputs congruent to follower are also leader.
+ * Node follower is a (new) follower; segregate its Leader's
+ * out edges.
*
* @param follower the follower IR node
*/
DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
fallen = NULL;
n_fallen = 0;
- while (! list_empty(&X->cprop)) {
+ for (;;) {
+ int cprop_empty = list_empty(&X->cprop);
+ int cprop_X_empty = list_empty(&X->cprop_X);
+
+ if (cprop_empty && cprop_X_empty) {
+ /* both cprop lists are empty */
+ break;
+ }
+
/* remove the first Node x from X.cprop */
- x = list_entry(X->cprop.next, node_t, cprop_list);
+ if (cprop_empty) {
+ /* Get a node from the cprop_X list only if
+ * all data nodes are processed.
+ * This ensures that all inputs of the Cond
+ * predecessor are processed if its type is still Top.
+ */
+ x = list_entry(X->cprop_X.next, node_t, cprop_list);
+ } else {
+ x = list_entry(X->cprop.next, node_t, cprop_list);
+ }
+
//assert(x->part == X);
list_del(&x->cprop_list);
x->on_cprop = 0;
DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
compute(x);
if (x->type.tv != old_type.tv) {
- verify_type(old_type, x->type);
DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
+ verify_type(old_type, x);
if (x->on_fallen == 0) {
/* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
if (n_fallen > 0 && n_fallen != X->n_leader) {
DB((dbg, LEVEL_2, "Splitting part%d by fallen\n", X->nr));
Y = split(&X, fallen, env);
+ /*
+ * We have split out the fallen nodes. The type of the result
+ * partition is NOT set yet.
+ */
+ Y->type_is_T_or_C = 0;
} else {
Y = X;
}
for (x = fallen; x != NULL; x = x->next)
x->on_fallen = 0;
-#ifndef NO_FOLLOWER
if (old_type_was_T_or_C) {
node_t *y, *tmp;
- if (Y->on_worklist == 0)
- add_to_worklist(Y, env);
-
/* check if some nodes will make the leader -> follower transition */
list_for_each_entry_safe(node_t, y, tmp, &Y->Leader, node_list) {
- if (!is_Phi(y->node) &&
- y->type.tv != tarval_top && ! is_con(y->type)) {
+ if (y->type.tv != tarval_top && ! is_con(y->type)) {
node_t *eq_node = identity(y);
if (eq_node != y && eq_node->part == y->part) {
}
}
}
-#endif
split_by(Y, env);
}
} /* propagate */
partition_t *part = node->part;
if (part->n_leader > 1 || node->is_follower) {
- if (node->is_follower)
+ if (node->is_follower) {
DB((dbg, LEVEL_2, "Replacing follower %+F\n", node->node));
+ }
else
DB((dbg, LEVEL_2, "Found congruence class for %+F\n", node->node));
return node->node;
} /* get_leader */
+/**
+ * Returns non-zero if a mode_T node has only one reachable output.
+ */
+static int only_one_reachable_proj(ir_node *n) {
+ int i, k = 0;
+
+ for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
+ ir_node *proj = get_irn_out(n, i);
+ node_t *node;
+
+ /* skip non-control flow Proj's */
+ if (get_irn_mode(proj) != mode_X)
+ continue;
+
+ node = get_irn_node(proj);
+ if (node->type.tv == tarval_reachable) {
+ if (++k > 1)
+ return 0;
+ }
+ }
+ return 1;
+} /* only_one_reachable_proj */
+
/**
* Return non-zero if the control flow predecessor node pred
* is the only reachable control flow exit of its block.
*
- * @param pred the control flow exit
+ * @param pred the control flow exit
+ * @param block the destination block
*/
-static int can_exchange(ir_node *pred) {
- if (is_Start(pred))
+static int can_exchange(ir_node *pred, ir_node *block) {
+ if (is_Start(pred) || has_Block_label(block))
return 0;
else if (is_Jmp(pred))
return 1;
else if (get_irn_mode(pred) == mode_T) {
- int i, k;
-
/* if the predecessor block has more than one
- reachable outputs we cannot remove the block */
- k = 0;
- for (i = get_irn_n_outs(pred) - 1; i >= 0; --i) {
- ir_node *proj = get_irn_out(pred, i);
- node_t *node;
-
- /* skip non-control flow Proj's */
- if (get_irn_mode(proj) != mode_X)
- continue;
-
- node = get_irn_node(proj);
- if (node->type.tv == tarval_reachable) {
- if (++k > 1)
- return 0;
- }
- }
- return 1;
+ reachable outputs we cannot remove the block */
+ return only_one_reachable_proj(pred);
}
return 0;
-}
+} /* can_exchange */
/**
* Block Post-Walker, apply the analysis results on control flow by
ir_node **ins, **in_X;
ir_node *phi, *next;
- if (block == get_irg_end_block(current_ir_graph) ||
- block == get_irg_start_block(current_ir_graph)) {
+ n = get_Block_n_cfgpreds(block);
+
+ if (node->type.tv == tarval_unreachable) {
+ env->modified = 1;
+
+ for (i = n - 1; i >= 0; --i) {
+ ir_node *pred = get_Block_cfgpred(block, i);
+
+ if (! is_Bad(pred)) {
+ node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
+
+ if (pred_bl->flagged == 0) {
+ pred_bl->flagged = 3;
+
+ if (pred_bl->type.tv == tarval_reachable) {
+ /*
+ * We will remove an edge from block to its pred.
+ * This might leave the pred block as an endless loop
+ */
+ if (! is_backedge(block, i))
+ keep_alive(pred_bl->node);
+ }
+ }
+ }
+ }
+
/* the EndBlock is always reachable even if the analysis
finds out the opposite :-) */
- return;
- }
- if (node->type.tv == tarval_unreachable) {
- /* mark dead blocks */
- set_Block_dead(block);
+ if (block != get_irg_end_block(current_ir_graph)) {
+ /* mark dead blocks */
+ set_Block_dead(block);
+ DB((dbg, LEVEL_1, "Removing dead %+F\n", block));
+ } else {
+ /* the endblock is unreachable */
+ set_irn_in(block, 0, NULL);
+ }
return;
}
- n = get_Block_n_cfgpreds(block);
-
if (n == 1) {
/* only one predecessor combine */
ir_node *pred = skip_Proj(get_Block_cfgpred(block, 0));
- if (can_exchange(pred)) {
- exchange(block, get_nodes_block(pred));
+ if (can_exchange(pred, block)) {
+ ir_node *new_block = get_nodes_block(pred);
+ DB((dbg, LEVEL_1, "Fuse %+F with %+F\n", block, new_block));
+ DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
+ exchange(block, new_block);
+ node->node = new_block;
env->modified = 1;
}
return;
if (node->type.tv == tarval_reachable) {
in_X[k++] = pred;
+ } else {
+ DB((dbg, LEVEL_1, "Removing dead input %d from %+F (%+F)\n", i, block, pred));
+ if (! is_Bad(pred)) {
+ node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
+
+ if (pred_bl->flagged == 0) {
+ pred_bl->flagged = 3;
+
+ if (pred_bl->type.tv == tarval_reachable) {
+ /*
+ * We will remove an edge from block to its pred.
+ * This might leave the pred block as an endless loop
+ */
+ if (! is_backedge(block, i))
+ keep_alive(pred_bl->node);
+ }
+ }
+ }
}
}
if (k >= n)
return;
+ /* fix Phi's */
NEW_ARR_A(ir_node *, ins, n);
for (phi = get_Block_phis(block); phi != NULL; phi = next) {
node_t *node = get_irn_node(phi);
if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
/* this Phi is replaced by a constant */
tarval *tv = node->type.tv;
- ir_node *c = new_r_Const(current_ir_graph, block, get_tarval_mode(tv), tv);
+ ir_node *c = new_Const(tv);
set_irn_node(c, node);
node->node = c;
DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", phi, c));
+ DBG_OPT_COMBO(phi, c, FS_OPT_COMBO_CONST);
exchange(phi, c);
env->modified = 1;
} else {
ins[j++] = get_Phi_pred(phi, i);
}
}
- if (j <= 1) {
+ if (j == 1) {
/* this Phi is replaced by a single predecessor */
ir_node *s = ins[0];
+ node_t *phi_node = get_irn_node(phi);
node->node = s;
DB((dbg, LEVEL_1, "%+F is replaced by %+F because of cf change\n", phi, s));
+ DBG_OPT_COMBO(phi, s, FS_OPT_COMBO_FOLLOWER);
exchange(phi, s);
+ phi_node->node = s;
env->modified = 1;
} else {
set_irn_in(phi, j, ins);
}
}
- if (k <= 1) {
+ /* fix block */
+ if (k == 1) {
/* this Block has only one live predecessor */
ir_node *pred = skip_Proj(in_X[0]);
- if (can_exchange(pred)) {
- exchange(block, get_nodes_block(pred));
+ if (can_exchange(pred, block)) {
+ ir_node *new_block = get_nodes_block(pred);
+ DBG_OPT_COMBO(block, new_block, FS_OPT_COMBO_CF);
+ exchange(block, new_block);
+ node->node = new_block;
env->modified = 1;
+ return;
}
- } else {
- set_irn_in(block, k, in_X);
- env->modified = 1;
}
-}
+ set_irn_in(block, k, in_X);
+ env->modified = 1;
+} /* apply_cf */
+
+/**
+ * Exchange a node by its leader.
+ * Beware: in rare cases the mode might be wrong here, for instance
+ * AddP(x, NULL) is a follower of x, but with different mode.
+ * Fix it here.
+ */
+static void exchange_leader(ir_node *irn, ir_node *leader) {
+ ir_mode *mode = get_irn_mode(irn);
+ if (mode != get_irn_mode(leader)) {
+ /* The conv is a no-op, so we are free to place it
+ * either in the block of the leader OR in irn's block.
+ * Probably placing it into the leader's block might reduce
+ * the number of Conv due to CSE. */
+ ir_node *block = get_nodes_block(leader);
+ dbg_info *dbg = get_irn_dbg_info(irn);
+
+ leader = new_rd_Conv(dbg, current_ir_graph, block, leader, mode);
+ }
+ exchange(irn, leader);
+} /* exchange_leader */
+
+/**
+ * Check, if all users of a mode_M node are dead. Use
+ * the Def-Use edges for this purpose, as they still
+ * reflect the situation.
+ */
+static int all_users_are_dead(const ir_node *irn) {
+ int i, n = get_irn_n_outs(irn);
+
+ for (i = 1; i <= n; ++i) {
+ const ir_node *succ = irn->out[i].use;
+ const node_t *block = get_irn_node(get_nodes_block(succ));
+ const node_t *node;
+
+ if (block->type.tv == tarval_unreachable) {
+ /* block is unreachable */
+ continue;
+ }
+ node = get_irn_node(succ);
+ if (node->type.tv != tarval_top) {
+ /* found a reachable user */
+ return 0;
+ }
+ }
+ /* all users are unreachable */
+ return 1;
+} /* all_users_are_dead */
+
+/**
+ * Walker: Find reachable mode_M nodes that have only
+ * unreachable users. These nodes must be kept later.
+ */
+static void find_kept_memory(ir_node *irn, void *ctx) {
+ environment_t *env = ctx;
+ node_t *node, *block;
+
+ if (get_irn_mode(irn) != mode_M)
+ return;
+
+ block = get_irn_node(get_nodes_block(irn));
+ if (block->type.tv == tarval_unreachable)
+ return;
+
+ node = get_irn_node(irn);
+ if (node->type.tv == tarval_top)
+ return;
+
+ /* ok, we found a live memory node. */
+ if (all_users_are_dead(irn)) {
+ DB((dbg, LEVEL_1, "%+F must be kept\n", irn));
+ ARR_APP1(ir_node *, env->kept_memory, irn);
+ }
+} /* find_kept_memory */
/**
* Post-Walker, apply the analysis results;
DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
exchange(irn, bad);
env->modified = 1;
- }
- else if (node->type.tv == tarval_unreachable) {
- ir_node *bad = get_irg_bad(current_ir_graph);
-
- /* see comment above */
- set_irn_node(bad, node);
- node->node = bad;
- DB((dbg, LEVEL_1, "%+F is unreachable\n", irn));
- exchange(irn, bad);
- env->modified = 1;
+ } else if (node->type.tv == tarval_top) {
+ ir_mode *mode = get_irn_mode(irn);
+
+ if (mode == mode_M) {
+ /* never kill a mode_M node */
+ if (is_Proj(irn)) {
+ ir_node *pred = get_Proj_pred(irn);
+ node_t *pnode = get_irn_node(pred);
+
+ if (pnode->type.tv == tarval_top) {
+ /* skip the predecessor */
+ ir_node *mem = get_memop_mem(pred);
+ node->node = mem;
+ DB((dbg, LEVEL_1, "%+F computes Top, replaced by %+F\n", irn, mem));
+ exchange(irn, mem);
+ env->modified = 1;
+ }
+ }
+ /* leave other nodes, especially PhiM */
+ } else if (mode == mode_T) {
+ /* Do not kill mode_T nodes, kill their Projs */
+ } else if (! is_Unknown(irn)) {
+ /* don't kick away Unknown's, they might be still needed */
+ ir_node *unk = new_r_Unknown(current_ir_graph, mode);
+
+ /* control flow should already be handled at apply_cf() */
+ assert(mode != mode_X);
+
+ /* see comment above */
+ set_irn_node(unk, node);
+ node->node = unk;
+ DB((dbg, LEVEL_1, "%+F computes Top\n", irn));
+ exchange(irn, unk);
+ env->modified = 1;
+ }
}
else if (get_irn_mode(irn) == mode_X) {
if (is_Proj(irn)) {
ir_node *cond = get_Proj_pred(irn);
if (is_Cond(cond)) {
- node_t *sel = get_irn_node(get_Cond_selector(cond));
-
- if (is_tarval(sel->type.tv) && tarval_is_constant(sel->type.tv)) {
- /* Cond selector is a constant, make a Jmp */
+ if (only_one_reachable_proj(cond)) {
ir_node *jmp = new_r_Jmp(current_ir_graph, block->node);
set_irn_node(jmp, node);
node->node = jmp;
DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, jmp));
+ DBG_OPT_COMBO(irn, jmp, FS_OPT_COMBO_CF);
exchange(irn, jmp);
env->modified = 1;
+ } else {
+ node_t *sel = get_irn_node(get_Cond_selector(cond));
+ tarval *tv = sel->type.tv;
+
+ if (is_tarval(tv) && tarval_is_constant(tv)) {
+ /* The selector is a constant, but more
+ * than one output is active: An unoptimized
+ * case found. */
+ env->unopt_cf = 1;
+ }
}
}
}
*/
if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
/* can be replaced by a constant */
- ir_node *c = new_r_Const(current_ir_graph, block->node, get_tarval_mode(tv), tv);
+ ir_node *c = new_Const(tv);
set_irn_node(c, node);
node->node = c;
DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
- exchange(irn, c);
+ DBG_OPT_COMBO(irn, c, FS_OPT_COMBO_CONST);
+ exchange_leader(irn, c);
env->modified = 1;
}
} else if (is_entity(node->type.sym.entity_p)) {
if (! is_SymConst(irn)) {
- /* can be replaced by a Symconst */
+ /* can be replaced by a SymConst */
ir_node *symc = new_r_SymConst(current_ir_graph, block->node, get_irn_mode(irn), node->type.sym, symconst_addr_ent);
set_irn_node(symc, node);
node->node = symc;
DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, symc));
- exchange(irn, symc);
+ DBG_OPT_COMBO(irn, symc, FS_OPT_COMBO_CONST);
+ exchange_leader(irn, symc);
env->modified = 1;
}
} else if (is_Confirm(irn)) {
ir_node *leader = get_leader(node);
if (leader != irn) {
- DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
- exchange(irn, leader);
- env->modified = 1;
+ int non_strict_phi = 0;
+
+ /*
+ * Beware: Do not remove Phi(Unknown, ..., x, ..., Unknown)
+ * as this might create non-strict programs.
+ */
+ if (node->is_follower && is_Phi(irn) && !is_Unknown(leader)) {
+ int i;
+
+ for (i = get_Phi_n_preds(irn) - 1; i >= 0; --i) {
+ ir_node *pred = get_Phi_pred(irn, i);
+
+ if (is_Unknown(pred)) {
+ non_strict_phi = 1;
+ break;
+ }
+ }
+ }
+ if (! non_strict_phi) {
+ DB((dbg, LEVEL_1, "%+F from part%d is replaced by %+F\n", irn, node->part->nr, leader));
+ if (node->is_follower)
+ DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_FOLLOWER);
+ else
+ DBG_OPT_COMBO(irn, leader, FS_OPT_COMBO_CONGRUENT);
+ exchange_leader(irn, leader);
+ env->modified = 1;
+ }
}
}
}
ir_node *ka = get_End_keepalive(end, i);
node_t *node = get_irn_node(ka);
- /* Use the is_flagged bit to mark already visited nodes.
- * This should not be ready but better safe than sorry. */
- if (node->is_flagged == 0) {
- node->is_flagged = 1;
-
- if (! is_Block(ka))
- node = get_irn_node(get_nodes_block(ka));
+ if (! is_Block(ka))
+ node = get_irn_node(get_nodes_block(ka));
- if (node->type.tv != tarval_unreachable)
- in[j++] = ka;
- }
+ if (node->type.tv != tarval_unreachable && !is_Bad(ka))
+ in[j++] = ka;
}
if (j != n) {
set_End_keepalives(end, j, in);
SET(Phi);
SET(Add);
SET(Sub);
+ SET(Eor);
SET(SymConst);
SET(Cmp);
SET(Proj);
SET(Confirm);
+ SET(Return);
SET(End);
+ SET(Call);
+} /* set_compute_functions */
- if (op_Max != NULL)
- SET(Max);
- if (op_Min != NULL)
- SET(Min);
+/**
+ * Add memory keeps.
+ */
+static void add_memory_keeps(ir_node **kept_memory, int len) {
+ ir_node *end = get_irg_end(current_ir_graph);
+ int i;
+ ir_nodeset_t set;
-} /* set_compute_functions */
+ ir_nodeset_init(&set);
-static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
- ir_node *irn = local != NULL ? local : n;
- node_t *node = get_irn_node(irn);
+ /* check, if those nodes are already kept */
+ for (i = get_End_n_keepalives(end) - 1; i >= 0; --i)
+ ir_nodeset_insert(&set, get_End_keepalive(end, i));
- ir_fprintf(F, "info2 : \"partition %u type %+F\"\n", node->part->nr, node->type);
- return 1;
-}
+ for (i = len - 1; i >= 0; --i) {
+ ir_node *ka = kept_memory[i];
+
+ if (! ir_nodeset_contains(&set, ka)) {
+ add_End_keepalive(end, ka);
+ }
+ }
+ ir_nodeset_destroy(&set);
+} /* add_memory_keeps */
void combo(ir_graph *irg) {
environment_t env;
ir_node *initial_bl;
node_t *start;
ir_graph *rem = current_ir_graph;
+ int len;
current_ir_graph = irg;
/* register a debug mask */
FIRM_DBG_REGISTER(dbg, "firm.opt.combo");
- //firm_dbg_set_mask(dbg, SET_LEVEL_3);
DB((dbg, LEVEL_1, "Doing COMBO for %+F\n", irg));
#endif
env.opcode2id_map = new_set(cmp_opcode, iro_Last * 4);
env.type2id_map = pmap_create();
+ env.kept_memory = NEW_ARR_F(ir_node *, 0);
env.end_idx = get_opt_global_cse() ? 0 : -1;
env.lambda_input = 0;
env.modified = 0;
+ env.unopt_cf = 0;
+ /* options driving the optimization */
+ env.commutative = 1;
+ env.opt_unknown = 1;
assure_irg_outs(irg);
+ assure_cf_loop(irg);
/* we have our own value_of function */
set_value_of_func(get_node_tarval);
set_compute_functions();
DEBUG_ONLY(part_nr = 0);
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
+
+ if (env.opt_unknown)
+ tarval_UNKNOWN = tarval_top;
+ else
+ tarval_UNKNOWN = tarval_bad;
+
/* create the initial partition and place it on the work list */
env.initial = new_partition(&env);
add_to_worklist(env.initial, &env);
- irg_walk_graph(irg, init_block_phis, create_initial_partitions, &env);
+ irg_walk_graph(irg, create_initial_partitions, init_block_phis, &env);
+
+ /* set the hook: from now, every node has a partition and a type */
+ DEBUG_ONLY(set_dump_node_vcgattr_hook(dump_partition_hook));
/* all nodes on the initial partition have type Top */
env.initial->type_is_T_or_C = 1;
} while (env.cprop != NULL || env.worklist != NULL);
dump_all_partitions(&env);
+ check_all_partitions(&env);
#if 0
- set_dump_node_vcgattr_hook(dump_partition_hook);
dump_ir_block_graph(irg, "-partition");
- set_dump_node_vcgattr_hook(NULL);
-#else
- (void)dump_partition_hook;
#endif
/* apply the result */
+
+ /* check, which nodes must be kept */
+ irg_walk_graph(irg, NULL, find_kept_memory, &env);
+
+ /* kill unreachable control flow */
irg_block_walk_graph(irg, NULL, apply_cf, &env);
- irg_walk_graph(irg, NULL, apply_result, &env);
+ /* Kill keep-alives of dead blocks: this speeds up apply_result()
+ * and fixes assertion because dead cf to dead blocks is NOT removed by
+ * apply_cf(). */
apply_end(get_irg_end(irg), &env);
+ irg_walk_graph(irg, NULL, apply_result, &env);
+
+ len = ARR_LEN(env.kept_memory);
+ if (len > 0)
+ add_memory_keeps(env.kept_memory, len);
+
+ if (env.unopt_cf) {
+ DB((dbg, LEVEL_1, "Unoptimized Control Flow left"));
+ }
if (env.modified) {
/* control flow might changed */
set_irg_loopinfo_inconsistent(irg);
}
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
+
+ /* remove the partition hook */
+ DEBUG_ONLY(set_dump_node_vcgattr_hook(NULL));
+
+ DEL_ARR_F(env.kept_memory);
pmap_destroy(env.type2id_map);
del_set(env.opcode2id_map);
obstack_free(&env.obst, NULL);