+ /* Let map be an empty mapping from the range of What to (local) list of Nodes. */
+ listmap_init(&map);
+ list_for_each_entry(node_t, x, &X->Leader, node_list) {
+ void *id = What(x, env);
+ listmap_entry_t *entry;
+
+ if (id == NULL) {
+ /* input not allowed, ignore */
+ continue;
+ }
+ /* Add x to map[What(x)]. */
+ entry = listmap_find(&map, id);
+ x->next = entry->list;
+ entry->list = x;
+ }
+ /* Let P be a set of Partitions. */
+
+ /* for all sets S except one in the range of map do */
+ for (iter = map.values; iter != NULL; iter = iter->next) {
+ if (iter->next == NULL) {
+ /* this is the last entry, ignore */
+ break;
+ }
+ S = iter->list;
+
+ /* Add SPLIT( X, S ) to P. */
+ DB((dbg, LEVEL_2, "Split part%d by WHAT = %s\n", X->nr, what_reason));
+ R = split(&X, S, env);
+ R->split_next = *P;
+ *P = R;
+ }
+ /* Add X to P. */
+ X->split_next = *P;
+ *P = X;
+
+ listmap_term(&map);
+ return *P;
+} /* split_by_what */
+
+/** lambda n.(n.type): map a node onto its computed lattice type. */
+static void *lambda_type(const node_t *node, environment_t *env) {
+	/* the environment is not needed to read a node's type */
+	(void)env;
+	return node->type.tv;
+} /* lambda_type */
+
+/**
+ * lambda n.(n.opcode): map a node onto a canonical "extended opcode" key.
+ *
+ * The key combines opcode, mode, arity and an opcode-specific attribute.
+ * Keys are unified in env->opcode2id_map, so the returned entry pointer can
+ * be compared directly: equal keys yield the same pointer.
+ */
+static void *lambda_opcode(const node_t *node, environment_t *env) {
+ opcode_key_t key, *entry;
+ ir_node *irn = node->node;
+
+ key.code = get_irn_opcode(irn);
+ key.mode = get_irn_mode(irn);
+ key.arity = get_irn_arity(irn);
+ /* NOTE(review): proj and ent alias the same union storage, only the last
+    write survives; presumably intended to zero the whole union before the
+    set hash/compare below — confirm all union members are covered. */
+ key.u.proj = 0;
+ key.u.ent = NULL;
+
+ /* add the opcode-specific attribute that distinguishes otherwise
+    equal-looking nodes */
+ switch (get_irn_opcode(irn)) {
+ case iro_Proj:
+ key.u.proj = get_Proj_proj(irn);
+ break;
+ case iro_Sel:
+ key.u.ent = get_Sel_entity(irn);
+ break;
+ case iro_Conv:
+ key.u.intVal = get_Conv_strict(irn);
+ break;
+ case iro_Div:
+ key.u.intVal = is_Div_remainderless(irn);
+ break;
+ default:
+ break;
+ }
+
+ /* unify the key in the set; equal keys return the identical entry */
+ entry = set_insert(env->opcode2id_map, &key, sizeof(key), opcode_hash(&key));
+ return entry;
+} /* lambda_opcode */
+
+/** lambda n.(n[i].partition): map a node onto the partition of its i-th input.
+ *  The input index i is passed through env->lambda_input; i == -1 selects the
+ *  block input of the Proj-skipped node. */
+static void *lambda_partition(const node_t *node, environment_t *env) {
+ ir_node *skipped = skip_Proj(node->node);
+ ir_node *pred;
+ node_t *p;
+ int i = env->lambda_input;
+
+ if (i >= get_irn_arity(node->node)) {
+ /*
+ * We are outside the allowed range: This can happen even
+ * if we have split by opcode first: doing so might move Followers
+ * to Leaders and those will have a different opcode!
+ * Note that in this case the partition is on the cprop list and will be
+ * split again.
+ */
+ return NULL;
+ }
+
+ /* ignore the "control input" for non-pinned nodes
+ if we are running in GCSE mode */
+ if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
+ return NULL;
+
+ /* for i == -1 (the block input) ask the Proj-skipped node */
+ pred = i == -1 ? get_irn_n(skipped, i) : get_irn_n(node->node, i);
+ p = get_irn_node(pred);
+
+ return p->part;
+} /* lambda_partition */
+
+/** lambda n.(n[i].partition) for commutative nodes.
+ *  Like lambda_partition(), but for commutative operations the two operand
+ *  partitions are normalized by pointer order, so that x op y and y op x
+ *  map onto the same (partition, input) pairs. */
+static void *lambda_commutative_partition(const node_t *node, environment_t *env) {
+ ir_node *irn = node->node;
+ ir_node *skipped = skip_Proj(irn);
+ ir_node *pred, *left, *right;
+ node_t *p;
+ partition_t *pl, *pr;
+ int i = env->lambda_input;
+
+ if (i >= get_irn_arity(node->node)) {
+ /*
+ * We are outside the allowed range: This can happen even
+ * if we have split by opcode first: doing so might move Followers
+ * to Leaders and those will have a different opcode!
+ * Note that in this case the partition is on the cprop list and will be
+ * split again.
+ */
+ return NULL;
+ }
+
+ /* ignore the "control input" for non-pinned nodes
+ if we are running in GCSE mode */
+ if (i < env->end_idx && get_irn_pinned(skipped) != op_pin_state_pinned)
+ return NULL;
+
+ if (i == -1) {
+ /* block input: no normalization needed */
+ pred = get_irn_n(skipped, i);
+ p = get_irn_node(pred);
+ return p->part;
+ }
+
+ if (is_op_commutative(get_irn_op(irn))) {
+ /* normalize partition order by returning the "smaller" on input 0,
+ the "bigger" on input 1. */
+ left = get_binop_left(irn);
+ pl = get_irn_node(left)->part;
+ right = get_binop_right(irn);
+ pr = get_irn_node(right)->part;
+
+ if (i == 0)
+ return pl < pr ? pl : pr;
+ else
+ return pl > pr ? pl : pr;
+ } else {
+ /* a not split out Follower */
+ pred = get_irn_n(irn, i);
+ p = get_irn_node(pred);
+
+ return p->part;
+ }
+} /* lambda_commutative_partition */
+
+/**
+ * Returns non-zero if a lattice type is a constant — i.e. neither Top
+ * nor Bottom. Both tarval constants and entity addresses count.
+ */
+static int is_con(const lattice_elem_t type) {
+	/* be conservative */
+	return is_tarval(type.tv)
+		? tarval_is_constant(type.tv)
+		: is_entity(type.sym.entity_p);
+} /* is_con */
+
+/**
+ * Implements split_by().
+ *
+ * Splits a partition into congruence-candidate subsets, in three nested
+ * stages: first by lattice type, then by (extended) opcode, and finally by
+ * the partition of every input (including the block input, index -1).
+ *
+ * @param X the partition to split
+ * @param env the environment
+ */
+static void split_by(partition_t *X, environment_t *env) {
+ partition_t *I, *P = NULL;
+ int input;
+
+ dump_partition("split_by", X);
+
+ if (X->n_leader == 1) {
+ /* we have only one leader, no need to split, just check its type */
+ node_t *x = get_first_node(X);
+ X->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
+ return;
+ }
+
+ /* stage 1: split by the lattice type of the nodes */
+ DEBUG_ONLY(what_reason = "lambda n.(n.type)";)
+ P = split_by_what(X, lambda_type, &P, env);
+ dump_split_list(P);
+
+ /* adjust the type tags, we have split partitions by type */
+ for (I = P; I != NULL; I = I->split_next) {
+ node_t *x = get_first_node(I);
+ I->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
+ }
+
+ do {
+ partition_t *Y = P;
+
+ P = P->split_next;
+ if (Y->n_leader > 1) {
+ /* we do not want split the TOP or constant partitions */
+ if (! Y->type_is_T_or_C) {
+ partition_t *Q = NULL;
+
+ /* stage 2: split by opcode */
+ DEBUG_ONLY(what_reason = "lambda n.(n.opcode)";)
+ Q = split_by_what(Y, lambda_opcode, &Q, env);
+ dump_split_list(Q);
+
+ do {
+ partition_t *Z = Q;
+
+ Q = Q->split_next;
+ if (Z->n_leader > 1) {
+ const node_t *first = get_first_node(Z);
+ int arity = get_irn_arity(first->node);
+ partition_t *R, *S;
+ what_func what = lambda_partition;
+ DEBUG_ONLY(char buf[64];)
+
+ if (env->commutative && is_op_commutative(get_irn_op(first->node)))
+ what = lambda_commutative_partition;
+
+ /*
+ * BEWARE: during splitting by input 2 for instance we might
+ * create new partitions which are different by input 1, so collect
+ * them and split further.
+ */
+ Z->split_next = NULL;
+ R = Z;
+ S = NULL;
+ /* stage 3: split by each input's partition; -1 is the block input */
+ for (input = arity - 1; input >= -1; --input) {
+ do {
+ partition_t *Z_prime = R;
+
+ R = R->split_next;
+ if (Z_prime->n_leader > 1) {
+ env->lambda_input = input;
+ DEBUG_ONLY(snprintf(buf, sizeof(buf), "lambda n.(n[%d].partition)", input);)
+ DEBUG_ONLY(what_reason = buf;)
+ S = split_by_what(Z_prime, what, &S, env);
+ dump_split_list(S);
+ } else {
+ /* singleton partitions cannot be split further, keep them */
+ Z_prime->split_next = S;
+ S = Z_prime;
+ }
+ } while (R != NULL);
+ /* the results of this input become the work list of the next */
+ R = S;
+ S = NULL;
+ }
+ }
+ } while (Q != NULL);
+ }
+ }
+ } while (P != NULL);
+} /* split_by */
+
+/**
+ * (Re-)compute the type for a given node: the fallback transfer function.
+ *
+ * @param node the node
+ */
+static void default_compute(node_t *node) {
+	ir_node *irn = node->node;
+	int i, arity = get_irn_arity(irn);
+
+	/* Top is absorbing: any Top data input forces the result to Top. */
+	for (i = 0; i < arity; ++i) {
+		node_t *p = get_irn_node(get_irn_n(irn, i));
+
+		if (p->type.tv == tarval_top) {
+			node->type.tv = tarval_top;
+			return;
+		}
+	}
+
+	/* control flow (mode_X) counts as reachable here; everything else
+	   gets its value from constant folding */
+	node->type.tv = (get_irn_mode(irn) == mode_X)
+		? tarval_reachable
+		: computed_value(irn);
+} /* default_compute */
+
+/**
+ * (Re-)compute the type for a Block node: reachable iff at least one
+ * control flow predecessor is reachable.
+ *
+ * @param node the node
+ */
+static void compute_Block(node_t *node) {
+	ir_node *block = node->node;
+	int i, n;
+
+	/* the start block is reachable by definition */
+	if (block == get_irg_start_block(current_ir_graph)) {
+		node->type.tv = tarval_reachable;
+		return;
+	}
+
+	n = get_Block_n_cfgpreds(block);
+	for (i = 0; i < n; ++i) {
+		node_t *pred = get_irn_node(get_Block_cfgpred(block, i));
+
+		/* one reachable predecessor suffices */
+		if (pred->type.tv == tarval_reachable) {
+			node->type.tv = tarval_reachable;
+			return;
+		}
+	}
+	/* no reachable predecessor: stay at Top (unreachable) */
+	node->type.tv = tarval_top;
+} /* compute_Block */
+
+/**
+ * (Re-)compute the type for a Bad node.
+ *
+ * @param node the node
+ */
+static void compute_Bad(node_t *node) {
+	/* a Bad node unconditionally computes Top */
+	node->type.tv = tarval_top;
+} /* compute_Bad */
+
+/**
+ * (Re-)compute the type for an Unknown node.
+ *
+ * @param node the node
+ */
+static void compute_Unknown(node_t *node) {
+ /* While Unknown nodes should compute Top this is dangerous:
+ * a Top input to a Cond would lead to BOTH control flows unreachable.
+ * While this is correct in the given semantics, it would destroy the Firm
+ * graph.
+ *
+ * It would be safe to compute Top IF it can be assured, that only Cmp
+ * nodes are inputs to Conds. We check that first.
+ * This is the way Frontends typically build Firm, but some optimizations
+ * (cond_eval for instance) might replace them by Phib's...
+ */
+ /* tarval_UNKNOWN is the configurable compromise (Top or Bottom) */
+ node->type.tv = tarval_UNKNOWN;
+} /* compute_Unknown */
+
+/**
+ * (Re-)compute the type for a Jmp node.
+ *
+ * @param node the node
+ */
+static void compute_Jmp(node_t *node) {
+	/* a Jmp is exactly as (un)reachable as the block containing it */
+	node_t *block = get_irn_node(get_nodes_block(node->node));
+
+	node->type = block->type;
+} /* compute_Jmp */
+
+/**
+ * (Re-)compute the type for a Return node.
+ *
+ * @param node the node
+ */
+static void compute_Return(node_t *node) {
+	/* A Return node is NOT dead if it sits in a reachable block; that
+	 * block check already happened in compute(), so simply report
+	 * Reachable here. */
+	node->type.tv = tarval_reachable;
+} /* compute_Return */
+
+/**
+ * (Re-)compute the type for the End node.
+ *
+ * @param node the node
+ */
+static void compute_End(node_t *node) {
+	/* the End node is never dead */
+	node->type.tv = tarval_reachable;
+} /* compute_End */
+
+/**
+ * (Re-)compute the type for a Call.
+ *
+ * @param node the node
+ */
+static void compute_Call(node_t *node) {
+	/* A Call always computes Bottom: its result is never a known
+	 * constant, even when some predecessors are Unknown. */
+	node->type.tv = tarval_bottom;
+} /* compute_Call */
+
+/**
+ * (Re-)compute the type for a SymConst node.
+ *
+ * @param node the node
+ */
+static void compute_SymConst(node_t *node) {
+	ir_node *irn = node->node;
+	node_t *block = get_irn_node(get_nodes_block(irn));
+
+	/* constants in unreachable blocks stay Top */
+	if (block->type.tv == tarval_unreachable) {
+		node->type.tv = tarval_top;
+		return;
+	}
+	if (get_SymConst_kind(irn) == symconst_addr_ent) {
+		/* an entity address is a (symbolic) constant;
+		   symconst_addr_name cannot be handled yet */
+		node->type.sym = get_SymConst_symbol(irn);
+	} else {
+		/* all other kinds: fall back to constant folding */
+		node->type.tv = computed_value(irn);
+	}
+} /* compute_SymConst */
+
+/**
+ * (Re-)compute the type for a Phi node: implements the lattice Meet over
+ * the inputs coming in over reachable control flow edges.
+ *
+ * @param node the node
+ */
+static void compute_Phi(node_t *node) {
+ int i;
+ ir_node *phi = node->node;
+ lattice_elem_t type;
+
+ /* if a Phi is in a unreachable block, its type is TOP */
+ node_t *block = get_irn_node(get_nodes_block(phi));
+
+ if (block->type.tv == tarval_unreachable) {
+ node->type.tv = tarval_top;
+ return;
+ }
+
+ /* Phi implements the Meet operation */
+ type.tv = tarval_top;
+ for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
+ node_t *pred = get_irn_node(get_Phi_pred(phi, i));
+ /* the i-th data input corresponds to the i-th control flow edge */
+ node_t *pred_X = get_irn_node(get_Block_cfgpred(block->node, i));
+
+ if (pred_X->type.tv == tarval_unreachable || pred->type.tv == tarval_top) {
+ /* ignore TOP inputs: We must check here for unreachable blocks,
+ because Firm constants live in the Start Block are NEVER Top.
+ Else, a Phi (1,2) will produce Bottom, even if the 2 for instance
+ comes from a unreachable input. */
+ continue;
+ }
+ if (pred->type.tv == tarval_bottom) {
+ /* Bottom is absorbing for the Meet */
+ node->type.tv = tarval_bottom;
+ return;
+ } else if (type.tv == tarval_top) {
+ /* first constant found */
+ type = pred->type;
+ } else if (type.tv != pred->type.tv) {
+ /* different constants or tarval_bottom */
+ node->type.tv = tarval_bottom;
+ return;
+ }
+ /* else nothing, constants are the same */
+ }
+ node->type = type;
+} /* compute_Phi */
+
+/**
+ * (Re-)compute the type for an Add. Special case: one nodes is a Zero Const.
+ * The zero special case also covers a SymConst operand: 0 + sym == sym.
+ *
+ * @param node the node
+ */
+static void compute_Add(node_t *node) {
+ ir_node *sub = node->node;
+ node_t *l = get_irn_node(get_Add_left(sub));
+ node_t *r = get_irn_node(get_Add_right(sub));
+ lattice_elem_t a = l->type;
+ lattice_elem_t b = r->type;
+ ir_mode *mode;
+
+ if (a.tv == tarval_top || b.tv == tarval_top) {
+ node->type.tv = tarval_top;
+ } else if (a.tv == tarval_bottom || b.tv == tarval_bottom) {
+ node->type.tv = tarval_bottom;
+ } else {
+ /* x + 0 = 0 + x = x, but beware of floating point +0 + -0, so we
+ must call tarval_add() first to handle this case! */
+ if (is_tarval(a.tv)) {
+ if (is_tarval(b.tv)) {
+ node->type.tv = tarval_add(a.tv, b.tv);
+ return;
+ }
+ /* here b is a symbolic constant (entity address) */
+ mode = get_tarval_mode(a.tv);
+ if (a.tv == get_mode_null(mode)) {
+ node->type = b;
+ return;
+ }
+ } else if (is_tarval(b.tv)) {
+ /* here a is a symbolic constant (entity address) */
+ mode = get_tarval_mode(b.tv);
+ if (b.tv == get_mode_null(mode)) {
+ node->type = a;
+ return;
+ }
+ }
+ node->type.tv = tarval_bottom;
+ }
+} /* compute_Add */
+
+/**
+ * (Re-)compute the type for a Sub. Special case: both nodes are congruent.
+ *
+ * @param node the node
+ */
+static void compute_Sub(node_t *node) {
+	ir_node *sub = node->node;
+	node_t *l = get_irn_node(get_Sub_left(sub));
+	node_t *r = get_irn_node(get_Sub_right(sub));
+	lattice_elem_t a = l->type;
+	lattice_elem_t b = r->type;
+	tarval *tv;
+
+	if (a.tv == tarval_top || b.tv == tarval_top) {
+		node->type.tv = tarval_top;
+	} else if (is_con(a) && is_con(b)) {
+		if (is_tarval(a.tv) && is_tarval(b.tv)) {
+			node->type.tv = tarval_sub(a.tv, b.tv, get_irn_mode(sub));
+		} else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
+			/* x - 0 == x, valid even if x is a symbolic constant */
+			node->type = a;
+		} else {
+			/* BUGFIX: the old code also mapped 0 - sym onto sym, but
+			   0 - x is -x, NOT x (unlike Add/Eor, Sub is not commutative
+			   with a zero-neutral left operand), so be conservative. */
+			node->type.tv = tarval_bottom;
+		}
+	} else if (r->part == l->part &&
+	           (!mode_is_float(get_irn_mode(l->node)))) {
+		/*
+		 * BEWARE: a - a is NOT always 0 for floating Point values, as
+		 * NaN op NaN = NaN, so we must check this here.
+		 */
+		ir_mode *mode = get_irn_mode(sub);
+		tv = get_mode_null(mode);
+
+		/* if the node was ONCE evaluated by all constants, but now
+		   this breaks AND we get from the argument partitions a different
+		   result, switch to bottom.
+		   This happens because initially all nodes are in the same partition ... */
+		if (node->type.tv != tv)
+			tv = tarval_bottom;
+		node->type.tv = tv;
+	} else {
+		node->type.tv = tarval_bottom;
+	}
+} /* compute_Sub */
+
+/**
+ * (Re-)compute the type for an Eor. Special case: both nodes are congruent.
+ * Note that 0 ^ x == x ^ 0 == x holds for both operand positions, so the
+ * null special case covers a SymConst on either side.
+ *
+ * @param node the node
+ */
+static void compute_Eor(node_t *node) {
+ ir_node *eor = node->node;
+ node_t *l = get_irn_node(get_Eor_left(eor));
+ node_t *r = get_irn_node(get_Eor_right(eor));
+ lattice_elem_t a = l->type;
+ lattice_elem_t b = r->type;
+ tarval *tv;
+
+ if (a.tv == tarval_top || b.tv == tarval_top) {
+ node->type.tv = tarval_top;
+ } else if (is_con(a) && is_con(b)) {
+ if (is_tarval(a.tv) && is_tarval(b.tv)) {
+ node->type.tv = tarval_eor(a.tv, b.tv);
+ } else if (is_tarval(a.tv) && tarval_is_null(a.tv)) {
+ node->type = b;
+ } else if (is_tarval(b.tv) && tarval_is_null(b.tv)) {
+ node->type = a;
+ } else {
+ node->type.tv = tarval_bottom;
+ }
+ } else if (r->part == l->part) {
+ /* x ^ x == 0 holds for ALL modes, including floats */
+ ir_mode *mode = get_irn_mode(eor);
+ tv = get_mode_null(mode);
+
+ /* if the node was ONCE evaluated by all constants, but now
+ this breaks AND we get from the argument partitions a different
+ result, switch to bottom.
+ This happens because initially all nodes are in the same partition ... */
+ if (node->type.tv != tv)
+ tv = tarval_bottom;
+ node->type.tv = tv;
+ } else {
+ node->type.tv = tarval_bottom;
+ }
+} /* compute_Eor */
+
+/**
+ * (Re-)compute the type for Cmp.
+ *
+ * @param node the node
+ */
+static void compute_Cmp(node_t *node) {
+	ir_node *cmp = node->node;
+	node_t *l = get_irn_node(get_Cmp_left(cmp));
+	node_t *r = get_irn_node(get_Cmp_right(cmp));
+	lattice_elem_t a = l->type;
+	lattice_elem_t b = r->type;
+
+	if (a.tv == tarval_top || b.tv == tarval_top) {
+		/* Top is absorbing */
+		node->type.tv = tarval_top;
+	} else if (l->part == r->part || (is_con(a) && is_con(b))) {
+		/* congruent operands, or two constants: the compare can
+		   probably be decided */
+		node->type.tv = tarval_b_true;
+	} else {
+		node->type.tv = tarval_bottom;
+	}
+} /* compute_Cmp */
+
+/**
+ * (Re-)compute the type for a Proj(Cmp).
+ *
+ * @param node the node
+ * @param cmp the predecessor Cmp node
+ */
+static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
+ ir_node *proj = node->node;
+ node_t *l = get_irn_node(get_Cmp_left(cmp));
+ node_t *r = get_irn_node(get_Cmp_right(cmp));
+ lattice_elem_t a = l->type;
+ lattice_elem_t b = r->type;
+ pn_Cmp pnc = get_Proj_proj(proj);
+ tarval *tv;
+
+ if (a.tv == tarval_top || b.tv == tarval_top) {
+ node->type.tv = tarval_undefined;
+ } else if (is_con(a) && is_con(b)) {
+ /* both constants: let constant folding decide */
+ default_compute(node);
+ } else if (r->part == l->part &&
+ (!mode_is_float(get_irn_mode(l->node)) || pnc == pn_Cmp_Lt || pnc == pn_Cmp_Gt)) {
+ /*
+ * BEWARE: a == a is NOT always True for floating Point values, as
+ * NaN != NaN is defined, so we must check this here.
+ * (Lt and Gt are safe: x < x and x > x are false even for NaN.)
+ */
+ /* x pnc x is true iff the relation includes equality */
+ tv = pnc & pn_Cmp_Eq ? tarval_b_true: tarval_b_false;
+
+ /* if the node was ONCE evaluated by all constants, but now
+ this breaks AND we get from the argument partitions a different
+ result, switch to bottom.
+ This happens because initially all nodes are in the same partition ... */
+ if (node->type.tv != tv)
+ tv = tarval_bottom;
+ node->type.tv = tv;
+ } else {
+ node->type.tv = tarval_bottom;
+ }
+} /* compute_Proj_Cmp */
+
+/**
+ * (Re-)compute the type for a Proj(Cond).
+ *
+ * @param node the node
+ * @param cond the predecessor Cond node
+ */
+static void compute_Proj_Cond(node_t *node, ir_node *cond) {
+ ir_node *proj = node->node;
+ long pnc = get_Proj_proj(proj);
+ ir_node *sel = get_Cond_selector(cond);
+ node_t *selector = get_irn_node(sel);
+
+ /*
+ * Note: it is crucial for the monotony that the Proj(Cond)
+ * are evaluates after all predecessors of the Cond selector are
+ * processed.
+ * Example
+ *
+ * if (x != 0)
+ *
+ * Due to the fact that 0 is a const, the Cmp gets immediately
+ * on the cprop list. It will be evaluated before x is evaluated,
+ * might leaving x as Top. When later x is evaluated, the Cmp
+ * might change its value.
+ * BUT if the Cond is evaluated before this happens, Proj(Cond, FALSE)
+ * gets R, and later changed to F if Cmp is evaluated to True!
+ *
+ * We prevent this by putting Conds in an extra cprop_X queue, which
+ * gets evaluated after the cprop queue is empty.
+ *
+ * Note that this even happens with Click's original algorithm, if
+ * Cmp(x, 0) is evaluated to True first and later changed to False
+ * if x was Top first and later changed to a Const ...
+ * It is unclear how Click solved that problem ...
+ *
+ * However, in rare cases even this does not help, if a Top reaches
+ * a compare through a Phi, than Proj(Cond) is evaluated changing
+ * the type of the Phi to something other.
+ * So, we take the last resort and bind the type to R once
+ * it is calculated.
+ *
+ * (This might be even the way Click works around the whole problem).
+ *
+ * Finally, we may miss some optimization possibilities due to this:
+ *
+ * x = phi(Top, y)
+ * if (x == 0)
+ *
+ * If Top reaches the if first, than we decide for != here.
+ * If y later is evaluated to 0, we cannot revert this decision
+ * and must live with both outputs enabled. If this happens,
+ * we get an unresolved if (true) in the code ...
+ *
+ * In Click's version where this decision is done at the Cmp,
+ * the Cmp is NOT optimized away than (if y evaluated to 1
+ * for instance) and we get a if (1 == 0) here ...
+ *
+ * Both solutions are suboptimal.
+ * At least, we could easily detect this problem and run
+ * cf_opt() (or even combo) again :-(
+ */
+ /* once Reachable, the type is bound and never recomputed (see above) */
+ if (node->type.tv == tarval_reachable)
+ return;
+
+ if (get_irn_mode(sel) == mode_b) {
+ /* an IF */
+ if (pnc == pn_Cond_true) {
+ if (selector->type.tv == tarval_b_false) {
+ node->type.tv = tarval_unreachable;
+ } else if (selector->type.tv == tarval_b_true) {
+ node->type.tv = tarval_reachable;
+ } else if (selector->type.tv == tarval_bottom) {
+ /* selector not constant: both outputs possible */
+ node->type.tv = tarval_reachable;
+ } else {
+ assert(selector->type.tv == tarval_top);
+ /* NOTE(review): both branches below assign the same value;
+ presumably intentional (a Top-based condition is treated
+ as "!=", so the TRUE output stays unreachable either
+ way) — confirm against the FALSE-output case below. */
+ if (tarval_UNKNOWN == tarval_top) {
+ /* any condition based on Top is "!=" */
+ node->type.tv = tarval_unreachable;
+ } else {
+ node->type.tv = tarval_unreachable;
+ }
+ }
+ } else {
+ assert(pnc == pn_Cond_false);
+
+ if (selector->type.tv == tarval_b_false) {
+ node->type.tv = tarval_reachable;
+ } else if (selector->type.tv == tarval_b_true) {
+ node->type.tv = tarval_unreachable;
+ } else if (selector->type.tv == tarval_bottom) {
+ /* selector not constant: both outputs possible */
+ node->type.tv = tarval_reachable;
+ } else {
+ assert(selector->type.tv == tarval_top);
+ if (tarval_UNKNOWN == tarval_top) {
+ /* any condition based on Top is "!=" */
+ node->type.tv = tarval_reachable;
+ } else {
+ node->type.tv = tarval_unreachable;
+ }
+ }
+ }
+ } else {
+ /* an SWITCH */
+ if (selector->type.tv == tarval_bottom) {
+ /* selector not constant: every case is possible */
+ node->type.tv = tarval_reachable;
+ } else if (selector->type.tv == tarval_top) {
+ if (tarval_UNKNOWN == tarval_top &&
+ pnc == get_Cond_defaultProj(cond)) {
+ /* a switch based of Top is always "default" */
+ node->type.tv = tarval_reachable;
+ } else {
+ node->type.tv = tarval_unreachable;
+ }
+ } else {
+ long value = get_tarval_long(selector->type.tv);
+ if (pnc == get_Cond_defaultProj(cond)) {
+ /* default switch, have to check ALL other cases */
+ int i;
+
+ for (i = get_irn_n_outs(cond) - 1; i >= 0; --i) {
+ ir_node *succ = get_irn_out(cond, i);
+
+ if (succ == proj)
+ continue;
+ if (value == get_Proj_proj(succ)) {
+ /* we found a match, will NOT take the default case */
+ node->type.tv = tarval_unreachable;
+ return;
+ }
+ }
+ /* all cases checked, no match, will take default case */
+ node->type.tv = tarval_reachable;
+ } else {
+ /* normal case */
+ node->type.tv = value == pnc ? tarval_reachable : tarval_unreachable;
+ }
+ }
+ }
+} /* compute_Proj_Cond */
+
+/**
+ * (Re-)compute the type for a Proj-Node: dispatches on the mode and on
+ * the opcode of the projected predecessor.
+ *
+ * @param node the node
+ */
+static void compute_Proj(node_t *node) {
+ ir_node *proj = node->node;
+ ir_mode *mode = get_irn_mode(proj);
+ node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
+ ir_node *pred = get_Proj_pred(proj);
+
+ if (block->type.tv == tarval_unreachable) {
+ /* a Proj in a unreachable Block stay Top */
+ node->type.tv = tarval_top;
+ return;
+ }
+ /* Cond is excluded: its Projs are handled by compute_Proj_Cond()
+ even when the selector is still Top */
+ if (get_irn_node(pred)->type.tv == tarval_top && !is_Cond(pred)) {
+ /* if the predecessor is Top, its Proj follow */
+ node->type.tv = tarval_top;
+ return;
+ }
+
+ if (mode == mode_M) {
+ /* mode M is always bottom */
+ node->type.tv = tarval_bottom;
+ return;
+ }
+ if (mode != mode_X) {
+ /* a data Proj */
+ if (is_Cmp(pred))
+ compute_Proj_Cmp(node, pred);
+ else
+ default_compute(node);
+ return;
+ }
+ /* handle mode_X nodes */
+
+ switch (get_irn_opcode(pred)) {
+ case iro_Start:
+ /* the Proj_X from the Start is always reachable.
+ However this is already handled at the top. */
+ node->type.tv = tarval_reachable;
+ break;
+ case iro_Cond:
+ compute_Proj_Cond(node, pred);
+ break;
+ default:
+ default_compute(node);
+ }
+} /* compute_Proj */
+
+/**
+ * (Re-)compute the type for a Confirm.
+ *
+ * @param node the node
+ */
+static void compute_Confirm(node_t *node) {
+	ir_node *confirm = node->node;
+
+	if (get_Confirm_cmp(confirm) == pn_Cmp_Eq) {
+		node_t *bound = get_irn_node(get_Confirm_bound(confirm));
+
+		/* the value is confirmed equal to its bound: if the bound is
+		   a constant, the Confirm is that constant */
+		if (is_con(bound->type)) {
+			node->type = bound->type;
+			return;
+		}
+	}
+	/* otherwise a Confirm is just a copy of its value input */
+	node->type = get_irn_node(get_Confirm_value(confirm))->type;
+} /* compute_Confirm */
+
+/**
+ * (Re-)compute the type for a Max.
+ *
+ * @param node the node
+ */
+static void compute_Max(node_t *node) {
+ ir_node *op = node->node;
+ node_t *l = get_irn_node(get_binop_left(op));
+ node_t *r = get_irn_node(get_binop_right(op));
+ lattice_elem_t a = l->type;
+ lattice_elem_t b = r->type;
+
+ if (a.tv == tarval_top || b.tv == tarval_top) {
+ node->type.tv = tarval_top;
+ } else if (is_con(a) && is_con(b)) {
+ /* both nodes are constants, we can probably do something */
+ if (a.tv == b.tv) {
+ /* this case handles SymConsts as well */
+ node->type = a;
+ } else {
+ ir_mode *mode = get_irn_mode(op);
+ tarval *tv_min = get_mode_min(mode);
+
+ /* Max(min, x) == x, works even for SymConst operands */
+ if (a.tv == tv_min)
+ node->type = b;
+ else if (b.tv == tv_min)
+ node->type = a;
+ else if (is_tarval(a.tv) && is_tarval(b.tv)) {
+ /* pick the larger of the two constants */
+ if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
+ node->type.tv = a.tv;
+ else
+ node->type.tv = b.tv;
+ } else {
+ /* mixed tarval/SymConst: cannot be ordered */
+ node->type.tv = tarval_bad;
+ }
+ }
+ } else if (r->part == l->part) {
+ /* both nodes congruent: Max(x, x) == x */
+ node->type = a;
+ } else {
+ node->type.tv = tarval_bottom;
+ }
+} /* compute_Max */
+
+/**
+ * (Re-)compute the type for a Min.
+ *
+ * @param node the node
+ */
+static void compute_Min(node_t *node) {
+	ir_node *op = node->node;
+	node_t *l = get_irn_node(get_binop_left(op));
+	node_t *r = get_irn_node(get_binop_right(op));
+	lattice_elem_t a = l->type;
+	lattice_elem_t b = r->type;
+
+	if (a.tv == tarval_top || b.tv == tarval_top) {
+		node->type.tv = tarval_top;
+	} else if (is_con(a) && is_con(b)) {
+		/* both nodes are constants, we can probably do something */
+		if (a.tv == b.tv) {
+			/* this case handles SymConsts as well */
+			node->type = a;
+		} else {
+			ir_mode *mode = get_irn_mode(op);
+			tarval *tv_max = get_mode_max(mode);
+
+			/* Min(max, x) == x, works even for SymConst operands */
+			if (a.tv == tv_max)
+				node->type = b;
+			else if (b.tv == tv_max)
+				node->type = a;
+			else if (is_tarval(a.tv) && is_tarval(b.tv)) {
+				/* BUGFIX: Min must yield the SMALLER operand; the old
+				   code returned a when a > b, i.e. it computed Max. */
+				if (tarval_cmp(a.tv, b.tv) & pn_Cmp_Gt)
+					node->type.tv = b.tv;
+				else
+					node->type.tv = a.tv;
+			} else {
+				/* mixed tarval/SymConst: cannot be ordered */
+				node->type.tv = tarval_bad;
+			}
+		}
+	} else if (r->part == l->part) {
+		/* both nodes congruent: Min(x, x) == x */
+		node->type = a;
+	} else {
+		node->type.tv = tarval_bottom;
+	}
+} /* compute_Min */
+
+/**
+ * (Re-)compute the type for a given node: common pre-checks, then dispatch
+ * to the opcode-specific transfer function stored in the op's generic slot.
+ *
+ * @param node the node
+ */
+static void compute(node_t *node) {
+ ir_node *irn = node->node;
+ compute_func func;
+
+#ifndef VERIFY_MONOTONE
+ /*
+ * Once a node reaches bottom, the type cannot fall further
+ * in the lattice and we can stop computation.
+ * Do not take this exit if the monotony verifier is
+ * enabled to catch errors.
+ */
+ if (node->type.tv == tarval_bottom)
+ return;
+#endif
+
+ if (is_no_Block(irn)) {
+ /* for pinned nodes, check its control input */
+ if (get_irn_pinned(skip_Proj(irn)) == op_pin_state_pinned) {
+ node_t *block = get_irn_node(get_nodes_block(irn));
+
+ /* a pinned node in an unreachable block stays Top */
+ if (block->type.tv == tarval_unreachable) {
+ node->type.tv = tarval_top;
+ return;
+ }
+ }
+ }
+
+ /* opcode-specific transfer function; NULL means "keep the type" */
+ func = (compute_func)node->node->op->ops.generic;
+ if (func != NULL)
+ func(node);
+} /* compute */
+
+/*
+ * Identity functions: Note that one might think that identity() is just a
+ * synonym for equivalent_node(). While this is true, we cannot use it for the algorithm
+ * here, because it expects that the identity node is one of the inputs, which is NOT
+ * always true for equivalent_node() which can handle (and does sometimes) DAGs.
+ * So, we have our own implementation, which copies some parts of equivalent_node()
+ */
+
+/**
+ * Calculates the Identity for Phi nodes: if all inputs arriving over
+ * reachable edges are in one partition, the Phi is a Follower of that
+ * partition's node; otherwise it is its own identity.
+ */
+static node_t *identity_Phi(node_t *node) {
+ ir_node *phi = node->node;
+ ir_node *block = get_nodes_block(phi);
+ node_t *n_part = NULL;
+ int i;
+
+ for (i = get_Phi_n_preds(phi) - 1; i >= 0; --i) {
+ node_t *pred_X = get_irn_node(get_Block_cfgpred(block, i));
+
+ /* only inputs over reachable control flow edges count */
+ if (pred_X->type.tv == tarval_reachable) {
+ node_t *pred = get_irn_node(get_Phi_pred(phi, i));
+
+ if (n_part == NULL)
+ n_part = pred;
+ else if (n_part->part != pred->part) {
+ /* incongruent inputs, not a follower */
+ return node;
+ }
+ }
+ }
+ /* if n_part is NULL here, all input paths are dead, the Phi computes
+ * tarval_top, is in the TOP partition and should NOT being split! */
+ assert(n_part != NULL);
+ return n_part;
+} /* identity_Phi */
+
+/**
+ * Calculates the Identity for commutative binops with 0 as neutral element
+ * (Add, Or, Eor): x op 0 == 0 op x == x.
+ */
+static node_t *identity_comm_zero_binop(node_t *node) {
+	ir_node *op = node->node;
+	ir_mode *mode = get_irn_mode(op);
+	node_t *left, *right;
+	tarval *zero;
+
+	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
+	if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
+		return node;
+
+	/* note: no input can be tarval_top here, else the binop itself would
+	 * compute Top and would not have been split. */
+	zero = get_mode_null(mode);
+	left = get_irn_node(get_binop_left(op));
+	right = get_irn_node(get_binop_right(op));
+	if (left->type.tv == zero)
+		return right;
+	if (right->type.tv == zero)
+		return left;
+	return node;
+} /* identity_comm_zero_binop */
+
+/**
+ * Calculates the Identity for Shift nodes: x shift 0 == x.
+ */
+static node_t *identity_shift(node_t *node) {
+	ir_node *op = node->node;
+	node_t *right = get_irn_node(get_binop_right(op));
+	/* note: the shift count has its own mode, ask the right operand */
+	ir_mode *mode = get_irn_mode(right->node);
+
+	/* note: no input can be tarval_top here, else the shift itself would
+	 * compute Top and would not have been split. */
+	if (right->type.tv == get_mode_null(mode))
+		return get_irn_node(get_binop_left(op));
+	return node;
+} /* identity_shift */
+
+/**
+ * Calculates the Identity for Mul nodes: x * 1 == 1 * x == x.
+ */
+static node_t *identity_Mul(node_t *node) {
+	ir_node *mul = node->node;
+	ir_mode *mode = get_irn_mode(mul);
+	node_t *left, *right;
+	tarval *one;
+
+	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
+	if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
+		return node;
+
+	/* note: no input can be tarval_top here, else the Mul itself would
+	 * compute Top and would not have been split. */
+	one = get_mode_one(mode);
+	left = get_irn_node(get_Mul_left(mul));
+	right = get_irn_node(get_Mul_right(mul));
+	if (left->type.tv == one)
+		return right;
+	if (right->type.tv == one)
+		return left;
+	return node;
+} /* identity_Mul */
+
+/**
+ * Calculates the Identity for Sub nodes: x - 0 == x.
+ */
+static node_t *identity_Sub(node_t *node) {
+	ir_node *sub = node->node;
+	ir_mode *mode = get_irn_mode(sub);
+	node_t *right;
+
+	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
+	if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
+		return node;
+
+	/* note: no input can be tarval_top here, else the Sub itself would
+	 * compute Top and would not have been split. */
+	right = get_irn_node(get_Sub_right(sub));
+	if (right->type.tv == get_mode_null(mode))
+		return get_irn_node(get_Sub_left(sub));
+	return node;
+} /* identity_Sub */
+
+/**
+ * Calculates the Identity for And nodes: x & all-ones == all-ones & x == x.
+ */
+static node_t *identity_And(node_t *node) {
+	ir_node *and = node->node;
+	node_t *left = get_irn_node(get_And_left(and));
+	node_t *right = get_irn_node(get_And_right(and));
+	/* the neutral element of And is the all-one bit pattern */
+	tarval *neutral = get_mode_all_one(get_irn_mode(and));
+
+	/* note: no input can be tarval_top here, else the And itself would
+	 * compute Top and would not have been split. */
+	if (left->type.tv == neutral)
+		return right;
+	if (right->type.tv == neutral)
+		return left;
+	return node;
+} /* identity_And */
+
+/**
+ * Calculates the Identity for Confirm nodes: a Confirm never changes its
+ * value input, so it is always a copy of it.
+ */
+static node_t *identity_Confirm(node_t *node) {
+	return get_irn_node(get_Confirm_value(node->node));
+} /* identity_Confirm */
+
+/**
+ * Calculates the Identity for Mux nodes: if both values are congruent,
+ * the Mux is a copy of either one regardless of the selector.
+ */
+static node_t *identity_Mux(node_t *node) {
+ ir_node *mux = node->node;
+ node_t *t = get_irn_node(get_Mux_true(mux));
+ node_t *f = get_irn_node(get_Mux_false(mux));
+ /*node_t *sel; */
+
+ if (t->part == f->part)
+ return t;
+
+ /* for now, the 1-input identity is not supported */
+#if 0
+ sel = get_irn_node(get_Mux_sel(mux));
+
+ /* Mux sel input is mode_b, so it is always a tarval */
+ if (sel->type.tv == tarval_b_true)
+ return t;
+ if (sel->type.tv == tarval_b_false)
+ return f;
+#endif
+ return node;
+} /* identity_Mux */
+
+/**
+ * Calculates the Identity for Min nodes: Min(x, x) == x and
+ * Min(max, x) == Min(x, max) == x.
+ */
+static node_t *identity_Min(node_t *node) {
+	ir_node *op = node->node;
+	node_t *left = get_irn_node(get_binop_left(op));
+	node_t *right = get_irn_node(get_binop_right(op));
+	tarval *tv_max;
+
+	/* congruent operands: Min is a copy of either one */
+	if (left->part == right->part)
+		return left;
+
+	/* the mode maximum is the neutral element of Min; works even with NaN */
+	tv_max = get_mode_max(get_irn_mode(op));
+	if (left->type.tv == tv_max)
+		return right;
+	if (right->type.tv == tv_max)
+		return left;
+	return node;
+} /* identity_Min */
+
+/**
+ * Calculates the Identity for Max nodes.
+ */
+static node_t *identity_Max(node_t *node) {
+ ir_node *op = node->node;
+ node_t *a = get_irn_node(get_binop_left(op));
+ node_t *b = get_irn_node(get_binop_right(op));
+ ir_mode *mode = get_irn_mode(op);
+ tarval *tv_min;
+
+ if (a->part == b->part) {
+ /* leader of multiple predecessors */
+ return a;
+ }
+
+ /* works even with NaN */
+ tv_min = get_mode_min(mode);
+ if (a->type.tv == tv_min)
+ return b;
+ if (b->type.tv == tv_min)
+ return a;
+ return node;
+} /* identity_Max */
+
+/**
+ * Calculates the Identity for nodes.
+ */
+static node_t *identity(node_t *node) {
+ ir_node *irn = node->node;
+
+ switch (get_irn_opcode(irn)) {
+ case iro_Phi:
+ return identity_Phi(node);
+ case iro_Mul:
+ return identity_Mul(node);
+ case iro_Add:
+ case iro_Or:
+ case iro_Eor:
+ return identity_comm_zero_binop(node);
+ case iro_Shr:
+ case iro_Shl:
+ case iro_Shrs:
+ case iro_Rotl:
+ return identity_shift(node);
+ case iro_And:
+ return identity_And(node);
+ case iro_Sub:
+ return identity_Sub(node);
+ case iro_Confirm:
+ return identity_Confirm(node);
+ case iro_Mux:
+ return identity_Mux(node);
+ case iro_Min:
+ return identity_Min(node);
+ case iro_Max:
+ return identity_Max(node);
+ default:
+ return node;
+ }
+} /* identity */
+
/**
 * Node follower is a (new) follower of leader, segregate Leader
 * out edges: move the def-use edge leading to follower out of the
 * (sorted) leader region of leader's out array into the follower
 * region directly behind it.
 *
 * @param follower  the follower IR node
 * @param leader    the leader node whose out edges are segregated
 */
static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader) {
	ir_node *l = leader->node;
	int j, i, n = get_irn_n_outs(l);

	DB((dbg, LEVEL_2, "%+F is a follower of %+F\n", follower, leader->node));
	/* The leader edges must remain sorted, but follower edges can
	   be unsorted. */
	/* NOTE(review): out[] appears to be indexed from 1 here, with
	 * indices 1..n_followers holding follower edges and the rest the
	 * sorted leader edges — confirm against the ir_node out-edge layout. */
	for (i = leader->n_followers + 1; i <= n; ++i) {
		if (l->out[i].use == follower) {
			/* found the edge to follower: save it ... */
			ir_def_use_edge t = l->out[i];

			/* ... shift the leader edges before it up by one,
			 * preserving their order ... */
			for (j = i - 1; j >= leader->n_followers + 1; --j)
				l->out[j + 1] = l->out[j];
			/* ... and store the saved edge as the last follower edge */
			++leader->n_followers;
			l->out[leader->n_followers] = t;
			break;
		}
	}
} /* segregate_def_use_chain_1 */
+
+/**
+ * Node follower is a (new) follower segregate its Leader
+ * out edges.
+ *
+ * @param follower the follower IR node
+ */
+static void segregate_def_use_chain(const ir_node *follower) {
+ int i;
+
+ for (i = get_irn_arity(follower) - 1; i >= 0; --i) {
+ node_t *pred = get_irn_node(get_irn_n(follower, i));
+
+ segregate_def_use_chain_1(follower, pred);
+ }
+} /* segregate_def_use_chain */
+
+/**
+ * Propagate constant evaluation.
+ *
+ * @param env the environment
+ */
+static void propagate(environment_t *env) {
+ partition_t *X, *Y;
+ node_t *x;
+ lattice_elem_t old_type;
+ node_t *fallen;
+ unsigned n_fallen, old_type_was_T_or_C;
+ int i;
+
+ while (env->cprop != NULL) {
+ void *oldopcode = NULL;
+
+ /* remove the first partition X from cprop */
+ X = env->cprop;
+ X->on_cprop = 0;
+ env->cprop = X->cprop_next;
+
+ old_type_was_T_or_C = X->type_is_T_or_C;
+
+ DB((dbg, LEVEL_2, "Propagate type on part%d\n", X->nr));
+ fallen = NULL;
+ n_fallen = 0;
+ for (;;) {
+ int cprop_empty = list_empty(&X->cprop);
+ int cprop_X_empty = list_empty(&X->cprop_X);
+
+ if (cprop_empty && cprop_X_empty) {
+ /* both cprop lists are empty */
+ break;
+ }
+
+ /* remove the first Node x from X.cprop */
+ if (cprop_empty) {
+ /* Get a node from the cprop_X list only if
+ * all data nodes are processed.
+ * This ensures, that all inputs of the Cond
+ * predecessor are processed if its type is still Top.
+ */
+ x = list_entry(X->cprop_X.next, node_t, cprop_list);
+ } else {
+ x = list_entry(X->cprop.next, node_t, cprop_list);
+ }
+
+ //assert(x->part == X);
+ list_del(&x->cprop_list);
+ x->on_cprop = 0;
+
+ if (x->is_follower && identity(x) == x) {
+ /* check the opcode first */
+ if (oldopcode == NULL) {
+ oldopcode = lambda_opcode(get_first_node(X), env);
+ }
+ if (oldopcode != lambda_opcode(x, env)) {
+ if (x->on_fallen == 0) {
+ /* different opcode -> x falls out of this partition */
+ x->next = fallen;
+ x->on_fallen = 1;
+ fallen = x;
+ ++n_fallen;
+ DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
+ }
+ }
+
+ /* x will make the follower -> leader transition */
+ follower_to_leader(x);
+ }
+
+ /* compute a new type for x */
+ old_type = x->type;
+ DB((dbg, LEVEL_3, "computing type of %+F\n", x->node));
+ compute(x);
+ if (x->type.tv != old_type.tv) {
+ DB((dbg, LEVEL_2, "node %+F has changed type from %+F to %+F\n", x->node, old_type, x->type));
+ verify_type(old_type, x);
+
+ if (x->on_fallen == 0) {
+ /* Add x to fallen. Nodes might fall from T -> const -> _|_, so check that they are
+ not already on the list. */
+ x->next = fallen;
+ x->on_fallen = 1;
+ fallen = x;
+ ++n_fallen;
+ DB((dbg, LEVEL_2, "Add node %+F to fallen\n", x->node));
+ }
+ for (i = get_irn_n_outs(x->node) - 1; i >= 0; --i) {
+ ir_node *succ = get_irn_out(x->node, i);
+ node_t *y = get_irn_node(succ);
+
+ /* Add y to y.partition.cprop. */
+ add_to_cprop(y, env);
+ }
+ }
+ }