+static ir_node *equivalent_node_Block(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ /* The Block constructor does not call optimize, but mature_immBlock
+ calls the optimization. */
+ assert(get_Block_matured(n));
+
+ /* Straightening: a single entry Block following a single exit Block
+ can be merged, if it is not the Start block. */
+ /* !!! Beware, all Phi-nodes of n must have been optimized away.
+ This should be true, as the block is matured before optimize is called.
+ But what about Phi-cycles with the Phi0/Id that could not be resolved?
+ Remaining Phi nodes are just Ids. */
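+ /* Two sub-cases below: the Jmp predecessor lies in n itself (a dead self
+ cycle, so n becomes Bad) or in another block (n is replaced by that block,
+ provided control flow straightening is enabled). */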
+ if ((get_Block_n_cfgpreds(n) == 1) &&
+ (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
+ ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
+ if (predblock == oldn) {
+ /* The Jmp jumps into its own block: a self cycle, so the block is dead. */
+ n = new_Bad(); DBG_OPT_DEAD;
+ } else if (get_opt_control_flow_straightening()) {
+ n = predblock; DBG_OPT_STG;
+ }
+ }
+ else if ((get_Block_n_cfgpreds(n) == 1) &&
+ (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
+ ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
+ if (predblock == oldn) {
+ /* The Proj of the Cond jumps into its own block: a self cycle, so the block is dead. */
+ n = new_Bad(); DBG_OPT_DEAD;
+ }
+ }
+ else if ((get_Block_n_cfgpreds(n) == 2) &&
+ (get_opt_control_flow_weak_simplification())) {
+ /* Test whether a Cond jumps twice into this block.
+ @@@ We could also handle blocks with more predecessors, using two loops
+ that find two preds coming from the same Cond. */
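+ /* Shape being matched: both control flow predecessors of n are Projs of
+ one and the same Cond with a mode_b selector, i.e. the true and the false
+ exit of that Cond both end up here. */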
+ ir_node *a = get_Block_cfgpred(n, 0);
+ ir_node *b = get_Block_cfgpred(n, 1);
+
+ if ((get_irn_op(a) == op_Proj) &&
+ (get_irn_op(b) == op_Proj) &&
+ (get_Proj_pred(a) == get_Proj_pred(b)) &&
+ (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
+ (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
+ /* Also a single entry Block following a single exit Block. Phis have
+ twice the same operand and will be optimized away. */
+ n = get_nodes_block(a); DBG_OPT_IFSIM;
+ }
+ } else if (get_opt_unreachable_code() &&
+ (n != current_ir_graph->start_block) &&
+ (n != current_ir_graph->end_block) ) {
+ int i;
+ /* If all inputs are dead, this block is dead too, except if it is
+ the start or end block. This is a step of unreachable code
+ elimination */
+ for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
+ if (!is_Bad(get_Block_cfgpred(n, i))) break;
+ }
+ if (i == get_Block_n_cfgpreds(n))
+ n = new_Bad();
+ }
+
+ return n;
+}
+
+/**
+ * Returns an equivalent node for a Jmp: a Bad :-)
+ * Of course this only happens if the Block of the Jmp is Bad.
+ */
+static ir_node *equivalent_node_Jmp(ir_node *n)
+{
+ /* GL: Why not same for op_Raise?? */
+ /* unreachable code elimination */
+ if (is_Bad(get_nodes_block(n)))
+ n = new_Bad();
+
+ return n;
+}
+
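+/**
+ * A Cond is never replaced by an equivalent node here, since evaluating it
+ * means creating a new node (a Jmp); that happens in transform_node.
+ */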
+static ir_node *equivalent_node_Cond(ir_node *n)
+{
+ /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
+ See cases for iro_Cond and iro_Proj in transform_node. */
+ return n;
+}
+
+/**
+ * Use algebraic simplification a v a = a.
+ */
+static ir_node *equivalent_node_Or(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_Or_left(n);
+ ir_node *b = get_Or_right(n);
+
+ /* remove a v a */
+ if (a == b) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
+
+ return n;
+}
+
+/**
+ * Optimize operations that are commutative and have a neutral element 0,
+ * so a op 0 = 0 op a = a.
+ */
+static ir_node *equivalent_node_neutral_zero(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_binop_left(n);
+ ir_node *b = get_binop_right(n);
+
+ tarval *tv;
+ ir_node *on;
+
+ /* After constant folding at most one predecessor can still be a constant.
+ Find this predecessor's value and remember the other node: */
+ if ((tv = computed_value(a)) != tarval_bad) {
+ on = b;
+ } else if ((tv = computed_value(b)) != tarval_bad) {
+ on = a;
+ } else
+ return n;
+
+ /* If this predecessor's constant value is zero, the operation is
+ unnecessary. Remove it: */
+ if (classify_tarval (tv) == TV_CLASSIFY_NULL) {
+ n = on; DBG_OPT_ALGSIM1;
+ }
+
+ return n;
+}
+
+#define equivalent_node_Add equivalent_node_neutral_zero
+#define equivalent_node_Eor equivalent_node_neutral_zero
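+/* Hence Add(a, 0), Add(0, a), Eor(a, 0) and Eor(0, a) all collapse to a. */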
+
+/**
+ * Optimize operations that are not commutative but have a neutral element 0
+ * on the right, so a op 0 = a.
+ */
+static ir_node *equivalent_node_left_zero(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_binop_left(n);
+ ir_node *b = get_binop_right(n);
+
+ if (classify_tarval(computed_value(b)) == TV_CLASSIFY_NULL) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
+
+ return n;
+}
+
+#define equivalent_node_Sub equivalent_node_left_zero
+#define equivalent_node_Shl equivalent_node_left_zero
+#define equivalent_node_Shr equivalent_node_left_zero
+#define equivalent_node_Shrs equivalent_node_left_zero
+#define equivalent_node_Rot equivalent_node_left_zero
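+/* Hence Sub(a, 0), Shl(a, 0), Shr(a, 0), Shrs(a, 0) and Rot(a, 0) all collapse to a. */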
+
+/**
+ * A symmetric unop, i.e. op(op(n)) = n.
+ */
+static ir_node *equivalent_node_symmetric_unop(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ /* optimize symmetric unop */
+ if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
+ n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
+ }
+ return n;
+}
+
+/* NotNot x == x */
+#define equivalent_node_Not equivalent_node_symmetric_unop
+
+/* --x == x */ /* ??? Is this possible, or can --x raise an
+ out-of-bounds exception if min != -max? */
+#define equivalent_node_Minus equivalent_node_symmetric_unop
+
+/**
+ * Optimize a * 1 = 1 * a = a.
+ */
+static ir_node *equivalent_node_Mul(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_Mul_left(n);
+ ir_node *b = get_Mul_right(n);
+
+ /* Mul is commutative and also has a neutral element, namely 1. */
+ if (classify_tarval (computed_value (a)) == TV_CLASSIFY_ONE) {
+ n = b; DBG_OPT_ALGSIM1;
+ } else if (classify_tarval (computed_value (b)) == TV_CLASSIFY_ONE) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
+ return n;
+}
+
+/**
+ * Optimize a / 1 = a.
+ */
+static ir_node *equivalent_node_Div(ir_node *n)
+{
+ ir_node *a = get_Div_left(n);
+ ir_node *b = get_Div_right(n);
+
+ /* Div is not commutative. */
+ if (classify_tarval(computed_value(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
+ /* Turn Div into a tuple (mem, bad, a) */
+ ir_node *mem = get_Div_mem(n);
+ turn_into_tuple(n, 3);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Div_res, a);
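+ /* The Projs that picked the Div's results are now Projs of a Tuple;
+ equivalent_node_Proj below forwards each of them to mem, Bad or a. */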
+ }
+ return n;
+}
+
+/**
+ * Optimize a & 0b1...1 = 0b1...1 & a = a & a = a.
+ */
+static ir_node *equivalent_node_And(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_And_left(n);
+ ir_node *b = get_And_right(n);
+
+ if (a == b) {
+ n = a; /* And is idempotent: a & a == a */
+ } else if (classify_tarval(computed_value(a)) == TV_CLASSIFY_ALL_ONE) {
+ n = b;
+ } else if (classify_tarval(computed_value(b)) == TV_CLASSIFY_ALL_ONE) {
+ n = a;
+ }
+ if (n != oldn) DBG_OPT_ALGSIM1;
+ return n;
+}
+
+/**
+ * Try to remove useless Conv nodes.
+ */
+static ir_node *equivalent_node_Conv(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a = get_Conv_op(n);
+ ir_node *b;
+
+ ir_mode *n_mode = get_irn_mode(n);
+ ir_mode *a_mode = get_irn_mode(a);
+
+ if (n_mode == a_mode) { /* No Conv necessary */
+ n = a; DBG_OPT_ALGSIM3;
+ } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
+ ir_mode *b_mode;
+
+ b = get_Conv_op(a);
+ n_mode = get_irn_mode(n);
+ b_mode = get_irn_mode(b);
+
+ if (n_mode == b_mode) {
+ if (n_mode == mode_b) {
+ n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM1;
+ }
+ else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
+ if (smaller_mode(b_mode, a_mode)){
+ n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ DBG_OPT_ALGSIM1;
+ }
+ }
+ }
+ }
+ return n;
+}
+
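+/**
+ * Optimize a Phi node: a Phi in a Bad block or in the Start block becomes Bad;
+ * Bad control flow inputs become Bad values; a Phi with only one distinct
+ * non-Bad, non-self input folds to that value.
+ */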
+static ir_node *equivalent_node_Phi(ir_node *n)
+{
+ /* Several optimizations:
+ - no Phi in start block.
+ - remove Id operators that are inputs to Phi
+ - fold Phi-nodes, iff they have only one predecessor except
+ themselves.
+ */
+ int i, n_preds;
+
+ ir_node *oldn = n;
+ ir_node *block = NULL; /* to shut up gcc */
+ ir_node *first_val = NULL; /* to shut up gcc */
+ ir_node *scnd_val = NULL; /* to shut up gcc */
+
+ if (!get_opt_normalize()) return n;
+
+ n_preds = get_Phi_n_preds(n);
+
+ block = get_nodes_block(n);
+ /* @@@ to be removed, but this should always hold anyway:
+ assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
+ if ((is_Bad(block)) || /* Control dead */
+ (block == current_ir_graph->start_block)) /* There should be no Phi nodes */
+ return new_Bad(); /* in the Start Block. */
+
+ if (n_preds == 0) return n; /* Phi of dead Region without predecessors. */
+
+#if 0
+ /* first we test for a special case: */
+ /* Confirm is a special node fixing additional information for a
+ value that is known at a certain point. This is useful for
+ dataflow analysis. */
+ if (n_preds == 2) {
+ ir_node *a = get_Phi_pred(n, 0);
+ ir_node *b = get_Phi_pred(n, 1);
+ if ( (get_irn_op(a) == op_Confirm)
+ && (get_irn_op(b) == op_Confirm)
+ && follow_Id (get_irn_n(a, 0) == get_irn_n(b, 0))
+ && (get_irn_n(a, 1) == get_irn_n (b, 1))
+ && (a->data.num == (~b->data.num & irpn_True) )) {
+ return get_irn_n(a, 0);
+ }
+ }
+#endif
+
+ /* If the Block has a Bad pred, we also have one. */
+ for (i = 0; i < n_preds; ++i)
+ if (is_Bad (get_Block_cfgpred(block, i)))
+ set_Phi_pred(n, i, new_Bad());
+
+ /* Find first non-self-referencing input */
+ for (i = 0; i < n_preds; ++i) {
+ first_val = get_Phi_pred(n, i);
+ if ( (first_val != n) /* not self pointer */
+#if 1
+ && (get_irn_op(first_val) != op_Bad)
+#endif
+ ) { /* value not dead */
+ break; /* then found first value. */
+ }
+ }
+
+ /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
+ if (i >= n_preds) { return new_Bad(); }
+
+ scnd_val = NULL;
+
+ /* Scan the remaining inputs for a second value that differs from
+ first_val, is not the Phi itself and is not Bad. */
+ while (++i < n_preds) {
+ scnd_val = get_Phi_pred(n, i);
+ if ( (scnd_val != n)
+ && (scnd_val != first_val)
+#if 1
+ && (get_irn_op(scnd_val) != op_Bad)
+#endif
+ ) {
+ break;
+ }
+ }
+
+ /* Fold if there is no second distinct non-self-referencing input. */
+ if (i >= n_preds) {
+ n = first_val; DBG_OPT_PHI;
+ } else {
+ /* skip the remaining Ids (done in get_Phi_pred). */
+ /* superfluous, since we walk all to propagate Block's Bads.
+ while (++i < n_preds) get_Phi_pred(n, i); */
+ }
+ return n;
+}
+
+/**
+ * Optimize Loads after Store.
+ *
+ * @todo FAILS for volatile entities
+ */
+static ir_node *equivalent_node_Load(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ /* remove unnecessary Load. */
+ ir_node *a = skip_Proj(get_Load_mem(n));
+ ir_node *b = get_Load_ptr(n);
+ ir_node *c;
+
+ /* TODO: check for volatile */
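+ /* This only matches a direct read after write: the Store must be the
+ Load's immediate memory predecessor (modulo a Proj) and use the same
+ pointer. */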
+ if (get_irn_op(a) == op_Store && get_Store_ptr(a) == b) {
+ /* We load immediately after a store -- a read after write. */
+ ir_node *mem = get_Load_mem(n);
+
+ c = get_Store_value(a);
+ turn_into_tuple(n, 3);
+ set_Tuple_pred(n, pn_Load_M, mem);
+ set_Tuple_pred(n, pn_Load_res, c);
+ set_Tuple_pred(n, pn_Load_X_except, new_Bad()); DBG_OPT_RAW;
+ }
+ return n;
+}
+
+/**
+ * Optimize Store after Store and Store after Load.
+ *
+ * @todo FAILS for volatile entities
+ */
+static ir_node *equivalent_node_Store(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ /* remove unnecessary store. */
+ ir_node *a = skip_Proj(get_Store_mem(n));
+ ir_node *b = get_Store_ptr(n);
+ ir_node *c = skip_Proj(get_Store_value(n));
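+ /* a: our memory predecessor (Proj skipped), b: the address we store to,
+ c: the value we store (Proj skipped). */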
+
+ if (get_irn_op(a) == op_Store
+ && get_Store_ptr(a) == b
+ && skip_Proj(get_Store_value(a)) == c) {
+ /* We have twice exactly the same store -- a write after write. */
+ n = a; DBG_OPT_WAW;
+ } else if (get_irn_op(c) == op_Load
+ && (a == c || skip_Proj(get_Load_mem(c)) == a)
+ && get_Load_ptr(c) == b ) {
+ /* We just loaded the value from the same memory, i.e., the store
+ doesn't change the memory -- a write after read. */
+ a = get_Store_mem(n);
+ turn_into_tuple(n, 2);
+ set_Tuple_pred(n, pn_Store_M, a);
+ set_Tuple_pred(n, pn_Store_X_except, new_Bad()); DBG_OPT_WAR;
+ }
+ return n;
+}
+
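+/**
+ * A Proj of a Tuple is equivalent to the selected Tuple input; a mode_X
+ * Proj in a Bad block is dead control flow and becomes Bad.
+ */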
+static ir_node *equivalent_node_Proj(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_Proj_pred(n);
+
+ if ( get_irn_op(a) == op_Tuple) {
+ /* Remove the Tuple/Proj combination. */
+ if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {
+ n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE;
+ } else {
+ assert(0 && "Proj index out of range for Tuple"); /* This should not happen! */
+ n = new_Bad();
+ }
+ } else if (get_irn_mode(n) == mode_X &&
+ is_Bad(get_nodes_block(n))) {
+ /* Remove dead control flow -- early gigo. */
+ n = new_Bad();
+ }
+ return n;
+}
+
+/**
+ * Remove Id nodes.
+ */
+static ir_node *equivalent_node_Id(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ n = follow_Id(n); DBG_OPT_ID;
+ return n;
+}
+
+/*
+case iro_Mod, Quot, DivMod
+ DivMod allocates new nodes --> it's treated in transform_node.
+ What about Quot, DivMod?
+*/
+
+/**
+ * equivalent_node() returns a node equivalent to the input n. It skips all
+ * nodes that perform no actual computation, such as Id nodes. It does not
+ * create new nodes, so it is safe to free n if the returned node is not n.
+ * If a node computes a Tuple we cannot just skip it; if the size of the
+ * in array fits, we transform n into a tuple (e.g., Div).
+ */