+/**
+ * Optimize a Mux.
+ */
+static ir_node *equivalent_node_Mux(ir_node *n)
+{
+ ir_node *oldn = n, *sel = get_Mux_sel(n);
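+ /* value_of() returns sel's tarval if it computes to a constant, else tarval_bad */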
+ tarval *ts = value_of(sel);
+
+ /* Mux(true, f, t) == t */
+ if (ts == tarval_b_true) {
+ n = get_Mux_true(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ }
+ /* Mux(false, f, t) == f */
+ else if (ts == tarval_b_false) {
+ n = get_Mux_false(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ }
+ /* Mux(v, x, x) == x */
+ else if (get_Mux_false(n) == get_Mux_true(n)) {
+ n = get_Mux_true(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ }
+ else if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(get_irn_mode(n))) {
+ ir_node *cmp = get_Proj_pred(sel);
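+ /* the Proj number of sel encodes the pn_Cmp relation tested by the Cmp */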
+ long proj_nr = get_Proj_proj(sel);
+ ir_node *b = get_Mux_false(n);
+ ir_node *a = get_Mux_true(n);
+
+ /*
+ * Note: normalization puts the constant on the right side,
+ * so we only have to check one case.
+ *
+ * Note further that these optimizations work even for floating
+ * point with NaNs, because -NaN == NaN.
+ * However, if +0 and -0 are handled differently, we cannot use
+ * the first one.
+ */
+ if (get_irn_op(cmp) == op_Cmp && get_Cmp_left(cmp) == a) {
+ if (classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
+ /* Mux(a CMP 0, X, a) */
+ if (get_irn_op(b) == op_Minus && get_Minus_op(b) == a) {
+ /* Mux(a CMP 0, -a, a) */
+ if (proj_nr == pn_Cmp_Eq) {
+ /* Mux(a == 0, -a, a) ==> -a */
+ n = b;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ else if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
+ /* Mux(a != 0, -a, a) ==> a */
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ }
+ else if (classify_Const(b) == CNST_NULL) {
+ /* Mux(a CMP 0, 0, a) */
+ if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
+ /* Mux(a != 0, 0, a) ==> a */
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ else if (proj_nr == pn_Cmp_Eq) {
+ /* Mux(a == 0, 0, a) ==> 0 */
+ n = b;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ }
+ }
+ }
+ }
+ return n;
+}
+
+/**
+ * Optimize -a CMP -b into b CMP a.
+ * This works only for modes where unary Minus
+ * cannot overflow.
+ * Note that two's-complement integers can overflow,
+ * so this does NOT work for them.
+ */
+static ir_node *equivalent_node_Cmp(ir_node *n)
+{
+ ir_node *left = get_Cmp_left(n);
+ ir_node *right = get_Cmp_right(n);
+
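+ /*
+ * -a CMP -b decides exactly as b CMP a: negating both operands
+ * mirrors the relation, so drop the Minus nodes and swap the
+ * operands instead.
+ */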
+ if (get_irn_op(left) == op_Minus && get_irn_op(right) == op_Minus &&
+ !mode_overflow_on_unary_Minus(get_irn_mode(left))) {
+ left = get_Minus_op(left);
+ right = get_Minus_op(right);
+ set_Cmp_left(n, right);
+ set_Cmp_right(n, left);
+ }
+ return n;
+}
+
+/**
+ * Remove Confirm nodes if the remove-Confirm setting is on.
+ * Replace Confirm(x, '=', Constlike) by Constlike.
+ */
+static ir_node *equivalent_node_Confirm(ir_node *n)
+{
+ ir_node *pred = get_Confirm_value(n);
+ pn_Cmp pnc = get_Confirm_cmp(n);
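+ /* a Confirm asserts that "value pnc bound" holds at this point */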
+
+ if (get_irn_op(pred) == op_Confirm && pnc == get_Confirm_cmp(pred)) {
+ /*
+ * rare case: two identical Confirms one after another,
+ * replace the second one with the first.
+ */
+ n = pred;
+ }
+ if (pnc == pn_Cmp_Eq) {
+ ir_node *bound = get_Confirm_bound(n);
+
+ /*
+ * Optimize a rare case:
+ * Confirm(x, '=', Constlike) ==> Constlike
+ */
+ if (is_irn_constlike(bound)) {
+ DBG_OPT_CONFIRM(n, bound);
+ return bound;
+ }
+ }
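+ /* if Confirm removal is enabled, a Confirm is equivalent to its value */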
+ return get_opt_remove_confirm() ? get_Confirm_value(n) : n;
+}
+
+/**
+ * Optimize CopyB(mem, x, x) into a Nop
+ */
+static ir_node *equivalent_node_CopyB(ir_node *n)
+{
+ ir_node *a = get_CopyB_dst(n);
+ ir_node *b = get_CopyB_src(n);
+
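+ /* copying an entity onto itself leaves memory unchanged */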
+ if (a == b) {
+ /* Turn CopyB into a tuple (mem, bad, bad) */
+ ir_node *mem = get_CopyB_mem(n);
+ turn_into_tuple(n, pn_CopyB_max);
+ set_Tuple_pred(n, pn_CopyB_M, mem);
+ set_Tuple_pred(n, pn_CopyB_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
+ }
+ return n;
+}
+
+/**
+ * Optimize Bound(idx, idx, upper) into idx.
+ */
+static ir_node *equivalent_node_Bound(ir_node *n)
+{
+ ir_node *idx = get_Bound_index(n);
+ ir_node *lower = get_Bound_lower(n);
+ int ret_tuple = 0;
+
+ /* By definition lower < upper, so if idx == lower -->
+ lower <= idx && idx < upper */
+ if (idx == lower) {
+ /* Turn Bound into a tuple (mem, bad, idx, mem) */
+ ret_tuple = 1;
+ }
+ else {
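+ /* idx may be the result Proj of an earlier Bound check */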
+ ir_node *pred = skip_Proj(idx);
+
+ if (get_irn_op(pred) == op_Bound) {
+ /*
+ * idx was bounds-checked previously; that check remains
+ * valid if lower <= pred_lower && pred_upper <= upper.
+ */
+ ir_node *upper = get_Bound_upper(n);
+ if (get_Bound_lower(pred) == lower &&
+ get_Bound_upper(pred) == upper) {
+ /*
+ * One might expect that we simply return the previous
+ * Bound here. However, this would be wrong, as we could
+ * then add an exception Proj at a new location.
+ * So, we must turn it into a tuple.
+ */
+ ret_tuple = 1;
+ }
+ }
+ }
+ if (ret_tuple) {
+ /* Turn Bound into a tuple (mem, bad, idx, mem) */
+ ir_node *mem = get_Bound_mem(n);
+ turn_into_tuple(n, pn_Bound_max);
+ set_Tuple_pred(n, pn_Bound_M_regular, mem);
+ set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Bound_res, idx);
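+ /* the check cannot fail here, so the exceptional memory is just the unchanged input memory */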
+ set_Tuple_pred(n, pn_Bound_M_except, mem);
+ }
+ return n;
+}
+