+ ir_node *lower = get_Bound_lower(bound);
+ ir_node *upper = get_Bound_upper(bound);
+ if (get_Bound_lower(pred) == lower &&
+ get_Bound_upper(pred) == upper &&
+ get_irn_MacroBlock(bound) == get_irn_MacroBlock(pred)) {
+ /*
+ * One could expect that we simply return the previous
+ * Bound here. However, this would be wrong, as we could
+ * add an exception Proj to a new location then.
+ * So, we must turn it into a tuple.
+ */
+ ret_tuple = 1;
+ }
+ }
+ if (ret_tuple) {
+ /* Turn Bound into a tuple (mem, jmp, bad, idx) */
+ switch (get_Proj_proj(proj)) {
+ case pn_Bound_M:
+ DBG_OPT_EXC_REM(proj);
+ proj = get_Bound_mem(bound);
+ break;
+ case pn_Bound_X_except:
+ DBG_OPT_EXC_REM(proj);
+ proj = get_irg_bad(current_ir_graph);
+ break;
+ case pn_Bound_res:
+ proj = idx;
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
+ break;
+ default:
+ /* cannot optimize pn_Bound_X_regular, handled in transform ... */
+ ;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_Bound */
+
+/**
+ * Optimize an exception Proj(Load) with a provably non-null address.
+ *
+ * If only null-pointer exceptions are possible for Load/Store
+ * (get_opt_ldst_only_null_ptr_exceptions()) and the Load address is
+ * known to be non-null, the X_except Proj can never be taken and is
+ * replaced by Bad.  All other Projs are returned unchanged.
+ */
+static ir_node *equivalent_node_Proj_Load(ir_node *proj) {
+	if (get_opt_ldst_only_null_ptr_exceptions()) {
+		if (get_irn_mode(proj) == mode_X) {
+			ir_node *load = get_Proj_pred(proj);
+
+			/* get the Load address */
+			ir_node *addr = get_Load_ptr(load);
+			ir_node *confirm;
+
+			if (value_not_null(addr, &confirm)) {
+				if (get_Proj_proj(proj) == pn_Load_X_except) {
+					/* this Load cannot trap: remove the exception flow */
+					DBG_OPT_EXC_REM(proj);
+					return get_irg_bad(current_ir_graph);
+				}
+			}
+		}
+	}
+	return proj;
+} /* equivalent_node_Proj_Load */
+
+/**
+ * Optimize an exception Proj(Store) with a provably non-null address.
+ *
+ * If only null-pointer exceptions are possible for Load/Store
+ * (get_opt_ldst_only_null_ptr_exceptions()) and the Store address is
+ * known to be non-null, the X_except Proj can never be taken and is
+ * replaced by Bad.  All other Projs are returned unchanged.
+ */
+static ir_node *equivalent_node_Proj_Store(ir_node *proj) {
+	if (get_opt_ldst_only_null_ptr_exceptions()) {
+		if (get_irn_mode(proj) == mode_X) {
+			ir_node *store = get_Proj_pred(proj);
+
+			/* get the Store address */
+			ir_node *addr = get_Store_ptr(store);
+			ir_node *confirm;
+
+			if (value_not_null(addr, &confirm)) {
+				if (get_Proj_proj(proj) == pn_Store_X_except) {
+					/* this Store cannot trap: remove the exception flow */
+					DBG_OPT_EXC_REM(proj);
+					return get_irg_bad(current_ir_graph);
+				}
+			}
+		}
+	}
+	return proj;
+} /* equivalent_node_Proj_Store */
+
+/**
+ * Does all optimizations on nodes that must be done on its Proj's
+ * because of creating new nodes.
+ *
+ * Dispatches on the opcode of the Proj's predecessor; opcodes without
+ * a dedicated handler fall through to the dead-control-flow check.
+ */
+static ir_node *equivalent_node_Proj(ir_node *proj) {
+	ir_node *pred = get_Proj_pred(proj);
+
+	switch (get_irn_opcode(pred)) {
+	case iro_Div:    return equivalent_node_Proj_Div(proj);
+	case iro_DivMod: return equivalent_node_Proj_DivMod(proj);
+	/* NOTE(review): Quot is dispatched to the DivMod handler -- presumably
+	 * their Proj layouts are compatible; confirm against the node spec. */
+	case iro_Quot:   return equivalent_node_Proj_DivMod(proj);
+	case iro_Tuple:  return equivalent_node_Proj_Tuple(proj);
+	case iro_CopyB:  return equivalent_node_Proj_CopyB(proj);
+	case iro_Bound:  return equivalent_node_Proj_Bound(proj);
+	case iro_Load:   return equivalent_node_Proj_Load(proj);
+	case iro_Store:  return equivalent_node_Proj_Store(proj);
+	default:
+		break;
+	}
+
+	/* Remove dead control flow -- early gigo(). */
+	if (get_irn_mode(proj) == mode_X && is_Block_dead(get_nodes_block(pred)))
+		return get_irg_bad(current_ir_graph);
+	return proj;
+} /* equivalent_node_Proj */
+
+/**
+ * Remove Id's: follow a chain of Id nodes and return the first
+ * predecessor that is not an Id.
+ */
+static ir_node *equivalent_node_Id(ir_node *n) {
+	ir_node *const start = n;
+
+	/* step at least once: this handler is only invoked on Id nodes */
+	n = get_Id_pred(n);
+	while (is_Id(n))
+		n = get_Id_pred(n);
+
+	DBG_OPT_ID(start, n);
+	return n;
+} /* equivalent_node_Id */
+
+/**
+ * Optimize a Mux.
+ */
+static ir_node *equivalent_node_Mux(ir_node *n)
+{
+ ir_node *oldn = n, *sel = get_Mux_sel(n);
+ tarval *ts = value_of(sel);
+
+ /* Mux(true, f, t) == t */
+ if (ts == tarval_b_true) {
+ n = get_Mux_true(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ }
+ /* Mux(false, f, t) == f */
+ else if (ts == tarval_b_false) {
+ n = get_Mux_false(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ }
+ /* Mux(v, x, x) == x */
+ else if (get_Mux_false(n) == get_Mux_true(n)) {
+ n = get_Mux_true(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ }
+ else if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) {
+ ir_node *cmp = get_Proj_pred(sel);
+ long proj_nr = get_Proj_proj(sel);
+ ir_node *f = get_Mux_false(n);
+ ir_node *t = get_Mux_true(n);
+
+ /*
+ * Note further that these optimizations work even for floating point
+ * with NaN's because -NaN == NaN.
+ * However, if +0 and -0 is handled differently, we cannot use the first one.
+ */
+ if (is_Cmp(cmp)) {
+ ir_node *const cmp_l = get_Cmp_left(cmp);
+ ir_node *const cmp_r = get_Cmp_right(cmp);
+
+ switch (proj_nr) {
+ case pn_Cmp_Eq:
+ if ((cmp_l == t && cmp_r == f) || /* Psi(t == f, t, f) -> f */
+ (cmp_l == f && cmp_r == t)) { /* Psi(f == t, t, f) -> f */