+ /* Remove duplicate predecessors */
+ for (j = 0;; ++j) {
+ if (j >= i) {
+ /* no duplicate of pred among preds 0..i-1: keep it, advance i */
+ ++i;
+ break;
+ }
+ if (get_Sync_pred(n, j) == pred) {
+ /* pred already occurs at position j: drop the i-th entry; do not
+ advance i, the next pred slides into this slot */
+ del_Sync_n(n, i);
+ --arity;
+ break;
+ }
+ }
+ }
+
+ if (arity == 0) return get_irg_bad(current_ir_graph);
+ if (arity == 1) return get_Sync_pred(n, 0);
+ return n;
+} /* equivalent_node_Sync */
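+
+/*
+ * Example (illustrative): for n = Sync(m, m) the duplicate operand is
+ * removed, arity drops to 1, and the Sync collapses to m itself.
+ */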
+
+/**
+ * Optimize Proj(Tuple).
+ */
+static ir_node *equivalent_node_Proj_Tuple(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *tuple = get_Proj_pred(proj);
+
+ /* Remove the Tuple/Proj combination. */
+ proj = get_Tuple_pred(tuple, get_Proj_proj(proj));
+ DBG_OPT_TUPLE(oldn, tuple, proj);
+
+ return proj;
+} /* equivalent_node_Proj_Tuple */
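+
+/*
+ * Example (illustrative): with t = Tuple(a, b), the node Proj(t, 1) is
+ * short-circuited to b; the Tuple itself becomes dead code.
+ */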
+
+/**
+ * Optimize a / 1 = a.
+ */
+static ir_node *equivalent_node_Proj_Div(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *div = get_Proj_pred(proj);
+ ir_node *b = get_Div_right(div);
+ tarval *tb = value_of(b);
+
+ /* Div is not commutative: x / 1 == x, but 1 / x != x in general,
+ so only the right operand is tested for the neutral element. */
+ if (tarval_is_one(tb)) { /* div(x, 1) == x */
+ switch (get_Proj_proj(proj)) {
+ case pn_Div_M:
+ proj = get_Div_mem(div);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ case pn_Div_res:
+ proj = get_Div_left(div);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ default:
+ /* we cannot replace the exception Projs here; this is done in
+ transform_node_Proj_Div() */
+ return proj;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_Div */
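+
+/*
+ * Example (illustrative): for d = Div(mem, x, 1), the result Proj
+ * becomes x and the memory Proj becomes mem; only the exception Projs
+ * remain for transform_node_Proj_Div().
+ */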
+
+/**
+ * Optimize a / 1.0 = a.
+ */
+static ir_node *equivalent_node_Proj_Quot(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *quot = get_Proj_pred(proj);
+ ir_node *b = get_Quot_right(quot);
+ tarval *tb = value_of(b);
+
+ /* Quot is not commutative. */
+ if (tarval_is_one(tb)) { /* Quot(x, 1) == x */
+ switch (get_Proj_proj(proj)) {
+ case pn_Quot_M:
+ proj = get_Quot_mem(quot);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ case pn_Quot_res:
+ proj = get_Quot_left(quot);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ default:
+ /* we cannot replace the exception Projs here; this is done in
+ transform_node_Proj_Quot() */
+ return proj;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_Quot */
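+
+/*
+ * Example (illustrative): Proj(Quot(mem, x, 1.0), pn_Quot_res) becomes
+ * x; division by 1.0 is exact, so this also holds for floating point.
+ */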
+
+/**
+ * Optimize a / 1 = a.
+ */
+static ir_node *equivalent_node_Proj_DivMod(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *divmod = get_Proj_pred(proj);
+ ir_node *b = get_DivMod_right(divmod);
+ tarval *tb = value_of(b);
+
+ /* DivMod is not commutative. */
+ if (tarval_is_one(tb)) { /* div(x, 1) == x */
+ switch (get_Proj_proj(proj)) {
+ case pn_DivMod_M:
+ proj = get_DivMod_mem(divmod);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ case pn_DivMod_res_div:
+ proj = get_DivMod_left(divmod);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ default:
+ /* we cannot replace the exception Projs here; this is done in
+ transform_node_Proj_DivMod().
+ Note further that the pn_DivMod_res_mod case is handled in
+ computed_value_Proj(). */
+ return proj;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_DivMod */
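+
+/*
+ * Illustrative note: for DivMod(mem, x, 1) the res_div Proj becomes x
+ * here, while mod(x, 1) == 0 is a constant and is therefore folded by
+ * computed_value_Proj() instead.
+ */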
+
+/**
+ * Optimize CopyB(mem, x, x) into a Nop.
+ */
+static ir_node *equivalent_node_Proj_CopyB(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *copyb = get_Proj_pred(proj);
+ ir_node *a = get_CopyB_dst(copyb);
+ ir_node *b = get_CopyB_src(copyb);
+
+ if (a == b) {
+ /* Turn CopyB into a tuple (mem, jmp, bad, bad) */
+ switch (get_Proj_proj(proj)) {
+ case pn_CopyB_M_regular:
+ proj = get_CopyB_mem(copyb);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
+ break;
+
+ case pn_CopyB_M_except:
+ case pn_CopyB_X_except:
+ DBG_OPT_EXC_REM(proj);
+ proj = get_irg_bad(current_ir_graph);
+ break;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_CopyB */
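+
+/*
+ * Example (illustrative): CopyB(mem, p, p) copies a block onto itself,
+ * so its regular memory Proj is rerouted to mem and its exception
+ * Projs are replaced by Bad.
+ */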
+
+/**
+ * Optimize Bound(idx, idx, upper) into idx.
+ */
+static ir_node *equivalent_node_Proj_Bound(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *bound = get_Proj_pred(proj);
+ ir_node *idx = get_Bound_index(bound);
+ ir_node *pred = skip_Proj(idx);
+ int ret_tuple = 0;
+
+ if (idx == get_Bound_lower(bound))
+ ret_tuple = 1;
+ else if (is_Bound(pred)) {
+ /*
+ * idx was already bounds-checked by a Bound in the same MacroBlock;
+ * that check subsumes this one if
+ * lower <= pred_lower && pred_upper <= upper.
+ */
+ ir_node *lower = get_Bound_lower(bound);
+ ir_node *upper = get_Bound_upper(bound);
+ if (get_Bound_lower(pred) == lower &&
+ get_Bound_upper(pred) == upper &&
+ get_irn_MacroBlock(bound) == get_irn_MacroBlock(pred)) {
+ /*
+ * One might expect that we could simply return the previous
+ * Bound here, but that would be wrong: it could attach an
+ * exception Proj to a new location.
+ * So we must turn it into a tuple instead.
+ */
+ ret_tuple = 1;
+ }
+ }
+ if (ret_tuple) {
+ /* Turn Bound into a tuple (mem, jmp, bad, idx) */
+ switch (get_Proj_proj(proj)) {
+ case pn_Bound_M:
+ DBG_OPT_EXC_REM(proj);
+ proj = get_Bound_mem(bound);
+ break;
+ case pn_Bound_X_except:
+ DBG_OPT_EXC_REM(proj);
+ proj = get_irg_bad(current_ir_graph);
+ break;
+ case pn_Bound_res:
+ proj = idx;
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
+ break;
+ default:
+ /* cannot optimize pn_Bound_X_regular, handled in transform ... */
+ ;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_Bound */
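+
+/*
+ * Example (illustrative): if idx is the checked result of an identical
+ * Bound in the same MacroBlock, the second check cannot fail: its
+ * result Proj becomes idx and its exception Proj becomes Bad.
+ */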
+
+/**
+ * Optimize an Exception Proj(Load) with a non-null address.
+ */
+static ir_node *equivalent_node_Proj_Load(ir_node *proj) {
+ if (get_opt_ldst_only_null_ptr_exceptions()) {
+ if (get_irn_mode(proj) == mode_X) {
+ ir_node *load = get_Proj_pred(proj);
+
+ /* get the Load address */
+ const ir_node *addr = get_Load_ptr(load);
+ const ir_node *confirm;
+
+ if (value_not_null(addr, &confirm)) {
+ if (get_Proj_proj(proj) == pn_Load_X_except) {
+ DBG_OPT_EXC_REM(proj);
+ return get_irg_bad(current_ir_graph);
+ }