+ /* Div is not commutative. */
+ if (tarval_is_one(tb)) { /* div(x, 1) == x */
+ switch (get_Proj_proj(proj)) {
+ case pn_DivMod_M:
+ proj = get_DivMod_mem(divmod);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ case pn_DivMod_res_div:
+ proj = get_DivMod_left(divmod);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
+ return proj;
+
+ default:
+ /* we cannot replace the exception Proj's here, this is done in
+ transform_node_Proj_DivMod().
+ Note further that the pn_DivMod_res_mod case (x % 1 == 0 is a
+ constant) is handled in computed_value_Proj(). */
+ return proj;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_DivMod */
+
+/**
+ * Optimize CopyB(mem, x, x) into a Nop.
+ *
+ * A CopyB whose destination equals its source copies nothing:
+ * its memory Proj collapses to the incoming memory, and its
+ * exception Projs become Bad, because a copy that does nothing
+ * cannot fault.  Conceptually the CopyB is turned into the
+ * tuple (mem, jmp, bad, bad).
+ */
+static ir_node *equivalent_node_Proj_CopyB(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *copyb = get_Proj_pred(proj);
+ ir_node *a = get_CopyB_dst(copyb);
+ ir_node *b = get_CopyB_src(copyb);
+
+ if (a == b) {
+ /* Turn CopyB into a tuple (mem, jmp, bad, bad) */
+ switch (get_Proj_proj(proj)) {
+ case pn_CopyB_M_regular:
+ /* the copy is a no-op: route the memory through */
+ proj = get_CopyB_mem(copyb);
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
+ break;
+
+ case pn_CopyB_M_except:
+ case pn_CopyB_X_except:
+ /* a no-op copy can never raise an exception */
+ DBG_OPT_EXC_REM(proj);
+ proj = get_irg_bad(current_ir_graph);
+ break;
+
+ default:
+ /* pn_CopyB_X_regular cannot be optimized here, keep the Proj */
+ break;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_CopyB */
+
+/**
+ * Optimize Bound(idx, idx, upper) into idx.
+ *
+ * A Bound check is redundant when the index equals the lower bound,
+ * or when the same index was already checked against the same bounds
+ * in the same MacroBlock.  A redundant Bound collapses to the tuple
+ * (mem, jmp, bad, idx), handled Proj-wise below.
+ */
+static ir_node *equivalent_node_Proj_Bound(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *bound = get_Proj_pred(proj);
+ ir_node *idx = get_Bound_index(bound);
+ ir_node *pred = skip_Proj(idx);
+ int ret_tuple = 0;
+
+ /* idx == lower bound: trivially in range at the lower end */
+ if (idx == get_Bound_lower(bound))
+ ret_tuple = 1;
+ else if (is_Bound(pred)) {
+ /*
+ * idx was Bounds checked in the same MacroBlock previously,
+ * it is still valid if lower <= pred_lower && pred_upper <= upper.
+ * NOTE: the code below checks exact equality of both bounds,
+ * a conservative subset of that condition.
+ */
+ ir_node *lower = get_Bound_lower(bound);
+ ir_node *upper = get_Bound_upper(bound);
+ if (get_Bound_lower(pred) == lower &&
+ get_Bound_upper(pred) == upper &&
+ get_irn_MacroBlock(bound) == get_irn_MacroBlock(pred)) {
+ /*
+ * One could expect that we simply return the previous
+ * Bound here. However, this would be wrong, as we could
+ * add an exception Proj to a new location then.
+ * So, we must turn it into a tuple.
+ */
+ ret_tuple = 1;
+ }
+ }
+ if (ret_tuple) {
+ /* Turn Bound into a tuple (mem, jmp, bad, idx) */
+ switch (get_Proj_proj(proj)) {
+ case pn_Bound_M:
+ /* the check has no memory effect: route the memory through */
+ DBG_OPT_EXC_REM(proj);
+ proj = get_Bound_mem(bound);
+ break;
+ case pn_Bound_X_except:
+ /* a redundant check cannot fail, its exception edge is dead */
+ DBG_OPT_EXC_REM(proj);
+ proj = get_irg_bad(current_ir_graph);
+ break;
+ case pn_Bound_res:
+ /* the checked result is simply the index itself */
+ proj = idx;
+ DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
+ break;
+ default:
+ /* cannot optimize pn_Bound_X_regular, handled in transform ... */
+ ;
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_Bound */
+
+/**
+ * Optimize an Exception Proj(Load) with a non-null address.
+ *
+ * If the Load address is provably non-null, the exception Proj can
+ * never be taken and is replaced by Bad.  Only applies when the
+ * load/store option restricts exceptions to null-pointer accesses.
+ */
+static ir_node *equivalent_node_Proj_Load(ir_node *proj) {
+ if (get_opt_ldst_only_null_ptr_exceptions() &&
+ get_irn_mode(proj) == mode_X &&
+ get_Proj_proj(proj) == pn_Load_X_except) {
+ ir_node *load = get_Proj_pred(proj);
+
+ /* get the Load address */
+ ir_node *addr = get_Load_ptr(load);
+ ir_node *confirm;
+
+ if (value_not_null(addr, &confirm)) {
+ /* address cannot be null: the exception edge is dead */
+ DBG_OPT_EXC_REM(proj);
+ return get_irg_bad(current_ir_graph);
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_Load */
+
+/**
+ * Optimize an Exception Proj(Store) with a non-null address.
+ *
+ * If the Store address is provably non-null, the exception Proj can
+ * never be taken and is replaced by Bad.  Only applies when the
+ * load/store option restricts exceptions to null-pointer accesses.
+ */
+static ir_node *equivalent_node_Proj_Store(ir_node *proj) {
+ if (get_opt_ldst_only_null_ptr_exceptions() &&
+ get_irn_mode(proj) == mode_X &&
+ get_Proj_proj(proj) == pn_Store_X_except) {
+ ir_node *store = get_Proj_pred(proj);
+
+ /* get the Store address */
+ ir_node *addr = get_Store_ptr(store);
+ ir_node *confirm;
+
+ if (value_not_null(addr, &confirm)) {
+ /* address cannot be null: the exception edge is dead */
+ DBG_OPT_EXC_REM(proj);
+ return get_irg_bad(current_ir_graph);
+ }
+ }
+ return proj;
+} /* equivalent_node_Proj_Store */
+
+/**
+ * Does all optimizations on nodes that must be done on its Proj's
+ * because of creating new nodes.
+ *
+ * Dispatches on the opcode of the Proj's predecessor to the
+ * opcode-specific equivalent_node_Proj_* handler above.
+ */
+static ir_node *equivalent_node_Proj(ir_node *proj) {
+ ir_node *n = get_Proj_pred(proj);
+
+ switch (get_irn_opcode(n)) {
+ case iro_Div:
+ return equivalent_node_Proj_Div(proj);
+
+ case iro_DivMod:
+ return equivalent_node_Proj_DivMod(proj);
+
+ case iro_Quot:
+ /* NOTE(review): Quot is dispatched to the DivMod handler, which
+ uses DivMod accessors and pn_DivMod_* projection numbers --
+ confirm these are valid for Quot nodes, or that a dedicated
+ Quot handler is intended here. */
+ return equivalent_node_Proj_DivMod(proj);
+
+ case iro_Tuple:
+ return equivalent_node_Proj_Tuple(proj);
+
+ case iro_CopyB:
+ return equivalent_node_Proj_CopyB(proj);
+
+ case iro_Bound:
+ return equivalent_node_Proj_Bound(proj);
+
+ case iro_Load:
+ return equivalent_node_Proj_Load(proj);
+
+ case iro_Store:
+ return equivalent_node_Proj_Store(proj);
+
+ default:
+ /* control-flow Projs hanging off a dead block are dead too */
+ if (get_irn_mode(proj) == mode_X) {
+ if (is_Block_dead(get_nodes_block(n))) {
+ /* Remove dead control flow -- early gigo(). */
+ proj = get_irg_bad(current_ir_graph);
+ }
+ }
+ return proj;
+ }
+} /* equivalent_node_Proj */
+
+/**
+ * Remove Id's.
+ *
+ * Follows a chain of Id nodes to its first non-Id predecessor
+ * and returns that node.
+ */
+static ir_node *equivalent_node_Id(ir_node *n) {
+ ir_node *oldn = n;
+
+ /* step past the first Id, then keep walking while predecessors are Ids */
+ n = get_Id_pred(n);
+ while (is_Id(n))
+ n = get_Id_pred(n);
+
+ DBG_OPT_ID(oldn, n);
+ return n;
+} /* equivalent_node_Id */
+
+/**
+ * Optimize a Mux.
+ */
+static ir_node *equivalent_node_Mux(ir_node *n)
+{
+ ir_node *oldn = n, *sel = get_Mux_sel(n);
+ tarval *ts = value_of(sel);
+
+ /* Mux(true, f, t) == t */
+ if (ts == tarval_b_true) {