+ n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
+ mode_b, get_negated_pnc(get_Proj_proj(a)));
+
+ return n;
+}
+
+/**
+ * Transform a Cast of a Const (or SymConst) into a new Const (or
+ * SymConst) that directly carries the Cast's type, making the Cast
+ * itself superfluous.
+ *
+ * @param n  the Cast node
+ * @return a new typed (Sym)Const, or n unchanged if no transformation applies
+ */
+static ir_node *transform_node_Cast(ir_node *n) {
+  ir_node *pred = get_Cast_op(n);
+  /* BUGFIX: take the type of the Cast itself, not of its operand.
+   * With get_irn_type(pred), a Const operand always satisfied
+   * get_Const_type(pred) == tp, so the optimization never fired. */
+  type *tp = get_irn_type(n);
+
+  if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
+    /* rebuild the Const with the Cast's type attached */
+    n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
+              get_Const_tarval(pred), tp);
+  } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
+    /* same for a SymConst: rebuild with the Cast's type as value type */
+    n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred),
+              get_SymConst_kind(pred), tp);
+  }
+  return n;
+}
+
+/**
+ * Transform a Proj on a Div/Mod/DivMod with a non-zero constant
+ * divisor. Must be done here instead of equivalent node because it
+ * creates new nodes.
+ * Removes the exception Proj and (with USE_NOMEM) routes the memory
+ * input of the operation to the NoMem node.
+ *
+ * Further, it optimizes jump tables by removing all impossible cases
+ * (Projs on a Cond with a constant integer selector).
+ *
+ * @param proj  the Proj node to transform
+ * @return a replacement node: new_Bad() for Projs proved dead, the old
+ *         memory for a now-unneeded memory Proj, or proj itself
+ */
+static ir_node *transform_node_Proj(ir_node *proj)
+{
+  ir_node *n = get_Proj_pred(proj); /* the node this Proj projects from */
+  ir_node *b;                       /* divisor resp. Cond selector */
+  tarval *tb;                       /* its constant value, or tarval_bad */
+  long proj_nr;
+
+  switch (get_irn_opcode(n)) {
+  case iro_Div:
+    b = get_Div_right(n);
+    tb = value_of(b);
+
+    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
+      proj_nr = get_Proj_proj(proj);
+
+      /* a Div by a non-zero constant cannot trap: this node may float */
+      set_irn_pinned(n, op_pin_state_floats);
+
+      if (proj_nr == pn_Div_X_except) {
+        /* we found an exception handler, remove it */
+        return new_Bad();
+      } else {
+        /* the memory Proj can be removed: hand the old memory through */
+        ir_node *res = get_Div_mem(n);
+# ifdef USE_NOMEM
+        set_Div_mem(n, get_irg_no_mem(current_ir_graph));
+# endif /* defined USE_NOMEM */
+        if (proj_nr == pn_Div_M)
+          return res;
+      }
+    }
+    break;
+  case iro_Mod:
+    b = get_Mod_right(n);
+    tb = value_of(b);
+
+    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
+      proj_nr = get_Proj_proj(proj);
+
+      /* a Mod by a non-zero constant cannot trap: this node may float */
+      set_irn_pinned(n, op_pin_state_floats);
+
+      if (proj_nr == pn_Mod_X_except) {
+        /* we found an exception handler, remove it */
+        return new_Bad();
+      } else {
+        /* the memory Proj can be removed: hand the old memory through */
+        ir_node *res = get_Mod_mem(n);
+# ifdef USE_NOMEM
+        set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
+# endif /* defined USE_NOMEM */
+        if (proj_nr == pn_Mod_M)
+          return res;
+      }
+    }
+    break;
+  case iro_DivMod:
+    b = get_DivMod_right(n);
+    tb = value_of(b);
+
+    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* DivMod(x, c) && c != 0 */
+      proj_nr = get_Proj_proj(proj);
+
+      /* a DivMod by a non-zero constant cannot trap: this node may float */
+      set_irn_pinned(n, op_pin_state_floats);
+
+      if (proj_nr == pn_DivMod_X_except) {
+        /* we found an exception handler, remove it */
+        return new_Bad();
+      }
+      else {
+        /* the memory Proj can be removed: hand the old memory through */
+        ir_node *res = get_DivMod_mem(n);
+# ifdef USE_NOMEM
+        set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
+# endif /* defined USE_NOMEM */
+        if (proj_nr == pn_DivMod_M)
+          return res;
+      }
+    }
+    break;
+
+  case iro_Cond:
+    if (get_opt_unreachable_code()) {
+      b = get_Cond_selector(n);
+      tb = value_of(b);
+
+      if (tb != tarval_bad && mode_is_int(get_tarval_mode(tb))) {
+        /* we have a constant switch: only one case Proj can be taken */
+        long num = get_Proj_proj(proj);
+
+        if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
+          if (get_tarval_long(tb) == num) {
+            /* Do NOT create a jump here, or we will have 2 control flow ops
+             * in a block. This case is optimized away in optimize_cf(). */
+            return proj;
+          }
+          else
+            /* this case can never be taken */
+            return new_Bad();
+        }
+      }
+    }
+    return proj;
+
+  case iro_Tuple:
+    /* should not happen, but if it does will be optimized away */
+    break;
+
+  default:
+    /* do nothing */
+    return proj;
+  }
+
+  /* we have added a Tuple, optimize it for the current Proj away */
+  return equivalent_node_Proj(proj);
+}
+
+/**
+ * Returns the two operands of a commutative bin-op through the out
+ * parameters: if one operand is a Const it is delivered through c,
+ * the other one through a.
+ */
+static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
+{
+  ir_node *l = get_binop_left(binop);
+  ir_node *r = get_binop_right(binop);
+
+  assert(is_op_commutative(get_irn_op(binop)));
+
+  if (get_irn_op(l) != op_Const) {
+    /* left operand is the value (or no Const at all) */
+    *a = l;
+    *c = r;
+  }
+  else {
+    /* left operand is the Const: swap so it comes out as c */
+    *a = r;
+    *c = l;
+  }
+}
+
+/**
+ * Optimize an Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
+ * Such a pattern may arise from consecutive bitfield stores.
+ *
+ *  value  c4                  value      c4 & c2
+ *    AND     c3                    AND           c1 | c3
+ *      OR     c2      ===>            OR
+ *        AND    c1
+ *           OR
+ */
+static ir_node *transform_node_Or(ir_node *or)
+{
+  ir_node *outer_and, *c1;
+  ir_node *inner_or, *c2;
+  ir_node *inner_and, *c3;
+  ir_node *value, *c4;
+  ir_node *new_and, *new_const, *block;
+  ir_mode *mode = get_irn_mode(or);
+
+  tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
+
+  /* match the four levels of the pattern, constants always second */
+  get_comm_Binop_Ops(or, &outer_and, &c1);
+  if ((get_irn_op(c1) != op_Const) || (get_irn_op(outer_and) != op_And))
+    return or;
+
+  get_comm_Binop_Ops(outer_and, &inner_or, &c2);
+  if ((get_irn_op(c2) != op_Const) || (get_irn_op(inner_or) != op_Or))
+    return or;
+
+  get_comm_Binop_Ops(inner_or, &inner_and, &c3);
+  if ((get_irn_op(c3) != op_Const) || (get_irn_op(inner_and) != op_And))
+    return or;
+
+  get_comm_Binop_Ops(inner_and, &value, &c4);
+  if (get_irn_op(c4) != op_Const)
+    return or;
+
+  /* the pattern matched; every level must compute in the same mode */
+  assert(mode == get_irn_mode(outer_and));
+  assert(mode == get_irn_mode(inner_or));
+  assert(mode == get_irn_mode(inner_and));
+
+  tv1 = get_Const_tarval(c1);
+  tv2 = get_Const_tarval(c2);
+  tv3 = get_Const_tarval(c3);
+  tv4 = get_Const_tarval(c4);
+
+  tv = tarval_or(tv4, tv2);
+  if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
+    /* the two and-masks share a 0 at some bit position: cannot fold */
+    return or;
+  }
+
+  n_tv4 = tarval_not(tv4);
+  if (tv3 != tarval_and(tv3, n_tv4)) {
+    /* a bit of or-mask c3 lies outside and-mask c4 */
+    return or;
+  }
+
+  n_tv2 = tarval_not(tv2);
+  if (tv1 != tarval_and(tv1, n_tv2)) {
+    /* a bit of or-mask c1 lies outside and-mask c2 */
+    return or;
+  }
+
+  /* all conditions met: rebuild as Or(And(value, c4 & c2), c3 | c1) */
+  block = get_nodes_block(or);
+
+  new_and = new_r_And(current_ir_graph, block,
+    value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
+
+  new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+
+  set_Or_left(or, new_and);
+  set_Or_right(or, new_const);
+
+  /* the rewritten Or may itself be the root of such a pattern: retry */
+  return transform_node_Or(or);
+}
+
+/* forward */
+static ir_node *transform_node(ir_node *n);