+ return transform_node_Or_bf_store(or);
+} /* transform_node_Or_bf_store */
+
+/**
+ * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot.
+ *
+ * Recognizes three shapes (after normalizing so the Shl is on the left):
+ *  - Shl(x, c1) | Shr(x, c2) with constants c1 + c2 == bits -> Rot(x, c1)
+ *  - Shl(x, Sub(bits, v)) | Shr(x, v) -> Rot(x, Sub(bits, v))
+ *  - Shl(x, v) | Shr(x, Sub(bits, v)) -> Rot(x, v)
+ *
+ * @param or  the Or node
+ * @return the new Rot node, or the unchanged Or if no pattern matched
+ */
+static ir_node *transform_node_Or_Rot(ir_node *or)
+{
+  ir_mode *mode = get_irn_mode(or);
+  ir_node *shl, *shr, *block;
+  ir_node *irn, *x, *c1, *c2, *v, *sub, *n;
+  tarval *tv1, *tv2;
+
+  if (! mode_is_int(mode))
+    return or;
+
+  shl = get_binop_left(or);
+  shr = get_binop_right(or);
+
+  /* normalize: make shl the Shl operand and shr the Shr operand */
+  if (get_irn_op(shl) == op_Shr) {
+    if (get_irn_op(shr) != op_Shl)
+      return or;
+
+    irn = shl;
+    shl = shr;
+    shr = irn;
+  }
+  else if (get_irn_op(shl) != op_Shl)
+    return or;
+  else if (get_irn_op(shr) != op_Shr)
+    return or;
+
+  /* both shifts must work on the same value */
+  x = get_Shl_left(shl);
+  if (x != get_Shr_left(shr))
+    return or;
+
+  c1 = get_Shl_right(shl);
+  c2 = get_Shr_right(shr);
+  if (get_irn_op(c1) == op_Const && get_irn_op(c2) == op_Const) {
+    tv1 = get_Const_tarval(c1);
+    if (! tarval_is_long(tv1))
+      return or;
+
+    tv2 = get_Const_tarval(c2);
+    if (! tarval_is_long(tv2))
+      return or;
+
+    /* the two shift counts must add up to the full bit width */
+    if (get_tarval_long(tv1) + get_tarval_long(tv2)
+        != get_mode_size_bits(mode))
+      return or;
+
+    /* yet, condition met */
+    block = get_nodes_block(or);
+
+    /* Shl(x, c1) | Shr(x, bits - c1) ==> Rot(x, c1) */
+    n = new_r_Rot(current_ir_graph, block, x, c1, mode);
+
+    DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT);
+    return n;
+  }
+  else if (get_irn_op(c1) == op_Sub) {
+    v   = c2;
+    sub = c1;
+
+    /* the Shl amount must be exactly bits - v */
+    if (get_Sub_right(sub) != v)
+      return or;
+
+    c1 = get_Sub_left(sub);
+    if (get_irn_op(c1) != op_Const)
+      return or;
+
+    tv1 = get_Const_tarval(c1);
+    if (! tarval_is_long(tv1))
+      return or;
+
+    if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+      return or;
+
+    /* yet, condition met */
+    block = get_nodes_block(or);
+
+    /* a Rot right is not supported, so use a rot left:
+       Shl(x, bits - v) | Shr(x, v) ==> Rot(x, bits - v) */
+    n = new_r_Rot(current_ir_graph, block, x, sub, mode);
+
+    DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+    return n;
+  }
+  else if (get_irn_op(c2) == op_Sub) {
+    v   = c1;
+    sub = c2;
+
+    /* BUGFIX: the Shr amount must be exactly bits - v, mirroring the
+       check in the branch above; without it Shl(x, a) | Shr(x, bits - b)
+       with a != b was wrongly turned into a rotation. */
+    if (get_Sub_right(sub) != v)
+      return or;
+
+    c1 = get_Sub_left(sub);
+    if (get_irn_op(c1) != op_Const)
+      return or;
+
+    tv1 = get_Const_tarval(c1);
+    if (! tarval_is_long(tv1))
+      return or;
+
+    if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+      return or;
+
+    /* yet, condition met */
+    block = get_nodes_block(or);
+
+    /* Shl(x, v) | Shr(x, bits - v) ==> Rot(x, v), a Rot left */
+    n = new_r_Rot(current_ir_graph, block, x, v, mode);
+
+    DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+    return n;
+  }
+
+  return or;
+} /* transform_node_Or_Rot */
+
+/**
+ * Transform an Or.
+ *
+ * First tries constant evaluation, then the bitfield-store merge and
+ * the Or(Shl, Shr) -> Rot pattern.
+ */
+static ir_node *transform_node_Or(ir_node *n)
+{
+  ir_node *c, *oldn = n;
+  ir_node *a = get_Or_left(n);
+  ir_node *b = get_Or_right(n);
+
+  /* NOTE(review): HANDLE_BINOP_PHI presumably folds constant operands
+     (also across Phi nodes) using tarval_or and uses oldn/a/b/c; it may
+     return early from this function -- see the macro definition. */
+  HANDLE_BINOP_PHI(tarval_or, a,b,c);
+
+  /* try the structural Or optimizations; each returns its argument
+     unchanged if the pattern does not match */
+  n = transform_node_Or_bf_store(n);
+  n = transform_node_Or_Rot(n);
+
+  return n;
+} /* transform_node_Or */
+
+
+/* forward */
+static ir_node *transform_node(ir_node *n);
+
+/**
+ * Optimize (a >> c1) >> c2 into a >> (c1 + c2); works for Shr, Shrs, Shl.
+ *
+ * The merge is only performed when the summed shift count is provably
+ * below the mode's modulo-shift value (or no modulo shift applies).
+ *
+ * Should be moved to reassociation?
+ */
+static ir_node *transform_node_shift(ir_node *n)
+{
+  ir_node *pred, *count, *block, *irn;
+  ir_node *in[2];
+  tarval  *outer_tv, *inner_tv, *sum;
+  ir_mode *mode;
+  int     modulo_shf;
+
+  pred = get_binop_left(n);
+
+  /* both shifts must be of the same kind */
+  if (get_irn_op(pred) != get_irn_op(n))
+    return n;
+
+  count = get_binop_right(n);
+  outer_tv = value_of(count);
+  if (outer_tv == tarval_bad)
+    return n;
+
+  inner_tv = value_of(get_binop_right(pred));
+  if (inner_tv == tarval_bad)
+    return n;
+
+  sum = tarval_add(outer_tv, inner_tv);
+
+  /* beware: a simple replacement works only, if sum < modulo shift */
+  mode = get_irn_mode(n);
+
+  modulo_shf = get_mode_modulo_shift(mode);
+  if (modulo_shf > 0) {
+    tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(sum));
+
+    if (! (tarval_cmp(sum, modulo) & pn_Cmp_Lt))
+      return n;
+  }
+
+  /* ok, we can replace it: shift the inner operand by the summed count */
+  block = get_irn_n(n, -1);
+
+  in[0] = get_binop_left(pred);
+  in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(sum), sum);
+
+  irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);
+
+  DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
+
+  /* the merged shift may enable further merging */
+  return transform_node(irn);
+} /* transform_node_shift */
+
+/**
+ * Transform a Shr.
+ *
+ * Tries constant evaluation first, then merges nested shifts of the
+ * same kind via transform_node_shift().
+ */
+static ir_node *transform_node_Shr(ir_node *n)
+{
+  ir_node *c, *oldn = n;
+  ir_node *a = get_Shr_left(n);
+  ir_node *b = get_Shr_right(n);
+
+  /* NOTE(review): the macro presumably folds constants (also across
+     Phis) with tarval_shr and may return early -- defined elsewhere. */
+  HANDLE_BINOP_PHI(tarval_shr, a, b, c);
+  return transform_node_shift(n);
+} /* transform_node_Shr */
+
+/**
+ * Transform a Shrs.
+ *
+ * Tries constant evaluation first, then merges nested shifts of the
+ * same kind via transform_node_shift().
+ */
+static ir_node *transform_node_Shrs(ir_node *n)
+{
+  ir_node *c, *oldn = n;
+  ir_node *a = get_Shrs_left(n);
+  ir_node *b = get_Shrs_right(n);
+
+  /* NOTE(review): the macro presumably folds constants (also across
+     Phis) with tarval_shrs and may return early -- defined elsewhere. */
+  HANDLE_BINOP_PHI(tarval_shrs, a, b, c);
+  return transform_node_shift(n);
+} /* transform_node_Shrs */
+
+/**
+ * Transform a Shl.
+ *
+ * Tries constant evaluation first, then merges nested shifts of the
+ * same kind via transform_node_shift().
+ */
+static ir_node *transform_node_Shl(ir_node *n)
+{
+  ir_node *c, *oldn = n;
+  ir_node *a = get_Shl_left(n);
+  ir_node *b = get_Shl_right(n);
+
+  /* NOTE(review): the macro presumably folds constants (also across
+     Phis) with tarval_shl and may return early -- defined elsewhere. */
+  HANDLE_BINOP_PHI(tarval_shl, a, b, c);
+  return transform_node_shift(n);
+} /* transform_node_Shl */
+
+/**
+ * Remove dead blocks and nodes in dead blocks
+ * in keep alive list. We do not generate a new End node.
+ */
+static ir_node *transform_node_End(ir_node *n) {
+  int i, n_keepalives = get_End_n_keepalives(n);
+
+  for (i = 0; i < n_keepalives; ++i) {
+    ir_node *ka = get_End_keepalive(n, i);
+    int dead;
+
+    /* a keep-alive is obsolete if it is a dead block itself, or a
+       pinned node whose home block is dead */
+    if (is_Block(ka))
+      dead = is_Block_dead(ka);
+    else
+      dead = is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka));
+
+    if (dead)
+      set_End_keepalive(n, i, new_Bad());
+  }
+  return n;
+} /* transform_node_End */
+
+/**
+ * Optimize a Mux into some simpler cases.
+ *
+ * Handles Mux nodes whose selector is a Proj of a Cmp against 0:
+ * creates Abs()/Minus(Abs()) and signed compare-to-shift patterns.
+ * Everything else is delegated to arch_transform_node_Mux().
+ */
+static ir_node *transform_node_Mux(ir_node *n)
+{
+  ir_node *oldn = n, *sel = get_Mux_sel(n);
+  ir_mode *mode = get_irn_mode(n);
+
+  /* the Abs patterns below identify +0 and -0, hence the
+     mode_honor_signed_zeros() guard */
+  if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
+    ir_node *cmp = get_Proj_pred(sel);
+    long proj_nr = get_Proj_proj(sel);
+    ir_node *f = get_Mux_false(n);
+    ir_node *t = get_Mux_true(n);
+
+    if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
+      ir_node *block = get_irn_n(n, -1);
+
+      /*
+       * Note: normalization puts the constant on the right side,
+       * so we check only one case.
+       *
+       * Note further that these optimizations work even for floating point
+       * with NaN's because -NaN == NaN.
+       * However, if +0 and -0 is handled differently, we cannot use the first one.
+       */
+      if (get_irn_op(f) == op_Minus &&
+          get_Minus_op(f) == t &&
+          get_Cmp_left(cmp) == t) {
+
+        if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
+          /* Mux(a >=/> 0, -a, a) ==> Abs(a) */
+          n = new_rd_Abs(get_irn_dbg_info(n),
+                current_ir_graph,
+                block,
+                t, mode);
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+          return n;
+        }
+        else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+          /* Mux(a <=/< 0, -a, a) ==> Minus(Abs(a)) */
+          n = new_rd_Abs(get_irn_dbg_info(n),
+                current_ir_graph,
+                block,
+                t, mode);
+          n = new_rd_Minus(get_irn_dbg_info(n),
+                current_ir_graph,
+                block,
+                n, mode);
+
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+          return n;
+        }
+      }
+      else if (get_irn_op(t) == op_Minus &&
+          get_Minus_op(t) == f &&
+          get_Cmp_left(cmp) == f) {
+
+        if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+          /* Mux(a <=/< 0, a, -a) ==> Abs(a) */
+          n = new_rd_Abs(get_irn_dbg_info(n),
+                current_ir_graph,
+                block,
+                f, mode);
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+          return n;
+        }
+        else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
+          /* Mux(a >=/> 0, a, -a) ==> Minus(Abs(a)) */
+          n = new_rd_Abs(get_irn_dbg_info(n),
+                current_ir_graph,
+                block,
+                f, mode);
+          n = new_rd_Minus(get_irn_dbg_info(n),
+                current_ir_graph,
+                block,
+                n, mode);
+
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+          return n;
+        }
+      }
+
+      if (mode_is_int(mode) && mode_is_signed(mode) &&
+          get_mode_arithmetic(mode) == irma_twos_complement) {
+        ir_node *x = get_Cmp_left(cmp);
+
+        /* the following optimization works only with signed integer two-complement mode */
+
+        if (mode == get_irn_mode(x)) {
+          /*
+           * FIXME: this restriction is too rigid, as it would still
+           * work if mode(x) = Hs and mode == Is, but at least it removes
+           * all wrong cases.
+           */
+          if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Le) &&
+              classify_Const(t) == CNST_ALL_ONE &&
+              classify_Const(f) == CNST_NULL) {
+            /*
+             * Mux(x:T </<= 0, 0, -1) -> Shrs(x, sizeof_bits(T) - 1)
+             * Conditions:
+             * T must be signed.
+             *
+             * NOTE(review): for pn_Cmp_Le the Mux selects -1 at x == 0,
+             * while Shrs(0, bits-1) yields 0 -- confirm the Le case is
+             * really equivalent.
+             */
+            n = new_rd_Shrs(get_irn_dbg_info(n),
+                  current_ir_graph, block, x,
+                  new_r_Const_long(current_ir_graph, block, mode_Iu,
+                    get_mode_size_bits(mode) - 1),
+                  mode);
+            DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
+            return n;
+          }
+          else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Ge) &&
+              classify_Const(t) == CNST_ONE &&
+              classify_Const(f) == CNST_NULL) {
+            /*
+             * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1)
+             * Conditions:
+             * T must be signed.
+             *
+             * NOTE(review): for pn_Cmp_Ge the Mux selects 1 at x == 0,
+             * while Shr(-0, bits-1) yields 0; also check the most
+             * negative value, where -x == x -- confirm equivalence.
+             */
+            n = new_rd_Shr(get_irn_dbg_info(n),
+                  current_ir_graph, block,
+                  new_r_Minus(current_ir_graph, block, x, mode),
+                  new_r_Const_long(current_ir_graph, block, mode_Iu,
+                    get_mode_size_bits(mode) - 1),
+                  mode);
+            DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
+            return n;
+          }
+        }
+      }
+    }
+  }
+  /* no generic pattern matched: give the backend a chance */
+  return arch_transform_node_Mux(n);
+} /* transform_node_Mux */
+
+/**
+ * Optimize a Psi into some simpler cases.
+ */
+static ir_node *transform_node_Psi(ir_node *n) {
+  /* a one-condition Psi is a Mux and can reuse all Mux optimizations */
+  if (! is_Mux(n))
+    return n;
+
+  return transform_node_Mux(n);
+} /* transform_node_Psi */