-static ir_node *transform_node_Mux(ir_node *n)
-{
- ir_node *oldn = n, *sel = get_Mux_sel(n);
- ir_mode *mode = get_irn_mode(n);
-
- if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
- ir_node *cmp = get_Proj_pred(sel);
- long proj_nr = get_Proj_proj(sel);
- ir_node *f = get_Mux_false(n);
- ir_node *t = get_Mux_true(n);
-
- if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
- ir_node *block = get_irn_n(n, -1);
-
- /*
- * Note: normalization puts the constant on the right site,
- * so we check only one case.
- *
- * Note further that these optimization work even for floating point
- * with NaN's because -NaN == NaN.
- * However, if +0 and -0 is handled differently, we cannot use the first one.
- */
- if (get_irn_op(f) == op_Minus &&
- get_Minus_op(f) == t &&
- get_Cmp_left(cmp) == t) {
-
- if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
- /* Mux(a >=/> 0, -a, a) ==> Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- t, mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
- }
- else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
- /* Mux(a <=/< 0, -a, a) ==> Minus(Abs(a)) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- t, mode);
- n = new_rd_Minus(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- n, mode);
-
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
- }
- }
- else if (get_irn_op(t) == op_Minus &&
- get_Minus_op(t) == f &&
- get_Cmp_left(cmp) == f) {
-
- if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
- /* Mux(a <=/< 0, a, -a) ==> Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- f, mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
- }
- else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
- /* Mux(a >=/> 0, a, -a) ==> Minus(Abs(a)) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- f, mode);
- n = new_rd_Minus(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- n, mode);
-
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
- }
- }
-
- if (mode_is_int(mode) && mode_is_signed(mode) &&
- get_mode_arithmetic(mode) == irma_twos_complement) {
- ir_node *x = get_Cmp_left(cmp);
-
- /* the following optimization works only with signed integer two-complement mode */
-
- if (mode == get_irn_mode(x)) {
- /*
- * FIXME: this restriction is two rigid, as it would still
- * work if mode(x) = Hs and mode == Is, but at least it removes
- * all wrong cases.
- */
- if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Le) &&
- classify_Const(t) == CNST_ALL_ONE &&
- classify_Const(f) == CNST_NULL) {
- /*
- * Mux(x:T </<= 0, 0, -1) -> Shrs(x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- n = new_rd_Shrs(get_irn_dbg_info(n),
- current_ir_graph, block, x,
- new_r_Const_long(current_ir_graph, block, mode_Iu,
- get_mode_size_bits(mode) - 1),
- mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
- return n;
- }
- else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Ge) &&
- classify_Const(t) == CNST_ONE &&
- classify_Const(f) == CNST_NULL) {
- /*
- * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- n = new_rd_Shr(get_irn_dbg_info(n),
- current_ir_graph, block,
- new_r_Minus(current_ir_graph, block, x, mode),
- new_r_Const_long(current_ir_graph, block, mode_Iu,
- get_mode_size_bits(mode) - 1),
- mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
- return n;
- }
- }
- }
- }
- }
- return arch_transform_node_Mux(n);
+static ir_node *transform_node_Mux(ir_node *n) {
+ ir_node *oldn = n, *sel = get_Mux_sel(n);
+ ir_mode *mode = get_irn_mode(n);
+
+ if (mode == mode_b) {
+ ir_node *t = get_Mux_true(n);
+ ir_node *f = get_Mux_false(n);
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_irn_n(n, -1);
+ ir_graph *irg = current_ir_graph;
+
+ if (is_Const(t)) {
+ tarval *tv_t = get_Const_tarval(t);
+ if (tv_t == tarval_b_true) {
+ if (is_Const(f)) {
+ assert(get_Const_tarval(f) == tarval_b_false);
+ return sel;
+ } else {
+ return new_rd_Or(dbg, irg, block, sel, f, mode_b);
+ }
+ } else {
+ ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+ assert(tv_t == tarval_b_false);
+ if (is_Const(f)) {
+ assert(get_Const_tarval(f) == tarval_b_true);
+ return not_sel;
+ } else {
+ return new_rd_And(dbg, irg, block, not_sel, f, mode_b);
+ }
+ }
+ } else if (is_Const(f)) {
+ tarval *tv_f = get_Const_tarval(f);
+ if (tv_f == tarval_b_true) {
+ ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+ return new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ } else {
+ assert(tv_f == tarval_b_false);
+ return new_rd_And(dbg, irg, block, sel, t, mode_b);
+ }
+ }
+ }
+
+ if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
+ ir_node *cmp = get_Proj_pred(sel);
+ long proj_nr = get_Proj_proj(sel);
+ ir_node *f = get_Mux_false(n);
+ ir_node *t = get_Mux_true(n);
+
+ if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
+ ir_node *block = get_irn_n(n, -1);
+
+ /*
+ * Note: normalization puts the constant on the right side,
+ * so we check only one case.
+ *
+ * Note further that these optimizations work even for floating point
+ * with NaN's because -NaN == NaN.
+ * However, if +0 and -0 are handled differently, we cannot use the first one.
+ */
+ if (get_irn_op(f) == op_Minus &&
+ get_Minus_op(f) == t &&
+ get_Cmp_left(cmp) == t) {
+
+ if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
+ /* Mux(a >=/> 0, -a, a) ==> Abs(a) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ t, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ } else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+ /* Mux(a <=/< 0, -a, a) ==> Minus(Abs(a)) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ t, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ n, mode);
+
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ }
+ } else if (get_irn_op(t) == op_Minus &&
+ get_Minus_op(t) == f &&
+ get_Cmp_left(cmp) == f) {
+
+ if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+ /* Mux(a <=/< 0, a, -a) ==> Abs(a) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ f, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ } else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
+ /* Mux(a >=/> 0, a, -a) ==> Minus(Abs(a)) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ f, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ n, mode);
+
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ }
+ }
+
+ if (mode_is_int(mode) && mode_is_signed(mode) &&
+ get_mode_arithmetic(mode) == irma_twos_complement) {
+ ir_node *x = get_Cmp_left(cmp);
+
+ /* the following optimization works only with signed integer two's complement mode */
+
+ if (mode == get_irn_mode(x)) {
+ /*
+ * FIXME: this restriction is too rigid, as it would still
+ * work if mode(x) = Hs and mode == Is, but at least it removes
+ * all wrong cases.
+ */
+ if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Le) &&
+ classify_Const(t) == CNST_ALL_ONE &&
+ classify_Const(f) == CNST_NULL) {
+ /*
+ * Mux(x:T </<= 0, 0, -1) -> Shrs(x, sizeof_bits(T) - 1)
+ * Conditions:
+ * T must be signed.
+ */
+ n = new_rd_Shrs(get_irn_dbg_info(n),
+ current_ir_graph, block, x,
+ new_r_Const_long(current_ir_graph, block, mode_Iu,
+ get_mode_size_bits(mode) - 1),
+ mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
+ return n;
+ } else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Ge) &&
+ classify_Const(t) == CNST_ONE &&
+ classify_Const(f) == CNST_NULL) {
+ /*
+ * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1)
+ * Conditions:
+ * T must be signed.
+ */
+ n = new_rd_Shr(get_irn_dbg_info(n),
+ current_ir_graph, block,
+ new_r_Minus(current_ir_graph, block, x, mode),
+ new_r_Const_long(current_ir_graph, block, mode_Iu,
+ get_mode_size_bits(mode) - 1),
+ mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
+ return n;
+ }
+ }
+ }
+ }
+ }
+ return arch_transform_node_Mux(n);