-make_tuple:
- /* Turn Mod into a tuple (mem, jmp, bad, value) */
- mem = get_Mod_mem(n);
- blk = get_nodes_block(n);
-
- /* skip a potential Pin */
- mem = skip_Pin(mem);
- turn_into_tuple(n, pn_Mod_max);
- set_Tuple_pred(n, pn_Mod_M, mem);
- set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
- set_Tuple_pred(n, pn_Mod_res, value);
- }
- return n;
-} /* transform_node_Mod */
-
-/**
- * Transform a DivMod node.
- *
- * Tries to compute the quotient and the remainder of a combined Div/Mod
- * node at compile time.  Handled cases: one operand a Phi of constants,
- * both operands constant, divisor 1 or -1, a/a with a known non-zero a,
- * and 0/b with a known non-zero b.  On success the node is turned into a
- * Tuple providing memory, a regular Jmp, a Bad exception successor and
- * the two result values, so all Projs on it fold away.
- *
- * @param n  the DivMod node to transform
- * @return   the (possibly transformed, in-place) node n
- */
-static ir_node *transform_node_DivMod(ir_node *n)
-{
-	const ir_node *dummy;
-	ir_node *a = get_DivMod_left(n);
-	ir_node *b = get_DivMod_right(n);
-	ir_mode *mode = get_DivMod_resmode(n);
-	ir_node *va, *vb;
-	tarval *ta, *tb;
-	int evaluated = 0;  /* set when the arch backend produced a replacement */
-
-	if (is_Const(b) && is_const_Phi(a)) {
-		/* check for DivMod(Phi, Const): fold into each Phi predecessor */
-		va = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0);
-		vb = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0);
-		/* both quotient and remainder must be computable: the Tuple
-		   built below has to deliver both results */
-		if (va && vb) {
-			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
-			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
-			goto make_tuple;
-		}
-	}
-	else if (is_Const(a) && is_const_Phi(b)) {
-		/* check for DivMod(Const, Phi) */
-		va = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1);
-		vb = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1);
-		if (va && vb) {
-			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
-			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
-			goto make_tuple;
-		}
-	}
-	else if (is_const_Phi(a) && is_const_Phi(b)) {
-		/* check for DivMod(Phi, Phi) */
-		va = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode);
-		vb = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode);
-		if (va && vb) {
-			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
-			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
-			goto make_tuple;
-		}
-	}
-
-	ta = value_of(a);
-	tb = value_of(b);
-	if (tb != tarval_bad) {
-		/* NOTE(review): pointer comparison against get_mode_one() assumes
-		   tarvals are interned/unique per value -- confirm */
-		if (tb == get_mode_one(get_tarval_mode(tb))) {
-			/* a / 1 = a, a % 1 = 0 */
-			va = a;
-			vb = new_Const(get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, vb);
-			goto make_tuple;
-		} else if (ta != tarval_bad) {
-			/* both operands are constant: evaluate at compile time */
-			tarval *resa, *resb;
-			resa = tarval_div(ta, tb);
-			if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
-			                                     Jmp for X result!? */
-			resb = tarval_mod(ta, tb);
-			if (resb == tarval_bad) return n; /* Causes exception! */
-			va = new_Const(resa);
-			vb = new_Const(resb);
-			DBG_OPT_CSTEVAL(n, va);
-			DBG_OPT_CSTEVAL(n, vb);
-			goto make_tuple;
-		} else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
-			/* signed only: a / -1 = -a, a % -1 = 0 */
-			va = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
-			vb = new_Const(get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, va);
-			DBG_OPT_CSTEVAL(n, vb);
-			goto make_tuple;
-		} else { /* Try architecture dependent optimization */
-			va = a;
-			vb = b;
-			arch_dep_replace_divmod_by_const(&va, &vb, n);
-			/* presumably the backend sets va/vb to replacement nodes or
-			   leaves them NULL -- verify arch_dep contract */
-			evaluated = va != NULL;
-		}
-	} else if (a == b) {
-		if (value_not_zero(a, &dummy)) {
-			/* a/a && a != 0: quotient 1, remainder 0 */
-			va = new_Const(get_mode_one(mode));
-			vb = new_Const(get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, va);
-			DBG_OPT_CSTEVAL(n, vb);
-			goto make_tuple;
-		} else {
-			/* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */
-			return n;
-		}
-	} else if (ta == get_mode_null(mode) && value_not_zero(b, &dummy)) {
-		/* 0 / non-zero = 0, and the remainder is 0 as well, so both
-		   results can reuse the zero operand a itself */
-		vb = va = a;
-		goto make_tuple;
-	}
-
-	if (evaluated) { /* replace by tuple */
-		ir_node *mem, *blk;
-
-make_tuple:
-		mem = get_DivMod_mem(n);
-		/* skip a potential Pin */
-		mem = skip_Pin(mem);
-
-		blk = get_nodes_block(n);
-		/* exchange the DivMod for a Tuple so every Proj on it resolves
-		   to one of the values computed above */
-		turn_into_tuple(n, pn_DivMod_max);
-		set_Tuple_pred(n, pn_DivMod_M, mem);
-		set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(blk));
-		set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
-		set_Tuple_pred(n, pn_DivMod_res_div, va);
-		set_Tuple_pred(n, pn_DivMod_res_mod, vb);
-	}
-
-	return n;
-} /* transform_node_DivMod */
-
<br>
-/**
- * Optimize x / c to x * (1/c)
- *
- * Only applied to IEEE 754 float modes: the reciprocal 1/c is folded at
- * compile time and the division replaced by a multiplication when the
- * reciprocal is exact, or when the graph's fp model permits non-strict
- * algebraic transformations.  On success the Quot node is turned into a
- * Tuple (mem, Jmp, Bad, Mul).
- *
- * @param n  the Quot node to transform
- * @return   the (possibly transformed, in-place) node n
- */
-static ir_node *transform_node_Quot(ir_node *n)
-{
-	ir_mode *mode = get_Quot_resmode(n);
-	ir_node *oldn = n;
-
-	if (get_mode_arithmetic(mode) == irma_ieee754) {
-		ir_node *b = get_Quot_right(n);
-		tarval *tv = value_of(b);
-
-		if (tv != tarval_bad) {
-			int rem = tarval_fp_ops_enabled();  /* remember current setting */
-
-			/*
-			 * Floating point constant folding might be disabled here to
-			 * prevent rounding.
-			 * However, as we check for exact result, doing it is safe.
-			 * Switch it on.
-			 */
-			tarval_enable_fp_ops(1);
-			tv = tarval_quo(get_mode_one(mode), tv);  /* tv = 1/c */
-			tarval_enable_fp_ops(rem);  /* restore previous setting */
-
-			/* Do the transformation if the result is either exact or we are not
-			   using strict rules. */
-			if (tv != tarval_bad &&
-			    (tarval_ieee754_get_exact() || (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic) == 0)) {
-				ir_node *blk = get_nodes_block(n);
-				ir_node *c = new_Const(tv);
-				ir_node *a = get_Quot_left(n);
-				ir_node *m = new_rd_Mul(get_irn_dbg_info(n), blk, a, c, mode);
-				ir_node *mem = get_Quot_mem(n);
-
-				/* skip a potential Pin */
-				mem = skip_Pin(mem);
-				/* replace the Quot by a Tuple; a Mul cannot raise an
-				   exception, hence X_except becomes Bad */
-				turn_into_tuple(n, pn_Quot_max);
-				set_Tuple_pred(n, pn_Quot_M, mem);
-				set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(blk));
-				set_Tuple_pred(n, pn_Quot_X_except, new_Bad());
-				set_Tuple_pred(n, pn_Quot_res, m);
-				DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
-			}
-		}
-	}
-	return n;
-} /* transform_node_Quot */
-
-/**
- * Optimize Abs(x) into x if x is Confirmed >= 0
- * Optimize Abs(x) into -x if x is Confirmed <= 0
- * Optimize Abs(-x) into Abs(x)
- *
- * @param n  the Abs node to transform
- * @return   the replacement node (a new node, the operand, or n itself)
- */
-static ir_node *transform_node_Abs(ir_node *n)
-{
-	ir_node *c, *oldn = n;
-	ir_node *a = get_Abs_op(n);
-	ir_mode *mode;
-
-	/* presumably folds Abs over a Phi of constants and returns the
-	   folded node from this function on success -- confirm macro */
-	HANDLE_UNOP_PHI(tarval_abs, a, c);
-
-	switch (classify_value_sign(a)) {
-	case value_classified_negative:
-		mode = get_irn_mode(n);
-
-		/*
-		 * We can replace the Abs by -x here.
-		 * We even could add a new Confirm here
-		 * (if not twos complement)
-		 *
-		 * Note that -x would create a new node, so we could
-		 * not run it in the equivalent_node() context.
-		 */
-		n = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
-
-		DBG_OPT_CONFIRM(oldn, n);
-		return n;
-	case value_classified_positive:
-		/* n is positive, Abs is not needed */
-		n = a;
-
-		DBG_OPT_CONFIRM(oldn, n);
-		return n;
-	default:
-		/* sign unknown: fall through to the Abs(-x) pattern below */
-		break;
-	}
-	if (is_Minus(a)) {
-		/* Abs(-x) = Abs(x): drop the Minus, it cannot change the result */
-		mode = get_irn_mode(n);
-		n = new_rd_Abs(get_irn_dbg_info(n), get_nodes_block(n), get_Minus_op(a), mode);
-		DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ABS_MINUS_X);
-		return n;
-	}
-	return n;
-} /* transform_node_Abs */
-
-/**
- * Optimize -a CMP -b into b CMP a.
- * This works only for for modes where unary Minus
- * cannot Overflow.
- * Note that two-complement integers can Overflow
- * so it will NOT work.
- *
- * For == and != can be handled in Proj(Cmp)
- */
-static ir_node *transform_node_Cmp(ir_node *n)
-{
- ir_node *oldn = n;
- ir_node *left = get_Cmp_left(n);
- ir_node *right = get_Cmp_right(n);
-
- if (is_Minus(left) && is_Minus(right) &&
- !mode_overflow_on_unary_Minus(get_irn_mode(left))) {
- ir_node *const new_left = get_Minus_op(right);
- ir_node *const new_right = get_Minus_op(left);
- n = new_rd_Cmp(get_irn_dbg_info(n), get_nodes_block(n), new_left, new_right);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CMP_OP_OP);