X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=bab927fd6c12e65b8b9a723e072aed71c94617bc;hb=0652d44cff2021c908e4b76572e59469c9057598;hp=93d994ed85994da27615f1eca50496af5ee2fac9;hpb=eb4f867c4cf775def5947ecc6c150e30d7c36b6e;p=libfirm

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 93d994ed8..bab927fd6 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -953,40 +953,23 @@ static ir_node *equivalent_node_left_zero(ir_node *n) {
  */
 static ir_node *equivalent_node_Sub(ir_node *n) {
 	ir_node *oldn = n;
-	ir_node *a, *b;
+	ir_node *b;
 	ir_mode *mode = get_irn_mode(n);
 
 	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
 	if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
 		return n;
 
-	a = get_Sub_left(n);
 	b = get_Sub_right(n);
 
 	/* Beware: modes might be different */
 	if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
+		ir_node *a = get_Sub_left(n);
 		if (mode == get_irn_mode(a)) {
 			n = a;
 
 			DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
 		}
-	} else if (get_irn_op(a) == op_Add) {
-		if (mode_wrap_around(mode)) {
-			ir_node *left  = get_Add_left(a);
-			ir_node *right = get_Add_right(a);
-
-			if (left == b) {
-				if (mode == get_irn_mode(right)) {
-					n = right;
-					DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
-				}
-			} else if (right == b) {
-				if (mode == get_irn_mode(left)) {
-					n = left;
-					DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
-				}
-			}
-		}
 	}
 	return n;
 }  /* equivalent_node_Sub */
@@ -1049,7 +1032,7 @@ static ir_node *equivalent_node_Div(ir_node *n) {
 	if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
 		/* Turn Div into a tuple (mem, bad, a) */
 		ir_node *mem = get_Div_mem(n);
-		ir_node *blk = get_nodes_block(n);
+		ir_node *blk = get_irn_n(n, -1);
 		turn_into_tuple(n, pn_Div_max);
 		set_Tuple_pred(n, pn_Div_M, mem);
 		set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
@@ -1070,7 +1053,7 @@ static ir_node *equivalent_node_Quot(ir_node *n) {
 	if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
 		/* Turn Quot into a tuple (mem, jmp, bad, a) */
 		ir_node *mem = get_Quot_mem(n);
-		ir_node *blk = get_nodes_block(n);
+		ir_node *blk = get_irn_n(n, -1);
 		turn_into_tuple(n, pn_Quot_max);
 		set_Tuple_pred(n, pn_Quot_M, mem);
 		set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
@@ -1091,7 +1074,7 @@ static ir_node *equivalent_node_DivMod(ir_node *n) {
 		/* Turn DivMod into a tuple (mem, jmp, bad, a, 0) */
 		ir_node *a = get_DivMod_left(n);
 		ir_node *mem = get_Div_mem(n);
-		ir_node *blk = get_nodes_block(n);
+		ir_node *blk = get_irn_n(n, -1);
 		ir_mode *mode = get_DivMod_resmode(n);
 
 		turn_into_tuple(n, pn_DivMod_max);
@@ -2031,16 +2014,61 @@ static ir_node *transform_node_Sub(ir_node *n) {
 	a = get_Sub_left(n);
 	b = get_Sub_right(n);
 
-	HANDLE_BINOP_PHI(tarval_sub, a,b,c);
-
 	mode = get_irn_mode(n);
 
+restart:
+	HANDLE_BINOP_PHI(tarval_sub, a,b,c);
+
 	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
 	if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
 		return n;
 
+	if (is_Add(a)) {
+		if (mode_wrap_around(mode)) {
+			ir_node *left  = get_Add_left(a);
+			ir_node *right = get_Add_right(a);
+
+			/* FIXME: Does the Conv work only for two's complement, or generally? */
+			if (left == b) {
+				if (mode != get_irn_mode(right)) {
+					/* This Sub is an effective Cast */
+					right = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), right, mode);
+				}
+				n = right;
+				DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+			} else if (right == b) {
+				if (mode != get_irn_mode(left)) {
+					/* This Sub is an effective Cast */
+					left = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), left, mode);
+				}
+				n = left;
+				DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+			}
+		}
+	} else if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
+		ir_mode *mode = get_irn_mode(a);
+
+		if (mode == get_irn_mode(b)) {
+			ir_mode *ma, *mb;
+
+			a = get_Conv_op(a);
+			b = get_Conv_op(b);
+
+			/* check if it's allowed to skip the conv */
+			ma = get_irn_mode(a);
+			mb = get_irn_mode(b);
+
+			if (mode_is_reference(ma) && mode_is_reference(mb)) {
+				/* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
+				set_Sub_left(n, a);
+				set_Sub_right(n, b);
+
+				goto restart;
+			}
+		}
+	}
 	/* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
-	if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
+	else if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
 		n = new_rd_Minus(
 				get_irn_dbg_info(n),
 				current_ir_graph,
@@ -2114,7 +2142,6 @@ static ir_node *transform_node_Sub(ir_node *n) {
 		set_Sub_right(n, add);
 		DBG_OPT_ALGSIM0(n, n, FS_OPT_SUB_SUB_X_Y_Z);
 	}
-
 	return n;
 }  /* transform_node_Sub */
@@ -2167,7 +2194,7 @@ static ir_node *transform_node_Div(ir_node *n) {
 	if (value != n) {
 		/* Turn Div into a tuple (mem, jmp, bad, value) */
 		ir_node *mem = get_Div_mem(n);
-		ir_node *blk = get_nodes_block(n);
+		ir_node *blk = get_irn_n(n, -1);
 
 		turn_into_tuple(n, pn_Div_max);
 		set_Tuple_pred(n, pn_Div_M, mem);
@@ -2197,7 +2224,7 @@ static ir_node *transform_node_Mod(ir_node *n) {
 	if (value != n) {
 		/* Turn Mod into a tuple (mem, jmp, bad, value) */
 		ir_node *mem = get_Mod_mem(n);
-		ir_node *blk = get_nodes_block(n);
+		ir_node *blk = get_irn_n(n, -1);
 
 		turn_into_tuple(n, pn_Mod_max);
 		set_Tuple_pred(n, pn_Mod_M, mem);
@@ -2256,7 +2283,7 @@ static ir_node *transform_node_DivMod(ir_node *n) {
 	if (evaluated) { /* replace by tuple */
 		ir_node *mem = get_DivMod_mem(n);
-		ir_node *blk = get_nodes_block(n);
+		ir_node *blk = get_irn_n(n, -1);
 
 		turn_into_tuple(n, pn_DivMod_max);
 		set_Tuple_pred(n, pn_DivMod_M, mem);
 		set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
@@ -3559,7 +3586,7 @@ static int node_cmp_attr_ASM(ir_node *a, ir_node *b) {
 	const ir_asm_constraint *cb;
 	ident **cla, **clb;
 
-	if (get_ASM_text(a) != get_ASM_text(b));
+	if (get_ASM_text(a) != get_ASM_text(b))
 		return 1;
 
 	/* Should we really check the constraints here? Should be better, but is strange. */
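
Note on the FIXME in the transform_node_Sub hunk above: the add/sub cancellation Sub(Add(x, y), y) -> x needs only modular (wrap-around) arithmetic, which is what the mode_wrap_around(mode) guard asserts; whether the inserted Conv is equally safe for every mode pair is the open question the FIXME records. A minimal standalone C sketch of the modular identity (illustration only, not part of the patch; it uses plain unsigned arithmetic rather than the Firm API):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* In C, unsigned arithmetic wraps modulo 2^N, so (x + y) - y == x holds
 * for every unsigned x and y, even when the intermediate sum x + y
 * overflows.  This is the algebraic fact that makes folding
 * Sub(Add(x, y), y) to x exact in any wrap-around mode. */
int main(void) {
	unsigned x = UINT_MAX - 1; /* near the top of the value range */
	unsigned y = 42u;
	unsigned sum = x + y;      /* wraps around: sum is small, sum < x */

	assert(sum - y == x);      /* subtracting y still recovers x exactly */
	printf("(%u + %u) - %u == %u\n", x, y, y, sum - y);
	return 0;
}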