*/
/**
 * Optimize Sub(a, 0) into a (when the modes agree).
 *
 * Returns the equivalent node for n, i.e. the operand itself when the
 * subtraction of a zero right operand is a no-op, otherwise n unchanged.
 *
 * For floating-point modes this is only done when strict algebraic
 * semantics are disabled for the graph (x - 0.0 is not an identity for
 * all IEEE values, e.g. -0.0).
 *
 * Note: the Add(x, b) - b simplification that used to live here is now
 * handled in transform_node_Sub, since it may need to insert a Conv.
 */
static ir_node *equivalent_node_Sub(ir_node *n) {
	ir_node *oldn = n;
	ir_node *b;
	ir_mode *mode = get_irn_mode(n);

	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
	if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
		return n;

	b = get_Sub_right(n);

	/* Beware: modes might be different */
	if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
		ir_node *a = get_Sub_left(n);
		/* only fold when the Sub is not an implicit mode conversion */
		if (mode == get_irn_mode(a)) {
			n = a;
			DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
		}
	}
	return n;
}  /* equivalent_node_Sub */
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
/* Turn Quot into a tuple (mem, jmp, bad, a) */
ir_node *mem = get_Quot_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
/* Turn DivMod into a tuple (mem, jmp, bad, a, 0) */
ir_node *a = get_DivMod_left(n);
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
ir_mode *mode = get_DivMod_resmode(n);
turn_into_tuple(n, pn_DivMod_max);
a = get_Sub_left(n);
b = get_Sub_right(n);
- HANDLE_BINOP_PHI(tarval_sub, a,b,c);
-
mode = get_irn_mode(n);
+restart:
+ HANDLE_BINOP_PHI(tarval_sub, a,b,c);
+
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
return n;
+ if (is_Add(a)) {
+ if (mode_wrap_around(mode)) {
+ ir_node *left = get_Add_left(a);
+ ir_node *right = get_Add_right(a);
+
+ /* FIXME: Does the Conv work only for two's complement or generally? */
+ if (left == b) {
+ if (mode != get_irn_mode(right)) {
+ /* This Sub is an effective Cast */
+ right = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), right, mode);
+ }
+ n = right;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ } else if (right == b) {
+ if (mode != get_irn_mode(left)) {
+ /* This Sub is an effective Cast */
+ left = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), left, mode);
+ }
+ n = left;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ }
+ }
+ } else if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
+ ir_mode *mode = get_irn_mode(a);
+
+ if (mode == get_irn_mode(b)) {
+ ir_mode *ma, *mb;
+
+ a = get_Conv_op(a);
+ b = get_Conv_op(b);
+
+ /* check if it's allowed to skip the conv */
+ ma = get_irn_mode(a);
+ mb = get_irn_mode(b);
+
+ if (mode_is_reference(ma) && mode_is_reference(mb)) {
+ /* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
+ set_Sub_left(n, a);
+ set_Sub_right(n, b);
+
+ goto restart;
+ }
+ }
+ }
/* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
- if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
+ else if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
n = new_rd_Minus(
get_irn_dbg_info(n),
current_ir_graph,
set_Sub_right(n, add);
DBG_OPT_ALGSIM0(n, n, FS_OPT_SUB_SUB_X_Y_Z);
}
-
return n;
} /* transform_node_Sub */
if (value != n) {
/* Turn Div into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
if (value != n) {
/* Turn Mod into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Mod_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
const ir_asm_constraint *cb;
ident **cla, **clb;
- if (get_ASM_text(a) != get_ASM_text(b));
+ if (get_ASM_text(a) != get_ASM_text(b))
return 1;
/* Should we really check the constraints here? Should be better, but is strange. */