X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=d7eaaa1e7dca1db77bcb485eafdffb75666a327f;hb=85f517eca982e6a4e1d1848eb67634ae33b70de9;hp=5b3de32e7c1b597dbd2e9226fcb3512af97bbf8c;hpb=a28a95092a50a01896568c2d09484a6cc97509eb;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index 5b3de32e7..d7eaaa1e7 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -43,6 +43,7 @@ #include "irprintf.h" #include "debug.h" #include "irdom.h" +#include "iropt.h" #include "error.h" #include "array_t.h" #include "heights.h" @@ -62,7 +63,6 @@ #include "ia32_new_nodes.h" #include "ia32_dbg_stat.h" #include "ia32_optimize.h" -#include "ia32_util.h" #include "ia32_address_mode.h" #include "ia32_architecture.h" @@ -90,6 +90,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) +static ir_node *old_initial_fpcw = NULL; static ir_node *initial_fpcw = NULL; int ia32_no_pic_adjust; @@ -224,7 +225,6 @@ static ir_node *gen_Const(ir_node *node) if (mode_is_float(mode)) { ir_node *res = NULL; ir_node *load; - ir_node *base; ir_entity *floatent; if (ia32_cg_config.use_sse2) { @@ -259,6 +259,7 @@ static ir_node *gen_Const(ir_node *node) set_ia32_ls_mode(load, mode); res = load; } else { + ir_node *base; #ifdef CONSTRUCT_SSE_CONST if (mode == mode_D) { unsigned val = get_tarval_sub_bits(tv, 0) | @@ -324,8 +325,6 @@ static ir_node *gen_Const(ir_node *node) end: #endif /* CONSTRUCT_SSE_CONST */ SET_IA32_ORIG_NODE(load, node); - - be_dep_on_frame(load); return res; } else { /* non-float mode */ ir_node *cnst; @@ -343,7 +342,6 @@ end: cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, val); SET_IA32_ORIG_NODE(cnst, node); - be_dep_on_frame(cnst); return cnst; } } @@ -373,12 +371,18 @@ static ir_node *gen_SymConst(ir_node *node) panic("backend only support symconst_addr_ent (at %+F)", node); } entity = get_SymConst_entity(node); - cnst = new_bd_ia32_Const(dbgi, block, entity, 0, 0, 0); + if (get_entity_owner(entity) == get_tls_type()) { + ir_node *tls_base = new_bd_ia32_LdTls(NULL, block); + ir_node *lea = new_bd_ia32_Lea(dbgi, block, tls_base, noreg_GP); + set_ia32_am_sc(lea, entity); + cnst = lea; + } else { + cnst = new_bd_ia32_Const(dbgi, block, entity, 0, 0, 0); + } } SET_IA32_ORIG_NODE(cnst, node); - be_dep_on_frame(cnst); return cnst; } @@ -640,6 +644,7 @@ static void build_address(ia32_address_mode_t *am, ir_node *node, addr->index = noreg_GP; addr->mem = nomem; addr->symconst_ent = entity; + addr->tls_segment = false; addr->use_frame = 1; am->ls_mode = get_type_mode(get_entity_type(entity)); am->pinned = op_pin_state_floats; @@ -668,6 +673,7 @@ static void set_address(ir_node *node, const ia32_address_t *addr) set_ia32_am_scale(node, addr->scale); set_ia32_am_sc(node, addr->symconst_ent); set_ia32_am_offs_int(node, addr->offset); + set_ia32_am_tls_segment(node, addr->tls_segment); if (addr->symconst_sign) set_ia32_am_sc_sign(node); if (addr->use_frame) @@ -875,7 +881,6 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, } am->op_type = ia32_AddrModeS; } else { - ir_mode *mode; am->op_type = ia32_Normal; if (flags & match_try_am) { @@ -1033,14 +1038,10 @@ static ir_node *gen_binop_flags(ir_node *node, construct_binop_flags_func *func, static ir_node *get_fpcw(void) { - ir_node *fpcw; if (initial_fpcw != NULL) return initial_fpcw; - fpcw = be_abi_get_ignore_irn(be_get_irg_abi(current_ir_graph), - &ia32_registers[REG_FPCW]); - initial_fpcw = be_transform_node(fpcw); - + initial_fpcw = 
be_transform_node(old_initial_fpcw); return initial_fpcw; } @@ -1144,8 +1145,12 @@ static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2, /* lowered shift instruction may have a dependency operand, handle it here */ if (get_irn_arity(node) == 3) { /* we have a dependency */ - ir_node *new_dep = be_transform_node(get_irn_n(node, 2)); - add_irn_dep(new_node, new_dep); + ir_node* dep = get_irn_n(node, 2); + if (get_irn_n_edges(dep) > 1) { + /* ... which has at least one user other than 'node' */ + ir_node *new_dep = be_transform_node(dep); + add_irn_dep(new_node, new_dep); + } } return new_node; @@ -1184,7 +1189,9 @@ static ir_node *gen_unop(ir_node *node, ir_node *op, construct_unop_func *func, static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block, ia32_address_t *addr) { - ir_node *base, *index, *res; + ir_node *base; + ir_node *idx; + ir_node *res; base = addr->base; if (base == NULL) { @@ -1193,14 +1200,26 @@ static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block, base = be_transform_node(base); } - index = addr->index; - if (index == NULL) { - index = noreg_GP; + idx = addr->index; + if (idx == NULL) { + idx = noreg_GP; } else { - index = be_transform_node(index); + idx = be_transform_node(idx); + } + + /* segment overrides are ineffective for Leas :-( so we have to patch + * around... */ + if (addr->tls_segment) { + ir_node *tls_base = new_bd_ia32_LdTls(NULL, block); + assert(addr->symconst_ent != NULL); + if (base == noreg_GP) + base = tls_base; + else + base = new_bd_ia32_Lea(dbgi, block, tls_base, base); + addr->tls_segment = false; } - res = new_bd_ia32_Lea(dbgi, block, base, index); + res = new_bd_ia32_Lea(dbgi, block, base, idx); set_address(res, addr); return res; @@ -1263,7 +1282,6 @@ static ir_node *gen_Add(ir_node *node) if (addr.base == NULL && addr.index == NULL) { new_node = new_bd_ia32_Const(dbgi, new_block, addr.symconst_ent, addr.symconst_sign, 0, addr.offset); - be_dep_on_frame(new_node); SET_IA32_ORIG_NODE(new_node, node); return new_node; } @@ -1402,7 +1420,116 @@ static ir_node *gen_And(ir_node *node) match_commutative | match_mode_neutral | match_am | match_immediate); } +/** + * test wether 2 values result in 'x' and '32-x' when interpreted as a shift + * value. + */ +static bool is_complementary_shifts(ir_node *value1, ir_node *value2) +{ + if (is_Const(value1) && is_Const(value2)) { + ir_tarval *tv1 = get_Const_tarval(value1); + ir_tarval *tv2 = get_Const_tarval(value2); + if (tarval_is_long(tv1) && tarval_is_long(tv2)) { + long v1 = get_tarval_long(tv1); + long v2 = get_tarval_long(tv2); + return v1 <= v2 && v2 == 32-v1; + } + } + return false; +} + +typedef ir_node* (*new_shiftd_func)(dbg_info *dbgi, ir_node *block, + ir_node *high, ir_node *low, + ir_node *count); + +/** + * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs: + * op1 - target to be shifted + * op2 - contains bits to be shifted into target + * op3 - shift count + * Only op3 can be an immediate. 
+ */ +static ir_node *gen_64bit_shifts(dbg_info *dbgi, ir_node *block, + ir_node *high, ir_node *low, ir_node *count, + new_shiftd_func func) +{ + ir_node *new_block = be_transform_node(block); + ir_node *new_high = be_transform_node(high); + ir_node *new_low = be_transform_node(low); + ir_node *new_count; + ir_node *new_node; + + /* the shift amount can be any mode that is bigger than 5 bits, since all + * other bits are ignored anyway */ + while (is_Conv(count) && + get_irn_n_edges(count) == 1 && + mode_is_int(get_irn_mode(count))) { + assert(get_mode_size_bits(get_irn_mode(count)) >= 5); + count = get_Conv_op(count); + } + new_count = create_immediate_or_transform(count, 0); + + new_node = func(dbgi, new_block, new_high, new_low, new_count); + return new_node; +} + +static ir_node *match_64bit_shift(ir_node *node) +{ + ir_node *op1 = get_Or_left(node); + ir_node *op2 = get_Or_right(node); + if (is_Shr(op1)) { + ir_node *tmp = op1; + op1 = op2; + op2 = tmp; + } + + /* match ShlD operation */ + if (is_Shl(op1) && is_Shr(op2)) { + ir_node *shl_right = get_Shl_right(op1); + ir_node *shl_left = get_Shl_left(op1); + ir_node *shr_right = get_Shr_right(op2); + ir_node *shr_left = get_Shr_left(op2); + /* constant ShlD operation */ + if (is_complementary_shifts(shl_right, shr_right)) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + return gen_64bit_shifts(dbgi, block, shl_left, shr_left, shl_right, + new_bd_ia32_ShlD); + } + /* constant ShrD operation */ + if (is_complementary_shifts(shr_right, shl_right)) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + return gen_64bit_shifts(dbgi, block, shr_left, shl_left, shr_right, + new_bd_ia32_ShrD); + } + /* lower_dw produces the following for ShlD: + * Or(Shr(Shr(high,1),Not(c)),Shl(low,c)) */ + if (is_Shr(shr_left) && is_Not(shr_right) + && is_Const_1(get_Shr_right(shr_left)) + && get_Not_op(shr_right) == shl_right) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *val_h = get_Shr_left(shr_left); + return gen_64bit_shifts(dbgi, block, shl_left, val_h, shl_right, + new_bd_ia32_ShlD); + } + /* lower_dw produces the following for ShrD: + * Or(Shl(Shl(high,1),Not(c)), Shr(low,c)) */ + if (is_Shl(shl_left) && is_Not(shl_right) + && is_Const_1(get_Shl_right(shl_left)) + && get_Not_op(shl_right) == shr_right) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *val_h = get_Shl_left(shl_left); + return gen_64bit_shifts(dbgi, block, shr_left, val_h, shr_right, + new_bd_ia32_ShrD); + } + } + + return NULL; +} /** * Creates an ia32 Or. @@ -1413,6 +1540,11 @@ static ir_node *gen_Or(ir_node *node) { ir_node *op1 = get_Or_left(node); ir_node *op2 = get_Or_right(node); + ir_node *res; + + res = match_64bit_shift(node); + if (res != NULL) + return res; assert (! mode_is_float(get_irn_mode(node))); return gen_binop(node, op1, op2, new_bd_ia32_Or, match_commutative @@ -1498,8 +1630,13 @@ static ir_node *transform_AM_mem(ir_node *const block, ins[n++] = be_transform_node(pred); } - ins[n++] = am_mem; + if (n==1 && ins[0] == am_mem) { + return am_mem; + /* creating a new Sync and relying on CSE may fail, + * if am_mem is a ProjM, which does not yet verify. 
*/ + } + ins[n++] = am_mem; return new_r_Sync(block, n, ins); } else { ir_node *ins[2]; @@ -1526,7 +1663,6 @@ static ir_node *create_sex_32_64(dbg_info *dbgi, ir_node *block, (void)orig; if (ia32_cg_config.use_short_sex_eax) { ir_node *pval = new_bd_ia32_ProduceVal(dbgi, block); - be_dep_on_frame(pval); res = new_bd_ia32_Cltd(dbgi, block, val, pval); } else { ir_node *imm31 = ia32_create_Immediate(NULL, 0, 31); @@ -1542,9 +1678,10 @@ static ir_node *create_sex_32_64(dbg_info *dbgi, ir_node *block, */ static ir_node *create_Div(ir_node *node) { - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + int throws_exception = ir_throws_exception(node); ir_node *mem; ir_node *new_mem; ir_node *op1; @@ -1586,12 +1723,12 @@ static ir_node *create_Div(ir_node *node) addr->index, new_mem, am.new_op2, am.new_op1, sign_extension); } else { sign_extension = new_bd_ia32_Const(dbgi, new_block, NULL, 0, 0, 0); - be_dep_on_frame(sign_extension); new_node = new_bd_ia32_Div(dbgi, new_block, addr->base, addr->index, new_mem, am.new_op2, am.new_op1, sign_extension); } + ir_set_throws_exception(new_node, throws_exception); set_irn_pinned(new_node, get_irn_pinned(node)); @@ -1826,68 +1963,42 @@ static ir_node *gen_Not(ir_node *node) return gen_unop(node, op, new_bd_ia32_Not, match_mode_neutral); } -static ir_node *create_abs(dbg_info *dbgi, ir_node *block, ir_node *op, - bool negate, ir_node *node) +static ir_node *create_float_abs(dbg_info *dbgi, ir_node *block, ir_node *op, + bool negate, ir_node *node) { ir_node *new_block = be_transform_node(block); ir_mode *mode = get_irn_mode(op); - ir_node *new_op; + ir_node *new_op = be_transform_node(op); ir_node *new_node; int size; ir_entity *ent; - if (mode_is_float(mode)) { - new_op = be_transform_node(op); + assert(mode_is_float(mode)); - if (ia32_cg_config.use_sse2) { - ir_node *noreg_fp = ia32_new_NoReg_xmm(current_ir_graph); - new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(), - noreg_GP, nomem, new_op, noreg_fp); + if (ia32_cg_config.use_sse2) { + ir_node *noreg_fp = ia32_new_NoReg_xmm(current_ir_graph); + new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(), + noreg_GP, nomem, new_op, noreg_fp); - size = get_mode_size_bits(mode); - ent = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS); + size = get_mode_size_bits(mode); + ent = ia32_gen_fp_known_const(size == 32 ? 
ia32_SABS : ia32_DABS); - set_ia32_am_sc(new_node, ent); + set_ia32_am_sc(new_node, ent); - SET_IA32_ORIG_NODE(new_node, node); + SET_IA32_ORIG_NODE(new_node, node); - set_ia32_op_type(new_node, ia32_AddrModeS); - set_ia32_ls_mode(new_node, mode); + set_ia32_op_type(new_node, ia32_AddrModeS); + set_ia32_ls_mode(new_node, mode); - /* TODO, implement -Abs case */ - assert(!negate); - } else { - new_node = new_bd_ia32_vfabs(dbgi, new_block, new_op); - SET_IA32_ORIG_NODE(new_node, node); - if (negate) { - new_node = new_bd_ia32_vfchs(dbgi, new_block, new_node); - SET_IA32_ORIG_NODE(new_node, node); - } - } + /* TODO, implement -Abs case */ + assert(!negate); } else { - ir_node *xorn; - ir_node *sign_extension; - - if (get_mode_size_bits(mode) == 32) { - new_op = be_transform_node(op); - } else { - new_op = create_I2I_Conv(mode, mode_Is, dbgi, block, op, node); - } - - sign_extension = create_sex_32_64(dbgi, new_block, new_op, node); - - xorn = new_bd_ia32_Xor(dbgi, new_block, noreg_GP, noreg_GP, - nomem, new_op, sign_extension); - SET_IA32_ORIG_NODE(xorn, node); - + new_node = new_bd_ia32_vfabs(dbgi, new_block, new_op); + SET_IA32_ORIG_NODE(new_node, node); if (negate) { - new_node = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, - nomem, sign_extension, xorn); - } else { - new_node = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, - nomem, xorn, sign_extension); + new_node = new_bd_ia32_vfchs(dbgi, new_block, new_node); + SET_IA32_ORIG_NODE(new_node, node); } - SET_IA32_ORIG_NODE(new_node, node); } return new_node; @@ -1907,73 +2018,75 @@ static ir_node *gen_bt(ir_node *cmp, ir_node *x, ir_node *n) return new_bd_ia32_Bt(dbgi, new_block, op1, op2); } -static ia32_condition_code_t pnc_to_condition_code(pn_Cmp pnc, ir_mode *mode) +static ia32_condition_code_t relation_to_condition_code(ir_relation relation, + ir_mode *mode) { if (mode_is_float(mode)) { - switch (pnc) { - case pn_Cmp_Eq: return ia32_cc_float_equal; - case pn_Cmp_Lt: return ia32_cc_float_below; - case pn_Cmp_Le: return ia32_cc_float_below_equal; - case pn_Cmp_Gt: return ia32_cc_float_above; - case pn_Cmp_Ge: return ia32_cc_float_above_equal; - case pn_Cmp_Lg: return ia32_cc_not_equal; - case pn_Cmp_Leg: return ia32_cc_not_parity; - case pn_Cmp_Uo: return ia32_cc_parity; - case pn_Cmp_Ue: return ia32_cc_equal; - case pn_Cmp_Ul: return ia32_cc_float_unordered_below; - case pn_Cmp_Ule: return ia32_cc_float_unordered_below_equal; - case pn_Cmp_Ug: return ia32_cc_float_unordered_above; - case pn_Cmp_Uge: return ia32_cc_float_unordered_above_equal; - case pn_Cmp_Ne: return ia32_cc_float_not_equal; - case pn_Cmp_False: - case pn_Cmp_True: - case pn_Cmp_max: + switch (relation) { + case ir_relation_equal: return ia32_cc_float_equal; + case ir_relation_less: return ia32_cc_float_below; + case ir_relation_less_equal: return ia32_cc_float_below_equal; + case ir_relation_greater: return ia32_cc_float_above; + case ir_relation_greater_equal: return ia32_cc_float_above_equal; + case ir_relation_less_greater: return ia32_cc_not_equal; + case ir_relation_less_equal_greater: return ia32_cc_not_parity; + case ir_relation_unordered: return ia32_cc_parity; + case ir_relation_unordered_equal: return ia32_cc_equal; + case ir_relation_unordered_less: return ia32_cc_float_unordered_below; + case ir_relation_unordered_less_equal: + return ia32_cc_float_unordered_below_equal; + case ir_relation_unordered_greater: + return ia32_cc_float_unordered_above; + case ir_relation_unordered_greater_equal: + return ia32_cc_float_unordered_above_equal; + 
case ir_relation_unordered_less_greater: + return ia32_cc_float_not_equal; + case ir_relation_false: + case ir_relation_true: /* should we introduce a jump always/jump never? */ break; } panic("Unexpected float pnc"); } else if (mode_is_signed(mode)) { - switch (pnc) { - case pn_Cmp_Ue: - case pn_Cmp_Eq: return ia32_cc_equal; - case pn_Cmp_Ul: - case pn_Cmp_Lt: return ia32_cc_less; - case pn_Cmp_Ule: - case pn_Cmp_Le: return ia32_cc_less_equal; - case pn_Cmp_Ug: - case pn_Cmp_Gt: return ia32_cc_greater; - case pn_Cmp_Uge: - case pn_Cmp_Ge: return ia32_cc_greater_equal; - case pn_Cmp_Lg: - case pn_Cmp_Ne: return ia32_cc_not_equal; - case pn_Cmp_Leg: - case pn_Cmp_Uo: - case pn_Cmp_False: - case pn_Cmp_True: - case pn_Cmp_max: + switch (relation) { + case ir_relation_unordered_equal: + case ir_relation_equal: return ia32_cc_equal; + case ir_relation_unordered_less: + case ir_relation_less: return ia32_cc_less; + case ir_relation_unordered_less_equal: + case ir_relation_less_equal: return ia32_cc_less_equal; + case ir_relation_unordered_greater: + case ir_relation_greater: return ia32_cc_greater; + case ir_relation_unordered_greater_equal: + case ir_relation_greater_equal: return ia32_cc_greater_equal; + case ir_relation_unordered_less_greater: + case ir_relation_less_greater: return ia32_cc_not_equal; + case ir_relation_less_equal_greater: + case ir_relation_unordered: + case ir_relation_false: + case ir_relation_true: /* introduce jump always/jump never? */ break; } panic("Unexpected pnc"); } else { - switch (pnc) { - case pn_Cmp_Ue: - case pn_Cmp_Eq: return ia32_cc_equal; - case pn_Cmp_Ul: - case pn_Cmp_Lt: return ia32_cc_below; - case pn_Cmp_Ule: - case pn_Cmp_Le: return ia32_cc_below_equal; - case pn_Cmp_Ug: - case pn_Cmp_Gt: return ia32_cc_above; - case pn_Cmp_Uge: - case pn_Cmp_Ge: return ia32_cc_above_equal; - case pn_Cmp_Lg: - case pn_Cmp_Ne: return ia32_cc_not_equal; - case pn_Cmp_Leg: - case pn_Cmp_Uo: - case pn_Cmp_False: - case pn_Cmp_True: - case pn_Cmp_max: + switch (relation) { + case ir_relation_unordered_equal: + case ir_relation_equal: return ia32_cc_equal; + case ir_relation_unordered_less: + case ir_relation_less: return ia32_cc_below; + case ir_relation_unordered_less_equal: + case ir_relation_less_equal: return ia32_cc_below_equal; + case ir_relation_unordered_greater: + case ir_relation_greater: return ia32_cc_above; + case ir_relation_unordered_greater_equal: + case ir_relation_greater_equal: return ia32_cc_above_equal; + case ir_relation_unordered_less_greater: + case ir_relation_less_greater: return ia32_cc_not_equal; + case ir_relation_less_equal_greater: + case ir_relation_unordered: + case ir_relation_false: + case ir_relation_true: /* introduce jump always/jump never? 
*/ break; } @@ -1992,48 +2105,55 @@ static ir_node *get_flags_mode_b(ir_node *node, ia32_condition_code_t *cc_out) return flags; } -static ir_node *get_flags_node_cmp(ir_node *node, ia32_condition_code_t *cc_out) +static ir_node *get_flags_node_cmp(ir_node *cmp, ia32_condition_code_t *cc_out) { - /* must have a Proj(Cmp) as input */ - ir_node *cmp = get_Proj_pred(node); - int pnc = get_Proj_pn_cmp(node); - ir_node *l = get_Cmp_left(cmp); - ir_mode *mode = get_irn_mode(l); - ir_node *flags; + /* must have a Cmp as input */ + ir_relation relation = get_Cmp_relation(cmp); + ir_relation possible; + ir_node *l = get_Cmp_left(cmp); + ir_node *r = get_Cmp_right(cmp); + ir_mode *mode = get_irn_mode(l); + ir_node *flags; /* check for bit-test */ - if (ia32_cg_config.use_bt - && (pnc == pn_Cmp_Lg || pnc == pn_Cmp_Eq || pnc == pn_Cmp_Ne - || pnc == pn_Cmp_Ue)) { - ir_node *l = get_Cmp_left(cmp); - ir_node *r = get_Cmp_right(cmp); - if (is_And(l)) { - ir_node *la = get_And_left(l); - ir_node *ra = get_And_right(l); - if (is_Shl(ra)) { - ir_node *tmp = la; - la = ra; - ra = tmp; - } - if (is_Shl(la)) { - ir_node *c = get_Shl_left(la); - if (is_Const_1(c) && is_Const_0(r)) { - /* (1 << n) & ra) */ - ir_node *n = get_Shl_right(la); - flags = gen_bt(cmp, ra, n); - /* the bit is copied into the CF flag */ - if (pnc & pn_Cmp_Eq) - *cc_out = ia32_cc_below; /* ==0, so we test for CF=1 */ - else - *cc_out = ia32_cc_above_equal; /* test for CF=0 */ - return flags; - } + if (ia32_cg_config.use_bt && (relation == ir_relation_equal + || (mode_is_signed(mode) && relation == ir_relation_less_greater) + || (!mode_is_signed(mode) && ((relation & ir_relation_greater_equal) == ir_relation_greater))) + && is_And(l)) { + ir_node *la = get_And_left(l); + ir_node *ra = get_And_right(l); + if (is_Shl(ra)) { + ir_node *tmp = la; + la = ra; + ra = tmp; + } + if (is_Shl(la)) { + ir_node *c = get_Shl_left(la); + if (is_Const_1(c) && is_Const_0(r)) { + /* (1 << n) & ra) */ + ir_node *n = get_Shl_right(la); + flags = gen_bt(cmp, ra, n); + /* the bit is copied into the CF flag */ + if (relation & ir_relation_equal) + *cc_out = ia32_cc_above_equal; /* test for CF=0 */ + else + *cc_out = ia32_cc_below; /* test for CF=1 */ + return flags; } } } + /* the middle-end tries to eliminate impossible relations, so a ptr != 0 + * test becomes ptr > 0. But for x86 an equal comparison is preferable to + * a >0 (we can sometimes eliminate the cmp in favor of flags produced by + * a predecessor node). 
So add the < bit */ + possible = ir_get_possible_cmp_relations(l, r); + if (((relation & ir_relation_less) && !(possible & ir_relation_greater)) + || ((relation & ir_relation_greater) && !(possible & ir_relation_less))) + relation |= ir_relation_less_greater; + /* just do a normal transformation of the Cmp */ - *cc_out = pnc_to_condition_code(pnc, mode); + *cc_out = relation_to_condition_code(relation, mode); flags = be_transform_node(cmp); return flags; } @@ -2046,7 +2166,7 @@ static ir_node *get_flags_node_cmp(ir_node *node, ia32_condition_code_t *cc_out) */ static ir_node *get_flags_node(ir_node *node, ia32_condition_code_t *cc_out) { - if (is_Proj(node) && is_Cmp(get_Proj_pred(node))) + if (is_Cmp(node)) return get_flags_node_cmp(node, cc_out); assert(get_irn_mode(node) == mode_b); return get_flags_mode_b(node, cc_out); @@ -2060,22 +2180,23 @@ static ir_node *get_flags_node(ir_node *node, ia32_condition_code_t *cc_out) static ir_node *gen_Load(ir_node *node) { ir_node *old_block = get_nodes_block(node); - ir_node *block = be_transform_node(old_block); - ir_node *ptr = get_Load_ptr(node); - ir_node *mem = get_Load_mem(node); - ir_node *new_mem = be_transform_node(mem); + ir_node *block = be_transform_node(old_block); + ir_node *ptr = get_Load_ptr(node); + ir_node *mem = get_Load_mem(node); + ir_node *new_mem = be_transform_node(mem); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_Load_mode(node); + int throws_exception = ir_throws_exception(node); ir_node *base; - ir_node *index; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_Load_mode(node); + ir_node *idx; ir_node *new_node; ia32_address_t addr; /* construct load address */ memset(&addr, 0, sizeof(addr)); ia32_create_address_mode(&addr, ptr, ia32_create_am_normal); - base = addr.base; - index = addr.index; + base = addr.base; + idx = addr.index; if (base == NULL) { base = noreg_GP; @@ -2083,18 +2204,18 @@ static ir_node *gen_Load(ir_node *node) base = be_transform_node(base); } - if (index == NULL) { - index = noreg_GP; + if (idx == NULL) { + idx = noreg_GP; } else { - index = be_transform_node(index); + idx = be_transform_node(idx); } if (mode_is_float(mode)) { if (ia32_cg_config.use_sse2) { - new_node = new_bd_ia32_xLoad(dbgi, block, base, index, new_mem, + new_node = new_bd_ia32_xLoad(dbgi, block, base, idx, new_mem, mode); } else { - new_node = new_bd_ia32_vfld(dbgi, block, base, index, new_mem, + new_node = new_bd_ia32_vfld(dbgi, block, base, idx, new_mem, mode); } } else { @@ -2102,12 +2223,13 @@ static ir_node *gen_Load(ir_node *node) /* create a conv node with address mode for smaller modes */ if (get_mode_size_bits(mode) < 32) { - new_node = new_bd_ia32_Conv_I2I(dbgi, block, base, index, + new_node = new_bd_ia32_Conv_I2I(dbgi, block, base, idx, new_mem, noreg_GP, mode); } else { - new_node = new_bd_ia32_Load(dbgi, block, base, index, new_mem); + new_node = new_bd_ia32_Load(dbgi, block, base, idx, new_mem); } } + ir_set_throws_exception(new_node, throws_exception); set_irn_pinned(new_node, get_irn_pinned(node)); set_ia32_op_type(new_node, ia32_AddrModeS); @@ -2123,7 +2245,6 @@ static ir_node *gen_Load(ir_node *node) SET_IA32_ORIG_NODE(new_node, node); - be_dep_on_frame(new_node); return new_node; } @@ -2218,7 +2339,8 @@ static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node); mem_proj = be_transform_node(am.mem_proj); - be_set_transformed_node(mem_proj ? 
mem_proj : am.mem_proj, new_node); + be_set_transformed_node(am.mem_proj, new_node); + be_set_transformed_node(mem_proj, new_node); return new_node; } @@ -2253,7 +2375,8 @@ static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node); mem_proj = be_transform_node(am.mem_proj); - be_set_transformed_node(mem_proj ? mem_proj : am.mem_proj, new_node); + be_set_transformed_node(am.mem_proj, new_node); + be_set_transformed_node(mem_proj, new_node); return new_node; } @@ -2499,6 +2622,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) dbg_info *dbgi = get_irn_dbg_info(node); int ofs = 0; int i = 0; + int throws_exception = ir_throws_exception(node); ir_node *ins[4]; ia32_address_t addr; @@ -2517,7 +2641,9 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) ir_node *new_node = new_bd_ia32_Store(dbgi, new_block, addr.base, addr.index, addr.mem, imm); + ir_node *new_mem = new_r_Proj(new_node, mode_M, pn_ia32_Store_M); + ir_set_throws_exception(new_node, throws_exception); set_irn_pinned(new_node, get_irn_pinned(node)); set_ia32_op_type(new_node, ia32_AddrModeD); set_ia32_ls_mode(new_node, mode_Iu); @@ -2525,7 +2651,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) SET_IA32_ORIG_NODE(new_node, node); assert(i < 4); - ins[i++] = new_node; + ins[i++] = new_mem; size -= 4; ofs += 4; @@ -2535,18 +2661,16 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) if (i > 1) { return new_rd_Sync(dbgi, new_block, i, ins); } else { - return ins[0]; + return get_Proj_pred(ins[0]); } } /** * Generate a vfist or vfisttp instruction. */ -static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, ir_node *index, - ir_node *mem, ir_node *val, ir_node **fist) +static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, + ir_node *index, ir_node *mem, ir_node *val) { - ir_node *new_node; - if (ia32_cg_config.use_fisttp) { /* Note: fisttp ALWAYS pop the tos. We have to ensure here that the value is copied if other users exists */ @@ -2554,17 +2678,16 @@ static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, ir_node ir_node *value = new_r_Proj(vfisttp, mode_E, pn_ia32_vfisttp_res); be_new_Keep(block, 1, &value); - new_node = new_r_Proj(vfisttp, mode_M, pn_ia32_vfisttp_M); - *fist = vfisttp; + return vfisttp; } else { ir_node *trunc_mode = ia32_new_Fpu_truncate(current_ir_graph); /* do a fist */ - new_node = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode); - *fist = new_node; + ir_node *vfist = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode); + return vfist; } - return new_node; } + /** * Transforms a general (no special case) Store. * @@ -2579,7 +2702,9 @@ static ir_node *gen_general_Store(ir_node *node) ir_node *ptr = get_Store_ptr(node); ir_node *mem = get_Store_mem(node); dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_val, *new_node, *store; + int throws_exception = ir_throws_exception(node); + ir_node *new_val; + ir_node *new_node; ia32_address_t addr; /* check for destination address mode */ @@ -2621,12 +2746,12 @@ static ir_node *gen_general_Store(ir_node *node) new_node = new_bd_ia32_vfst(dbgi, new_block, addr.base, addr.index, addr.mem, new_val, mode); } - store = new_node; } else if (!ia32_cg_config.use_sse2 && is_float_to_int_conv(val)) { val = get_Conv_op(val); /* TODO: is this optimisation still necessary at all (middleend)? 
*/ - /* We can skip ALL float->float up-Convs (and strict-up-Convs) before stores. */ + /* We can skip ALL float->float up-Convs (and strict-up-Convs) before + * stores. */ while (is_Conv(val)) { ir_node *op = get_Conv_op(val); if (!mode_is_float(get_irn_mode(op))) @@ -2636,7 +2761,7 @@ static ir_node *gen_general_Store(ir_node *node) val = op; } new_val = be_transform_node(val); - new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val, &store); + new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val); } else { new_val = create_immediate_or_transform(val, 0); assert(mode != mode_b); @@ -2648,15 +2773,15 @@ static ir_node *gen_general_Store(ir_node *node) new_node = new_bd_ia32_Store(dbgi, new_block, addr.base, addr.index, addr.mem, new_val); } - store = new_node; } + ir_set_throws_exception(new_node, throws_exception); - set_irn_pinned(store, get_irn_pinned(node)); - set_ia32_op_type(store, ia32_AddrModeD); - set_ia32_ls_mode(store, mode); + set_irn_pinned(new_node, get_irn_pinned(node)); + set_ia32_op_type(new_node, ia32_AddrModeD); + set_ia32_ls_mode(new_node, mode); - set_address(store, &addr); - SET_IA32_ORIG_NODE(store, node); + set_address(new_node, &addr); + SET_IA32_ORIG_NODE(new_node, node); return new_node; } @@ -2689,46 +2814,31 @@ static ir_node *gen_Store(ir_node *node) */ static ir_node *create_Switch(ir_node *node) { - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *sel = get_Cond_selector(node); - ir_node *new_sel = be_transform_node(sel); - long switch_min = LONG_MAX; - long switch_max = LONG_MIN; - long default_pn = get_Cond_default_proj(node); - ir_node *new_node; - const ir_edge_t *edge; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *sel = get_Cond_selector(node); + ir_node *new_sel = be_transform_node(sel); + long default_pn = get_Cond_default_proj(node); + ir_node *new_node; + ir_entity *entity; assert(get_mode_size_bits(get_irn_mode(sel)) == 32); - /* determine the smallest switch case value */ - foreach_out_edge(node, edge) { - ir_node *proj = get_edge_src_irn(edge); - long pn = get_Proj_proj(proj); - if (pn == default_pn) - continue; - - if (pn < switch_min) - switch_min = pn; - if (pn > switch_max) - switch_max = pn; - } - - if ((unsigned long) (switch_max - switch_min) > 128000) { - panic("Size of switch %+F bigger than 128000", node); - } - - if (switch_min != 0) { - /* if smallest switch case is not 0 we need an additional sub */ - new_sel = new_bd_ia32_Lea(dbgi, block, new_sel, noreg_GP); - add_ia32_am_offs_int(new_sel, -switch_min); - set_ia32_op_type(new_sel, ia32_AddrModeS); + entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type()); + set_entity_visibility(entity, ir_visibility_private); + add_entity_linkage(entity, IR_LINKAGE_CONSTANT); - SET_IA32_ORIG_NODE(new_sel, node); - } - - new_node = new_bd_ia32_SwitchJmp(dbgi, block, new_sel, default_pn); + /* TODO: we could perform some more matching here to also use the base + * register of the address mode */ + new_node + = new_bd_ia32_SwitchJmp(dbgi, block, noreg_GP, new_sel, default_pn); + set_ia32_am_scale(new_node, 2); + set_ia32_am_sc(new_node, entity); + set_ia32_op_type(new_node, ia32_AddrModeS); + set_ia32_ls_mode(new_node, mode_Iu); SET_IA32_ORIG_NODE(new_node, node); + // FIXME This seems wrong. GCC uses PIC for switch on OS X. 
+ get_ia32_attr(new_node)->data.am_sc_no_pic_adjust = true; return new_node; } @@ -2837,25 +2947,6 @@ static ir_node *create_Ucomi(ir_node *node) return new_node; } -/** - * helper function: checks whether all Cmp projs are Lg or Eq which is needed - * to fold an and into a test node - */ -static bool can_fold_test_and(ir_node *node) -{ - const ir_edge_t *edge; - - /** we can only have eq and lg projs */ - foreach_out_edge(node, edge) { - ir_node *proj = get_edge_src_irn(edge); - pn_Cmp pnc = get_Proj_pn_cmp(proj); - if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) - return false; - } - - return true; -} - /** * returns true if it is assured, that the upper bits of a node are "clean" * which means for a 16 or 8 bit value, that the upper bits in the register @@ -2964,8 +3055,7 @@ static ir_node *gen_Cmp(ir_node *node) /* Prefer the Test instruction, when encountering (x & y) ==/!= 0 */ if (is_Const_0(right) && is_And(left) && - get_irn_n_edges(left) == 1 && - can_fold_test_and(node)) { + get_irn_n_edges(left) == 1) { /* Test(and_left, and_right) */ ir_node *and_left = get_And_left(left); ir_node *and_right = get_And_right(left); @@ -3046,7 +3136,7 @@ static ir_node *create_CMov(ir_node *node, ir_node *flags, ir_node *new_flags, match_commutative | match_am | match_16bit_am | match_mode_neutral); if (am.ins_permuted) - cc = ia32_invert_condition_code(cc); + cc = ia32_negate_condition_code(cc); new_node = new_bd_ia32_CMovcc(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op1, am.new_op2, new_flags, @@ -3105,12 +3195,12 @@ static ir_node *create_doz(ir_node *psi, ir_node *a, ir_node *b) if (is_Proj(new_node)) { sub = get_Proj_pred(new_node); - assert(is_ia32_Sub(sub)); } else { sub = new_node; set_irn_mode(sub, mode_T); new_node = new_rd_Proj(NULL, sub, mode, pn_ia32_res); } + assert(is_ia32_Sub(sub)); eflags = new_rd_Proj(NULL, sub, mode_Iu, pn_ia32_Sub_flags); dbgi = get_irn_dbg_info(psi); @@ -3225,7 +3315,7 @@ static void find_const_transform(ia32_condition_code_t cc, t = f; f = tmp; cc = ia32_negate_condition_code(cc); - } else if (tarval_cmp(t, f) == pn_Cmp_Lt) { + } else if (tarval_cmp(t, f) == ir_relation_less) { // now, t is the bigger one ir_tarval *tmp = t; t = f; @@ -3315,11 +3405,11 @@ static void find_const_transform(ia32_condition_code_t cc, ++step; res->steps[step].transform = SETCC_TR_NEG; } else { - int v = get_tarval_lowest_bit(t); - assert(v >= 0); + int val = get_tarval_lowest_bit(t); + assert(val >= 0); res->steps[step].transform = SETCC_TR_SHL; - res->steps[step].scale = v; + res->steps[step].scale = val; } } ++step; @@ -3342,29 +3432,34 @@ static ir_node *gen_Mux(ir_node *node) ir_node *new_block = be_transform_node(block); ir_node *mux_true = get_Mux_true(node); ir_node *mux_false = get_Mux_false(node); - ir_node *cond = get_Mux_sel(node); + ir_node *sel = get_Mux_sel(node); ir_mode *mode = get_irn_mode(node); ir_node *flags; ir_node *new_node; int is_abs; ia32_condition_code_t cc; - assert(get_irn_mode(cond) == mode_b); + assert(get_irn_mode(sel) == mode_b); - is_abs = be_mux_is_abs(cond, mux_true, mux_false); + is_abs = ir_mux_is_abs(sel, mux_true, mux_false); if (is_abs != 0) { - return create_abs(dbgi, block, be_get_abs_op(cond), is_abs < 0, node); + if (ia32_mode_needs_gp_reg(mode)) { + ir_fprintf(stderr, "Optimisation warning: Integer abs %+F not transformed\n", + node); + } else { + ir_node *op = ir_get_abs_op(sel, mux_true, mux_false); + return create_float_abs(dbgi, block, op, is_abs < 0, node); + } } /* Note: a Mux node uses a Load two times IFF it's used 
in the compare AND in the result */ if (mode_is_float(mode)) { - ir_node *cmp = get_Proj_pred(cond); - ir_node *cmp_left = get_Cmp_left(cmp); - ir_node *cmp_right = get_Cmp_right(cmp); - int pnc = get_Proj_proj(cond); + ir_node *cmp_left = get_Cmp_left(sel); + ir_node *cmp_right = get_Cmp_right(sel); + ir_relation relation = get_Cmp_relation(sel); if (ia32_cg_config.use_sse2) { - if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Le) { + if (relation == ir_relation_less || relation == ir_relation_less_equal) { if (cmp_left == mux_true && cmp_right == mux_false) { /* Mux(a <= b, a, b) => MIN */ return gen_binop(node, cmp_left, cmp_right, new_bd_ia32_xMin, @@ -3374,7 +3469,7 @@ static ir_node *gen_Mux(ir_node *node) return gen_binop(node, cmp_left, cmp_right, new_bd_ia32_xMax, match_commutative | match_am | match_two_users); } - } else if (pnc == pn_Cmp_Gt || pnc == pn_Cmp_Ge) { + } else if (relation == ir_relation_greater || relation == ir_relation_greater_equal) { if (cmp_left == mux_true && cmp_right == mux_false) { /* Mux(a >= b, a, b) => MAX */ return gen_binop(node, cmp_left, cmp_right, new_bd_ia32_xMax, @@ -3393,7 +3488,7 @@ static ir_node *gen_Mux(ir_node *node) ir_mode *new_mode; unsigned scale; - flags = get_flags_node(cond, &cc); + flags = get_flags_node(sel, &cc); new_node = create_set_32bit(dbgi, new_block, flags, cc, node); if (ia32_cg_config.use_sse2) { @@ -3428,7 +3523,7 @@ static ir_node *gen_Mux(ir_node *node) case 16: /* arg, shift 16 NOT supported */ scale = 3; - new_node = new_bd_ia32_Add(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, new_node); + new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node); break; default: panic("Unsupported constant size"); @@ -3441,6 +3536,7 @@ static ir_node *gen_Mux(ir_node *node) am.addr.offset = 0; am.addr.scale = scale; am.addr.use_frame = 0; + am.addr.tls_segment = false; am.addr.frame_entity = NULL; am.addr.symconst_sign = 0; am.mem_proj = am.addr.mem; @@ -3464,37 +3560,34 @@ static ir_node *gen_Mux(ir_node *node) } else { assert(ia32_mode_needs_gp_reg(mode)); - if (is_Proj(cond)) { - ir_node *cmp = get_Proj_pred(cond); - if (is_Cmp(cmp)) { - ir_node *cmp_left = get_Cmp_left(cmp); - ir_node *cmp_right = get_Cmp_right(cmp); - ir_node *val_true = mux_true; - ir_node *val_false = mux_false; - int pnc = get_Proj_proj(cond); - - if (is_Const(val_true) && is_Const_null(val_true)) { - ir_node *tmp = val_false; - val_false = val_true; - val_true = tmp; - pnc = get_negated_pnc(pnc, get_irn_mode(cmp_left)); + if (is_Cmp(sel)) { + ir_node *cmp_left = get_Cmp_left(sel); + ir_node *cmp_right = get_Cmp_right(sel); + ir_relation relation = get_Cmp_relation(sel); + ir_node *val_true = mux_true; + ir_node *val_false = mux_false; + + if (is_Const(val_true) && is_Const_null(val_true)) { + ir_node *tmp = val_false; + val_false = val_true; + val_true = tmp; + relation = get_negated_relation(relation); + } + if (is_Const_0(val_false) && is_Sub(val_true)) { + if ((relation & ir_relation_greater) + && get_Sub_left(val_true) == cmp_left + && get_Sub_right(val_true) == cmp_right) { + return create_doz(node, cmp_left, cmp_right); } - if (is_Const_0(val_false) && is_Sub(val_true)) { - if ((pnc == pn_Cmp_Gt || pnc == pn_Cmp_Ge) - && get_Sub_left(val_true) == cmp_left - && get_Sub_right(val_true) == cmp_right) { - return create_doz(node, cmp_left, cmp_right); - } - if ((pnc == pn_Cmp_Lt || pnc == pn_Cmp_Le) - && get_Sub_left(val_true) == cmp_right - && get_Sub_right(val_true) == cmp_left) { - return create_doz(node, cmp_right, cmp_left); - } + if ((relation & 
ir_relation_less) + && get_Sub_left(val_true) == cmp_right + && get_Sub_right(val_true) == cmp_left) { + return create_doz(node, cmp_right, cmp_left); } } } - flags = get_flags_node(cond, &cc); + flags = get_flags_node(sel, &cc); if (is_Const(mux_true) && is_Const(mux_false)) { /* both are const, good */ @@ -3510,8 +3603,8 @@ static ir_node *gen_Mux(ir_node *node) switch (res.steps[step].transform) { case SETCC_TR_ADD: - imm = ia32_immediate_from_long(res.steps[step].val); - new_node = new_bd_ia32_Add(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, imm); + new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, noreg_GP); + add_ia32_am_offs_int(new_node, res.steps[step].val); break; case SETCC_TR_ADDxx: new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node); @@ -3551,7 +3644,7 @@ static ir_node *gen_Mux(ir_node *node) } } } else { - new_node = create_CMov(node, cond, flags, cc); + new_node = create_CMov(node, sel, flags, cc); } return new_node; } @@ -3569,13 +3662,17 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_irn_mode(node); + ir_node *frame = get_irg_frame(irg); ir_node *fist, *load, *mem; - mem = gen_vfist(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, new_op, &fist); + fist = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_op); set_irn_pinned(fist, op_pin_state_floats); set_ia32_use_frame(fist); set_ia32_op_type(fist, ia32_AddrModeD); + assert((long)pn_ia32_vfist_M == (long) pn_ia32_vfisttp_M); + mem = new_r_Proj(fist, mode_M, pn_ia32_vfist_M); + assert(get_mode_size_bits(mode) <= 32); /* exception we can only store signed 32 bit integers, so for unsigned we store a 64bit (signed) integer and load the lower bits */ @@ -3614,6 +3711,7 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) ir_graph *irg = get_Block_irg(block); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *frame = get_irg_frame(irg); + ir_node *store_mem; ir_node *store, *load; ir_node *new_node; @@ -3622,7 +3720,9 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) set_ia32_op_type(store, ia32_AddrModeD); SET_IA32_ORIG_NODE(store, node); - load = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store, tgt_mode); + store_mem = new_r_Proj(store, mode_M, pn_ia32_vfst_M); + + load = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store_mem, tgt_mode); set_ia32_use_frame(load); set_ia32_op_type(load, ia32_AddrModeS); SET_IA32_ORIG_NODE(load, node); @@ -3656,6 +3756,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) ir_mode *store_mode; ir_node *fild; ir_node *store; + ir_node *store_mem; ir_node *new_node; /* fild can use source AM if the operand is a signed 16bit or 32bit integer */ @@ -3701,6 +3802,8 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) set_ia32_op_type(store, ia32_AddrModeD); set_ia32_ls_mode(store, mode_Iu); + store_mem = new_r_Proj(store, mode_M, pn_ia32_Store_M); + /* exception for 32bit unsigned, do a 64bit spill+load */ if (!mode_is_signed(mode)) { ir_node *in[2]; @@ -3709,23 +3812,24 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) ir_node *zero_store = new_bd_ia32_Store(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, zero_const); + ir_node *zero_store_mem = new_r_Proj(zero_store, mode_M, pn_ia32_Store_M); set_ia32_use_frame(zero_store); set_ia32_op_type(zero_store, ia32_AddrModeD); add_ia32_am_offs_int(zero_store, 4); set_ia32_ls_mode(zero_store, mode_Iu); - in[0] = zero_store; - 
in[1] = store; + in[0] = zero_store_mem; + in[1] = store_mem; - store = new_rd_Sync(dbgi, block, 2, in); + store_mem = new_rd_Sync(dbgi, block, 2, in); store_mode = mode_Ls; } else { store_mode = mode_Is; } /* do a fild */ - fild = new_bd_ia32_vfild(dbgi, block, get_irg_frame(irg), noreg_GP, store); + fild = new_bd_ia32_vfild(dbgi, block, get_irg_frame(irg), noreg_GP, store_mem); set_ia32_use_frame(fild); set_ia32_op_type(fild, ia32_AddrModeS); @@ -3944,19 +4048,27 @@ static ir_node *gen_be_FrameAddr(ir_node *node) */ static ir_node *gen_be_Return(ir_node *node) { - ir_graph *irg = current_ir_graph; - ir_node *ret_val = get_irn_n(node, be_pos_Return_val); - ir_node *ret_mem = get_irn_n(node, be_pos_Return_mem); - ir_entity *ent = get_irg_entity(irg); - ir_type *tp = get_entity_type(ent); - dbg_info *dbgi; - ir_node *block; + ir_graph *irg = current_ir_graph; + ir_node *ret_val = get_irn_n(node, n_be_Return_val); + ir_node *ret_mem = get_irn_n(node, n_be_Return_mem); + ir_node *new_ret_val = be_transform_node(ret_val); + ir_node *new_ret_mem = be_transform_node(ret_mem); + ir_entity *ent = get_irg_entity(irg); + ir_type *tp = get_entity_type(ent); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); ir_type *res_type; ir_mode *mode; - ir_node *frame, *sse_store, *fld, *mproj, *barrier; - ir_node *new_barrier, *new_ret_val, *new_ret_mem; - ir_node **in; - int pn_ret_val, pn_ret_mem, arity, i; + ir_node *frame; + ir_node *sse_store; + ir_node *store_mem; + ir_node *fld; + ir_node *mproj; + int i; + int arity; + unsigned pop; + ir_node **in; + ir_node *new_node; assert(ret_val != NULL); if (be_Return_get_n_rets(node) < 1 || ! ia32_cg_config.use_sse2) { @@ -3976,66 +4088,42 @@ static ir_node *gen_be_Return(ir_node *node) assert(get_method_n_ress(tp) == 1); - pn_ret_val = get_Proj_proj(ret_val); - pn_ret_mem = get_Proj_proj(ret_mem); - - /* get the Barrier */ - barrier = get_Proj_pred(ret_val); - - /* get result input of the Barrier */ - ret_val = get_irn_n(barrier, pn_ret_val); - new_ret_val = be_transform_node(ret_val); - - /* get memory input of the Barrier */ - ret_mem = get_irn_n(barrier, pn_ret_mem); - new_ret_mem = be_transform_node(ret_mem); - frame = get_irg_frame(irg); - dbgi = get_irn_dbg_info(barrier); - block = be_transform_node(get_nodes_block(barrier)); - /* store xmm0 onto stack */ sse_store = new_bd_ia32_xStoreSimple(dbgi, block, frame, noreg_GP, new_ret_mem, new_ret_val); set_ia32_ls_mode(sse_store, mode); set_ia32_op_type(sse_store, ia32_AddrModeD); set_ia32_use_frame(sse_store); + store_mem = new_r_Proj(sse_store, mode_M, pn_ia32_xStoreSimple_M); /* load into x87 register */ - fld = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, sse_store, mode); + fld = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store_mem, mode); set_ia32_op_type(fld, ia32_AddrModeS); set_ia32_use_frame(fld); mproj = new_r_Proj(fld, mode_M, pn_ia32_vfld_M); fld = new_r_Proj(fld, mode_vfp, pn_ia32_vfld_res); - /* create a new barrier */ - arity = get_irn_arity(barrier); + /* create a new return */ + arity = get_irn_arity(node); in = ALLOCAN(ir_node*, arity); + pop = be_Return_get_pop(node); for (i = 0; i < arity; ++i) { - ir_node *new_in; - - if (i == pn_ret_val) { - new_in = fld; - } else if (i == pn_ret_mem) { - new_in = mproj; + ir_node *op = get_irn_n(node, i); + if (op == ret_val) { + in[i] = fld; + } else if (op == ret_mem) { + in[i] = mproj; } else { - ir_node *in = get_irn_n(barrier, i); - new_in = be_transform_node(in); + in[i] = 
be_transform_node(op); } - in[i] = new_in; } + new_node = be_new_Return(dbgi, irg, block, arity, pop, arity, in); + copy_node_attr(irg, node, new_node); - new_barrier = new_ir_node(dbgi, irg, block, - get_irn_op(barrier), get_irn_mode(barrier), - arity, in); - copy_node_attr(irg, barrier, new_barrier); - be_duplicate_deps(barrier, new_barrier); - be_set_transformed_node(barrier, new_barrier); - - /* transform normally */ - return be_duplicate_node(node); + return new_node; } /** @@ -4043,11 +4131,15 @@ static ir_node *gen_be_Return(ir_node *node) */ static ir_node *gen_be_AddSP(ir_node *node) { - ir_node *sz = get_irn_n(node, be_pos_AddSP_size); - ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp); + ir_node *sz = get_irn_n(node, n_be_AddSP_size); + ir_node *sp = get_irn_n(node, n_be_AddSP_old_sp); - return gen_binop(node, sp, sz, new_bd_ia32_SubSP, - match_am | match_immediate); + ir_node *new_node = gen_binop(node, sp, sz, new_bd_ia32_SubSP, + match_am | match_immediate); + assert(is_ia32_SubSP(new_node)); + arch_irn_set_register(new_node, pn_ia32_SubSP_stack, + &ia32_registers[REG_ESP]); + return new_node; } /** @@ -4055,11 +4147,15 @@ static ir_node *gen_be_AddSP(ir_node *node) */ static ir_node *gen_be_SubSP(ir_node *node) { - ir_node *sz = get_irn_n(node, be_pos_SubSP_size); - ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp); + ir_node *sz = get_irn_n(node, n_be_SubSP_size); + ir_node *sp = get_irn_n(node, n_be_SubSP_old_sp); - return gen_binop(node, sp, sz, new_bd_ia32_AddSP, - match_am | match_immediate); + ir_node *new_node = gen_binop(node, sp, sz, new_bd_ia32_AddSP, + match_am | match_immediate); + assert(is_ia32_AddSP(new_node)); + arch_irn_set_register(new_node, pn_ia32_AddSP_stack, + &ia32_registers[REG_ESP]); + return new_node; } /** @@ -4146,31 +4242,6 @@ static ir_node *gen_IJmp(ir_node *node) return new_node; } -static ir_node *gen_ia32_l_ShlDep(ir_node *node) -{ - ir_node *left = get_irn_n(node, n_ia32_l_ShlDep_val); - ir_node *right = get_irn_n(node, n_ia32_l_ShlDep_count); - - return gen_shift_binop(node, left, right, new_bd_ia32_Shl, - match_immediate | match_mode_neutral); -} - -static ir_node *gen_ia32_l_ShrDep(ir_node *node) -{ - ir_node *left = get_irn_n(node, n_ia32_l_ShrDep_val); - ir_node *right = get_irn_n(node, n_ia32_l_ShrDep_count); - return gen_shift_binop(node, left, right, new_bd_ia32_Shr, - match_immediate); -} - -static ir_node *gen_ia32_l_SarDep(ir_node *node) -{ - ir_node *left = get_irn_n(node, n_ia32_l_SarDep_val); - ir_node *right = get_irn_n(node, n_ia32_l_SarDep_count); - return gen_shift_binop(node, left, right, new_bd_ia32_Sar, - match_immediate); -} - static ir_node *gen_ia32_l_Add(ir_node *node) { ir_node *left = get_irn_n(node, n_ia32_l_Add_left); @@ -4247,62 +4318,6 @@ static ir_node *gen_ia32_l_Sbb(ir_node *node) match_am | match_immediate | match_mode_neutral); } -/** - * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs: - * op1 - target to be shifted - * op2 - contains bits to be shifted into target - * op3 - shift count - * Only op3 can be an immediate. 
- */ -static ir_node *gen_lowered_64bit_shifts(ir_node *node, ir_node *high, - ir_node *low, ir_node *count) -{ - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_high = be_transform_node(high); - ir_node *new_low = be_transform_node(low); - ir_node *new_count; - ir_node *new_node; - - /* the shift amount can be any mode that is bigger than 5 bits, since all - * other bits are ignored anyway */ - while (is_Conv(count) && - get_irn_n_edges(count) == 1 && - mode_is_int(get_irn_mode(count))) { - assert(get_mode_size_bits(get_irn_mode(count)) >= 5); - count = get_Conv_op(count); - } - new_count = create_immediate_or_transform(count, 0); - - if (is_ia32_l_ShlD(node)) { - new_node = new_bd_ia32_ShlD(dbgi, new_block, new_high, new_low, - new_count); - } else { - new_node = new_bd_ia32_ShrD(dbgi, new_block, new_high, new_low, - new_count); - } - SET_IA32_ORIG_NODE(new_node, node); - - return new_node; -} - -static ir_node *gen_ia32_l_ShlD(ir_node *node) -{ - ir_node *high = get_irn_n(node, n_ia32_l_ShlD_val_high); - ir_node *low = get_irn_n(node, n_ia32_l_ShlD_val_low); - ir_node *count = get_irn_n(node, n_ia32_l_ShlD_count); - return gen_lowered_64bit_shifts(node, high, low, count); -} - -static ir_node *gen_ia32_l_ShrD(ir_node *node) -{ - ir_node *high = get_irn_n(node, n_ia32_l_ShrD_val_high); - ir_node *low = get_irn_n(node, n_ia32_l_ShrD_val_low); - ir_node *count = get_irn_n(node, n_ia32_l_ShrD_count); - return gen_lowered_64bit_shifts(node, high, low, count); -} - static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) { ir_node *src_block = get_nodes_block(node); @@ -4316,7 +4331,10 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) ir_node *new_val_high = be_transform_node(val_high); ir_node *in[2]; ir_node *sync, *fild, *res; - ir_node *store_low, *store_high; + ir_node *store_low; + ir_node *store_high; + ir_node *mem_low; + ir_node *mem_high; if (ia32_cg_config.use_sse2) { panic("ia32_l_LLtoFloat not implemented for SSE2"); @@ -4330,6 +4348,9 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) SET_IA32_ORIG_NODE(store_low, node); SET_IA32_ORIG_NODE(store_high, node); + mem_low = new_r_Proj(store_low, mode_M, pn_ia32_Store_M); + mem_high = new_r_Proj(store_high, mode_M, pn_ia32_Store_M); + set_ia32_use_frame(store_low); set_ia32_use_frame(store_high); set_ia32_op_type(store_low, ia32_AddrModeD); @@ -4338,8 +4359,8 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) set_ia32_ls_mode(store_high, mode_Is); add_ia32_am_offs_int(store_high, 4); - in[0] = store_low; - in[1] = store_high; + in[0] = mem_low; + in[1] = mem_high; sync = new_rd_Sync(dbgi, block, 2, in); /* do a fild */ @@ -4365,6 +4386,7 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) am.addr.offset = 0; am.addr.scale = 2; am.addr.symconst_ent = ia32_gen_fp_known_const(ia32_ULLBIAS); + am.addr.tls_segment = false; am.addr.use_frame = 0; am.addr.frame_entity = NULL; am.addr.symconst_sign = 0; @@ -4396,15 +4418,16 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node) ir_node *frame = get_irg_frame(irg); ir_node *val = get_irn_n(node, n_ia32_l_FloattoLL_val); ir_node *new_val = be_transform_node(val); - ir_node *fist, *mem; + ir_node *fist; - mem = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val, &fist); + fist = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val); SET_IA32_ORIG_NODE(fist, node); set_ia32_use_frame(fist); set_ia32_op_type(fist, ia32_AddrModeD); set_ia32_ls_mode(fist, mode_Ls); - return mem; + 
assert((long)pn_ia32_vfist_M == (long) pn_ia32_vfisttp_M); + return new_r_Proj(fist, mode_M, pn_ia32_vfist_M); } static ir_node *gen_Proj_l_FloattoLL(ir_node *node) @@ -4494,10 +4517,9 @@ static ir_node *gen_Proj_be_SubSP(ir_node *node) static ir_node *gen_Proj_Load(ir_node *node) { ir_node *new_pred; - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *pred = get_Proj_pred(node); - dbg_info *dbgi = get_irn_dbg_info(node); - long proj = get_Proj_proj(node); + ir_node *pred = get_Proj_pred(node); + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); /* loads might be part of source address mode matches, so we don't * transform the ProjMs yet (with the exception of loads whose result is @@ -4518,57 +4540,58 @@ static ir_node *gen_Proj_Load(ir_node *node) /* renumber the proj */ new_pred = be_transform_node(pred); if (is_ia32_Load(new_pred)) { - switch (proj) { + switch ((pn_Load)proj) { case pn_Load_res: return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Load_res); case pn_Load_M: return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Load_M); - case pn_Load_X_regular: - return new_rd_Jmp(dbgi, block); case pn_Load_X_except: /* This Load might raise an exception. Mark it. */ set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Load_X_exc); - default: - break; + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Load_X_except); + case pn_Load_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Load_X_regular); } } else if (is_ia32_Conv_I2I(new_pred) || is_ia32_Conv_I2I8Bit(new_pred)) { set_irn_mode(new_pred, mode_T); - if (proj == pn_Load_res) { + switch ((pn_Load)proj) { + case pn_Load_res: return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_res); - } else if (proj == pn_Load_M) { + case pn_Load_M: return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_mem); + case pn_Load_X_except: + /* This Load might raise an exception. Mark it. */ + set_ia32_exc_label(new_pred, 1); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Conv_I2I_X_except); + case pn_Load_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Conv_I2I_X_regular); } } else if (is_ia32_xLoad(new_pred)) { - switch (proj) { + switch ((pn_Load)proj) { case pn_Load_res: return new_rd_Proj(dbgi, new_pred, mode_xmm, pn_ia32_xLoad_res); case pn_Load_M: return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xLoad_M); - case pn_Load_X_regular: - return new_rd_Jmp(dbgi, block); case pn_Load_X_except: /* This Load might raise an exception. Mark it. */ set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xLoad_X_exc); - default: - break; + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xLoad_X_except); + case pn_Load_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xLoad_X_regular); } } else if (is_ia32_vfld(new_pred)) { - switch (proj) { + switch ((pn_Load)proj) { case pn_Load_res: return new_rd_Proj(dbgi, new_pred, mode_vfp, pn_ia32_vfld_res); case pn_Load_M: return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfld_M); - case pn_Load_X_regular: - return new_rd_Jmp(dbgi, block); case pn_Load_X_except: /* This Load might raise an exception. Mark it. 
*/ set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfld_X_exc); - default: - break; + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfld_X_except); + case pn_Load_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfld_X_regular); } } else { /* can happen for ProJMs when source address mode happened for the @@ -4583,7 +4606,76 @@ static ir_node *gen_Proj_Load(ir_node *node) return new_rd_Proj(dbgi, new_pred, mode_M, 1); } - panic("No idea how to transform proj"); + panic("No idea how to transform Proj(Load) %+F", node); +} + +static ir_node *gen_Proj_Store(ir_node *node) +{ + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + dbg_info *dbgi = get_irn_dbg_info(node); + long pn = get_Proj_proj(node); + + if (is_ia32_Store(new_pred) || is_ia32_Store8Bit(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Store_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Store_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Store_X_regular); + } + } else if (is_ia32_vfist(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfist_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfist_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfist_X_regular); + } + } else if (is_ia32_vfisttp(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfisttp_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfisttp_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfisttp_X_regular); + } + } else if (is_ia32_vfst(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfst_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfst_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfst_X_regular); + } + } else if (is_ia32_xStore(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xStore_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xStore_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xStore_X_regular); + } + } else if (is_Sync(new_pred)) { + /* hack for the case that gen_float_const_Store produced a Sync */ + if (pn == pn_Store_M) { + return new_pred; + } + panic("exception control flow for gen_float_const_Store not implemented yet"); + } else if (get_ia32_op_type(new_pred) == ia32_AddrModeD) { + /* destination address mode */ + if (pn == pn_Store_M) { + return new_pred; + } + panic("exception control flow for destination AM not implemented yet"); + } + + panic("No idea how to transform Proj(Store) %+F", node); } /** @@ -4591,16 +4683,15 @@ static ir_node *gen_Proj_Load(ir_node *node) */ static ir_node *gen_Proj_Div(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); dbg_info *dbgi = get_irn_dbg_info(node); - long proj = get_Proj_proj(node); + long proj = get_Proj_proj(node); - assert(pn_ia32_Div_M == pn_ia32_IDiv_M); - assert(pn_ia32_Div_div_res == pn_ia32_IDiv_div_res); 
+	assert((long)pn_ia32_Div_M == (long)pn_ia32_IDiv_M);
+	assert((long)pn_ia32_Div_div_res == (long)pn_ia32_IDiv_div_res);
 
-	switch (proj) {
+	switch ((pn_Div)proj) {
 	case pn_Div_M:
 		if (is_ia32_Div(new_pred) || is_ia32_IDiv(new_pred)) {
 			return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M);
@@ -4621,13 +4712,11 @@ static ir_node *gen_Proj_Div(ir_node *node)
 		} else {
 			panic("Div transformed to unexpected thing %+F", new_pred);
 		}
-	case pn_Div_X_regular:
-		return new_rd_Jmp(dbgi, block);
 	case pn_Div_X_except:
 		set_ia32_exc_label(new_pred, 1);
-		return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc);
-	default:
-		break;
+		return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_except);
+	case pn_Div_X_regular:
+		return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_regular);
 	}
 
 	panic("No idea how to transform proj->Div");
@@ -4644,19 +4733,19 @@ static ir_node *gen_Proj_Mod(ir_node *node)
 	long     proj     = get_Proj_proj(node);
 
 	assert(is_ia32_Div(new_pred) || is_ia32_IDiv(new_pred));
-	assert(pn_ia32_Div_M == pn_ia32_IDiv_M);
-	assert(pn_ia32_Div_mod_res == pn_ia32_IDiv_mod_res);
+	assert((long)pn_ia32_Div_M == (long)pn_ia32_IDiv_M);
+	assert((long)pn_ia32_Div_mod_res == (long)pn_ia32_IDiv_mod_res);
 
-	switch (proj) {
+	switch ((pn_Mod)proj) {
 	case pn_Mod_M:
 		return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M);
 	case pn_Mod_res:
 		return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_mod_res);
 	case pn_Mod_X_except:
 		set_ia32_exc_label(new_pred, 1);
-		return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc);
-	default:
-		break;
+		return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_except);
+	case pn_Mod_X_regular:
+		return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_regular);
 	}
 	panic("No idea how to transform proj->Mod");
 }
@@ -4671,7 +4760,7 @@ static ir_node *gen_Proj_CopyB(ir_node *node)
 	dbg_info *dbgi     = get_irn_dbg_info(node);
 	long     proj      = get_Proj_proj(node);
 
-	switch (proj) {
+	switch ((pn_CopyB)proj) {
 	case pn_CopyB_M:
 		if (is_ia32_CopyB_i(new_pred)) {
 			return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_i_M);
@@ -4679,7 +4768,19 @@ static ir_node *gen_Proj_CopyB(ir_node *node)
 			return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_M);
 		}
 		break;
-	default:
+	case pn_CopyB_X_regular:
+		if (is_ia32_CopyB_i(new_pred)) {
+			return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_i_X_regular);
+		} else if (is_ia32_CopyB(new_pred)) {
+			return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_X_regular);
+		}
+		break;
+	case pn_CopyB_X_except:
+		if (is_ia32_CopyB_i(new_pred)) {
+			return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_i_X_except);
+		} else if (is_ia32_CopyB(new_pred)) {
+			return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_X_except);
+		}
 		break;
 	}
 
@@ -4691,10 +4792,10 @@ static ir_node *gen_be_Call(ir_node *node)
 	dbg_info *const dbgi      = get_irn_dbg_info(node);
 	ir_node  *const src_block = get_nodes_block(node);
 	ir_node  *const block     = be_transform_node(src_block);
-	ir_node  *const src_mem   = get_irn_n(node, be_pos_Call_mem);
-	ir_node  *const src_sp    = get_irn_n(node, be_pos_Call_sp);
+	ir_node  *const src_mem   = get_irn_n(node, n_be_Call_mem);
+	ir_node  *const src_sp    = get_irn_n(node, n_be_Call_sp);
 	ir_node  *const sp        = be_transform_node(src_sp);
-	ir_node  *const src_ptr   = get_irn_n(node, be_pos_Call_ptr);
+	ir_node  *const src_ptr   = get_irn_n(node, n_be_Call_ptr);
 	ia32_address_mode_t am;
 	ia32_address_t *const addr = &am.addr;
 	ir_node  *mem;
@@ -4707,6 +4808,7 @@ static ir_node *gen_be_Call(ir_node *node)
 	unsigned const  pop       = be_Call_get_pop(node);
 	ir_type  *const call_tp   = be_Call_get_type(node);
 	int             old_no_pic_adjust;
+	int             throws_exception = ir_throws_exception(node);
 
 	/* Run the x87 simulator if the call returns a float value */
 	if (get_method_n_ress(call_tp) > 0) {
@@ -4734,7 +4836,7 @@ static ir_node *gen_be_Call(ir_node *node)
 	i    = get_irn_arity(node) - 1;
 	fpcw = be_transform_node(get_irn_n(node, i--));
 
-	for (; i >= be_pos_Call_first_arg; --i) {
+	for (; i >= n_be_Call_first_arg; --i) {
 		arch_register_req_t const *const req = arch_get_register_req(node, i);
 		ir_node *const reg_parm = be_transform_node(get_irn_n(node, i));
 
@@ -4752,6 +4854,7 @@ static ir_node *gen_be_Call(ir_node *node)
 	mem  = transform_AM_mem(block, src_ptr, src_mem, addr->mem);
 	call = new_bd_ia32_Call(dbgi, block, addr->base, addr->index, mem, am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp);
+	ir_set_throws_exception(call, throws_exception);
 	set_am_attributes(call, &am);
 	call = fix_mem_proj(call, &am);
 
@@ -4802,6 +4905,7 @@ static ir_node *gen_return_address(ir_node *node)
 	ir_node   *frame = get_Builtin_param(node, 1);
 	dbg_info  *dbgi  = get_irn_dbg_info(node);
 	ir_tarval *tv    = get_Const_tarval(param);
+	ir_graph  *irg   = get_irn_irg(node);
 	unsigned long value = get_tarval_long(tv);
 
 	ir_node *block = be_transform_node(get_nodes_block(node));
@@ -4823,7 +4927,7 @@ static ir_node *gen_return_address(ir_node *node)
 	set_ia32_am_offs_int(load, 0);
 	set_ia32_use_frame(load);
-	set_ia32_frame_ent(load, ia32_get_return_address_entity());
+	set_ia32_frame_ent(load, ia32_get_return_address_entity(irg));
 
 	if (get_irn_pinned(node) == op_pin_state_floats) {
 		assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res
@@ -4845,6 +4949,7 @@ static ir_node *gen_frame_address(ir_node *node)
 	ir_node   *frame = get_Builtin_param(node, 1);
 	dbg_info  *dbgi  = get_irn_dbg_info(node);
 	ir_tarval *tv    = get_Const_tarval(param);
+	ir_graph  *irg   = get_irn_irg(node);
 	unsigned long value = get_tarval_long(tv);
 
 	ir_node *block = be_transform_node(get_nodes_block(node));
@@ -4865,7 +4970,7 @@ static ir_node *gen_frame_address(ir_node *node)
 	set_ia32_op_type(load, ia32_AddrModeS);
 	set_ia32_ls_mode(load, mode_Iu);
 
-	ent = ia32_get_frame_address_entity();
+	ent = ia32_get_frame_address_entity(irg);
 	if (ent != NULL) {
 		set_ia32_am_offs_int(load, 0);
 		set_ia32_use_frame(load);
@@ -4892,7 +4997,7 @@ static ir_node *gen_frame_address(ir_node *node)
 static ir_node *gen_prefetch(ir_node *node)
 {
 	dbg_info *dbgi;
-	ir_node  *ptr, *block, *mem, *base, *index;
+	ir_node  *ptr, *block, *mem, *base, *idx;
 	ir_node  *param, *new_node;
 	long      rw, locality;
 	ir_tarval *tv;
@@ -4911,8 +5016,8 @@ static ir_node *gen_prefetch(ir_node *node)
 	memset(&addr, 0, sizeof(addr));
 	ptr = get_Builtin_param(node, 0);
 	ia32_create_address_mode(&addr, ptr, ia32_create_am_normal);
-	base  = addr.base;
-	index = addr.index;
+	base = addr.base;
+	idx  = addr.index;
 
 	if (base == NULL) {
 		base = noreg_GP;
@@ -4920,10 +5025,10 @@ static ir_node *gen_prefetch(ir_node *node)
 		base = be_transform_node(base);
 	}
 
-	if (index == NULL) {
-		index = noreg_GP;
+	if (idx == NULL) {
+		idx = noreg_GP;
 	} else {
-		index = be_transform_node(index);
+		idx = be_transform_node(idx);
 	}
 
 	dbgi = get_irn_dbg_info(node);
@@ -4932,7 +5037,7 @@ static ir_node *gen_prefetch(ir_node *node)
 
 	if (rw == 1 && ia32_cg_config.use_3dnow_prefetch) {
 		/* we have 3DNow!, this was already checked above */
-		new_node = new_bd_ia32_PrefetchW(dbgi, block, base, index, mem);
+		new_node = new_bd_ia32_PrefetchW(dbgi, block, base, idx, mem);
 	} else if (ia32_cg_config.use_sse_prefetch) {
 		/* note: rw == 1 is IGNORED in that case */
 		param    = get_Builtin_param(node, 2);
@@ -4942,22 +5047,22 @@ static ir_node *gen_prefetch(ir_node *node)
 		/* SSE style prefetch */
 		switch (locality) {
 		case 0:
-			new_node = new_bd_ia32_PrefetchNTA(dbgi, block, base, index, mem);
+			new_node = new_bd_ia32_PrefetchNTA(dbgi, block, base, idx, mem);
 			break;
 		case 1:
-			new_node = new_bd_ia32_Prefetch2(dbgi, block, base, index, mem);
+			new_node = new_bd_ia32_Prefetch2(dbgi, block, base, idx, mem);
 			break;
 		case 2:
-			new_node = new_bd_ia32_Prefetch1(dbgi, block, base, index, mem);
+			new_node = new_bd_ia32_Prefetch1(dbgi, block, base, idx, mem);
 			break;
 		default:
-			new_node = new_bd_ia32_Prefetch0(dbgi, block, base, index, mem);
+			new_node = new_bd_ia32_Prefetch0(dbgi, block, base, idx, mem);
 			break;
 		}
 	} else {
 		assert(ia32_cg_config.use_3dnow_prefetch);
 		/* 3DNow! style prefetch */
-		new_node = new_bd_ia32_Prefetch(dbgi, block, base, index, mem);
+		new_node = new_bd_ia32_Prefetch(dbgi, block, base, idx, mem);
 	}
 
 	set_irn_pinned(new_node, get_irn_pinned(node));
@@ -4967,7 +5072,6 @@ static ir_node *gen_prefetch(ir_node *node)
 
 	SET_IA32_ORIG_NODE(new_node, node);
 
-	be_dep_on_frame(new_node);
 	return new_r_Proj(new_node, mode_M, pn_ia32_Prefetch_M);
 }
 
@@ -5005,7 +5109,7 @@ static ir_node *gen_ffs(ir_node *node)
 	ir_node  *real  = skip_Proj(bsf);
 	dbg_info *dbgi  = get_irn_dbg_info(real);
 	ir_node  *block = get_nodes_block(real);
-	ir_node  *flag, *set, *conv, *neg, *orn;
+	ir_node  *flag, *set, *conv, *neg, *orn, *add;
 
 	/* bsf x */
 	if (get_irn_mode(real) != mode_T) {
@@ -5031,7 +5135,9 @@ static ir_node *gen_ffs(ir_node *node)
 	set_ia32_commutative(orn);
 
 	/* add 1 */
-	return new_bd_ia32_Add(dbgi, block, noreg_GP, noreg_GP, nomem, orn, ia32_create_Immediate(NULL, 0, 1));
+	add = new_bd_ia32_Lea(dbgi, block, orn, noreg_GP);
+	add_ia32_am_offs_int(add, 1);
+	return add;
 }
 
 /**
@@ -5358,7 +5464,8 @@ static ir_node *gen_inner_trampoline(ir_node *node)
 	if (is_SymConst(callee)) {
 		rel = new_bd_ia32_Const(dbgi, new_block, get_SymConst_entity(callee), 0, 0, -10);
 	} else {
-		rel = new_bd_ia32_Lea(dbgi, new_block, be_transform_node(callee), ia32_create_Immediate(NULL, 0, -10));
+		rel = new_bd_ia32_Lea(dbgi, new_block, be_transform_node(callee), noreg_GP);
+		add_ia32_am_offs_int(rel, -10);
 	}
 	rel = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, nomem, rel, trampoline);
 
@@ -5479,7 +5586,7 @@ static ir_node *gen_Proj_be_Call(ir_node *node)
 	ir_mode  *mode     = get_irn_mode(node);
 	ir_node  *res;
 
-	if (proj == pn_be_Call_M_regular) {
+	if (proj == pn_be_Call_M) {
 		return new_rd_Proj(dbgi, new_call, mode_M, n_ia32_Call_mem);
 	}
 	/* transform call modes */
@@ -5491,8 +5598,12 @@ static ir_node *gen_Proj_be_Call(ir_node *node)
 	/* Map from be_Call to ia32_Call proj number */
 	if (proj == pn_be_Call_sp) {
 		proj = pn_ia32_Call_stack;
-	} else if (proj == pn_be_Call_M_regular) {
+	} else if (proj == pn_be_Call_M) {
 		proj = pn_ia32_Call_M;
+	} else if (proj == pn_be_Call_X_except) {
+		proj = pn_ia32_Call_X_except;
+	} else if (proj == pn_be_Call_X_regular) {
+		proj = pn_ia32_Call_X_regular;
 	} else {
 		arch_register_req_t const *const req    = arch_get_register_req_out(node);
 		int                        const n_outs = arch_irn_get_n_outs(new_call);
@@ -5520,13 +5631,13 @@ static ir_node *gen_Proj_be_Call(ir_node *node)
 	/* TODO arch_set_irn_register() only operates on Projs, need variant with index */
 	switch (proj) {
-		case pn_ia32_Call_stack:
-			arch_set_irn_register(res, &ia32_registers[REG_ESP]);
-			break;
+	case pn_ia32_Call_stack:
+		arch_set_irn_register(res, &ia32_registers[REG_ESP]);
+		break;
 
-		case pn_ia32_Call_fpcw:
-			arch_set_irn_register(res, &ia32_registers[REG_FPCW]);
-			break;
+	case pn_ia32_Call_fpcw:
+		arch_set_irn_register(res, &ia32_registers[REG_FPCW]);
+		break;
 	}
 
 	return res;
@@ -5571,15 +5682,10 @@ static ir_node *gen_Proj(ir_node *node)
 	long    proj;
 
 	switch (get_irn_opcode(pred)) {
-	case iro_Store:
-		proj = get_Proj_proj(node);
-		if (proj == pn_Store_M) {
-			return be_transform_node(pred);
-		} else {
-			panic("No idea how to transform proj->Store");
-		}
 	case iro_Load:
 		return gen_Proj_Load(node);
+	case iro_Store:
+		return gen_Proj_Store(node);
 	case iro_ASM:
 		return gen_Proj_ASM(node);
 	case iro_Builtin:
@@ -5610,9 +5716,6 @@ static ir_node *gen_Proj(ir_node *node)
 			return jump;
 		}
-
-		case pn_Start_P_tls:
-			return ia32_gen_Proj_tls(node);
 		}
 		break;
 
@@ -5670,12 +5773,7 @@ static void register_transformers(void)
 	be_set_transform_function(op_ia32_l_IMul,      gen_ia32_l_IMul);
 	be_set_transform_function(op_ia32_l_LLtoFloat, gen_ia32_l_LLtoFloat);
 	be_set_transform_function(op_ia32_l_Mul,       gen_ia32_l_Mul);
-	be_set_transform_function(op_ia32_l_SarDep,    gen_ia32_l_SarDep);
 	be_set_transform_function(op_ia32_l_Sbb,       gen_ia32_l_Sbb);
-	be_set_transform_function(op_ia32_l_ShlDep,    gen_ia32_l_ShlDep);
-	be_set_transform_function(op_ia32_l_ShlD,      gen_ia32_l_ShlD);
-	be_set_transform_function(op_ia32_l_ShrDep,    gen_ia32_l_ShrDep);
-	be_set_transform_function(op_ia32_l_ShrD,      gen_ia32_l_ShrD);
	be_set_transform_function(op_ia32_l_Sub,       gen_ia32_l_Sub);
 	be_set_transform_function(op_ia32_GetEIP,      be_duplicate_node);
 	be_set_transform_function(op_ia32_Minus64Bit,  be_duplicate_node);
@@ -5714,14 +5812,14 @@ static void ia32_pretransform_node(void)
 	ir_graph        *irg      = current_ir_graph;
 	ia32_irg_data_t *irg_data = ia32_get_irg_data(current_ir_graph);
 
-	irg_data->noreg_gp  = be_pre_transform_node(irg_data->noreg_gp);
-	irg_data->noreg_vfp = be_pre_transform_node(irg_data->noreg_vfp);
-	irg_data->noreg_xmm = be_pre_transform_node(irg_data->noreg_xmm);
+	irg_data->noreg_gp       = be_pre_transform_node(irg_data->noreg_gp);
+	irg_data->noreg_vfp      = be_pre_transform_node(irg_data->noreg_vfp);
+	irg_data->noreg_xmm      = be_pre_transform_node(irg_data->noreg_xmm);
+	irg_data->get_eip        = be_pre_transform_node(irg_data->get_eip);
+	irg_data->fpu_trunc_mode = be_pre_transform_node(irg_data->fpu_trunc_mode);
 
 	nomem    = get_irg_no_mem(irg);
 	noreg_GP = ia32_new_NoReg_gp(irg);
-
-	get_fpcw();
 }
 
 /**
@@ -5742,14 +5840,14 @@ static void postprocess_fp_call_results(void)
 		ir_type *res_tp = get_method_res_type(mtp, j);
 		ir_node *res, *new_res;
 		const ir_edge_t *edge, *next;
-		ir_mode *mode;
+		ir_mode *res_mode;
 		if (! is_atomic_type(res_tp)) {
 			/* no floating point return */
 			continue;
 		}
 
-		mode = get_type_mode(res_tp);
-		if (! mode_is_float(mode)) {
+		res_mode = get_type_mode(res_tp);
+		if (! mode_is_float(res_mode)) {
 			/* no floating point return */
 			continue;
 		}
@@ -5770,12 +5868,13 @@ static void postprocess_fp_call_results(void)
 				dbg_info *db    = get_irn_dbg_info(succ);
 				ir_node  *block = get_nodes_block(succ);
 				ir_node  *base  = get_irn_n(succ, n_ia32_xStore_base);
-				ir_node  *index = get_irn_n(succ, n_ia32_xStore_index);
+				ir_node  *idx   = get_irn_n(succ, n_ia32_xStore_index);
 				ir_node  *mem   = get_irn_n(succ, n_ia32_xStore_mem);
 				ir_node  *value = get_irn_n(succ, n_ia32_xStore_val);
 				ir_mode  *mode  = get_ia32_ls_mode(succ);
-				ir_node  *st    = new_bd_ia32_vfst(db, block, base, index, mem, value, mode);
+				ir_node  *st    = new_bd_ia32_vfst(db, block, base, idx, mem, value, mode);
+				//ir_node *mem   = new_r_Proj(st, mode_M, pn_ia32_vfst_M);
 
 				set_ia32_am_offs_int(st, get_ia32_am_offs_int(succ));
 				if (is_ia32_use_frame(succ))
 					set_ia32_use_frame(st);
@@ -5783,36 +5882,43 @@ static void postprocess_fp_call_results(void)
 				set_irn_pinned(st, get_irn_pinned(succ));
 				set_ia32_op_type(st, ia32_AddrModeD);
 
+				assert((long)pn_ia32_xStore_M == (long)pn_ia32_vfst_M);
+				assert((long)pn_ia32_xStore_X_regular == (long)pn_ia32_vfst_X_regular);
+				assert((long)pn_ia32_xStore_X_except == (long)pn_ia32_vfst_X_except);
+
 				exchange(succ, st);
-			} else {
-				if (new_res == NULL) {
-					dbg_info *db       = get_irn_dbg_info(call);
-					ir_node  *block    = get_nodes_block(call);
-					ir_node  *frame    = get_irg_frame(current_ir_graph);
-					ir_node  *old_mem  = be_get_Proj_for_pn(call, pn_ia32_Call_M);
-					ir_node  *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M);
-					ir_node  *vfst, *xld, *new_mem;
-
-					/* store st(0) on stack */
-					vfst = new_bd_ia32_vfst(db, block, frame, noreg_GP, call_mem, res, mode);
-					set_ia32_op_type(vfst, ia32_AddrModeD);
-					set_ia32_use_frame(vfst);
-
-					/* load into SSE register */
-					xld = new_bd_ia32_xLoad(db, block, frame, noreg_GP, vfst, mode);
-					set_ia32_op_type(xld, ia32_AddrModeS);
-					set_ia32_use_frame(xld);
-
-					new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res);
-					new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M);
-
-					if (old_mem != NULL) {
-						edges_reroute(old_mem, new_mem, current_ir_graph);
-						kill_node(old_mem);
-					}
+			} else if (new_res == NULL) {
+				dbg_info *db       = get_irn_dbg_info(call);
+				ir_node  *block    = get_nodes_block(call);
+				ir_node  *frame    = get_irg_frame(current_ir_graph);
+				ir_node  *old_mem  = be_get_Proj_for_pn(call, pn_ia32_Call_M);
+				ir_node  *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M);
+				ir_node  *vfst, *xld, *new_mem;
+				ir_node  *vfst_mem;
+
+				/* store st(0) on stack */
+				vfst = new_bd_ia32_vfst(db, block, frame, noreg_GP, call_mem,
+				                        res, res_mode);
+				set_ia32_op_type(vfst, ia32_AddrModeD);
+				set_ia32_use_frame(vfst);
+
+				vfst_mem = new_r_Proj(vfst, mode_M, pn_ia32_vfst_M);
+
+				/* load into SSE register */
+				xld = new_bd_ia32_xLoad(db, block, frame, noreg_GP, vfst_mem,
+				                        res_mode);
+				set_ia32_op_type(xld, ia32_AddrModeS);
+				set_ia32_use_frame(xld);
+
+				new_res = new_r_Proj(xld, res_mode, pn_ia32_xLoad_res);
+				new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M);
+
+				if (old_mem != NULL) {
+					edges_reroute(old_mem, new_mem);
+					kill_node(old_mem);
 				}
-				set_irn_n(succ, get_edge_src_pos(edge), new_res);
 			}
+			set_irn_n(succ, get_edge_src_pos(edge), new_res);
 		}
 	}
 }
@@ -5827,6 +5933,8 @@ void ia32_transform_graph(ir_graph *irg)
 	initial_fpcw       = NULL;
 	ia32_no_pic_adjust = 0;
 
+	old_initial_fpcw = be_get_initial_reg_value(irg, &ia32_registers[REG_FPCW]);
+
 	be_timer_push(T_HEIGHTS);
 	ia32_heights = heights_new(irg);
 	be_timer_pop(T_HEIGHTS);