X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=0097160a0b14f31ed1e008604e7e02e6f2d5fa98;hb=d9ebcc506768b24fb6378dc7cae91002be410eec;hp=6a585ece71e955a1f8663b1d4d345cbf5db98f37;hpb=644c7ca46f7d4b85ba28c6e59f31068f3e323216;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index 6a585ece7..0097160a0 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -38,7 +38,6 @@ #include "irprog_t.h" #include "iredges_t.h" #include "irgmod.h" -#include "irvrfy.h" #include "ircons.h" #include "irgwalk.h" #include "irprintf.h" @@ -46,13 +45,13 @@ #include "irdom.h" #include "error.h" #include "array_t.h" -#include "height.h" +#include "heights.h" -#include "../benode_t.h" +#include "../benode.h" #include "../besched.h" #include "../beabi.h" #include "../beutil.h" -#include "../beirg_t.h" +#include "../beirg.h" #include "../betranshlp.h" #include "../be_t.h" @@ -81,18 +80,19 @@ #define DFP_INTMAX "9223372036854775807" #define ULL_BIAS "18446744073709551616" -#define ENT_SFP_SIGN ".LC_ia32_sfp_sign" -#define ENT_DFP_SIGN ".LC_ia32_dfp_sign" -#define ENT_SFP_ABS ".LC_ia32_sfp_abs" -#define ENT_DFP_ABS ".LC_ia32_dfp_abs" -#define ENT_ULL_BIAS ".LC_ia32_ull_bias" +#define ENT_SFP_SIGN "C_ia32_sfp_sign" +#define ENT_DFP_SIGN "C_ia32_dfp_sign" +#define ENT_SFP_ABS "C_ia32_sfp_abs" +#define ENT_DFP_ABS "C_ia32_dfp_abs" +#define ENT_ULL_BIAS "C_ia32_ull_bias" -#define mode_vfp (ia32_reg_classes[CLASS_ia32_vfp].mode) +#define mode_vfp (ia32_reg_classes[CLASS_ia32_vfp].mode) #define mode_xmm (ia32_reg_classes[CLASS_ia32_xmm].mode) DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) static ir_node *initial_fpcw = NULL; +int ia32_no_pic_adjust; typedef ir_node *construct_binop_func(dbg_info *db, ir_node *block, ir_node *base, ir_node *index, ir_node *mem, ir_node *op1, @@ -154,7 +154,7 @@ static bool is_Const_Minus_1(ir_node *node) */ static bool is_simple_x87_Const(ir_node *node) { - tarval *tv = get_Const_tarval(node); + ir_tarval *tv = get_Const_tarval(node); if (tarval_is_null(tv) || tarval_is_one(tv)) return true; @@ -167,8 +167,8 @@ static bool is_simple_x87_Const(ir_node *node) */ static bool is_simple_sse_Const(ir_node *node) { - tarval *tv = get_Const_tarval(node); - ir_mode *mode = get_tarval_mode(tv); + ir_tarval *tv = get_Const_tarval(node); + ir_mode *mode = get_tarval_mode(tv); if (mode == mode_F) return true; @@ -194,6 +194,22 @@ static bool is_simple_sse_Const(ir_node *node) return false; } +/** + * return NoREG or pic_base in case of PIC. + * This is necessary as base address for newly created symbols + */ +static ir_node *get_symconst_base(void) +{ + ir_graph *irg = current_ir_graph; + + if (be_get_irg_options(irg)->pic) { + const arch_env_t *arch_env = be_get_irg_arch_env(irg); + return arch_env->impl->get_pic_base(irg); + } + + return noreg_GP; +} + /** * Transforms a Const. */ @@ -209,10 +225,11 @@ static ir_node *gen_Const(ir_node *node) if (mode_is_float(mode)) { ir_node *res = NULL; ir_node *load; + ir_node *base; ir_entity *floatent; if (ia32_cg_config.use_sse2) { - tarval *tv = get_Const_tarval(node); + ir_tarval *tv = get_Const_tarval(node); if (tarval_is_null(tv)) { load = new_bd_ia32_xZero(dbgi, block); set_ia32_ls_mode(load, mode); @@ -220,8 +237,8 @@ static ir_node *gen_Const(ir_node *node) #ifdef CONSTRUCT_SSE_CONST } else if (tarval_is_one(tv)) { int cnst = mode == mode_F ? 
26 : 55; - ir_node *imm1 = create_Immediate(NULL, 0, cnst); - ir_node *imm2 = create_Immediate(NULL, 0, 2); + ir_node *imm1 = ia32_create_Immediate(NULL, 0, cnst); + ir_node *imm2 = ia32_create_Immediate(NULL, 0, 2); ir_node *pslld, *psrld; load = new_bd_ia32_xAllOnes(dbgi, block); @@ -238,7 +255,7 @@ static ir_node *gen_Const(ir_node *node) (get_tarval_sub_bits(tv, 1) << 8) | (get_tarval_sub_bits(tv, 2) << 16) | (get_tarval_sub_bits(tv, 3) << 24); - ir_node *cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, val); + ir_node *cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, val); load = new_bd_ia32_xMovd(dbgi, block, cnst); set_ia32_ls_mode(load, mode); res = load; @@ -250,7 +267,7 @@ static ir_node *gen_Const(ir_node *node) (get_tarval_sub_bits(tv, 2) << 16) | (get_tarval_sub_bits(tv, 3) << 24); if (val == 0) { - ir_node *imm32 = create_Immediate(NULL, 0, 32); + ir_node *imm32 = ia32_create_Immediate(NULL, 0, 32); ir_node *cnst, *psllq; /* fine, lower 32bit are zero, produce 32bit value */ @@ -258,7 +275,7 @@ static ir_node *gen_Const(ir_node *node) (get_tarval_sub_bits(tv, 5) << 8) | (get_tarval_sub_bits(tv, 6) << 16) | (get_tarval_sub_bits(tv, 7) << 24); - cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, val); + cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, val); load = new_bd_ia32_xMovd(dbgi, block, cnst); set_ia32_ls_mode(load, mode); psllq = new_bd_ia32_xPsllq(dbgi, block, load, imm32); @@ -268,13 +285,15 @@ static ir_node *gen_Const(ir_node *node) } } #endif /* CONSTRUCT_SSE_CONST */ - floatent = create_float_const_entity(node); + floatent = ia32_create_float_const_entity(node); - load = new_bd_ia32_xLoad(dbgi, block, noreg_GP, noreg_GP, nomem, mode); + base = get_symconst_base(); + load = new_bd_ia32_xLoad(dbgi, block, base, noreg_GP, nomem, + mode); set_ia32_op_type(load, ia32_AddrModeS); set_ia32_am_sc(load, floatent); arch_irn_add_flags(load, arch_irn_flags_rematerializable); - res = new_r_Proj(current_ir_graph, block, load, mode_xmm, pn_ia32_xLoad_res); + res = new_r_Proj(load, mode_xmm, pn_ia32_xLoad_res); } } else { if (is_Const_null(node)) { @@ -287,18 +306,19 @@ static ir_node *gen_Const(ir_node *node) set_ia32_ls_mode(load, mode); } else { ir_mode *ls_mode; + ir_node *base; - floatent = create_float_const_entity(node); + floatent = ia32_create_float_const_entity(node); /* create_float_const_ent is smart and sometimes creates smaller entities */ ls_mode = get_type_mode(get_entity_type(floatent)); - - load = new_bd_ia32_vfld(dbgi, block, noreg_GP, noreg_GP, nomem, + base = get_symconst_base(); + load = new_bd_ia32_vfld(dbgi, block, base, noreg_GP, nomem, ls_mode); set_ia32_op_type(load, ia32_AddrModeS); set_ia32_am_sc(load, floatent); arch_irn_add_flags(load, arch_irn_flags_rematerializable); - res = new_r_Proj(current_ir_graph, block, load, mode_vfp, pn_ia32_vfld_res); + res = new_r_Proj(load, mode_vfp, pn_ia32_vfld_res); } } #ifdef CONSTRUCT_SSE_CONST @@ -309,9 +329,9 @@ end: be_dep_on_frame(load); return res; } else { /* non-float mode */ - ir_node *cnst; - tarval *tv = get_Const_tarval(node); - long val; + ir_node *cnst; + ir_tarval *tv = get_Const_tarval(node); + long val; tv = tarval_convert_to(tv, mode_Iu); @@ -321,7 +341,7 @@ end: } val = get_tarval_long(tv); - cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, val); + cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, val); SET_IA32_ORIG_NODE(cnst, node); be_dep_on_frame(cnst); @@ -354,7 +374,7 @@ static ir_node *gen_SymConst(ir_node *node) panic("backend only support symconst_addr_ent (at %+F)", node); } entity = 
get_SymConst_entity(node); - cnst = new_bd_ia32_Const(dbgi, block, entity, 0, 0); + cnst = new_bd_ia32_Const(dbgi, block, entity, 0, 0, 0); } SET_IA32_ORIG_NODE(cnst, node); @@ -369,8 +389,8 @@ static ir_node *gen_SymConst(ir_node *node) * @param mode the mode for the float type (might be integer mode for SSE2 types) * @param align alignment */ -static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) { - char buf[32]; +static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) +{ ir_type *tp; assert(align <= 16); @@ -379,8 +399,7 @@ static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) { static ir_type *int_Iu[16] = {NULL, }; if (int_Iu[align] == NULL) { - snprintf(buf, sizeof(buf), "int_Iu_%u", align); - int_Iu[align] = tp = new_type_primitive(new_id_from_str(buf), mode); + int_Iu[align] = tp = new_type_primitive(mode); /* set the specified alignment */ set_type_alignment_bytes(tp, align); } @@ -389,8 +408,7 @@ static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) { static ir_type *int_Lu[16] = {NULL, }; if (int_Lu[align] == NULL) { - snprintf(buf, sizeof(buf), "int_Lu_%u", align); - int_Lu[align] = tp = new_type_primitive(new_id_from_str(buf), mode); + int_Lu[align] = tp = new_type_primitive(mode); /* set the specified alignment */ set_type_alignment_bytes(tp, align); } @@ -399,8 +417,7 @@ static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) { static ir_type *float_F[16] = {NULL, }; if (float_F[align] == NULL) { - snprintf(buf, sizeof(buf), "float_F_%u", align); - float_F[align] = tp = new_type_primitive(new_id_from_str(buf), mode); + float_F[align] = tp = new_type_primitive(mode); /* set the specified alignment */ set_type_alignment_bytes(tp, align); } @@ -409,8 +426,7 @@ static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) { static ir_type *float_D[16] = {NULL, }; if (float_D[align] == NULL) { - snprintf(buf, sizeof(buf), "float_D_%u", align); - float_D[align] = tp = new_type_primitive(new_id_from_str(buf), mode); + float_D[align] = tp = new_type_primitive(mode); /* set the specified alignment */ set_type_alignment_bytes(tp, align); } @@ -419,8 +435,7 @@ static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) { static ir_type *float_E[16] = {NULL, }; if (float_E[align] == NULL) { - snprintf(buf, sizeof(buf), "float_E_%u", align); - float_E[align] = tp = new_type_primitive(new_id_from_str(buf), mode); + float_E[align] = tp = new_type_primitive(mode); /* set the specified alignment */ set_type_alignment_bytes(tp, align); } @@ -433,8 +448,8 @@ static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) { * * @param tp the atomic type */ -static ir_type *ia32_create_float_array(ir_type *tp) { - char buf[32]; +static ir_type *ia32_create_float_array(ir_type *tp) +{ ir_mode *mode = get_type_mode(tp); unsigned align = get_type_alignment_bytes(tp); ir_type *arr; @@ -446,22 +461,19 @@ static ir_type *ia32_create_float_array(ir_type *tp) { if (float_F[align] != NULL) return float_F[align]; - snprintf(buf, sizeof(buf), "arr_float_F_%u", align); - arr = float_F[align] = new_type_array(new_id_from_str(buf), 1, tp); + arr = float_F[align] = new_type_array(1, tp); } else if (mode == mode_D) { static ir_type *float_D[16] = {NULL, }; if (float_D[align] != NULL) return float_D[align]; - snprintf(buf, sizeof(buf), "arr_float_D_%u", align); - arr = float_D[align] = new_type_array(new_id_from_str(buf), 1, tp); + arr = float_D[align] = new_type_array(1, tp); } else { static ir_type *float_E[16] 
= {NULL, }; if (float_E[align] != NULL) return float_E[align]; - snprintf(buf, sizeof(buf), "arr_float_E_%u", align); - arr = float_E[align] = new_type_array(new_id_from_str(buf), 1, tp); + arr = float_E[align] = new_type_array(1, tp); } set_type_alignment_bytes(arr, align); set_type_size_bytes(arr, 2 * get_type_size_bytes(tp)); @@ -486,11 +498,11 @@ ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct) }; static ir_entity *ent_cache[ia32_known_const_max]; - const char *ent_name, *cnst_str; - ir_type *tp; - ir_entity *ent; - tarval *tv; - ir_mode *mode; + const char *ent_name, *cnst_str; + ir_type *tp; + ir_entity *ent; + ir_tarval *tv; + ir_mode *mode; ent_name = names[kct].ent_name; if (! ent_cache[kct]) { @@ -509,15 +521,14 @@ ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct) ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp); set_entity_ld_ident(ent, get_entity_ident(ent)); - set_entity_visibility(ent, visibility_local); - set_entity_variability(ent, variability_constant); - set_entity_allocation(ent, allocation_static); + add_entity_linkage(ent, IR_LINKAGE_CONSTANT); + set_entity_visibility(ent, ir_visibility_private); if (kct == ia32_ULLBIAS) { ir_initializer_t *initializer = create_initializer_compound(2); set_initializer_compound_value(initializer, 0, - create_initializer_tarval(get_tarval_null(mode))); + create_initializer_tarval(get_mode_null(mode))); set_initializer_compound_value(initializer, 1, create_initializer_tarval(tv)); @@ -580,10 +591,10 @@ static int ia32_use_source_address_mode(ir_node *block, ir_node *node, return 0; /* don't do AM if other node inputs depend on the load (via mem-proj) */ - if (other != NULL && prevents_AM(block, load, other)) + if (other != NULL && ia32_prevents_AM(block, load, other)) return 0; - if (other2 != NULL && prevents_AM(block, load, other2)) + if (other2 != NULL && ia32_prevents_AM(block, load, other2)) return 0; return 1; @@ -607,7 +618,7 @@ static void build_address_ptr(ia32_address_t *addr, ir_node *ptr, ir_node *mem) { /* construct load address */ memset(addr, 0, sizeof(addr[0])); - ia32_create_address_mode(addr, ptr, 0); + ia32_create_address_mode(addr, ptr, ia32_create_am_normal); addr->base = addr->base ? be_transform_node(addr->base) : noreg_GP; addr->index = addr->index ? be_transform_node(addr->index) : noreg_GP; @@ -623,9 +634,10 @@ static void build_address(ia32_address_mode_t *am, ir_node *node, ir_node *mem; ir_node *new_mem; + /* floating point immediates */ if (is_Const(node)) { - ir_entity *entity = create_float_const_entity(node); - addr->base = noreg_GP; + ir_entity *entity = ia32_create_float_const_entity(node); + addr->base = get_symconst_base(); addr->index = noreg_GP; addr->mem = nomem; addr->symconst_ent = entity; @@ -699,8 +711,8 @@ static int is_downconv(const ir_node *node) return 0; /* we only want to skip the conv when we're the only user - * (not optimal but for now...) - */ + * (because this test is used in the context of address-mode selection + * and we don't want to use address mode for multiple users) */ if (get_irn_n_edges(node) > 1) return 0; @@ -712,7 +724,7 @@ static int is_downconv(const ir_node *node) get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode); } -/* Skip all Down-Conv's on a given node and return the resulting node. */ +/** Skip all Down-Conv's on a given node and return the resulting node. 
*/ ir_node *ia32_skip_downconv(ir_node *node) { while (is_downconv(node)) @@ -721,6 +733,37 @@ ir_node *ia32_skip_downconv(ir_node *node) return node; } +static bool is_sameconv(ir_node *node) +{ + ir_mode *src_mode; + ir_mode *dest_mode; + + if (!is_Conv(node)) + return 0; + + /* we only want to skip the conv when we're the only user + * (because this test is used in the context of address-mode selection + * and we don't want to use address mode for multiple users) */ + if (get_irn_n_edges(node) > 1) + return 0; + + src_mode = get_irn_mode(get_Conv_op(node)); + dest_mode = get_irn_mode(node); + return + ia32_mode_needs_gp_reg(src_mode) && + ia32_mode_needs_gp_reg(dest_mode) && + get_mode_size_bits(dest_mode) == get_mode_size_bits(src_mode); +} + +/** Skip all signedness convs */ +static ir_node *ia32_skip_sameconv(ir_node *node) +{ + while (is_sameconv(node)) + node = get_Conv_op(node); + + return node; +} + static ir_node *create_upconv(ir_node *node, ir_node *orig_node) { ir_mode *mode = get_irn_mode(node); @@ -788,21 +831,26 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, if (op1 != NULL) { op1 = ia32_skip_downconv(op1); } + } else { + op2 = ia32_skip_sameconv(op2); + if (op1 != NULL) { + op1 = ia32_skip_sameconv(op1); + } } /* match immediates. firm nodes are normalized: constants are always on the * op2 input */ new_op2 = NULL; if (!(flags & match_try_am) && use_immediate) { - new_op2 = try_create_Immediate(op2, 0); + new_op2 = ia32_try_create_Immediate(op2, 0); } if (new_op2 == NULL && use_am && ia32_use_source_address_mode(block, op2, op1, other_op, flags)) { - build_address(am, op2, 0); + build_address(am, op2, ia32_create_am_normal); new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); if (mode_is_float(mode)) { - new_op2 = ia32_new_NoReg_vfp(env_cg); + new_op2 = ia32_new_NoReg_vfp(current_ir_graph); } else { new_op2 = noreg_GP; } @@ -811,10 +859,10 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, use_am && ia32_use_source_address_mode(block, op1, op2, other_op, flags)) { ir_node *noreg; - build_address(am, op1, 0); + build_address(am, op1, ia32_create_am_normal); if (mode_is_float(mode)) { - noreg = ia32_new_NoReg_vfp(env_cg); + noreg = ia32_new_NoReg_vfp(current_ir_graph); } else { noreg = noreg_GP; } @@ -828,6 +876,7 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, } am->op_type = ia32_AddrModeS; } else { + ir_mode *mode; am->op_type = ia32_Normal; if (flags & match_try_am) { @@ -836,11 +885,18 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, return; } - new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); - if (new_op2 == NULL) - new_op2 = be_transform_node(op2); - am->ls_mode = - (flags & match_mode_neutral ? mode_Iu : get_irn_mode(op2)); + mode = get_irn_mode(op2); + if (flags & match_upconv_32 && get_mode_size_bits(mode) != 32) { + new_op1 = (op1 == NULL ? NULL : create_upconv(op1, NULL)); + if (new_op2 == NULL) + new_op2 = create_upconv(op2, NULL); + am->ls_mode = mode_Iu; + } else { + new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); + if (new_op2 == NULL) + new_op2 = be_transform_node(op2); + am->ls_mode = (flags & match_mode_neutral) ? 
mode_Iu : mode; + } } if (addr->base == NULL) addr->base = noreg_GP; @@ -880,7 +936,7 @@ static ir_node *fix_mem_proj(ir_node *node, ia32_address_mode_t *am) if (mode != mode_T) { set_irn_mode(node, mode_T); - return new_rd_Proj(NULL, current_ir_graph, get_nodes_block(node), node, mode, pn_ia32_res); + return new_rd_Proj(NULL, node, mode, pn_ia32_res); } else { return node; } @@ -930,12 +986,12 @@ enum { n_ia32_l_binop_right, /**< ia32 right input */ n_ia32_l_binop_eflags /**< ia32 eflags input */ }; -COMPILETIME_ASSERT(n_ia32_l_binop_left == n_ia32_l_Adc_left, n_Adc_left) -COMPILETIME_ASSERT(n_ia32_l_binop_right == n_ia32_l_Adc_right, n_Adc_right) -COMPILETIME_ASSERT(n_ia32_l_binop_eflags == n_ia32_l_Adc_eflags, n_Adc_eflags) -COMPILETIME_ASSERT(n_ia32_l_binop_left == n_ia32_l_Sbb_minuend, n_Sbb_minuend) -COMPILETIME_ASSERT(n_ia32_l_binop_right == n_ia32_l_Sbb_subtrahend, n_Sbb_subtrahend) -COMPILETIME_ASSERT(n_ia32_l_binop_eflags == n_ia32_l_Sbb_eflags, n_Sbb_eflags) +COMPILETIME_ASSERT((int)n_ia32_l_binop_left == (int)n_ia32_l_Adc_left, n_Adc_left) +COMPILETIME_ASSERT((int)n_ia32_l_binop_right == (int)n_ia32_l_Adc_right, n_Adc_right) +COMPILETIME_ASSERT((int)n_ia32_l_binop_eflags == (int)n_ia32_l_Adc_eflags, n_Adc_eflags) +COMPILETIME_ASSERT((int)n_ia32_l_binop_left == (int)n_ia32_l_Sbb_minuend, n_Sbb_minuend) +COMPILETIME_ASSERT((int)n_ia32_l_binop_right == (int)n_ia32_l_Sbb_subtrahend, n_Sbb_subtrahend) +COMPILETIME_ASSERT((int)n_ia32_l_binop_eflags == (int)n_ia32_l_Sbb_eflags, n_Sbb_eflags) /** * Construct a binary operation which also consumes the eflags. @@ -982,8 +1038,8 @@ static ir_node *get_fpcw(void) if (initial_fpcw != NULL) return initial_fpcw; - fpcw = be_abi_get_ignore_irn(env_cg->birg->abi, - &ia32_fp_cw_regs[REG_FPCW]); + fpcw = be_abi_get_ignore_irn(be_get_irg_abi(current_ir_graph), + &ia32_registers[REG_FPCW]); initial_fpcw = be_transform_node(fpcw); return initial_fpcw; @@ -1000,7 +1056,7 @@ static ir_node *get_fpcw(void) static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, construct_binop_float_func *func) { - ir_mode *mode = get_irn_mode(node); + ir_mode *mode = get_irn_mode(node); dbg_info *dbgi; ir_node *block, *new_block, *new_node; ia32_address_mode_t am; @@ -1010,6 +1066,10 @@ static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, * variants */ match_flags_t flags = match_commutative; + /* happens for div nodes... */ + if (mode == mode_T) + mode = get_divop_resmod(node); + /* cannot use address mode with long double on x87 */ if (get_mode_size_bits(mode) <= 64) flags |= match_am; @@ -1118,7 +1178,7 @@ static ir_node *gen_unop(ir_node *node, ir_node *op, construct_unop_func *func, return new_node; } -static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block, +static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block, ia32_address_t *addr) { ir_node *base, *index, *res; @@ -1199,7 +1259,7 @@ static ir_node *gen_Add(ir_node *node) /* a constant? 
*/ if (addr.base == NULL && addr.index == NULL) { new_node = new_bd_ia32_Const(dbgi, new_block, addr.symconst_ent, - addr.symconst_sign, addr.offset); + addr.symconst_sign, 0, addr.offset); be_dep_on_frame(new_node); SET_IA32_ORIG_NODE(new_node, node); return new_node; @@ -1281,23 +1341,23 @@ static ir_node *gen_Mul(ir_node *node) */ static ir_node *gen_Mulh(ir_node *node) { - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *op1 = get_Mulh_left(node); - ir_node *op2 = get_Mulh_right(node); - ir_mode *mode = get_irn_mode(node); - ir_node *new_node; - ir_node *proj_res_high; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *op1 = get_Mulh_left(node); + ir_node *op2 = get_Mulh_right(node); + ir_mode *mode = get_irn_mode(node); + ir_node *new_node; + ir_node *proj_res_high; + + if (get_mode_size_bits(mode) != 32) { + panic("Mulh without 32bit size not supported in ia32 backend (%+F)", node); + } if (mode_is_signed(mode)) { new_node = gen_binop(node, op1, op2, new_bd_ia32_IMul1OP, match_commutative | match_am); - proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node, - mode_Iu, pn_ia32_IMul1OP_res_high); + proj_res_high = new_rd_Proj(dbgi, new_node, mode_Iu, pn_ia32_IMul1OP_res_high); } else { new_node = gen_binop(node, op1, op2, new_bd_ia32_Mul, match_commutative | match_am); - proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node, - mode_Iu, pn_ia32_Mul_res_high); + proj_res_high = new_rd_Proj(dbgi, new_node, mode_Iu, pn_ia32_Mul_res_high); } return proj_res_high; } @@ -1315,8 +1375,8 @@ static ir_node *gen_And(ir_node *node) /* is it a zero extension? */ if (is_Const(op2)) { - tarval *tv = get_Const_tarval(op2); - long v = get_tarval_long(tv); + ir_tarval *tv = get_Const_tarval(op2); + long v = get_tarval_long(tv); if (v == 0xFF || v == 0xFFFF) { dbg_info *dbgi = get_irn_dbg_info(node); @@ -1401,7 +1461,7 @@ static ir_node *gen_Sub(ir_node *node) | match_am | match_immediate); } -static ir_node *transform_AM_mem(ir_graph *const irg, ir_node *const block, +static ir_node *transform_AM_mem(ir_node *const block, ir_node *const src_val, ir_node *const src_mem, ir_node *const am_mem) @@ -1437,13 +1497,13 @@ static ir_node *transform_AM_mem(ir_graph *const irg, ir_node *const block, ins[n++] = am_mem; - return new_r_Sync(irg, block, n, ins); + return new_r_Sync(block, n, ins); } else { ir_node *ins[2]; ins[0] = be_transform_node(src_mem); ins[1] = am_mem; - return new_r_Sync(irg, block, 2, ins); + return new_r_Sync(block, 2, ins); } } @@ -1466,7 +1526,7 @@ static ir_node *create_sex_32_64(dbg_info *dbgi, ir_node *block, be_dep_on_frame(pval); res = new_bd_ia32_Cltd(dbgi, block, val, pval); } else { - ir_node *imm31 = create_Immediate(NULL, 0, 31); + ir_node *imm31 = ia32_create_Immediate(NULL, 0, 31); res = new_bd_ia32_Sar(dbgi, block, val, imm31); } SET_IA32_ORIG_NODE(res, orig); @@ -1516,19 +1576,19 @@ static ir_node *create_Div(ir_node *node) panic("invalid divmod node %+F", node); } - match_arguments(&am, block, op1, op2, NULL, match_am); + match_arguments(&am, block, op1, op2, NULL, match_am | match_upconv_32); /* Beware: We don't need a Sync, if the memory predecessor of the Div node is the memory of the consumed address. We can have only the second op as address in Div nodes, so check only op2. 
*/ - new_mem = transform_AM_mem(current_ir_graph, block, op2, mem, addr->mem); + new_mem = transform_AM_mem(block, op2, mem, addr->mem); if (mode_is_signed(mode)) { sign_extension = create_sex_32_64(dbgi, new_block, am.new_op1, node); new_node = new_bd_ia32_IDiv(dbgi, new_block, addr->base, addr->index, new_mem, am.new_op2, am.new_op1, sign_extension); } else { - sign_extension = new_bd_ia32_Const(dbgi, new_block, NULL, 0, 0); + sign_extension = new_bd_ia32_Const(dbgi, new_block, NULL, 0, 0, 0); be_dep_on_frame(sign_extension); new_node = new_bd_ia32_Div(dbgi, new_block, addr->base, @@ -1630,8 +1690,8 @@ static ir_node *gen_Shrs(ir_node *node) ir_node *right = get_Shrs_right(node); if (is_Const(right)) { - tarval *tv = get_Const_tarval(right); - long val = get_tarval_long(tv); + ir_tarval *tv = get_Const_tarval(right); + long val = get_tarval_long(tv); if (val == 31) { /* this is a sign extension */ dbg_info *dbgi = get_irn_dbg_info(node); @@ -1647,8 +1707,8 @@ static ir_node *gen_Shrs(ir_node *node) ir_node *shl_left = get_Shl_left(left); ir_node *shl_right = get_Shl_right(left); if (is_Const(shl_right)) { - tarval *tv1 = get_Const_tarval(right); - tarval *tv2 = get_Const_tarval(shl_right); + ir_tarval *tv1 = get_Const_tarval(right); + ir_tarval *tv2 = get_Const_tarval(shl_right); if (tv1 == tv2 && tarval_is_long(tv1)) { long val = get_tarval_long(tv1); if (val == 16 || val == 24) { @@ -1714,39 +1774,14 @@ static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2) */ static ir_node *gen_Rotl(ir_node *node) { - ir_node *rotate = NULL; ir_node *op1 = get_Rotl_left(node); ir_node *op2 = get_Rotl_right(node); - /* Firm has only RotL, so we are looking for a right (op2) - operand "-e+mode_size_bits" (it's an already modified "mode_size_bits-e", - that means we can create a RotR instead of an Add and a RotL */ - - if (is_Add(op2)) { - ir_node *add = op2; - ir_node *left = get_Add_left(add); - ir_node *right = get_Add_right(add); - if (is_Const(right)) { - tarval *tv = get_Const_tarval(right); - ir_mode *mode = get_irn_mode(node); - long bits = get_mode_size_bits(mode); - - if (is_Minus(left) && - tarval_is_long(tv) && - get_tarval_long(tv) == bits && - bits == 32) - { - DB((dbg, LEVEL_1, "RotL into RotR ... ")); - rotate = gen_Ror(node, op1, get_Minus_op(left)); - } - } + if (is_Minus(op2)) { + return gen_Ror(node, op1, get_Minus_op(op2)); } - if (rotate == NULL) { - rotate = gen_Rol(node, op1, op2); - } - - return rotate; + return gen_Rol(node, op1, op2); } @@ -1772,10 +1807,10 @@ static ir_node *gen_Minus(ir_node *node) /* TODO: non-optimal... if we have many xXors, then we should * rather create a load for the const and use that instead of * several AM nodes... */ - ir_node *noreg_xmm = ia32_new_NoReg_xmm(env_cg); + ir_node *noreg_xmm = ia32_new_NoReg_xmm(current_ir_graph); - new_node = new_bd_ia32_xXor(dbgi, block, noreg_GP, noreg_GP, - nomem, new_op, noreg_xmm); + new_node = new_bd_ia32_xXor(dbgi, block, get_symconst_base(), + noreg_GP, nomem, new_op, noreg_xmm); size = get_mode_size_bits(mode); ent = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN); @@ -1802,7 +1837,7 @@ static ir_node *gen_Minus(ir_node *node) */ static ir_node *gen_Not(ir_node *node) { - ir_node *op = get_Not_op(node); + ir_node *op = get_Not_op(node); assert(get_irn_mode(node) != mode_b); /* should be lowered already */ assert (! 
mode_is_float(get_irn_mode(node))); @@ -1810,20 +1845,11 @@ static ir_node *gen_Not(ir_node *node) return gen_unop(node, op, new_bd_ia32_Not, match_mode_neutral); } - - -/** - * Transforms an Abs node. - * - * @return The created ia32 Abs node - */ -static ir_node *gen_Abs(ir_node *node) +static ir_node *create_abs(dbg_info *dbgi, ir_node *block, ir_node *op, + bool negate, ir_node *node) { - ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); - ir_node *op = get_Abs_op(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); + ir_mode *mode = get_irn_mode(op); ir_node *new_op; ir_node *new_node; int size; @@ -1833,9 +1859,9 @@ static ir_node *gen_Abs(ir_node *node) new_op = be_transform_node(op); if (ia32_cg_config.use_sse2) { - ir_node *noreg_fp = ia32_new_NoReg_xmm(env_cg); - new_node = new_bd_ia32_xAnd(dbgi, new_block, noreg_GP, noreg_GP, - nomem, new_op, noreg_fp); + ir_node *noreg_fp = ia32_new_NoReg_xmm(current_ir_graph); + new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(), + noreg_GP, nomem, new_op, noreg_fp); size = get_mode_size_bits(mode); ent = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS); @@ -1846,12 +1872,20 @@ static ir_node *gen_Abs(ir_node *node) set_ia32_op_type(new_node, ia32_AddrModeS); set_ia32_ls_mode(new_node, mode); + + /* TODO, implement -Abs case */ + assert(!negate); } else { new_node = new_bd_ia32_vfabs(dbgi, new_block, new_op); SET_IA32_ORIG_NODE(new_node, node); + if (negate) { + new_node = new_bd_ia32_vfchs(dbgi, new_block, new_node); + SET_IA32_ORIG_NODE(new_node, node); + } } } else { - ir_node *xor, *sign_extension; + ir_node *xorn; + ir_node *sign_extension; if (get_mode_size_bits(mode) == 32) { new_op = be_transform_node(op); @@ -1861,12 +1895,17 @@ static ir_node *gen_Abs(ir_node *node) sign_extension = create_sex_32_64(dbgi, new_block, new_op, node); - xor = new_bd_ia32_Xor(dbgi, new_block, noreg_GP, noreg_GP, + xorn = new_bd_ia32_Xor(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_op, sign_extension); - SET_IA32_ORIG_NODE(xor, node); + SET_IA32_ORIG_NODE(xorn, node); - new_node = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, - nomem, xor, sign_extension); + if (negate) { + new_node = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, + nomem, sign_extension, xorn); + } else { + new_node = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, + nomem, xorn, sign_extension); + } SET_IA32_ORIG_NODE(new_node, node); } @@ -1893,7 +1932,7 @@ static ir_node *gen_bt(ir_node *cmp, ir_node *x, ir_node *n) * @param node the node to transform * @param pnc_out the compare mode to use */ -static ir_node *get_flags_node(ir_node *node, pn_Cmp *pnc_out) +static ir_node *get_flags_node(ir_node *node, int *pnc_out) { ir_node *flags; ir_node *new_op; @@ -1904,7 +1943,7 @@ static ir_node *get_flags_node(ir_node *node, pn_Cmp *pnc_out) if (is_Proj(node)) { ir_node *pred = get_Proj_pred(node); if (is_Cmp(pred)) { - pn_Cmp pnc = get_Proj_proj(node); + int pnc = get_Proj_pn_cmp(node); if (ia32_cg_config.use_bt && (pnc == pn_Cmp_Lg || pnc == pn_Cmp_Eq)) { ir_node *l = get_Cmp_left(pred); ir_node *r = get_Cmp_right(pred); @@ -1941,8 +1980,17 @@ static ir_node *get_flags_node(ir_node *node, pn_Cmp *pnc_out) } } } - flags = be_transform_node(pred); + /* add ia32 compare flags */ + { + ir_node *l = get_Cmp_left(pred); + ir_mode *mode = get_irn_mode(l); + if (mode_is_float(mode)) + pnc |= ia32_pn_Cmp_float; + else if (! 
mode_is_signed(mode)) + pnc |= ia32_pn_Cmp_unsigned; + } *pnc_out = pnc; + flags = be_transform_node(pred); return flags; } } @@ -1973,13 +2021,12 @@ static ir_node *gen_Load(ir_node *node) ir_node *index; dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_Load_mode(node); - ir_mode *res_mode; ir_node *new_node; ia32_address_t addr; /* construct load address */ memset(&addr, 0, sizeof(addr)); - ia32_create_address_mode(&addr, ptr, 0); + ia32_create_address_mode(&addr, ptr, ia32_create_am_normal); base = addr.base; index = addr.index; @@ -1999,11 +2046,9 @@ static ir_node *gen_Load(ir_node *node) if (ia32_cg_config.use_sse2) { new_node = new_bd_ia32_xLoad(dbgi, block, base, index, new_mem, mode); - res_mode = mode_xmm; } else { new_node = new_bd_ia32_vfld(dbgi, block, base, index, new_mem, mode); - res_mode = mode_vfp; } } else { assert(mode != mode_b); @@ -2015,7 +2060,6 @@ static ir_node *gen_Load(ir_node *node) } else { new_node = new_bd_ia32_Load(dbgi, block, base, index, new_mem); } - res_mode = mode_Iu; } set_irn_pinned(new_node, get_irn_pinned(node)); @@ -2024,9 +2068,9 @@ static ir_node *gen_Load(ir_node *node) set_address(new_node, &addr); if (get_irn_pinned(node) == op_pin_state_floats) { - assert(pn_ia32_xLoad_res == pn_ia32_vfld_res - && pn_ia32_vfld_res == pn_ia32_Load_res - && pn_ia32_Load_res == pn_ia32_res); + assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res + && (int)pn_ia32_vfld_res == (int)pn_ia32_Load_res + && (int)pn_ia32_Load_res == (int)pn_ia32_res); arch_irn_add_flags(new_node, arch_irn_flags_rematerializable); } @@ -2061,14 +2105,14 @@ static int use_dest_am(ir_node *block, ir_node *node, ir_node *mem, /* don't do AM if other node inputs depend on the load (via mem-proj) */ if (other != NULL && get_nodes_block(other) == block && - heights_reachable_in_block(heights, other, load)) { + heights_reachable_in_block(ia32_heights, other, load)) { return 0; } - if (prevents_AM(block, load, mem)) + if (ia32_prevents_AM(block, load, mem)) return 0; /* Store should be attached to the load via mem */ - assert(heights_reachable_in_block(heights, mem, load)); + assert(heights_reachable_in_block(ia32_heights, mem, load)); return 1; } @@ -2113,7 +2157,7 @@ static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, dbgi = get_irn_dbg_info(node); block = be_transform_node(src_block); - new_mem = transform_AM_mem(current_ir_graph, block, am.am_node, mem, addr->mem); + new_mem = transform_AM_mem(block, am.am_node, mem, addr->mem); if (get_mode_size_bits(mode) == 8) { new_node = func8bit(dbgi, block, addr->base, addr->index, new_mem, new_op); @@ -2153,7 +2197,7 @@ static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, dbgi = get_irn_dbg_info(node); block = be_transform_node(src_block); - new_mem = transform_AM_mem(current_ir_graph, block, am.am_node, mem, addr->mem); + new_mem = transform_AM_mem(block, am.am_node, mem, addr->mem); new_node = func(dbgi, block, addr->base, addr->index, new_mem); set_address(new_node, addr); set_ia32_op_type(new_node, ia32_AddrModeD); @@ -2167,43 +2211,53 @@ static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, return new_node; } +static int ia32_get_negated_pnc(int pnc) +{ + ir_mode *mode = pnc & ia32_pn_Cmp_float ? 
mode_F : mode_Iu; + return get_negated_pnc(pnc, mode); +} + static ir_node *try_create_SetMem(ir_node *node, ir_node *ptr, ir_node *mem) { - ir_mode *mode = get_irn_mode(node); - ir_node *mux_true = get_Mux_true(node); - ir_node *mux_false = get_Mux_false(node); - ir_node *cond; - ir_node *new_mem; - dbg_info *dbgi; - ir_node *block; - ir_node *new_block; - ir_node *flags; - ir_node *new_node; - int negated; - pn_Cmp pnc; - ia32_address_t addr; + ir_mode *mode = get_irn_mode(node); + ir_node *mux_true = get_Mux_true(node); + ir_node *mux_false = get_Mux_false(node); + ir_node *cond; + dbg_info *dbgi; + ir_node *block; + ir_node *new_block; + ir_node *flags; + ir_node *new_node; + bool negated; + int pnc; + ia32_address_t addr; if (get_mode_size_bits(mode) != 8) return NULL; if (is_Const_1(mux_true) && is_Const_0(mux_false)) { - negated = 0; + negated = false; } else if (is_Const_0(mux_true) && is_Const_1(mux_false)) { - negated = 1; + negated = true; } else { return NULL; } + cond = get_Mux_sel(node); + flags = get_flags_node(cond, &pnc); + /* we can't handle the float special cases with SetM */ + if (pnc & ia32_pn_Cmp_float) + return NULL; + if (negated) + pnc = ia32_get_negated_pnc(pnc); + build_address_ptr(&addr, ptr, mem); dbgi = get_irn_dbg_info(node); block = get_nodes_block(node); new_block = be_transform_node(block); - cond = get_Mux_sel(node); - flags = get_flags_node(cond, &pnc); - new_mem = be_transform_node(mem); - new_node = new_bd_ia32_SetMem(dbgi, new_block, addr.base, - addr.index, addr.mem, flags, pnc, negated); + new_node = new_bd_ia32_SetccMem(dbgi, new_block, addr.base, + addr.index, addr.mem, flags, pnc); set_address(new_node, &addr); set_ia32_op_type(new_node, ia32_AddrModeD); set_ia32_ls_mode(new_node, mode); @@ -2329,6 +2383,7 @@ static ir_node *try_create_dest_am(ir_node *node) case iro_Mux: new_node = try_create_SetMem(val, ptr, mem); break; + case iro_Minus: op1 = get_Minus_op(val); new_node = dest_am_unop(val, op1, mem, ptr, mode, new_bd_ia32_NegMem); @@ -2395,7 +2450,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) { ir_mode *mode = get_irn_mode(cns); unsigned size = get_mode_size_bytes(mode); - tarval *tv = get_Const_tarval(cns); + ir_tarval *tv = get_Const_tarval(cns); ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); ir_node *ptr = get_Store_ptr(node); @@ -2417,7 +2472,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) (get_tarval_sub_bits(tv, ofs + 1) << 8) | (get_tarval_sub_bits(tv, ofs + 2) << 16) | (get_tarval_sub_bits(tv, ofs + 3) << 24); - ir_node *imm = create_Immediate(NULL, 0, val); + ir_node *imm = ia32_create_Immediate(NULL, 0, val); ir_node *new_node = new_bd_ia32_Store(dbgi, new_block, addr.base, addr.index, addr.mem, imm); @@ -2437,7 +2492,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) } while (size != 0); if (i > 1) { - return new_rd_Sync(dbgi, current_ir_graph, new_block, i, ins); + return new_rd_Sync(dbgi, new_block, i, ins); } else { return ins[0]; } @@ -2446,7 +2501,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) /** * Generate a vfist or vfisttp instruction. 
*/ -static ir_node *gen_vfist(dbg_info *dbgi, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, +static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, ir_node *index, ir_node *mem, ir_node *val, ir_node **fist) { ir_node *new_node; @@ -2454,15 +2509,14 @@ static ir_node *gen_vfist(dbg_info *dbgi, ir_graph *irg, ir_node *block, ir_node if (ia32_cg_config.use_fisttp) { /* Note: fisttp ALWAYS pop the tos. We have to ensure here that the value is copied if other users exists */ - const arch_register_class_t *reg_class = &ia32_reg_classes[CLASS_ia32_vfp]; ir_node *vfisttp = new_bd_ia32_vfisttp(dbgi, block, base, index, mem, val); - ir_node *value = new_r_Proj(irg, block, vfisttp, mode_E, pn_ia32_vfisttp_res); - be_new_Keep(reg_class, irg, block, 1, &value); + ir_node *value = new_r_Proj(vfisttp, mode_E, pn_ia32_vfisttp_res); + be_new_Keep(block, 1, &value); - new_node = new_r_Proj(irg, block, vfisttp, mode_M, pn_ia32_vfisttp_M); + new_node = new_r_Proj(vfisttp, mode_M, pn_ia32_vfisttp_M); *fist = vfisttp; } else { - ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg); + ir_node *trunc_mode = ia32_new_Fpu_truncate(current_ir_graph); /* do a fist */ new_node = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode); @@ -2494,7 +2548,7 @@ static ir_node *gen_general_Store(ir_node *node) /* construct store address */ memset(&addr, 0, sizeof(addr)); - ia32_create_address_mode(&addr, ptr, 0); + ia32_create_address_mode(&addr, ptr, ia32_create_am_normal); if (addr.base == NULL) { addr.base = noreg_GP; @@ -2541,7 +2595,7 @@ static ir_node *gen_general_Store(ir_node *node) val = op; } new_val = be_transform_node(val); - new_node = gen_vfist(dbgi, current_ir_graph, new_block, addr.base, addr.index, addr.mem, new_val, &store); + new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val, &store); } else { new_val = create_immediate_or_transform(val, 0); assert(mode != mode_b); @@ -2600,7 +2654,7 @@ static ir_node *create_Switch(ir_node *node) ir_node *new_sel = be_transform_node(sel); long switch_min = LONG_MAX; long switch_max = LONG_MIN; - long default_pn = get_Cond_defaultProj(node); + long default_pn = get_Cond_default_proj(node); ir_node *new_node; const ir_edge_t *edge; @@ -2619,8 +2673,8 @@ static ir_node *create_Switch(ir_node *node) switch_max = pn; } - if ((unsigned long) (switch_max - switch_min) > 256000) { - panic("Size of switch %+F bigger than 256000", node); + if ((unsigned long) (switch_max - switch_min) > 128000) { + panic("Size of switch %+F bigger than 128000", node); } if (switch_min != 0) { @@ -2646,11 +2700,11 @@ static ir_node *gen_Cond(ir_node *node) ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *sel = get_Cond_selector(node); + ir_node *sel = get_Cond_selector(node); ir_mode *sel_mode = get_irn_mode(sel); ir_node *flags = NULL; ir_node *new_node; - pn_Cmp pnc; + int pnc; if (sel_mode != mode_b) { return create_Switch(node); @@ -2753,7 +2807,7 @@ static bool can_fold_test_and(ir_node *node) /** we can only have eq and lg projs */ foreach_out_edge(node, edge) { ir_node *proj = get_edge_src_irn(edge); - pn_Cmp pnc = get_Proj_proj(proj); + pn_Cmp pnc = get_Proj_pn_cmp(proj); if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) return false; } @@ -2935,7 +2989,7 @@ static ir_node *gen_Cmp(ir_node *node) } static ir_node *create_CMov(ir_node *node, ir_node *flags, ir_node *new_flags, - pn_Cmp pnc) + int pnc) { dbg_info *dbgi = 
get_irn_dbg_info(node); ir_node *block = get_nodes_block(node); @@ -2954,9 +3008,12 @@ static ir_node *create_CMov(ir_node *node, ir_node *flags, ir_node *new_flags, match_arguments(&am, block, val_false, val_true, flags, match_commutative | match_am | match_16bit_am | match_mode_neutral); - new_node = new_bd_ia32_CMov(dbgi, new_block, addr->base, addr->index, - addr->mem, am.new_op1, am.new_op2, new_flags, - am.ins_permuted, pnc); + if (am.ins_permuted) + pnc = ia32_get_negated_pnc(pnc); + + new_node = new_bd_ia32_CMovcc(dbgi, new_block, addr->base, addr->index, + addr->mem, am.new_op1, am.new_op2, new_flags, + pnc); set_am_attributes(new_node, &am); SET_IA32_ORIG_NODE(new_node, node); @@ -2970,13 +3027,13 @@ static ir_node *create_CMov(ir_node *node, ir_node *flags, ir_node *new_flags, * Creates a ia32 Setcc instruction. */ static ir_node *create_set_32bit(dbg_info *dbgi, ir_node *new_block, - ir_node *flags, pn_Cmp pnc, ir_node *orig_node, - int ins_permuted) + ir_node *flags, int pnc, + ir_node *orig_node) { ir_mode *mode = get_irn_mode(orig_node); ir_node *new_node; - new_node = new_bd_ia32_Set(dbgi, new_block, flags, pnc, ins_permuted); + new_node = new_bd_ia32_Setcc(dbgi, new_block, flags, pnc); SET_IA32_ORIG_NODE(new_node, orig_node); /* we might need to conv the result up */ @@ -2992,11 +3049,15 @@ static ir_node *create_set_32bit(dbg_info *dbgi, ir_node *new_block, /** * Create instruction for an unsigned Difference or Zero. */ -static ir_node *create_Doz(ir_node *psi, ir_node *a, ir_node *b) +static ir_node *create_doz(ir_node *psi, ir_node *a, ir_node *b) { - ir_graph *irg = current_ir_graph; - ir_mode *mode = get_irn_mode(psi); - ir_node *new_node, *sub, *sbb, *eflags, *block; + ir_mode *mode = get_irn_mode(psi); + ir_node *new_node; + ir_node *sub; + ir_node *sbb; + ir_node *notn; + ir_node *eflags; + ir_node *block; dbg_info *dbgi; @@ -3011,14 +3072,15 @@ static ir_node *create_Doz(ir_node *psi, ir_node *a, ir_node *b) } else { sub = new_node; set_irn_mode(sub, mode_T); - new_node = new_rd_Proj(NULL, irg, block, sub, mode, pn_ia32_res); + new_node = new_rd_Proj(NULL, sub, mode, pn_ia32_res); } - eflags = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_Sub_flags); + eflags = new_rd_Proj(NULL, sub, mode_Iu, pn_ia32_Sub_flags); - dbgi = get_irn_dbg_info(psi); - sbb = new_bd_ia32_Sbb0(dbgi, block, eflags); + dbgi = get_irn_dbg_info(psi); + sbb = new_bd_ia32_Sbb0(dbgi, block, eflags); + notn = new_bd_ia32_Not(dbgi, block, sbb); - new_node = new_bd_ia32_And(dbgi, block, noreg_GP, noreg_GP, nomem, new_node, sbb); + new_node = new_bd_ia32_And(dbgi, block, noreg_GP, noreg_GP, nomem, new_node, notn); set_ia32_commutative(new_node); return new_node; } @@ -3031,13 +3093,14 @@ static ir_node *create_Doz(ir_node *psi, ir_node *a, ir_node *b) * @param new_mode IN/OUT for the mode of the constants, if NULL * smallest possible mode will be used */ -static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode) { +static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode) +{ ir_entity *ent; ir_mode *mode = *new_mode; ir_type *tp; ir_initializer_t *initializer; - tarval *tv0 = get_Const_tarval(c0); - tarval *tv1 = get_Const_tarval(c1); + ir_tarval *tv0 = get_Const_tarval(c0); + ir_tarval *tv1 = get_Const_tarval(c1); if (mode == NULL) { /* detect the best mode for the constants */ @@ -3064,12 +3127,11 @@ static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **ne tp = ia32_create_float_type(mode, 4); tp = 
ia32_create_float_array(tp); - ent = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp); + ent = new_entity(get_glob_type(), id_unique("C%u"), tp); set_entity_ld_ident(ent, get_entity_ident(ent)); - set_entity_visibility(ent, visibility_local); - set_entity_variability(ent, variability_constant); - set_entity_allocation(ent, allocation_static); + set_entity_visibility(ent, ir_visibility_private); + add_entity_linkage(ent, IR_LINKAGE_CONSTANT); initializer = create_initializer_compound(2); @@ -3082,6 +3144,154 @@ static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **ne return ent; } +/** + * Possible transformations for creating a Setcc. + */ +enum setcc_transform_insn { + SETCC_TR_ADD, + SETCC_TR_ADDxx, + SETCC_TR_LEA, + SETCC_TR_LEAxx, + SETCC_TR_SHL, + SETCC_TR_NEG, + SETCC_TR_NOT, + SETCC_TR_AND, + SETCC_TR_SET, + SETCC_TR_SBB, +}; + +typedef struct setcc_transform { + unsigned num_steps; + int pnc; + struct { + enum setcc_transform_insn transform; + long val; + int scale; + } steps[4]; +} setcc_transform_t; + +/** + * Setcc can only handle 0 and 1 result. + * Find a transformation that creates 0 and 1 from + * tv_t and tv_f. + */ +static void find_const_transform(int pnc, ir_tarval *t, ir_tarval *f, + setcc_transform_t *res) +{ + unsigned step = 0; + + res->num_steps = 0; + + if (tarval_is_null(t)) { + ir_tarval *tmp = t; + t = f; + f = tmp; + pnc = ia32_get_negated_pnc(pnc); + } else if (tarval_cmp(t, f) == pn_Cmp_Lt) { + // now, t is the bigger one + ir_tarval *tmp = t; + t = f; + f = tmp; + pnc = ia32_get_negated_pnc(pnc); + } + res->pnc = pnc; + + if (! tarval_is_null(f)) { + ir_tarval *t_sub = tarval_sub(t, f, NULL); + + t = t_sub; + res->steps[step].transform = SETCC_TR_ADD; + + if (t == tarval_bad) + panic("constant subtract failed"); + if (! tarval_is_long(f)) + panic("tarval is not long"); + + res->steps[step].val = get_tarval_long(f); + ++step; + f = tarval_sub(f, f, NULL); + assert(tarval_is_null(f)); + } + + if (tarval_is_one(t)) { + res->steps[step].transform = SETCC_TR_SET; + res->num_steps = ++step; + return; + } + + if (tarval_is_minus_one(t)) { + res->steps[step].transform = SETCC_TR_NEG; + ++step; + res->steps[step].transform = SETCC_TR_SET; + res->num_steps = ++step; + return; + } + if (tarval_is_long(t)) { + long v = get_tarval_long(t); + + res->steps[step].val = 0; + switch (v) { + case 9: + if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD) + --step; + res->steps[step].transform = SETCC_TR_LEAxx; + res->steps[step].scale = 3; /* (a << 3) + a */ + break; + case 8: + if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD) + --step; + res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA; + res->steps[step].scale = 3; /* (a << 3) */ + break; + case 5: + if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD) + --step; + res->steps[step].transform = SETCC_TR_LEAxx; + res->steps[step].scale = 2; /* (a << 2) + a */ + break; + case 4: + if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD) + --step; + res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA; + res->steps[step].scale = 2; /* (a << 2) */ + break; + case 3: + if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD) + --step; + res->steps[step].transform = SETCC_TR_LEAxx; + res->steps[step].scale = 1; /* (a << 1) + a */ + break; + case 2: + if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD) + --step; + res->steps[step].transform = res->steps[step].val == 0 ? 
SETCC_TR_SHL : SETCC_TR_LEA; + res->steps[step].scale = 1; /* (a << 1) */ + break; + case 1: + res->num_steps = step; + return; + default: + if (! tarval_is_single_bit(t)) { + res->steps[step].transform = SETCC_TR_AND; + res->steps[step].val = v; + ++step; + res->steps[step].transform = SETCC_TR_NEG; + } else { + int v = get_tarval_lowest_bit(t); + assert(v >= 0); + + res->steps[step].transform = SETCC_TR_SHL; + res->steps[step].scale = v; + } + } + ++step; + res->steps[step].transform = SETCC_TR_SET; + res->num_steps = ++step; + return; + } + panic("tarval is not long"); +} + /** * Transforms a Mux node into some code sequence. * @@ -3089,25 +3299,31 @@ static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **ne */ static ir_node *gen_Mux(ir_node *node) { - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - ir_node *mux_true = get_Mux_true(node); - ir_node *mux_false = get_Mux_false(node); - ir_node *cond = get_Mux_sel(node); - ir_mode *mode = get_irn_mode(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *mux_true = get_Mux_true(node); + ir_node *mux_false = get_Mux_false(node); + ir_node *cond = get_Mux_sel(node); + ir_mode *mode = get_irn_mode(node); ir_node *flags; ir_node *new_node; - pn_Cmp pnc; + int is_abs; + int pnc; assert(get_irn_mode(cond) == mode_b); + is_abs = be_mux_is_abs(cond, mux_true, mux_false); + if (is_abs != 0) { + return create_abs(dbgi, block, be_get_abs_op(cond), is_abs < 0, node); + } + /* Note: a Mux node uses a Load two times IFF it's used in the compare AND in the result */ if (mode_is_float(mode)) { ir_node *cmp = get_Proj_pred(cond); ir_node *cmp_left = get_Cmp_left(cmp); ir_node *cmp_right = get_Cmp_right(cmp); - pn_Cmp pnc = get_Proj_proj(cond); + int pnc = get_Proj_proj(cond); if (ia32_cg_config.use_sse2) { if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Le) { @@ -3132,6 +3348,7 @@ static ir_node *gen_Mux(ir_node *node) } } } + if (is_Const(mux_true) && is_Const(mux_false)) { ia32_address_mode_t am; ir_node *load; @@ -3139,7 +3356,7 @@ static ir_node *gen_Mux(ir_node *node) unsigned scale; flags = get_flags_node(cond, &pnc); - new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, /*is_premuted=*/0); + new_node = create_set_32bit(dbgi, new_block, flags, pnc, node); if (ia32_cg_config.use_sse2) { /* cannot load from different mode on SSE */ @@ -3180,7 +3397,7 @@ static ir_node *gen_Mux(ir_node *node) } am.ls_mode = new_mode; - am.addr.base = noreg_GP; + am.addr.base = get_symconst_base(); am.addr.index = new_node; am.addr.mem = nomem; am.addr.offset = 0; @@ -3202,7 +3419,7 @@ static ir_node *gen_Mux(ir_node *node) load = new_bd_ia32_vfld(dbgi, block, am.addr.base, am.addr.index, am.addr.mem, new_mode); set_am_attributes(load, &am); - return new_rd_Proj(NULL, current_ir_graph, block, load, mode_vfp, pn_ia32_res); + return new_rd_Proj(NULL, load, mode_vfp, pn_ia32_res); } panic("cannot transform floating point Mux"); @@ -3212,21 +3429,29 @@ static ir_node *gen_Mux(ir_node *node) if (is_Proj(cond)) { ir_node *cmp = get_Proj_pred(cond); if (is_Cmp(cmp)) { - ir_node *cmp_left = get_Cmp_left(cmp); - ir_node *cmp_right = get_Cmp_right(cmp); - pn_Cmp pnc = get_Proj_proj(cond); - - /* check for unsigned Doz first */ - if ((pnc & pn_Cmp_Gt) && !mode_is_signed(mode) && - is_Const_0(mux_false) && is_Sub(mux_true) && - get_Sub_left(mux_true) == cmp_left && 
get_Sub_right(mux_true) == cmp_right) { - /* Mux(a >=u b, a - b, 0) unsigned Doz */ - return create_Doz(node, cmp_left, cmp_right); - } else if ((pnc & pn_Cmp_Lt) && !mode_is_signed(mode) && - is_Const_0(mux_true) && is_Sub(mux_false) && - get_Sub_left(mux_false) == cmp_left && get_Sub_right(mux_false) == cmp_right) { - /* Mux(a <=u b, 0, a - b) unsigned Doz */ - return create_Doz(node, cmp_left, cmp_right); + ir_node *cmp_left = get_Cmp_left(cmp); + ir_node *cmp_right = get_Cmp_right(cmp); + ir_node *val_true = mux_true; + ir_node *val_false = mux_false; + int pnc = get_Proj_proj(cond); + + if (is_Const(val_true) && is_Const_null(val_true)) { + ir_node *tmp = val_false; + val_false = val_true; + val_true = tmp; + pnc = ia32_get_negated_pnc(pnc); + } + if (is_Const_0(val_false) && is_Sub(val_true)) { + if ((pnc == pn_Cmp_Gt || pnc == pn_Cmp_Ge) + && get_Sub_left(val_true) == cmp_left + && get_Sub_right(val_true) == cmp_right) { + return create_doz(node, cmp_left, cmp_right); + } + if ((pnc == pn_Cmp_Lt || pnc == pn_Cmp_Le) + && get_Sub_left(val_true) == cmp_right + && get_Sub_right(val_true) == cmp_left) { + return create_doz(node, cmp_right, cmp_left); + } } } } @@ -3235,16 +3460,59 @@ static ir_node *gen_Mux(ir_node *node) if (is_Const(mux_true) && is_Const(mux_false)) { /* both are const, good */ - if (is_Const_1(mux_true) && is_Const_0(mux_false)) { - new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, /*is_premuted=*/0); - } else if (is_Const_0(mux_true) && is_Const_1(mux_false)) { - new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, /*is_premuted=*/1); - } else { - /* Not that simple. */ - goto need_cmov; + ir_tarval *tv_true = get_Const_tarval(mux_true); + ir_tarval *tv_false = get_Const_tarval(mux_false); + setcc_transform_t res; + int step; + + find_const_transform(pnc, tv_true, tv_false, &res); + new_node = node; + for (step = (int)res.num_steps - 1; step >= 0; --step) { + ir_node *imm; + + switch (res.steps[step].transform) { + case SETCC_TR_ADD: + imm = ia32_immediate_from_long(res.steps[step].val); + new_node = new_bd_ia32_Add(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, imm); + break; + case SETCC_TR_ADDxx: + new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node); + break; + case SETCC_TR_LEA: + new_node = new_bd_ia32_Lea(dbgi, new_block, noreg_GP, new_node); + set_ia32_am_scale(new_node, res.steps[step].scale); + set_ia32_am_offs_int(new_node, res.steps[step].val); + break; + case SETCC_TR_LEAxx: + new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node); + set_ia32_am_scale(new_node, res.steps[step].scale); + set_ia32_am_offs_int(new_node, res.steps[step].val); + break; + case SETCC_TR_SHL: + imm = ia32_immediate_from_long(res.steps[step].scale); + new_node = new_bd_ia32_Shl(dbgi, new_block, new_node, imm); + break; + case SETCC_TR_NEG: + new_node = new_bd_ia32_Neg(dbgi, new_block, new_node); + break; + case SETCC_TR_NOT: + new_node = new_bd_ia32_Not(dbgi, new_block, new_node); + break; + case SETCC_TR_AND: + imm = ia32_immediate_from_long(res.steps[step].val); + new_node = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, imm); + break; + case SETCC_TR_SET: + new_node = create_set_32bit(dbgi, new_block, flags, res.pnc, node); + break; + case SETCC_TR_SBB: + new_node = new_bd_ia32_Sbb0(dbgi, new_block, flags); + break; + default: + panic("unknown setcc transform"); + } } } else { -need_cmov: new_node = create_CMov(node, cond, flags, pnc); } return new_node; @@ -3265,7 +3533,7 @@ static ir_node 
*gen_x87_fp_to_gp(ir_node *node) ir_mode *mode = get_irn_mode(node); ir_node *fist, *load, *mem; - mem = gen_vfist(dbgi, irg, block, get_irg_frame(irg), noreg_GP, nomem, new_op, &fist); + mem = gen_vfist(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, new_op, &fist); set_irn_pinned(fist, op_pin_state_floats); set_ia32_use_frame(fist); set_ia32_op_type(fist, ia32_AddrModeD); @@ -3296,7 +3564,7 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) } SET_IA32_ORIG_NODE(load, node); - return new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res); + return new_r_Proj(load, mode_Iu, pn_ia32_Load_res); } /** @@ -3305,7 +3573,7 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) { ir_node *block = get_nodes_block(node); - ir_graph *irg = current_ir_graph; + ir_graph *irg = get_Block_irg(block); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *frame = get_irg_frame(irg); ir_node *store, *load; @@ -3321,7 +3589,7 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) set_ia32_op_type(load, ia32_AddrModeS); SET_IA32_ORIG_NODE(load, node); - new_node = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res); + new_node = new_r_Proj(load, mode_E, pn_ia32_vfld_res); return new_node; } @@ -3342,7 +3610,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { ir_node *src_block = get_nodes_block(node); ir_node *block = be_transform_node(src_block); - ir_graph *irg = current_ir_graph; + ir_graph *irg = get_Block_irg(block); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *op = get_Conv_op(node); ir_node *new_op = NULL; @@ -3361,7 +3629,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) ia32_address_t *addr = &am.addr; fild = new_bd_ia32_vfild(dbgi, block, addr->base, addr->index, addr->mem); - new_node = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); + new_node = new_r_Proj(fild, mode_vfp, pn_ia32_vfild_res); set_am_attributes(fild, &am); SET_IA32_ORIG_NODE(fild, node); @@ -3399,7 +3667,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) if (!mode_is_signed(mode)) { ir_node *in[2]; /* store a zero */ - ir_node *zero_const = create_Immediate(NULL, 0, 0); + ir_node *zero_const = ia32_create_Immediate(NULL, 0, 0); ir_node *zero_store = new_bd_ia32_Store(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, zero_const); @@ -3412,7 +3680,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) in[0] = zero_store; in[1] = store; - store = new_rd_Sync(dbgi, irg, block, 2, in); + store = new_rd_Sync(dbgi, block, 2, in); store_mode = mode_Ls; } else { store_mode = mode_Is; @@ -3425,7 +3693,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) set_ia32_op_type(fild, ia32_AddrModeS); set_ia32_ls_mode(fild, store_mode); - new_node = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); + new_node = new_r_Proj(fild, mode_vfp, pn_ia32_vfild_res); return new_node; } @@ -3502,10 +3770,9 @@ static ir_node *gen_Conv(ir_node *node) assert(!mode_is_int(src_mode) || src_bits <= 32); assert(!mode_is_int(tgt_mode) || tgt_bits <= 32); + /* modeB -> X should already be lowered by the lower_mode_b pass */ if (src_mode == mode_b) { - assert(mode_is_int(tgt_mode) || mode_is_reference(tgt_mode)); - /* nothing to do, we already model bools as 0/1 ints */ - return be_transform_node(op); + panic("ConvB not lowered %+F", node); } if (src_mode == tgt_mode) { @@ -3526,16 +3793,6 @@ static ir_node *gen_Conv(ir_node *node) new_op = be_transform_node(op); 
/* we convert from float ... */ if (mode_is_float(tgt_mode)) { -#if 0 - /* Matze: I'm a bit unsure what the following is for? seems wrong - * to me... */ - if (src_mode == mode_E && tgt_mode == mode_D - && !get_Conv_strict(node)) { - DB((dbg, LEVEL_1, "killed Conv(mode, mode) ...")); - return new_op; - } -#endif - /* ... to float */ if (ia32_cg_config.use_sse2) { DB((dbg, LEVEL_1, "create Conv(float, float) ...")); @@ -3547,8 +3804,7 @@ static ir_node *gen_Conv(ir_node *node) /* if fp_no_float_fold is not set then we assume that we * don't have any float operations in a non * mode_float_arithmetic mode and can skip strict upconvs */ - if (src_bits < tgt_bits - && !(get_irg_fp_model(current_ir_graph) & fp_no_float_fold)) { + if (src_bits < tgt_bits) { DB((dbg, LEVEL_1, "killed Conv(float, float) ...")); return new_op; } else { @@ -3618,7 +3874,7 @@ static ir_node *gen_Conv(ir_node *node) static ir_node *create_immediate_or_transform(ir_node *node, char immediate_constraint_type) { - ir_node *new_node = try_create_Immediate(node, immediate_constraint_type); + ir_node *new_node = ia32_try_create_Immediate(node, immediate_constraint_type); if (new_node == NULL) { new_node = be_transform_node(node); } @@ -3713,8 +3969,8 @@ static ir_node *gen_be_Return(ir_node *node) set_ia32_op_type(fld, ia32_AddrModeS); set_ia32_use_frame(fld); - mproj = new_r_Proj(irg, block, fld, mode_M, pn_ia32_vfld_M); - fld = new_r_Proj(irg, block, fld, mode_vfp, pn_ia32_vfld_res); + mproj = new_r_Proj(fld, mode_M, pn_ia32_vfld_M); + fld = new_r_Proj(fld, mode_vfp, pn_ia32_vfld_res); /* create a new barrier */ arity = get_irn_arity(barrier); @@ -3736,7 +3992,7 @@ static ir_node *gen_be_Return(ir_node *node) new_barrier = new_ir_node(dbgi, irg, block, get_irn_op(barrier), get_irn_mode(barrier), arity, in); - copy_node_attr(barrier, new_barrier); + copy_node_attr(irg, barrier, new_barrier); be_duplicate_deps(barrier, new_barrier); be_set_transformed_node(barrier, new_barrier); @@ -3773,6 +4029,7 @@ static ir_node *gen_be_SubSP(ir_node *node) */ static ir_node *gen_Phi(ir_node *node) { + const arch_register_req_t *req; ir_node *block = be_transform_node(get_nodes_block(node)); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); @@ -3784,26 +4041,46 @@ static ir_node *gen_Phi(ir_node *node) assert(get_mode_size_bits(mode) <= 32); /* all integer operations are on 32bit registers now */ mode = mode_Iu; + req = ia32_reg_classes[CLASS_ia32_gp].class_req; } else if (mode_is_float(mode)) { if (ia32_cg_config.use_sse2) { mode = mode_xmm; + req = ia32_reg_classes[CLASS_ia32_xmm].class_req; } else { mode = mode_vfp; + req = ia32_reg_classes[CLASS_ia32_vfp].class_req; } + } else { + req = arch_no_register_req; } /* phi nodes allow loops, so we use the old arguments for now * and fix this later */ phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1); - copy_node_attr(node, phi); + copy_node_attr(irg, node, phi); be_duplicate_deps(node, phi); + arch_set_out_register_req(phi, 0, req); + be_enqueue_preds(node); return phi; } +static ir_node *gen_Jmp(ir_node *node) +{ + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_node; + + new_node = new_bd_ia32_Jmp(dbgi, new_block); + SET_IA32_ORIG_NODE(new_node, node); + + return new_node; +} + /** * Transform IJmp */ @@ -3843,20 +4120,20 @@ static ir_node *gen_Bound(ir_node *node) if (is_Const_0(lower)) { /* typical case for Java */ ir_node 
*sub, *res, *flags, *block; - ir_graph *irg = current_ir_graph; res = gen_binop(node, get_Bound_index(node), get_Bound_upper(node), - new_bd_ia32_Sub, match_mode_neutral | match_am | match_immediate); + new_bd_ia32_Sub, + match_mode_neutral | match_am | match_immediate); block = get_nodes_block(res); if (! is_Proj(res)) { sub = res; set_irn_mode(sub, mode_T); - res = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_res); + res = new_rd_Proj(NULL, sub, mode_Iu, pn_ia32_res); } else { sub = get_Proj_pred(res); } - flags = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_Sub_flags); + flags = new_rd_Proj(NULL, sub, mode_Iu, pn_ia32_Sub_flags); new_node = new_bd_ia32_Jcc(dbgi, block, flags, pn_Cmp_Lt | ia32_pn_Cmp_unsigned); SET_IA32_ORIG_NODE(new_node, node); } else { @@ -3900,7 +4177,7 @@ static ir_node *gen_ia32_l_Add(ir_node *node) match_mode_neutral); if (is_Proj(lowered)) { - lowered = get_Proj_pred(lowered); + lowered = get_Proj_pred(lowered); } else { assert(is_ia32_Add(lowered)); set_irn_mode(lowered, mode_T); @@ -3952,7 +4229,7 @@ static ir_node *gen_ia32_l_Sub(ir_node *node) match_am | match_immediate | match_mode_neutral); if (is_Proj(lowered)) { - lowered = get_Proj_pred(lowered); + lowered = get_Proj_pred(lowered); } else { assert(is_ia32_Sub(lowered)); set_irn_mode(lowered, mode_T); @@ -4060,7 +4337,7 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) in[0] = store_low; in[1] = store_high; - sync = new_rd_Sync(dbgi, irg, block, 2, in); + sync = new_rd_Sync(dbgi, block, 2, in); /* do a fild */ fild = new_bd_ia32_vfild(dbgi, block, frame, noreg_GP, sync); @@ -4071,15 +4348,15 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) SET_IA32_ORIG_NODE(fild, node); - res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); + res = new_r_Proj(fild, mode_vfp, pn_ia32_vfild_res); if (! 
mode_is_signed(get_irn_mode(val_high))) { ia32_address_mode_t am; - ir_node *count = create_Immediate(NULL, 0, 31); + ir_node *count = ia32_create_Immediate(NULL, 0, 31); ir_node *fadd; - am.addr.base = noreg_GP; + am.addr.base = get_symconst_base(); am.addr.index = new_bd_ia32_Shr(dbgi, block, new_val_high, count); am.addr.mem = nomem; am.addr.offset = 0; @@ -4092,7 +4369,7 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) am.mem_proj = nomem; am.op_type = ia32_AddrModeS; am.new_op1 = res; - am.new_op2 = ia32_new_NoReg_vfp(env_cg); + am.new_op2 = ia32_new_NoReg_vfp(current_ir_graph); am.pinned = op_pin_state_floats; am.commutative = 1; am.ins_permuted = 0; @@ -4102,7 +4379,7 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) set_am_attributes(fadd, &am); set_irn_mode(fadd, mode_T); - res = new_rd_Proj(NULL, irg, block, fadd, mode_vfp, pn_ia32_res); + res = new_rd_Proj(NULL, fadd, mode_vfp, pn_ia32_res); } return res; } @@ -4111,14 +4388,14 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node) { ir_node *src_block = get_nodes_block(node); ir_node *block = be_transform_node(src_block); - ir_graph *irg = current_ir_graph; + ir_graph *irg = get_Block_irg(block); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *frame = get_irg_frame(irg); ir_node *val = get_irn_n(node, n_ia32_l_FloattoLL_val); ir_node *new_val = be_transform_node(val); ir_node *fist, *mem; - mem = gen_vfist(dbgi, irg, block, frame, noreg_GP, nomem, new_val, &fist); + mem = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val, &fist); SET_IA32_ORIG_NODE(fist, node); set_ia32_use_frame(fist); set_ia32_op_type(fist, ia32_AddrModeD); @@ -4127,19 +4404,10 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node) return mem; } -/** - * the BAD transformer. - */ -static ir_node *bad_transform(ir_node *node) -{ - panic("No transform function for %+F available.", node); - return NULL; -} - static ir_node *gen_Proj_l_FloattoLL(ir_node *node) { - ir_graph *irg = current_ir_graph; ir_node *block = be_transform_node(get_nodes_block(node)); + ir_graph *irg = get_Block_irg(block); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); ir_node *frame = get_irg_frame(irg); @@ -4165,7 +4433,7 @@ static ir_node *gen_Proj_l_FloattoLL(ir_node *node) assert(pn == pn_ia32_l_FloattoLL_res_low); } - proj = new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res); + proj = new_r_Proj(load, mode_Iu, pn_ia32_Load_res); return proj; } @@ -4175,23 +4443,21 @@ static ir_node *gen_Proj_l_FloattoLL(ir_node *node) */ static ir_node *gen_Proj_be_AddSP(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); - ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); if (proj == pn_be_AddSP_sp) { - ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_SubSP_stack); - arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]); + arch_set_irn_register(res, &ia32_registers[REG_ESP]); return res; } else if (proj == pn_be_AddSP_res) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_SubSP_addr); } else if (proj == pn_be_AddSP_M) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_SubSP_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_SubSP_M); } panic("No idea how to transform proj->AddSP"); @@ -4202,20 +4468,18 @@ static ir_node 
*gen_Proj_be_AddSP(ir_node *node) */ static ir_node *gen_Proj_be_SubSP(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); - ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); if (proj == pn_be_SubSP_sp) { - ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_AddSP_stack); - arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]); + arch_set_irn_register(res, &ia32_registers[REG_ESP]); return res; } else if (proj == pn_be_SubSP_M) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_AddSP_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_AddSP_M); } panic("No idea how to transform proj->SubSP"); @@ -4229,7 +4493,6 @@ static ir_node *gen_Proj_Load(ir_node *node) ir_node *new_pred; ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); - ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); @@ -4244,7 +4507,7 @@ static ir_node *gen_Proj_Load(ir_node *node) reachable through the ProjM */ be_enqueue_preds(node); /* do it in 2 steps, to silence firm verifier */ - res = new_rd_Proj(dbgi, irg, block, pred, mode_M, pn_Load_M); + res = new_rd_Proj(dbgi, pred, mode_M, pn_Load_M); set_Proj_proj(res, pn_ia32_mem); return res; } @@ -4254,15 +4517,15 @@ static ir_node *gen_Proj_Load(ir_node *node) if (is_ia32_Load(new_pred)) { switch (proj) { case pn_Load_res: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Load_res); + return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Load_res); case pn_Load_M: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Load_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Load_M); case pn_Load_X_regular: - return new_rd_Jmp(dbgi, irg, block); + return new_rd_Jmp(dbgi, block); case pn_Load_X_except: /* This Load might raise an exception. Mark it. */ set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Load_X_exc); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Load_X_exc); default: break; } @@ -4270,37 +4533,37 @@ static ir_node *gen_Proj_Load(ir_node *node) is_ia32_Conv_I2I8Bit(new_pred)) { set_irn_mode(new_pred, mode_T); if (proj == pn_Load_res) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_res); + return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_res); } else if (proj == pn_Load_M) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_mem); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_mem); } } else if (is_ia32_xLoad(new_pred)) { switch (proj) { case pn_Load_res: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xLoad_res); + return new_rd_Proj(dbgi, new_pred, mode_xmm, pn_ia32_xLoad_res); case pn_Load_M: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xLoad_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xLoad_M); case pn_Load_X_regular: - return new_rd_Jmp(dbgi, irg, block); + return new_rd_Jmp(dbgi, block); case pn_Load_X_except: /* This Load might raise an exception. Mark it. 
*/ set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_xLoad_X_exc); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xLoad_X_exc); default: break; } } else if (is_ia32_vfld(new_pred)) { switch (proj) { case pn_Load_res: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfld_res); + return new_rd_Proj(dbgi, new_pred, mode_vfp, pn_ia32_vfld_res); case pn_Load_M: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfld_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfld_M); case pn_Load_X_regular: - return new_rd_Jmp(dbgi, irg, block); + return new_rd_Jmp(dbgi, block); case pn_Load_X_except: /* This Load might raise an exception. Mark it. */ set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_xLoad_X_exc); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfld_X_exc); default: break; } @@ -4314,7 +4577,7 @@ static ir_node *gen_Proj_Load(ir_node *node) if (proj != pn_Load_M) { panic("internal error: transformed node not a Load"); } - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, 1); + return new_rd_Proj(dbgi, new_pred, mode_M, 1); } panic("No idea how to transform proj"); @@ -4328,7 +4591,6 @@ static ir_node *gen_Proj_DivMod(ir_node *node) ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); - ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); @@ -4338,14 +4600,14 @@ static ir_node *gen_Proj_DivMod(ir_node *node) case iro_Div: switch (proj) { case pn_Div_M: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M); case pn_Div_res: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); + return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_div_res); case pn_Div_X_regular: - return new_rd_Jmp(dbgi, irg, block); + return new_rd_Jmp(dbgi, block); case pn_Div_X_except: set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc); default: break; } @@ -4353,12 +4615,12 @@ static ir_node *gen_Proj_DivMod(ir_node *node) case iro_Mod: switch (proj) { case pn_Mod_M: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M); case pn_Mod_res: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); + return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_mod_res); case pn_Mod_X_except: set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc); default: break; } @@ -4366,16 +4628,16 @@ static ir_node *gen_Proj_DivMod(ir_node *node) case iro_DivMod: switch (proj) { case pn_DivMod_M: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M); case pn_DivMod_res_div: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); + return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_div_res); case pn_DivMod_res_mod: - return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); + return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_mod_res); case pn_DivMod_X_regular: - return 
new_rd_Jmp(dbgi, irg, block); + return new_rd_Jmp(dbgi, block); case pn_DivMod_X_except: set_ia32_exc_label(new_pred, 1); - return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc); default: break; } @@ -4392,19 +4654,17 @@ static ir_node *gen_Proj_DivMod(ir_node *node) */ static ir_node *gen_Proj_CopyB(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); - ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); switch (proj) { - case pn_CopyB_M_regular: + case pn_CopyB_M: if (is_ia32_CopyB_i(new_pred)) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_i_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_i_M); } else if (is_ia32_CopyB(new_pred)) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_M); } break; default: @@ -4419,26 +4679,24 @@ static ir_node *gen_Proj_CopyB(ir_node *node) */ static ir_node *gen_Proj_Quot(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); - ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); switch (proj) { case pn_Quot_M: if (is_ia32_xDiv(new_pred)) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xDiv_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xDiv_M); } else if (is_ia32_vfdiv(new_pred)) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M); + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfdiv_M); } break; case pn_Quot_res: if (is_ia32_xDiv(new_pred)) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xDiv_res); + return new_rd_Proj(dbgi, new_pred, mode_xmm, pn_ia32_xDiv_res); } else if (is_ia32_vfdiv(new_pred)) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfdiv_res); + return new_rd_Proj(dbgi, new_pred, mode_vfp, pn_ia32_vfdiv_res); } break; case pn_Quot_X_regular: @@ -4453,7 +4711,6 @@ static ir_node *gen_Proj_Quot(ir_node *node) static ir_node *gen_be_Call(ir_node *node) { dbg_info *const dbgi = get_irn_dbg_info(node); - ir_graph *const irg = current_ir_graph; ir_node *const src_block = get_nodes_block(node); ir_node *const block = be_transform_node(src_block); ir_node *const src_mem = get_irn_n(node, be_pos_Call_mem); @@ -4471,6 +4728,7 @@ static ir_node *gen_be_Call(ir_node *node) ir_node * edx = noreg_GP; unsigned const pop = be_Call_get_pop(node); ir_type *const call_tp = be_Call_get_type(node); + int old_no_pic_adjust; /* Run the x87 simulator if the call returns a float value */ if (get_method_n_ress(call_tp) > 0) { @@ -4478,16 +4736,24 @@ static ir_node *gen_be_Call(ir_node *node) ir_mode *const res_mode = get_type_mode(res_type); if (res_mode != NULL && mode_is_float(res_mode)) { - env_cg->do_x87_sim = 1; + ir_graph *irg = current_ir_graph; + ia32_irg_data_t *irg_data = ia32_get_irg_data(irg); + irg_data->do_x87_sim = 1; } } /* We do not want be_Call direct calls */ assert(be_Call_get_entity(node) == NULL); + /* special case for PIC trampoline calls */ + old_no_pic_adjust = ia32_no_pic_adjust; + ia32_no_pic_adjust = be_get_irg_options(current_ir_graph)->pic; + match_arguments(&am, src_block, NULL, src_ptr, src_mem, match_am | 
match_immediate); + ia32_no_pic_adjust = old_no_pic_adjust; + i = get_irn_arity(node) - 1; fpcw = be_transform_node(get_irn_n(node, i--)); for (; i >= be_pos_Call_first_arg; --i) { @@ -4498,14 +4764,14 @@ static ir_node *gen_be_Call(ir_node *node) assert(req->cls == &ia32_reg_classes[CLASS_ia32_gp]); switch (*req->limited) { - case 1 << REG_EAX: assert(eax == noreg_GP); eax = reg_parm; break; - case 1 << REG_ECX: assert(ecx == noreg_GP); ecx = reg_parm; break; - case 1 << REG_EDX: assert(edx == noreg_GP); edx = reg_parm; break; + case 1 << REG_GP_EAX: assert(eax == noreg_GP); eax = reg_parm; break; + case 1 << REG_GP_ECX: assert(ecx == noreg_GP); ecx = reg_parm; break; + case 1 << REG_GP_EDX: assert(edx == noreg_GP); edx = reg_parm; break; default: panic("Invalid GP register for register parameter"); } } - mem = transform_AM_mem(irg, block, src_ptr, src_mem, addr->mem); + mem = transform_AM_mem(block, src_ptr, src_mem, addr->mem); call = new_bd_ia32_Call(dbgi, block, addr->base, addr->index, mem, am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp); set_am_attributes(call, &am); @@ -4528,7 +4794,8 @@ static ir_node *gen_be_Call(ir_node *node) /** * Transform Builtin trap */ -static ir_node *gen_trap(ir_node *node) { +static ir_node *gen_trap(ir_node *node) +{ dbg_info *dbgi = get_irn_dbg_info(node); ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *mem = be_transform_node(get_Builtin_mem(node)); @@ -4539,7 +4806,8 @@ static ir_node *gen_trap(ir_node *node) { /** * Transform Builtin debugbreak */ -static ir_node *gen_debugbreak(ir_node *node) { +static ir_node *gen_debugbreak(ir_node *node) +{ dbg_info *dbgi = get_irn_dbg_info(node); ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *mem = be_transform_node(get_Builtin_mem(node)); @@ -4550,11 +4818,12 @@ static ir_node *gen_debugbreak(ir_node *node) { /** * Transform Builtin return_address */ -static ir_node *gen_return_address(ir_node *node) { - ir_node *param = get_Builtin_param(node, 0); - ir_node *frame = get_Builtin_param(node, 1); - dbg_info *dbgi = get_irn_dbg_info(node); - tarval *tv = get_Const_tarval(param); +static ir_node *gen_return_address(ir_node *node) +{ + ir_node *param = get_Builtin_param(node, 0); + ir_node *frame = get_Builtin_param(node, 1); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_tarval *tv = get_Const_tarval(param); unsigned long value = get_tarval_long(tv); ir_node *block = be_transform_node(get_nodes_block(node)); @@ -4579,24 +4848,25 @@ static ir_node *gen_return_address(ir_node *node) { set_ia32_frame_ent(load, ia32_get_return_address_entity()); if (get_irn_pinned(node) == op_pin_state_floats) { - assert(pn_ia32_xLoad_res == pn_ia32_vfld_res - && pn_ia32_vfld_res == pn_ia32_Load_res - && pn_ia32_Load_res == pn_ia32_res); + assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res + && (int)pn_ia32_vfld_res == (int)pn_ia32_Load_res + && (int)pn_ia32_Load_res == (int)pn_ia32_res); arch_irn_add_flags(load, arch_irn_flags_rematerializable); } SET_IA32_ORIG_NODE(load, node); - return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res); + return new_r_Proj(load, mode_Iu, pn_ia32_Load_res); } /** * Transform Builtin frame_address */ -static ir_node *gen_frame_address(ir_node *node) { - ir_node *param = get_Builtin_param(node, 0); - ir_node *frame = get_Builtin_param(node, 1); - dbg_info *dbgi = get_irn_dbg_info(node); - tarval *tv = get_Const_tarval(param); +static ir_node *gen_frame_address(ir_node *node) +{ + ir_node *param = get_Builtin_param(node, 0); + ir_node 
*frame = get_Builtin_param(node, 1); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_tarval *tv = get_Const_tarval(param); unsigned long value = get_tarval_long(tv); ir_node *block = be_transform_node(get_nodes_block(node)); @@ -4628,25 +4898,26 @@ static ir_node *gen_frame_address(ir_node *node) { } if (get_irn_pinned(node) == op_pin_state_floats) { - assert(pn_ia32_xLoad_res == pn_ia32_vfld_res - && pn_ia32_vfld_res == pn_ia32_Load_res - && pn_ia32_Load_res == pn_ia32_res); + assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res + && (int)pn_ia32_vfld_res == (int)pn_ia32_Load_res + && (int)pn_ia32_Load_res == (int)pn_ia32_res); arch_irn_add_flags(load, arch_irn_flags_rematerializable); } SET_IA32_ORIG_NODE(load, node); - return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res); + return new_r_Proj(load, mode_Iu, pn_ia32_Load_res); } /** * Transform Builtin frame_address */ -static ir_node *gen_prefetch(ir_node *node) { +static ir_node *gen_prefetch(ir_node *node) +{ dbg_info *dbgi; ir_node *ptr, *block, *mem, *base, *index; ir_node *param, *new_node; long rw, locality; - tarval *tv; + ir_tarval *tv; ia32_address_t addr; if (!ia32_cg_config.use_sse_prefetch && !ia32_cg_config.use_3dnow_prefetch) { @@ -4661,7 +4932,7 @@ static ir_node *gen_prefetch(ir_node *node) { /* construct load address */ memset(&addr, 0, sizeof(addr)); ptr = get_Builtin_param(node, 0); - ia32_create_address_mode(&addr, ptr, 0); + ia32_create_address_mode(&addr, ptr, ia32_create_am_normal); base = addr.base; index = addr.index; @@ -4719,7 +4990,7 @@ static ir_node *gen_prefetch(ir_node *node) { SET_IA32_ORIG_NODE(new_node, node); be_dep_on_frame(new_node); - return new_r_Proj(current_ir_graph, block, new_node, mode_M, pn_ia32_Prefetch_M); + return new_r_Proj(new_node, mode_M, pn_ia32_Prefetch_M); } /** @@ -4756,18 +5027,18 @@ static ir_node *gen_ffs(ir_node *node) ir_node *real = skip_Proj(bsf); dbg_info *dbgi = get_irn_dbg_info(real); ir_node *block = get_nodes_block(real); - ir_node *flag, *set, *conv, *neg, *or; + ir_node *flag, *set, *conv, *neg, *orn; /* bsf x */ if (get_irn_mode(real) != mode_T) { set_irn_mode(real, mode_T); - bsf = new_r_Proj(current_ir_graph, block, real, mode_Iu, pn_ia32_res); + bsf = new_r_Proj(real, mode_Iu, pn_ia32_res); } - flag = new_r_Proj(current_ir_graph, block, real, mode_b, pn_ia32_flags); + flag = new_r_Proj(real, mode_b, pn_ia32_flags); /* sete */ - set = new_bd_ia32_Set(dbgi, block, flag, pn_Cmp_Eq, 0); + set = new_bd_ia32_Setcc(dbgi, block, flag, pn_Cmp_Eq); SET_IA32_ORIG_NODE(set, node); /* conv to 32bit */ @@ -4778,11 +5049,11 @@ static ir_node *gen_ffs(ir_node *node) neg = new_bd_ia32_Neg(dbgi, block, conv); /* or */ - or = new_bd_ia32_Or(dbgi, block, noreg_GP, noreg_GP, nomem, bsf, neg); - set_ia32_commutative(or); + orn = new_bd_ia32_Or(dbgi, block, noreg_GP, noreg_GP, nomem, bsf, neg); + set_ia32_commutative(orn); /* add 1 */ - return new_bd_ia32_Add(dbgi, block, noreg_GP, noreg_GP, nomem, or, create_Immediate(NULL, 0, 1)); + return new_bd_ia32_Add(dbgi, block, noreg_GP, noreg_GP, nomem, orn, ia32_create_Immediate(NULL, 0, 1)); } /** @@ -4794,7 +5065,7 @@ static ir_node *gen_clz(ir_node *node) ir_node *real = skip_Proj(bsr); dbg_info *dbgi = get_irn_dbg_info(real); ir_node *block = get_nodes_block(real); - ir_node *imm = create_Immediate(NULL, 0, 31); + ir_node *imm = ia32_create_Immediate(NULL, 0, 31); return new_bd_ia32_Xor(dbgi, block, noreg_GP, noreg_GP, nomem, bsr, imm); } @@ -4826,7 +5097,7 @@ static ir_node *gen_parity(ir_node *node) /* cmp 
param, 0 */ match_arguments(&am, block, NULL, param, NULL, match_am); - imm = create_Immediate(NULL, 0, 0); + imm = ia32_create_Immediate(NULL, 0, 0); cmp = new_bd_ia32_Cmp(dbgi, new_block, addr->base, addr->index, addr->mem, imm, am.new_op2, am.ins_permuted, 0); set_am_attributes(cmp, &am); @@ -4837,7 +5108,7 @@ static ir_node *gen_parity(ir_node *node) cmp = fix_mem_proj(cmp, &am); /* setp */ - new_node = new_bd_ia32_Set(dbgi, new_block, cmp, ia32_pn_Cmp_parity, 0); + new_node = new_bd_ia32_Setcc(dbgi, new_block, cmp, ia32_pn_Cmp_parity); SET_IA32_ORIG_NODE(new_node, node); /* conv to 32bit */ @@ -4850,7 +5121,8 @@ static ir_node *gen_parity(ir_node *node) /** * Transform builtin popcount */ -static ir_node *gen_popcount(ir_node *node) { +static ir_node *gen_popcount(ir_node *node) +{ ir_node *param = get_Builtin_param(node, 0); dbg_info *dbgi = get_irn_dbg_info(node); @@ -4881,11 +5153,11 @@ static ir_node *gen_popcount(ir_node *node) { /* do the standard popcount algo */ /* m1 = x & 0x55555555 */ - imm = create_Immediate(NULL, 0, 0x55555555); + imm = ia32_create_Immediate(NULL, 0, 0x55555555); m1 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_param, imm); /* s1 = x >> 1 */ - simm = create_Immediate(NULL, 0, 1); + simm = ia32_create_Immediate(NULL, 0, 1); s1 = new_bd_ia32_Shl(dbgi, new_block, new_param, simm); /* m2 = s1 & 0x55555555 */ @@ -4895,11 +5167,11 @@ static ir_node *gen_popcount(ir_node *node) { m3 = new_bd_ia32_Lea(dbgi, new_block, m2, m1); /* m4 = m3 & 0x33333333 */ - imm = create_Immediate(NULL, 0, 0x33333333); + imm = ia32_create_Immediate(NULL, 0, 0x33333333); m4 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m3, imm); /* s2 = m3 >> 2 */ - simm = create_Immediate(NULL, 0, 2); + simm = ia32_create_Immediate(NULL, 0, 2); s2 = new_bd_ia32_Shl(dbgi, new_block, m3, simm); /* m5 = s2 & 0x33333333 */ @@ -4909,11 +5181,11 @@ static ir_node *gen_popcount(ir_node *node) { m6 = new_bd_ia32_Lea(dbgi, new_block, m4, m5); /* m7 = m6 & 0x0F0F0F0F */ - imm = create_Immediate(NULL, 0, 0x0F0F0F0F); + imm = ia32_create_Immediate(NULL, 0, 0x0F0F0F0F); m7 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m6, imm); /* s3 = m6 >> 4 */ - simm = create_Immediate(NULL, 0, 4); + simm = ia32_create_Immediate(NULL, 0, 4); s3 = new_bd_ia32_Shl(dbgi, new_block, m6, simm); /* m8 = s3 & 0x0F0F0F0F */ @@ -4923,11 +5195,11 @@ static ir_node *gen_popcount(ir_node *node) { m9 = new_bd_ia32_Lea(dbgi, new_block, m7, m8); /* m10 = m9 & 0x00FF00FF */ - imm = create_Immediate(NULL, 0, 0x00FF00FF); + imm = ia32_create_Immediate(NULL, 0, 0x00FF00FF); m10 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m9, imm); /* s4 = m9 >> 8 */ - simm = create_Immediate(NULL, 0, 8); + simm = ia32_create_Immediate(NULL, 0, 8); s4 = new_bd_ia32_Shl(dbgi, new_block, m9, simm); /* m11 = s4 & 0x00FF00FF */ @@ -4937,11 +5209,11 @@ static ir_node *gen_popcount(ir_node *node) { m12 = new_bd_ia32_Lea(dbgi, new_block, m10, m11); /* m13 = m12 & 0x0000FFFF */ - imm = create_Immediate(NULL, 0, 0x0000FFFF); + imm = ia32_create_Immediate(NULL, 0, 0x0000FFFF); m13 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m12, imm); /* s5 = m12 >> 16 */ - simm = create_Immediate(NULL, 0, 16); + simm = ia32_create_Immediate(NULL, 0, 16); s5 = new_bd_ia32_Shl(dbgi, new_block, m12, simm); /* res = m13 + s5 */ @@ -4951,7 +5223,8 @@ static ir_node *gen_popcount(ir_node *node) { /** * Transform builtin byte swap. 
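 *
 * When no bswap instruction is available, the swap is assembled below from
 * shifts, masks and lea-adds.  For reference, a sketch of the usual C
 * formulation of a 32-bit byte swap (bswap32 is an illustrative name, not
 * part of this patch):
 *
 *     unsigned bswap32(unsigned x)
 *     {
 *         return (x << 24) | ((x << 8) & 0x00FF0000u)
 *              | ((x >> 8) & 0x0000FF00u) | (x >> 24);
 *     }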
*/ -static ir_node *gen_bswap(ir_node *node) { +static ir_node *gen_bswap(ir_node *node) +{ ir_node *param = be_transform_node(get_Builtin_param(node, 0)); dbg_info *dbgi = get_irn_dbg_info(node); @@ -4967,18 +5240,18 @@ static ir_node *gen_bswap(ir_node *node) { /* swap available */ return new_bd_ia32_Bswap(dbgi, new_block, param); } - s1 = new_bd_ia32_Shl(dbgi, new_block, param, create_Immediate(NULL, 0, 24)); - s2 = new_bd_ia32_Shl(dbgi, new_block, param, create_Immediate(NULL, 0, 8)); + s1 = new_bd_ia32_Shl(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 24)); + s2 = new_bd_ia32_Shl(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 8)); - m1 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s2, create_Immediate(NULL, 0, 0xFF00)); + m1 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s2, ia32_create_Immediate(NULL, 0, 0xFF00)); m2 = new_bd_ia32_Lea(dbgi, new_block, s1, m1); - s3 = new_bd_ia32_Shr(dbgi, new_block, param, create_Immediate(NULL, 0, 8)); + s3 = new_bd_ia32_Shr(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 8)); - m3 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s3, create_Immediate(NULL, 0, 0xFF0000)); + m3 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s3, ia32_create_Immediate(NULL, 0, 0xFF0000)); m4 = new_bd_ia32_Lea(dbgi, new_block, m2, m3); - s4 = new_bd_ia32_Shr(dbgi, new_block, param, create_Immediate(NULL, 0, 24)); + s4 = new_bd_ia32_Shr(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 24)); return new_bd_ia32_Lea(dbgi, new_block, m4, s4); case 16: @@ -4993,7 +5266,8 @@ static ir_node *gen_bswap(ir_node *node) { /** * Transform builtin outport. */ -static ir_node *gen_outport(ir_node *node) { +static ir_node *gen_outport(ir_node *node) +{ ir_node *port = create_immediate_or_transform(get_Builtin_param(node, 0), 0); ir_node *oldv = get_Builtin_param(node, 1); ir_mode *mode = get_irn_mode(oldv); @@ -5010,7 +5284,8 @@ static ir_node *gen_outport(ir_node *node) { /** * Transform builtin inport. 
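 *
 * The builtin reads a value from an x86 I/O port.  Roughly what this
 * corresponds to at source level is the classic inb pattern below (a GCC
 * inline-asm sketch for the byte-sized case; inb and its operands are
 * illustrative, not part of this patch):
 *
 *     static inline unsigned char inb(unsigned short port)
 *     {
 *         unsigned char value;
 *         __asm__ volatile ("inb %1, %0" : "=a"(value) : "Nd"(port));
 *         return value;
 *     }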
*/ -static ir_node *gen_inport(ir_node *node) { +static ir_node *gen_inport(ir_node *node) +{ ir_type *tp = get_Builtin_type(node); ir_type *rstp = get_method_res_type(tp, 0); ir_mode *mode = get_type_mode(rstp); @@ -5026,10 +5301,101 @@ static ir_node *gen_inport(ir_node *node) { return res; } +/** + * Transform a builtin inner trampoline + */ +static ir_node *gen_inner_trampoline(ir_node *node) +{ + ir_node *ptr = get_Builtin_param(node, 0); + ir_node *callee = get_Builtin_param(node, 1); + ir_node *env = be_transform_node(get_Builtin_param(node, 2)); + ir_node *mem = get_Builtin_mem(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *val; + ir_node *store; + ir_node *rel; + ir_node *trampoline; + ir_node *in[2]; + dbg_info *dbgi = get_irn_dbg_info(node); + ia32_address_t addr; + + /* construct store address */ + memset(&addr, 0, sizeof(addr)); + ia32_create_address_mode(&addr, ptr, ia32_create_am_normal); + + if (addr.base == NULL) { + addr.base = noreg_GP; + } else { + addr.base = be_transform_node(addr.base); + } + + if (addr.index == NULL) { + addr.index = noreg_GP; + } else { + addr.index = be_transform_node(addr.index); + } + addr.mem = be_transform_node(mem); + + /* mov ecx, */ + val = ia32_create_Immediate(NULL, 0, 0xB9); + store = new_bd_ia32_Store8Bit(dbgi, new_block, addr.base, + addr.index, addr.mem, val); + set_irn_pinned(store, get_irn_pinned(node)); + set_ia32_op_type(store, ia32_AddrModeD); + set_ia32_ls_mode(store, mode_Bu); + set_address(store, &addr); + addr.mem = store; + addr.offset += 1; + + store = new_bd_ia32_Store(dbgi, new_block, addr.base, + addr.index, addr.mem, env); + set_irn_pinned(store, get_irn_pinned(node)); + set_ia32_op_type(store, ia32_AddrModeD); + set_ia32_ls_mode(store, mode_Iu); + set_address(store, &addr); + addr.mem = store; + addr.offset += 4; + + /* jmp rel */ + val = ia32_create_Immediate(NULL, 0, 0xE9); + store = new_bd_ia32_Store8Bit(dbgi, new_block, addr.base, + addr.index, addr.mem, val); + set_irn_pinned(store, get_irn_pinned(node)); + set_ia32_op_type(store, ia32_AddrModeD); + set_ia32_ls_mode(store, mode_Bu); + set_address(store, &addr); + addr.mem = store; + addr.offset += 1; + + trampoline = be_transform_node(ptr); + + /* the callee is typically an immediate */ + if (is_SymConst(callee)) { + rel = new_bd_ia32_Const(dbgi, new_block, get_SymConst_entity(callee), 0, 0, -10); + } else { + rel = new_bd_ia32_Lea(dbgi, new_block, be_transform_node(callee), ia32_create_Immediate(NULL, 0, -10)); + } + rel = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, nomem, rel, trampoline); + + store = new_bd_ia32_Store(dbgi, new_block, addr.base, + addr.index, addr.mem, rel); + set_irn_pinned(store, get_irn_pinned(node)); + set_ia32_op_type(store, ia32_AddrModeD); + set_ia32_ls_mode(store, mode_Iu); + set_address(store, &addr); + + in[0] = store; + in[1] = trampoline; + + return new_r_Tuple(new_block, 2, in); +} + /** * Transform Builtin node. 
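 *
 * Note on ir_bk_inner_trampoline (handled by gen_inner_trampoline above):
 * the stores build a 10-byte stub at the trampoline address, roughly
 * (sketch; p, env and rel are illustrative names):
 *
 *     unsigned char *p = trampoline;
 *     p[0] = 0xB9;  memcpy(p + 1, &env, 4);   // mov ecx, <env>
 *     p[5] = 0xE9;  memcpy(p + 6, &rel, 4);   // jmp <callee>
 *
 * with rel = callee - (trampoline + 10): the jump displacement is relative
 * to the end of the stub, which is why an offset of -10 is applied to the
 * callee address.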
*/ -static ir_node *gen_Builtin(ir_node *node) { +static ir_node *gen_Builtin(ir_node *node) +{ ir_builtin_kind kind = get_Builtin_kind(node); switch (kind) { @@ -5039,7 +5405,7 @@ static ir_node *gen_Builtin(ir_node *node) { return gen_debugbreak(node); case ir_bk_return_address: return gen_return_address(node); - case ir_bk_frame_addess: + case ir_bk_frame_address: return gen_frame_address(node); case ir_bk_prefetch: return gen_prefetch(node); @@ -5059,6 +5425,8 @@ static ir_node *gen_Builtin(ir_node *node) { return gen_outport(node); case ir_bk_inport: return gen_inport(node); + case ir_bk_inner_trampoline: + return gen_inner_trampoline(node); } panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind)); } @@ -5066,14 +5434,15 @@ static ir_node *gen_Builtin(ir_node *node) { /** * Transform Proj(Builtin) node. */ -static ir_node *gen_Proj_Builtin(ir_node *proj) { +static ir_node *gen_Proj_Builtin(ir_node *proj) +{ ir_node *node = get_Proj_pred(proj); ir_node *new_node = be_transform_node(node); ir_builtin_kind kind = get_Builtin_kind(node); switch (kind) { case ir_bk_return_address: - case ir_bk_frame_addess: + case ir_bk_frame_address: case ir_bk_ffs: case ir_bk_clz: case ir_bk_ctz: @@ -5090,12 +5459,17 @@ static ir_node *gen_Proj_Builtin(ir_node *proj) { return new_node; case ir_bk_inport: if (get_Proj_proj(proj) == pn_Builtin_1_result) { - return new_r_Proj(current_ir_graph, get_nodes_block(new_node), - new_node, get_irn_mode(proj), pn_ia32_Inport_res); + return new_r_Proj(new_node, get_irn_mode(proj), pn_ia32_Inport_res); } else { assert(get_Proj_proj(proj) == pn_Builtin_M); - return new_r_Proj(current_ir_graph, get_nodes_block(new_node), - new_node, mode_M, pn_ia32_Inport_M); + return new_r_Proj(new_node, mode_M, pn_ia32_Inport_M); + } + case ir_bk_inner_trampoline: + if (get_Proj_proj(proj) == pn_Builtin_1_result) { + return get_Tuple_pred(new_node, 1); + } else { + assert(get_Proj_proj(proj) == pn_Builtin_M); + return get_Tuple_pred(new_node, 0); } } panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind)); @@ -5114,17 +5488,15 @@ static ir_node *gen_be_IncSP(ir_node *node) */ static ir_node *gen_Proj_be_Call(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *call = get_Proj_pred(node); ir_node *new_call = be_transform_node(call); - ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); ir_mode *mode = get_irn_mode(node); ir_node *res; if (proj == pn_be_Call_M_regular) { - return new_rd_Proj(dbgi, irg, block, new_call, mode_M, n_ia32_Call_mem); + return new_rd_Proj(dbgi, new_call, mode_M, n_ia32_Call_mem); } /* transform call modes */ if (mode_is_data(mode)) { @@ -5146,7 +5518,8 @@ static ir_node *gen_Proj_be_Call(ir_node *node) assert(req->type & arch_register_req_type_limited); for (i = 0; i < n_outs; ++i) { - arch_register_req_t const *const new_req = get_ia32_out_req(new_call, i); + arch_register_req_t const *const new_req + = arch_get_out_register_req(new_call, i); if (!(new_req->type & arch_register_req_type_limited) || new_req->cls != req->cls || @@ -5159,16 +5532,16 @@ static ir_node *gen_Proj_be_Call(ir_node *node) assert(i < n_outs); } - res = new_rd_Proj(dbgi, irg, block, new_call, mode, proj); + res = new_rd_Proj(dbgi, new_call, mode, proj); /* TODO arch_set_irn_register() only operates on Projs, need variant with index */ switch (proj) { case pn_ia32_Call_stack: - arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]); + arch_set_irn_register(res, 
&ia32_registers[REG_ESP]); break; case pn_ia32_Call_fpcw: - arch_set_irn_register(res, &ia32_fp_cw_regs[REG_FPCW]); + arch_set_irn_register(res, &ia32_registers[REG_FPCW]); break; } @@ -5190,7 +5563,7 @@ static ir_node *gen_Proj_Cmp(ir_node *node) */ static ir_node *gen_Proj_Bound(ir_node *node) { - ir_node *new_node, *block; + ir_node *new_node; ir_node *pred = get_Proj_pred(node); switch (get_Proj_proj(node)) { @@ -5198,12 +5571,10 @@ static ir_node *gen_Proj_Bound(ir_node *node) return be_transform_node(get_Bound_mem(pred)); case pn_Bound_X_regular: new_node = be_transform_node(pred); - block = get_nodes_block(new_node); - return new_r_Proj(current_ir_graph, block, new_node, mode_X, pn_ia32_Jcc_true); + return new_r_Proj(new_node, mode_X, pn_ia32_Jcc_true); case pn_Bound_X_except: new_node = be_transform_node(pred); - block = get_nodes_block(new_node); - return new_r_Proj(current_ir_graph, block, new_node, mode_X, pn_ia32_Jcc_false); + return new_r_Proj(new_node, mode_X, pn_ia32_Jcc_false); case pn_Bound_res: return be_transform_node(get_Bound_index(pred)); default: @@ -5216,11 +5587,10 @@ static ir_node *gen_Proj_ASM(ir_node *node) ir_mode *mode = get_irn_mode(node); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); - ir_node *block = get_nodes_block(new_pred); long pos = get_Proj_proj(node); if (mode == mode_M) { - pos = arch_irn_get_n_outs(new_pred) + 1; + pos = arch_irn_get_n_outs(new_pred)-1; } else if (mode_is_int(mode) || mode_is_reference(mode)) { mode = mode_Iu; } else if (mode_is_float(mode)) { @@ -5229,7 +5599,7 @@ static ir_node *gen_Proj_ASM(ir_node *node) panic("unexpected proj mode at ASM"); } - return new_r_Proj(current_ir_graph, block, new_pred, mode, pos); + return new_r_Proj(new_pred, mode, pos); } /** @@ -5280,13 +5650,13 @@ static ir_node *gen_Proj(ir_node *node) ir_node *new_block = be_transform_node(block); dbg_info *dbgi = get_irn_dbg_info(node); /* we exchange the ProjX with a jump */ - ir_node *jump = new_rd_Jmp(dbgi, current_ir_graph, new_block); + ir_node *jump = new_rd_Jmp(dbgi, new_block); return jump; } case pn_Start_P_tls: - return gen_Proj_tls(node); + return ia32_gen_Proj_tls(node); } break; @@ -5301,12 +5671,9 @@ static ir_node *gen_Proj(ir_node *node) ir_mode *mode = get_irn_mode(node); if (ia32_mode_needs_gp_reg(mode)) { ir_node *new_pred = be_transform_node(pred); - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *new_proj = new_r_Proj(current_ir_graph, block, new_pred, - mode_Iu, get_Proj_proj(node)); -#ifdef DEBUG_libfirm + ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, + get_Proj_proj(node)); new_proj->node_nr = node->node_nr; -#endif return new_proj; } } @@ -5320,96 +5687,70 @@ static ir_node *gen_Proj(ir_node *node) static void register_transformers(void) { /* first clear the generic function pointer for all ops */ - clear_irp_opcodes_generic_func(); - -#define GEN(a) { be_transform_func *func = gen_##a; op_##a->ops.generic = (op_func) func; } -#define BAD(a) op_##a->ops.generic = (op_func)bad_transform - - GEN(Add); - GEN(Sub); - GEN(Mul); - GEN(Mulh); - GEN(And); - GEN(Or); - GEN(Eor); - - GEN(Shl); - GEN(Shr); - GEN(Shrs); - GEN(Rotl); - - GEN(Quot); - - GEN(Div); - GEN(Mod); - GEN(DivMod); - - GEN(Minus); - GEN(Conv); - GEN(Abs); - GEN(Not); - - GEN(Load); - GEN(Store); - GEN(Cond); - - GEN(Cmp); - GEN(ASM); - GEN(CopyB); - GEN(Mux); - GEN(Proj); - GEN(Phi); - GEN(IJmp); - GEN(Bound); - - /* transform ops from intrinsic lowering */ - GEN(ia32_l_Add); - GEN(ia32_l_Adc); - 
GEN(ia32_l_Mul); - GEN(ia32_l_IMul); - GEN(ia32_l_ShlDep); - GEN(ia32_l_ShrDep); - GEN(ia32_l_SarDep); - GEN(ia32_l_ShlD); - GEN(ia32_l_ShrD); - GEN(ia32_l_Sub); - GEN(ia32_l_Sbb); - GEN(ia32_l_LLtoFloat); - GEN(ia32_l_FloattoLL); - - GEN(Const); - GEN(SymConst); - GEN(Unknown); - - /* we should never see these nodes */ - BAD(Raise); - BAD(Sel); - BAD(InstOf); - BAD(Cast); - BAD(Free); - BAD(Tuple); - BAD(Id); - //BAD(Bad); - BAD(Confirm); - BAD(Filter); - BAD(CallBegin); - BAD(EndReg); - BAD(EndExcept); - - /* handle builtins */ - GEN(Builtin); - - /* handle generic backend nodes */ - GEN(be_FrameAddr); - GEN(be_Call); - GEN(be_IncSP); - GEN(be_Return); - GEN(be_AddSP); - GEN(be_SubSP); - GEN(be_Copy); - -#undef GEN -#undef BAD + be_start_transform_setup(); + + be_set_transform_function(op_Add, gen_Add); + be_set_transform_function(op_And, gen_And); + be_set_transform_function(op_ASM, ia32_gen_ASM); + be_set_transform_function(op_be_AddSP, gen_be_AddSP); + be_set_transform_function(op_be_Call, gen_be_Call); + be_set_transform_function(op_be_Copy, gen_be_Copy); + be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr); + be_set_transform_function(op_be_IncSP, gen_be_IncSP); + be_set_transform_function(op_be_Return, gen_be_Return); + be_set_transform_function(op_be_SubSP, gen_be_SubSP); + be_set_transform_function(op_Bound, gen_Bound); + be_set_transform_function(op_Builtin, gen_Builtin); + be_set_transform_function(op_Cmp, gen_Cmp); + be_set_transform_function(op_Cond, gen_Cond); + be_set_transform_function(op_Const, gen_Const); + be_set_transform_function(op_Conv, gen_Conv); + be_set_transform_function(op_CopyB, ia32_gen_CopyB); + be_set_transform_function(op_Div, gen_Div); + be_set_transform_function(op_DivMod, gen_DivMod); + be_set_transform_function(op_Eor, gen_Eor); + be_set_transform_function(op_ia32_l_Adc, gen_ia32_l_Adc); + be_set_transform_function(op_ia32_l_Add, gen_ia32_l_Add); + be_set_transform_function(op_ia32_Leave, be_duplicate_node); + be_set_transform_function(op_ia32_l_FloattoLL, gen_ia32_l_FloattoLL); + be_set_transform_function(op_ia32_l_IMul, gen_ia32_l_IMul); + be_set_transform_function(op_ia32_l_LLtoFloat, gen_ia32_l_LLtoFloat); + be_set_transform_function(op_ia32_l_Mul, gen_ia32_l_Mul); + be_set_transform_function(op_ia32_l_SarDep, gen_ia32_l_SarDep); + be_set_transform_function(op_ia32_l_Sbb, gen_ia32_l_Sbb); + be_set_transform_function(op_ia32_l_ShlDep, gen_ia32_l_ShlDep); + be_set_transform_function(op_ia32_l_ShlD, gen_ia32_l_ShlD); + be_set_transform_function(op_ia32_l_ShrDep, gen_ia32_l_ShrDep); + be_set_transform_function(op_ia32_l_ShrD, gen_ia32_l_ShrD); + be_set_transform_function(op_ia32_l_Sub, gen_ia32_l_Sub); + be_set_transform_function(op_ia32_GetEIP, be_duplicate_node); + be_set_transform_function(op_ia32_Minus64Bit, be_duplicate_node); + be_set_transform_function(op_ia32_NoReg_GP, be_duplicate_node); + be_set_transform_function(op_ia32_NoReg_VFP, be_duplicate_node); + be_set_transform_function(op_ia32_NoReg_XMM, be_duplicate_node); + be_set_transform_function(op_ia32_PopEbp, be_duplicate_node); + be_set_transform_function(op_ia32_Push, be_duplicate_node); + be_set_transform_function(op_IJmp, gen_IJmp); + be_set_transform_function(op_Jmp, gen_Jmp); + be_set_transform_function(op_Load, gen_Load); + be_set_transform_function(op_Minus, gen_Minus); + be_set_transform_function(op_Mod, gen_Mod); + be_set_transform_function(op_Mul, gen_Mul); + be_set_transform_function(op_Mulh, gen_Mulh); + be_set_transform_function(op_Mux, gen_Mux); + 
be_set_transform_function(op_Not, gen_Not); + be_set_transform_function(op_Or, gen_Or); + be_set_transform_function(op_Phi, gen_Phi); + be_set_transform_function(op_Proj, gen_Proj); + be_set_transform_function(op_Quot, gen_Quot); + be_set_transform_function(op_Rotl, gen_Rotl); + be_set_transform_function(op_Shl, gen_Shl); + be_set_transform_function(op_Shr, gen_Shr); + be_set_transform_function(op_Shrs, gen_Shrs); + be_set_transform_function(op_Store, gen_Store); + be_set_transform_function(op_Sub, gen_Sub); + be_set_transform_function(op_SymConst, gen_SymConst); + be_set_transform_function(op_Unknown, ia32_gen_Unknown); } /** @@ -5417,113 +5758,26 @@ static void register_transformers(void) */ static void ia32_pretransform_node(void) { - ia32_code_gen_t *cg = env_cg; + ir_graph *irg = current_ir_graph; + ia32_irg_data_t *irg_data = ia32_get_irg_data(current_ir_graph); - cg->unknown_gp = be_pre_transform_node(cg->unknown_gp); - cg->unknown_vfp = be_pre_transform_node(cg->unknown_vfp); - cg->unknown_xmm = be_pre_transform_node(cg->unknown_xmm); - cg->noreg_gp = be_pre_transform_node(cg->noreg_gp); - cg->noreg_vfp = be_pre_transform_node(cg->noreg_vfp); - cg->noreg_xmm = be_pre_transform_node(cg->noreg_xmm); + irg_data->noreg_gp = be_pre_transform_node(irg_data->noreg_gp); + irg_data->noreg_vfp = be_pre_transform_node(irg_data->noreg_vfp); + irg_data->noreg_xmm = be_pre_transform_node(irg_data->noreg_xmm); - nomem = get_irg_no_mem(current_ir_graph); - noreg_GP = ia32_new_NoReg_gp(cg); + nomem = get_irg_no_mem(irg); + noreg_GP = ia32_new_NoReg_gp(irg); get_fpcw(); } -/** - * Walker, checks if all ia32 nodes producing more than one result have their - * Projs, otherwise creates new Projs and keeps them using a be_Keep node. - */ -static void add_missing_keep_walker(ir_node *node, void *data) -{ - int n_outs, i; - unsigned found_projs = 0; - const ir_edge_t *edge; - ir_mode *mode = get_irn_mode(node); - ir_node *last_keep; - (void) data; - if (mode != mode_T) - return; - if (!is_ia32_irn(node)) - return; - - n_outs = arch_irn_get_n_outs(node); - if (n_outs <= 0) - return; - if (is_ia32_SwitchJmp(node)) - return; - - assert(n_outs < (int) sizeof(unsigned) * 8); - foreach_out_edge(node, edge) { - ir_node *proj = get_edge_src_irn(edge); - int pn; - - /* The node could be kept */ - if (is_End(proj)) - continue; - - if (get_irn_mode(proj) == mode_M) - continue; - - pn = get_Proj_proj(proj); - assert(pn < n_outs); - found_projs |= 1 << pn; - } - - - /* are keeps missing? */ - last_keep = NULL; - for (i = 0; i < n_outs; ++i) { - ir_node *block; - ir_node *in[1]; - const arch_register_req_t *req; - const arch_register_class_t *cls; - - if (found_projs & (1 << i)) { - continue; - } - - req = get_ia32_out_req(node, i); - cls = req->cls; - if (cls == NULL) { - continue; - } - if (cls == &ia32_reg_classes[CLASS_ia32_flags]) { - continue; - } - - block = get_nodes_block(node); - in[0] = new_r_Proj(current_ir_graph, block, node, - arch_register_class_mode(cls), i); - if (last_keep != NULL) { - be_Keep_add_node(last_keep, cls, in[0]); - } else { - last_keep = be_new_Keep(cls, current_ir_graph, block, 1, in); - if (sched_is_scheduled(node)) { - sched_add_after(node, last_keep); - } - } - } -} - -/** - * Adds missing keeps to nodes. Adds missing Proj nodes for unused outputs - * and keeps them. - */ -void ia32_add_missing_keeps(ia32_code_gen_t *cg) -{ - ir_graph *irg = be_get_birg_irg(cg->birg); - irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL); -} - /** * Post-process all calls if we are in SSE mode. 
* The ABI requires that the results are in st0, copy them * to a xmm register. */ -static void postprocess_fp_call_results(void) { +static void postprocess_fp_call_results(void) +{ int i; for (i = ARR_LEN(call_list) - 1; i >= 0; --i) { @@ -5583,7 +5837,7 @@ static void postprocess_fp_call_results(void) { ir_node *block = get_nodes_block(call); ir_node *frame = get_irg_frame(current_ir_graph); ir_node *old_mem = be_get_Proj_for_pn(call, pn_ia32_Call_M); - ir_node *call_mem = new_r_Proj(current_ir_graph, block, call, mode_M, pn_ia32_Call_M); + ir_node *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M); ir_node *vfst, *xld, *new_mem; /* store st(0) on stack */ @@ -5596,8 +5850,8 @@ static void postprocess_fp_call_results(void) { set_ia32_op_type(xld, ia32_AddrModeS); set_ia32_use_frame(xld); - new_res = new_r_Proj(current_ir_graph, block, xld, mode, pn_ia32_xLoad_res); - new_mem = new_r_Proj(current_ir_graph, block, xld, mode_M, pn_ia32_xLoad_M); + new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res); + new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M); if (old_mem != NULL) { edges_reroute(old_mem, new_mem, current_ir_graph); @@ -5612,18 +5866,18 @@ static void postprocess_fp_call_results(void) { } /* do the transformation */ -void ia32_transform_graph(ia32_code_gen_t *cg) +void ia32_transform_graph(ir_graph *irg) { int cse_last; register_transformers(); - env_cg = cg; - initial_fpcw = NULL; + initial_fpcw = NULL; + ia32_no_pic_adjust = 0; - BE_TIMER_PUSH(t_heights); - heights = heights_new(cg->irg); - BE_TIMER_POP(t_heights); - ia32_calculate_non_address_mode_nodes(cg->birg); + be_timer_push(T_HEIGHTS); + ia32_heights = heights_new(irg); + be_timer_pop(T_HEIGHTS); + ia32_calculate_non_address_mode_nodes(irg); /* the transform phase is not safe for CSE (yet) because several nodes get * attributes set after their creation */ @@ -5632,7 +5886,7 @@ void ia32_transform_graph(ia32_code_gen_t *cg) call_list = NEW_ARR_F(ir_node *, 0); call_types = NEW_ARR_F(ir_type *, 0); - be_transform_graph(cg->birg, ia32_pretransform_node); + be_transform_graph(irg, ia32_pretransform_node); if (ia32_cg_config.use_sse2) postprocess_fp_call_results(); @@ -5642,8 +5896,8 @@ void ia32_transform_graph(ia32_code_gen_t *cg) set_opt_cse(cse_last); ia32_free_non_address_mode_nodes(); - heights_free(heights); - heights = NULL; + heights_free(ia32_heights); + ia32_heights = NULL; } void ia32_init_transform(void)