X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;ds=sidebyside;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=c48353a5eecf876319a6b59192ef1d32333447f0;hb=7946c76e39669957af8cf9c5a520116eb77b9cb5;hp=ea53837636d74728d78e7528aa0138113bfdcd28;hpb=75e3b5fe17402ca27fc671dd404ff958664506b1;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index ea5383763..c48353a5e 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -463,15 +463,18 @@ const char *ia32_get_old_node_name(ia32_code_gen_t *cg, ir_node *irn) { } #endif /* NDEBUG */ -int use_source_address_mode(ir_node *block, ir_node *node, ir_node *other) +int ia32_use_source_address_mode(ir_node *block, ir_node *node, ir_node *other) { ir_mode *mode = get_irn_mode(node); ir_node *load; long pn; /* float constants are always available */ - if(is_Const(node) && mode_is_float(mode) - && !is_simple_x87_Const(node) && get_irn_n_edges(node) == 1) { + if(is_Const(node) && mode_is_float(mode)) { + if(!is_simple_x87_Const(node)) + return 0; + if(get_irn_n_edges(node) > 1) + return 0; return 1; } @@ -486,8 +489,10 @@ int use_source_address_mode(ir_node *block, ir_node *node, ir_node *other) /* we only use address mode if we're the only user of the load */ if(get_irn_n_edges(node) > 1) return 0; - - if(other != NULL && get_Load_mode(load) != get_irn_mode(other)) + /* in some edge cases with address mode we might reach the load normally + * and through some AM sequence, if it is already materialized then we + * can't create an AM node from it */ + if(be_is_transformed(node)) return 0; /* don't do AM if other node inputs depend on the load (via mem-proj) */ @@ -507,10 +512,23 @@ struct ia32_address_mode_t { ir_node *new_op1; ir_node *new_op2; op_pin_state pinned; - int commutative; - int ins_permuted; + unsigned commutative : 1; + unsigned ins_permuted : 1; }; +static void build_address_ptr(ia32_address_t *addr, ir_node *ptr, ir_node *mem) +{ + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + + /* construct load address */ + memset(addr, 0, sizeof(addr[0])); + ia32_create_address_mode(addr, ptr, /*force=*/0); + + addr->base = addr->base ? be_transform_node(addr->base) : noreg_gp; + addr->index = addr->index ? be_transform_node(addr->index) : noreg_gp; + addr->mem = be_transform_node(mem); +} + static void build_address(ia32_address_mode_t *am, ir_node *node) { ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); @@ -519,8 +537,6 @@ static void build_address(ia32_address_mode_t *am, ir_node *node) ir_node *ptr; ir_node *mem; ir_node *new_mem; - ir_node *base; - ir_node *index; if(is_Const(node)) { ir_entity *entity = create_float_const_entity(node); @@ -543,28 +559,14 @@ static void build_address(ia32_address_mode_t *am, ir_node *node) am->mem_proj = be_get_Proj_for_pn(load, pn_Load_M); /* construct load address */ - ia32_create_address_mode(addr, ptr, 0); - base = addr->base; - index = addr->index; + ia32_create_address_mode(addr, ptr, /*force=*/0); - if(base == NULL) { - base = noreg_gp; - } else { - base = be_transform_node(base); - } - - if(index == NULL) { - index = noreg_gp; - } else { - index = be_transform_node(index); - } - - addr->base = base; - addr->index = index; + addr->base = addr->base ? be_transform_node(addr->base) : noreg_gp; + addr->index = addr->index ? 
be_transform_node(addr->index) : noreg_gp; addr->mem = new_mem; } -static void set_address(ir_node *node, ia32_address_t *addr) +static void set_address(ir_node *node, const ia32_address_t *addr) { set_ia32_am_scale(node, addr->scale); set_ia32_am_sc(node, addr->symconst_ent); @@ -576,7 +578,7 @@ static void set_address(ir_node *node, ia32_address_t *addr) set_ia32_frame_ent(node, addr->frame_entity); } -static void set_am_attributes(ir_node *node, ia32_address_mode_t *am) +static void set_am_attributes(ir_node *node, const ia32_address_mode_t *am) { set_address(node, &am->addr); @@ -589,6 +591,14 @@ static void set_am_attributes(ir_node *node, ia32_address_mode_t *am) set_ia32_commutative(node); } +/** + * Check, if a given node is a Down-Conv, ie. a integer Conv + * from a mode with a mode with more bits to a mode with lesser bits. + * Moreover, we return only true if the node has not more than 1 user. + * + * @param node the node + * @return non-zero if node is a Down-Conv + */ static int is_downconv(const ir_node *node) { ir_mode *src_mode; @@ -610,16 +620,33 @@ static int is_downconv(const ir_node *node) && get_mode_size_bits(dest_mode) < get_mode_size_bits(src_mode); } -typedef enum { - match_commutative = 1 << 0, - match_am_and_immediates = 1 << 1, - match_no_am = 1 << 2, - match_8_bit_am = 1 << 3, - match_16_bit_am = 1 << 4, - match_no_immediate = 1 << 5, - match_force_32bit_op = 1 << 6, - match_skip_input_conv = 1 << 7 -} match_flags_t; +/* Skip all Down-Conv's on a given node and return the resulting node. */ +ir_node *ia32_skip_downconv(ir_node *node) { + while (is_downconv(node)) + node = get_Conv_op(node); + + return node; +} + +#if 0 +static ir_node *create_upconv(ir_node *node, ir_node *orig_node) +{ + ir_mode *mode = get_irn_mode(node); + ir_node *block; + ir_mode *tgt_mode; + dbg_info *dbgi; + + if(mode_is_signed(mode)) { + tgt_mode = mode_Is; + } else { + tgt_mode = mode_Iu; + } + block = get_nodes_block(node); + dbgi = get_irn_dbg_info(node); + + return create_I2I_Conv(mode, tgt_mode, dbgi, block, node, orig_node); +} +#endif static void match_arguments(ia32_address_mode_t *am, ir_node *block, ir_node *op1, ir_node *op2, match_flags_t flags) @@ -630,40 +657,49 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, ir_node *new_op2; ir_mode *mode = get_irn_mode(op2); int use_am; - int commutative; + unsigned commutative; int use_am_and_immediates; int use_immediate; - int skip_input_conv; int mode_bits = get_mode_size_bits(mode); memset(am, 0, sizeof(am[0])); commutative = (flags & match_commutative) != 0; use_am_and_immediates = (flags & match_am_and_immediates) != 0; - use_am = ! (flags & match_no_am); - use_immediate = !(flags & match_no_immediate); - skip_input_conv = (flags & match_skip_input_conv) != 0; + use_am = (flags & match_am) != 0; + use_immediate = (flags & match_immediate) != 0; + assert(!use_am_and_immediates || use_immediate); assert(op2 != NULL); assert(!commutative || op1 != NULL); - - if(mode_bits == 8 && !(flags & match_8_bit_am)) { - use_am = 0; - } else if(mode_bits == 16 && !(flags & match_16_bit_am)) { - use_am = 0; + assert(use_am || !(flags & match_8bit_am)); + assert(use_am || !(flags & match_16bit_am)); + + if(mode_bits == 8) { + if (! (flags & match_8bit_am)) + use_am = 0; + assert((flags & match_mode_neutral) || (flags & match_8bit)); + } else if(mode_bits == 16) { + if(! 
(flags & match_16bit_am)) + use_am = 0; + assert((flags & match_mode_neutral) || (flags & match_16bit)); } - while(is_downconv(op2)) { - op2 = get_Conv_op(op2); - } - if(op1 != NULL) { - while(is_downconv(op1)) { - op1 = get_Conv_op(op1); + /* we can simply skip downconvs for mode neutral nodes: the upper bits + * can be random for these operations */ + if(flags & match_mode_neutral) { + op2 = ia32_skip_downconv(op2); + if(op1 != NULL) { + op1 = ia32_skip_downconv(op1); } } - new_op2 = (use_immediate ? try_create_Immediate(op2, 0) : NULL); - if(new_op2 == NULL && use_am && use_source_address_mode(block, op2, op1)) { + if(! (flags & match_try_am) && use_immediate) + new_op2 = try_create_Immediate(op2, 0); + else + new_op2 = NULL; + + if(new_op2 == NULL && use_am && ia32_use_source_address_mode(block, op2, op1)) { build_address(am, op2); new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); if(mode_is_float(mode)) { @@ -673,7 +709,7 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, } am->op_type = ia32_AddrModeS; } else if(commutative && (new_op2 == NULL || use_am_and_immediates) && - use_am && use_source_address_mode(block, op1, op2)) { + use_am && ia32_use_source_address_mode(block, op1, op2)) { ir_node *noreg; build_address(am, op1); @@ -692,15 +728,20 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, } am->op_type = ia32_AddrModeS; } else { + if(flags & match_try_am) { + am->new_op1 = NULL; + am->new_op2 = NULL; + am->op_type = ia32_Normal; + return; + } + new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); if(new_op2 == NULL) new_op2 = be_transform_node(op2); am->op_type = ia32_Normal; - if(flags & match_force_32bit_op) { + am->ls_mode = get_irn_mode(op2); + if(flags & match_mode_neutral) am->ls_mode = mode_Iu; - } else { - am->ls_mode = get_irn_mode(op2); - } } if(addr->base == NULL) addr->base = noreg_gp; @@ -757,8 +798,6 @@ static ir_node *gen_binop(ir_node *node, ir_node *op1, ir_node *op2, ia32_address_mode_t am; ia32_address_t *addr = &am.addr; - flags |= match_force_32bit_op; - match_arguments(&am, block, op1, op2, flags); new_node = func(dbgi, irg, new_block, addr->base, addr->index, addr->mem, @@ -824,39 +863,6 @@ static ir_node *gen_binop_flags(ir_node *node, construct_binop_flags_func *func, return new_node; } -/** - * Construct a standard binary operation, set AM and immediate if required. - * - * @param op1 The first operand - * @param op2 The second operand - * @param func The node constructor function - * @return The constructed ia32 node. 
- */ -static ir_node *gen_binop_sse_float(ir_node *node, ir_node *op1, ir_node *op2, - construct_binop_func *func, - match_flags_t flags) -{ - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_graph *irg = current_ir_graph; - ir_node *new_node; - ia32_address_mode_t am; - ia32_address_t *addr = &am.addr; - - match_arguments(&am, block, op1, op2, flags); - - new_node = func(dbgi, irg, new_block, addr->base, addr->index, addr->mem, - am.new_op1, am.new_op2); - set_am_attributes(new_node, &am); - - SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - - new_node = fix_mem_proj(new_node, &am); - - return new_node; -} - static ir_node *get_fpcw(void) { ir_node *fpcw; @@ -886,10 +892,14 @@ static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, dbg_info *dbgi = get_irn_dbg_info(node); ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); + ir_mode *mode = get_irn_mode(node); ir_node *new_node; ia32_address_mode_t am; ia32_address_t *addr = &am.addr; + /* cannot use addresmode with long double on x87 */ + if (get_mode_size_bits(mode) > 64) flags &= ~match_am; + match_arguments(&am, block, op1, op2, flags); new_node = func(dbgi, irg, new_block, addr->base, addr->index, addr->mem, @@ -912,18 +922,34 @@ static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, * @return The constructed ia32 node. */ static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2, - construct_shift_func *func) + construct_shift_func *func, + match_flags_t flags) { dbg_info *dbgi = get_irn_dbg_info(node); ir_graph *irg = current_ir_graph; ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); - ir_node *new_op1 = be_transform_node(op1); - ir_node *new_op2 = create_immediate_or_transform(op2, 0); + ir_mode *mode = get_irn_mode(node); + ir_node *new_op1; + ir_node *new_op2; ir_node *new_node; - assert(! mode_is_float(get_irn_mode(node)) - && "Shift/Rotate with float not supported"); + assert(! mode_is_float(mode)); + assert(flags & match_immediate); + assert((flags & ~(match_mode_neutral | match_immediate)) == 0); + + if(flags & match_mode_neutral) { + op1 = ia32_skip_downconv(op1); + } + new_op1 = be_transform_node(op1); + + /* the shift amount can be any mode that is bigger than 5 bits, since all + * other bits are ignored anyway */ + while (is_Conv(op2) && get_irn_n_edges(op2) == 1) { + op2 = get_Conv_op(op2); + assert(get_mode_size_bits(get_irn_mode(op2)) >= 5); + } + new_op2 = create_immediate_or_transform(op2, 0); new_node = func(dbgi, irg, new_block, new_op1, new_op2); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); @@ -946,15 +972,23 @@ static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2, * @param func The node constructor function * @return The constructed ia32 node. 
*/ -static ir_node *gen_unop(ir_node *node, ir_node *op, construct_unop_func *func) +static ir_node *gen_unop(ir_node *node, ir_node *op, construct_unop_func *func, + match_flags_t flags) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *new_op = be_transform_node(op); - ir_node *new_node = NULL; - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *new_op; + ir_node *new_node; - new_node = func(dbgi, irg, block, new_op); + assert(flags == 0 || flags == match_mode_neutral); + if(flags & match_mode_neutral) { + op = ia32_skip_downconv(op); + } + + new_op = be_transform_node(op); + new_node = func(dbgi, irg, new_block, new_op); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); @@ -1013,17 +1047,17 @@ static ir_node *gen_Add(ir_node *node) { if (mode_is_float(mode)) { if (USE_SSE2(env_cg)) - return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xAdd, match_commutative); + return gen_binop(node, op1, op2, new_rd_ia32_xAdd, + match_commutative | match_am); else - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfadd, match_commutative); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfadd, + match_commutative | match_am); } - while(is_downconv(op2)) { - op2 = get_Conv_op(op2); - } - while(is_downconv(op1)) { - op1 = get_Conv_op(op1); - } + ia32_mark_non_am(node); + + op2 = ia32_skip_downconv(op2); + op1 = ia32_skip_downconv(op1); /** * Rules for an Add: @@ -1033,7 +1067,7 @@ static ir_node *gen_Add(ir_node *node) { * 3. Otherwise -> Lea */ memset(&addr, 0, sizeof(addr)); - ia32_create_address_mode(&addr, node, 1); + ia32_create_address_mode(&addr, node, /*force=*/1); add_immediate_op = NULL; /* a constant? 
*/ if(addr.base == NULL && addr.index == NULL) { @@ -1065,8 +1099,8 @@ static ir_node *gen_Add(ir_node *node) { } /* test if we can use source address mode */ - match_arguments(&am, block, op1, op2, - match_commutative | match_force_32bit_op | match_skip_input_conv); + match_arguments(&am, block, op1, op2, match_commutative + | match_mode_neutral | match_am | match_immediate | match_try_am); /* construct an Add with source address mode */ if (am.op_type == ia32_AddrModeS) { @@ -1100,9 +1134,11 @@ static ir_node *gen_Mul(ir_node *node) { if (mode_is_float(mode)) { if (USE_SSE2(env_cg)) - return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xMul, match_commutative); + return gen_binop(node, op1, op2, new_rd_ia32_xMul, + match_commutative | match_am); else - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfmul, match_commutative); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfmul, + match_commutative | match_am); } /* @@ -1111,7 +1147,8 @@ static ir_node *gen_Mul(ir_node *node) { constraints */ return gen_binop(node, op1, op2, new_rd_ia32_IMul, - match_commutative | match_skip_input_conv | match_force_32bit_op); + match_commutative | match_am | match_mode_neutral | + match_immediate | match_am_and_immediates); } /** @@ -1132,15 +1169,13 @@ static ir_node *gen_Mulh(ir_node *node) ir_node *op2 = get_Mulh_right(node); ir_node *proj_EDX; ir_node *new_node; - match_flags_t flags; ia32_address_mode_t am; ia32_address_t *addr = &am.addr; - flags = match_force_32bit_op | match_commutative | match_no_immediate; - assert(!mode_is_float(mode) && "Mulh with float not supported"); + assert(get_mode_size_bits(mode) == 32); - match_arguments(&am, block, op1, op2, flags); + match_arguments(&am, block, op1, op2, match_commutative | match_am); if (mode_is_signed(mode)) { new_node = new_rd_ia32_IMul1OP(dbgi, irg, new_block, addr->base, @@ -1205,7 +1240,8 @@ static ir_node *gen_And(ir_node *node) { } return gen_binop(node, op1, op2, new_rd_ia32_And, - match_commutative | match_force_32bit_op | match_skip_input_conv); + match_commutative | match_mode_neutral | match_am + | match_immediate); } @@ -1220,8 +1256,8 @@ static ir_node *gen_Or(ir_node *node) { ir_node *op2 = get_Or_right(node); assert (! mode_is_float(get_irn_mode(node))); - return gen_binop(node, op1, op2, new_rd_ia32_Or, - match_commutative | match_skip_input_conv | match_force_32bit_op); + return gen_binop(node, op1, op2, new_rd_ia32_Or, match_commutative + | match_mode_neutral | match_am | match_immediate); } @@ -1236,8 +1272,8 @@ static ir_node *gen_Eor(ir_node *node) { ir_node *op2 = get_Eor_right(node); assert(! 
mode_is_float(get_irn_mode(node))); - return gen_binop(node, op1, op2, new_rd_ia32_Xor, - match_commutative | match_skip_input_conv | match_force_32bit_op); + return gen_binop(node, op1, op2, new_rd_ia32_Xor, match_commutative + | match_mode_neutral | match_am | match_immediate); } @@ -1253,9 +1289,10 @@ static ir_node *gen_Sub(ir_node *node) { if (mode_is_float(mode)) { if (USE_SSE2(env_cg)) - return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xSub, 0); + return gen_binop(node, op1, op2, new_rd_ia32_xSub, match_am); else - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfsub, 0); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfsub, + match_am); } if(is_Const(op2)) { @@ -1263,120 +1300,113 @@ static ir_node *gen_Sub(ir_node *node) { node); } - return gen_binop(node, op1, op2, new_rd_ia32_Sub, - match_force_32bit_op | match_skip_input_conv); + return gen_binop(node, op1, op2, new_rd_ia32_Sub, match_mode_neutral + | match_am | match_immediate); } -typedef enum { flavour_Div = 1, flavour_Mod, flavour_DivMod } ia32_op_flavour_t; - /** * Generates an ia32 DivMod with additional infrastructure for the * register allocator if needed. - * - * @param dividend -no comment- :) - * @param divisor -no comment- :) - * @param dm_flav flavour_Div/Mod/DivMod - * @return The created ia32 DivMod node */ -static ir_node *generate_DivMod(ir_node *node, ir_node *dividend, - ir_node *divisor, ia32_op_flavour_t dm_flav) +static ir_node *create_Div(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *new_dividend = be_transform_node(dividend); - ir_node *new_divisor = be_transform_node(divisor); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *res, *proj_div, *proj_mod; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *mem; + ir_node *new_mem; + ir_node *op1; + ir_node *op2; + ir_node *new_node; ir_mode *mode; ir_node *sign_extension; - ir_node *mem, *new_mem; int has_exc; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; /* the upper bits have random contents for smaller modes */ - - proj_div = proj_mod = NULL; - has_exc = 0; - switch (dm_flav) { - case flavour_Div: - mem = get_Div_mem(node); - mode = get_Div_resmode(node); - proj_div = be_get_Proj_for_pn(node, pn_Div_res); - has_exc = be_get_Proj_for_pn(node, pn_Div_X_except) != NULL; - break; - case flavour_Mod: - mem = get_Mod_mem(node); - mode = get_Mod_resmode(node); - proj_mod = be_get_Proj_for_pn(node, pn_Mod_res); - has_exc = be_get_Proj_for_pn(node, pn_Mod_X_except) != NULL; - break; - case flavour_DivMod: - mem = get_DivMod_mem(node); - mode = get_DivMod_resmode(node); - proj_div = be_get_Proj_for_pn(node, pn_DivMod_res_div); - proj_mod = be_get_Proj_for_pn(node, pn_DivMod_res_mod); - has_exc = be_get_Proj_for_pn(node, pn_DivMod_X_except) != NULL; - break; - default: - panic("invalid divmod flavour!"); + has_exc = 0; + switch (get_irn_opcode(node)) { + case iro_Div: + op1 = get_Div_left(node); + op2 = get_Div_right(node); + mem = get_Div_mem(node); + mode = get_Div_resmode(node); + has_exc = be_get_Proj_for_pn(node, pn_Div_X_except) != NULL; + break; + case iro_Mod: + op1 = get_Mod_left(node); + op2 = get_Mod_right(node); + mem = get_Mod_mem(node); + mode = get_Mod_resmode(node); + has_exc = be_get_Proj_for_pn(node, pn_Mod_X_except) != NULL; + break; + case iro_DivMod: + 
op1 = get_DivMod_left(node); + op2 = get_DivMod_right(node); + mem = get_DivMod_mem(node); + mode = get_DivMod_resmode(node); + has_exc = be_get_Proj_for_pn(node, pn_DivMod_X_except) != NULL; + break; + default: + panic("invalid divmod node %+F", node); } - new_mem = be_transform_node(mem); - assert(get_mode_size_bits(mode) == 32); + match_arguments(&am, block, op1, op2, match_am); - if (mode_is_signed(mode)) { - /* in signed mode, we need to sign extend the dividend */ - ir_node *produceval = new_rd_ia32_ProduceVal(dbgi, irg, block); - add_irn_dep(produceval, get_irg_frame(irg)); - sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_dividend, - produceval); + if(!is_NoMem(mem)) { + new_mem = be_transform_node(mem); + if(!is_NoMem(addr->mem)) { + ir_node *in[2]; + in[0] = new_mem; + in[1] = addr->mem; + new_mem = new_rd_Sync(dbgi, irg, new_block, 2, in); + } } else { - sign_extension = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, 0); - set_ia32_flags(sign_extension, get_ia32_flags(sign_extension) | arch_irn_flags_modify_flags); - add_irn_dep(sign_extension, get_irg_frame(irg)); + new_mem = addr->mem; } if (mode_is_signed(mode)) { - res = new_rd_ia32_IDiv(dbgi, irg, block, noreg, noreg, new_mem, - new_dividend, sign_extension, new_divisor); + ir_node *produceval = new_rd_ia32_ProduceVal(dbgi, irg, new_block); + add_irn_dep(produceval, get_irg_frame(irg)); + sign_extension = new_rd_ia32_Cltd(dbgi, irg, new_block, am.new_op1, + produceval); + + new_node = new_rd_ia32_IDiv(dbgi, irg, new_block, addr->base, + addr->index, new_mem, am.new_op1, + sign_extension, am.new_op2); } else { - res = new_rd_ia32_Div(dbgi, irg, block, noreg, noreg, new_mem, - new_dividend, sign_extension, new_divisor); + sign_extension = new_rd_ia32_Const(dbgi, irg, new_block, NULL, 0, 0); + add_irn_dep(sign_extension, get_irg_frame(irg)); + + new_node = new_rd_ia32_Div(dbgi, irg, new_block, addr->base, + addr->index, new_mem, am.new_op1, + sign_extension, am.new_op2); } - set_ia32_exc_label(res, has_exc); - set_irn_pinned(res, get_irn_pinned(node)); + set_ia32_exc_label(new_node, has_exc); + set_irn_pinned(new_node, get_irn_pinned(node)); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + set_am_attributes(new_node, &am); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return res; + new_node = fix_mem_proj(new_node, &am); + + return new_node; } -/** - * Wrapper for generate_DivMod. Sets flavour_Mod. - * - */ static ir_node *gen_Mod(ir_node *node) { - return generate_DivMod(node, get_Mod_left(node), - get_Mod_right(node), flavour_Mod); + return create_Div(node); } -/** - * Wrapper for generate_DivMod. Sets flavour_Div. - * - */ static ir_node *gen_Div(ir_node *node) { - return generate_DivMod(node, get_Div_left(node), - get_Div_right(node), flavour_Div); + return create_Div(node); } -/** - * Wrapper for generate_DivMod. Sets flavour_DivMod. 
- */ static ir_node *gen_DivMod(ir_node *node) { - return generate_DivMod(node, get_DivMod_left(node), - get_DivMod_right(node), flavour_DivMod); + return create_Div(node); } @@ -1392,9 +1422,9 @@ static ir_node *gen_Quot(ir_node *node) ir_node *op2 = get_Quot_right(node); if (USE_SSE2(env_cg)) { - return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xDiv, 0); + return gen_binop(node, op1, op2, new_rd_ia32_xDiv, match_am); } else { - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfdiv, 0); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfdiv, match_am); } } @@ -1408,24 +1438,20 @@ static ir_node *gen_Shl(ir_node *node) { ir_node *left = get_Shl_left(node); ir_node *right = get_Shl_right(node); - while(is_downconv(left)) { - left = get_Conv_op(left); - } - - return gen_shift_binop(node, left, right, new_rd_ia32_Shl); + return gen_shift_binop(node, left, right, new_rd_ia32_Shl, + match_mode_neutral | match_immediate); } - - /** * Creates an ia32 Shr. * * @return The created ia32 Shr node */ static ir_node *gen_Shr(ir_node *node) { - assert(get_mode_size_bits(get_irn_mode(node)) == 32); - return gen_shift_binop(node, get_Shr_left(node), - get_Shr_right(node), new_rd_ia32_Shr); + ir_node *left = get_Shr_left(node); + ir_node *right = get_Shr_right(node); + + return gen_shift_binop(node, left, right, new_rd_ia32_Shr, match_immediate); } @@ -1440,8 +1466,6 @@ static ir_node *gen_Shrs(ir_node *node) { ir_node *right = get_Shrs_right(node); ir_mode *mode = get_irn_mode(node); - assert(get_mode_size_bits(mode) == 32); - if(is_Const(right) && mode == mode_Is) { tarval *tv = get_Const_tarval(right); long val = get_tarval_long(tv); @@ -1489,7 +1513,7 @@ static ir_node *gen_Shrs(ir_node *node) { } } - return gen_shift_binop(node, left, right, new_rd_ia32_Sar); + return gen_shift_binop(node, left, right, new_rd_ia32_Sar, match_immediate); } @@ -1501,10 +1525,8 @@ static ir_node *gen_Shrs(ir_node *node) { * @param op2 The second operator * @return The created ia32 RotL node */ -static ir_node *gen_RotL(ir_node *node, - ir_node *op1, ir_node *op2) { - assert(get_mode_size_bits(get_irn_mode(node)) == 32); - return gen_shift_binop(node, op1, op2, new_rd_ia32_Rol); +static ir_node *gen_RotL(ir_node *node, ir_node *op1, ir_node *op2) { + return gen_shift_binop(node, op1, op2, new_rd_ia32_Rol, match_immediate); } @@ -1518,10 +1540,8 @@ static ir_node *gen_RotL(ir_node *node, * @param op2 The second operator * @return The created ia32 RotR node */ -static ir_node *gen_RotR(ir_node *node, ir_node *op1, - ir_node *op2) { - assert(get_mode_size_bits(get_irn_mode(node)) == 32); - return gen_shift_binop(node, op1, op2, new_rd_ia32_Ror); +static ir_node *gen_RotR(ir_node *node, ir_node *op1, ir_node *op2) { + return gen_shift_binop(node, op1, op2, new_rd_ia32_Ror, match_immediate); } @@ -1582,38 +1602,38 @@ static ir_node *gen_Minus(ir_node *node) dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_irn_mode(node); ir_entity *ent; - ir_node *res; - int size; + ir_node *new_node; + int size; if (mode_is_float(mode)) { ir_node *new_op = be_transform_node(op); if (USE_SSE2(env_cg)) { + /* TODO: non-optimal... if we have many xXors, then we should + * rather create a load for the const and use that instead of + * several AM nodes... 
*/ ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); ir_node *noreg_xmm = ia32_new_NoReg_xmm(env_cg); ir_node *nomem = new_rd_NoMem(irg); - res = new_rd_ia32_xXor(dbgi, irg, block, noreg_gp, noreg_gp, nomem, - new_op, noreg_xmm); + new_node = new_rd_ia32_xXor(dbgi, irg, block, noreg_gp, noreg_gp, + nomem, new_op, noreg_xmm); size = get_mode_size_bits(mode); ent = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN); - set_ia32_am_sc(res, ent); - set_ia32_op_type(res, ia32_AddrModeS); - set_ia32_ls_mode(res, mode); + set_ia32_am_sc(new_node, ent); + set_ia32_op_type(new_node, ia32_AddrModeS); + set_ia32_ls_mode(new_node, mode); } else { - res = new_rd_ia32_vfchs(dbgi, irg, block, new_op); + new_node = new_rd_ia32_vfchs(dbgi, irg, block, new_op); } } else { - while(is_downconv(op)) { - op = get_Conv_op(op); - } - res = gen_unop(node, op, new_rd_ia32_Neg); + new_node = gen_unop(node, op, new_rd_ia32_Neg, match_mode_neutral); } - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return res; + return new_node; } /** @@ -1627,11 +1647,7 @@ static ir_node *gen_Not(ir_node *node) { assert(get_irn_mode(node) != mode_b); /* should be lowered already */ assert (! mode_is_float(get_irn_mode(node))); - while(is_downconv(node)) { - node = get_Conv_op(node); - } - - return gen_unop(node, op, new_rd_ia32_Not); + return gen_unop(node, op, new_rd_ia32_Not, match_mode_neutral); } @@ -1643,56 +1659,97 @@ static ir_node *gen_Not(ir_node *node) { */ static ir_node *gen_Abs(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op = get_Abs_op(node); - ir_node *new_op = be_transform_node(op); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); - ir_node *noreg_fp = ia32_new_NoReg_fp(env_cg); - ir_node *nomem = new_NoMem(); - ir_node *res; - int size; + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *op = get_Abs_op(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_node *noreg_fp = ia32_new_NoReg_fp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *new_op; + ir_node *new_node; + int size; ir_entity *ent; if (mode_is_float(mode)) { + new_op = be_transform_node(op); + if (USE_SSE2(env_cg)) { - res = new_rd_ia32_xAnd(dbgi,irg, block, noreg_gp, noreg_gp, nomem, new_op, noreg_fp); + new_node = new_rd_ia32_xAnd(dbgi,irg, new_block, noreg_gp, noreg_gp, + nomem, new_op, noreg_fp); size = get_mode_size_bits(mode); ent = ia32_gen_fp_known_const(size == 32 ? 
ia32_SABS : ia32_DABS); - set_ia32_am_sc(res, ent); + set_ia32_am_sc(new_node, ent); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - set_ia32_op_type(res, ia32_AddrModeS); - set_ia32_ls_mode(res, mode); + set_ia32_op_type(new_node, ia32_AddrModeS); + set_ia32_ls_mode(new_node, mode); } else { - res = new_rd_ia32_vfabs(dbgi, irg, block, new_op); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + new_node = new_rd_ia32_vfabs(dbgi, irg, new_block, new_op); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); } } else { + if (get_mode_size_bits(mode) == 32) { + new_op = be_transform_node(op); + } else { + new_op = create_I2I_Conv(mode, mode_Is, dbgi, block, op, node); + } + ir_node *xor; - ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block); - ir_node *sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_op, - pval); + ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, new_block); + ir_node *sign_extension = new_rd_ia32_Cltd(dbgi, irg, new_block, + new_op, pval); add_irn_dep(pval, get_irg_frame(irg)); - SET_IA32_ORIG_NODE(sign_extension, - ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(sign_extension,ia32_get_old_node_name(env_cg, node)); - xor = new_rd_ia32_Xor(dbgi, irg, block, noreg_gp, noreg_gp, nomem, new_op, - sign_extension); + xor = new_rd_ia32_Xor(dbgi, irg, new_block, noreg_gp, noreg_gp, + nomem, new_op, sign_extension); SET_IA32_ORIG_NODE(xor, ia32_get_old_node_name(env_cg, node)); - res = new_rd_ia32_Sub(dbgi, irg, block, noreg_gp, noreg_gp, nomem, xor, - sign_extension); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + new_node = new_rd_ia32_Sub(dbgi, irg, new_block, noreg_gp, noreg_gp, + nomem, xor, sign_extension); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); } - return res; + return new_node; +} + +static ir_node *get_flags_node(ir_node *node, pn_Cmp *pnc_out) +{ + ir_graph *irg = current_ir_graph; + ir_node *flags; + ir_node *new_op; + ir_node *noreg; + ir_node *nomem; + ir_node *new_block; + dbg_info *dbgi; + + /* we have a Cmp as input */ + if(is_Proj(node)) { + ir_node *pred = get_Proj_pred(node); + if(is_Cmp(pred)) { + flags = be_transform_node(pred); + *pnc_out = get_Proj_proj(node); + return flags; + } + } + + /* a mode_b value, we have to compare it against 0 */ + dbgi = get_irn_dbg_info(node); + new_block = be_transform_node(get_nodes_block(node)); + new_op = be_transform_node(node); + noreg = ia32_new_NoReg_gp(env_cg); + nomem = new_NoMem(); + flags = new_rd_ia32_Test(dbgi, irg, new_block, noreg, noreg, nomem, + new_op, new_op, 0, 0); + *pnc_out = pn_Cmp_Lg; + return flags; } /** @@ -1713,12 +1770,12 @@ static ir_node *gen_Load(ir_node *node) { ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_mode *mode = get_Load_mode(node); ir_mode *res_mode; - ir_node *new_op; + ir_node *new_node; ia32_address_t addr; /* construct load address */ memset(&addr, 0, sizeof(addr)); - ia32_create_address_mode(&addr, ptr, 0); + ia32_create_address_mode(&addr, ptr, /*force=*/0); base = addr.base; index = addr.index; @@ -1736,44 +1793,48 @@ static ir_node *gen_Load(ir_node *node) { if (mode_is_float(mode)) { if (USE_SSE2(env_cg)) { - new_op = new_rd_ia32_xLoad(dbgi, irg, block, base, index, new_mem, - mode); + new_node = new_rd_ia32_xLoad(dbgi, irg, block, base, index, new_mem, + mode); res_mode = mode_xmm; } else { - new_op = new_rd_ia32_vfld(dbgi, irg, block, base, index, new_mem, + new_node = 
new_rd_ia32_vfld(dbgi, irg, block, base, index, new_mem, mode); res_mode = mode_vfp; } } else { - if(mode == mode_b) - mode = mode_Iu; + assert(mode != mode_b); /* create a conv node with address mode for smaller modes */ if(get_mode_size_bits(mode) < 32) { - new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, base, index, - new_mem, noreg, mode); + new_node = new_rd_ia32_Conv_I2I(dbgi, irg, block, base, index, + new_mem, noreg, mode); } else { - new_op = new_rd_ia32_Load(dbgi, irg, block, base, index, new_mem); + new_node = new_rd_ia32_Load(dbgi, irg, block, base, index, new_mem); } res_mode = mode_Iu; } - set_irn_pinned(new_op, get_irn_pinned(node)); - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_ls_mode(new_op, mode); - set_address(new_op, &addr); + set_irn_pinned(new_node, get_irn_pinned(node)); + set_ia32_op_type(new_node, ia32_AddrModeS); + set_ia32_ls_mode(new_node, mode); + set_address(new_node, &addr); + + if(get_irn_pinned(node) == op_pin_state_floats) { + add_ia32_flags(new_node, arch_irn_flags_rematerializable); + } /* make sure we are scheduled behind the initial IncSP/Barrier * to avoid spills being placed before it */ if (block == get_irg_start_block(irg)) { - add_irn_dep(new_op, get_irg_frame(irg)); + add_irn_dep(new_node, get_irg_frame(irg)); } - set_ia32_exc_label(new_op, be_get_Proj_for_pn(node, pn_Load_X_except) != NULL); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + set_ia32_exc_label(new_node, + be_get_Proj_for_pn(node, pn_Load_X_except) != NULL); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return new_op; + return new_node; } static int use_dest_am(ir_node *block, ir_node *node, ir_node *mem, @@ -1813,19 +1874,24 @@ static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, ir_node *mem, ir_node *ptr, ir_mode *mode, construct_binop_dest_func *func, construct_binop_dest_func *func8bit, - int commutative) + match_flags_t flags) { - ir_node *src_block = get_nodes_block(node); - ir_node *block; - ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_node *src_block = get_nodes_block(node); + ir_node *block; + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); ir_graph *irg = current_ir_graph; dbg_info *dbgi; - ir_node *new_node; - ir_node *new_op; + ir_node *new_node; + ir_node *new_op; + int commutative; ia32_address_mode_t am; - ia32_address_t *addr = &am.addr; + ia32_address_t *addr = &am.addr; memset(&am, 0, sizeof(am)); + assert(flags & match_dest_am); + assert(flags & match_immediate); /* there is no destam node without... 
*/ + commutative = (flags & match_commutative) != 0; + if(use_dest_am(src_block, op1, mem, ptr, op2)) { build_address(&am, op1); new_op = create_immediate_or_transform(op2, 0); @@ -1843,8 +1909,8 @@ static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, if(addr->mem == NULL) addr->mem = new_NoMem(); - dbgi = get_irn_dbg_info(node); - block = be_transform_node(src_block); + dbgi = get_irn_dbg_info(node); + block = be_transform_node(src_block); if(get_mode_size_bits(mode) == 8) { new_node = func8bit(dbgi, irg, block, addr->base, addr->index, addr->mem, new_op); @@ -1864,10 +1930,9 @@ static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, ir_node *ptr, ir_mode *mode, construct_unop_dest_func *func) { + ir_graph *irg = current_ir_graph; ir_node *src_block = get_nodes_block(node); ir_node *block; - ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); - ir_graph *irg = current_ir_graph; dbg_info *dbgi; ir_node *new_node; ia32_address_mode_t am; @@ -1879,13 +1944,6 @@ static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, build_address(&am, op); - if(addr->base == NULL) - addr->base = noreg_gp; - if(addr->index == NULL) - addr->index = noreg_gp; - if(addr->mem == NULL) - addr->mem = new_NoMem(); - dbgi = get_irn_dbg_info(node); block = be_transform_node(src_block); new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem); @@ -1897,11 +1955,58 @@ static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, return new_node; } +static ir_node *try_create_SetMem(ir_node *node, ir_node *ptr, ir_node *mem) { + ir_mode *mode = get_irn_mode(node); + ir_node *psi_true = get_Psi_val(node, 0); + ir_node *psi_default = get_Psi_default(node); + ir_graph *irg; + ir_node *cond; + ir_node *new_mem; + dbg_info *dbgi; + ir_node *block; + ir_node *new_block; + ir_node *flags; + ir_node *new_node; + int negated; + pn_Cmp pnc; + ia32_address_t addr; + + if(get_mode_size_bits(mode) != 8) + return NULL; + + if(is_Const_1(psi_true) && is_Const_0(psi_default)) { + negated = 0; + } else if(is_Const_0(psi_true) && is_Const_1(psi_default)) { + negated = 1; + } else { + return NULL; + } + + build_address_ptr(&addr, ptr, mem); + + irg = current_ir_graph; + dbgi = get_irn_dbg_info(node); + block = get_nodes_block(node); + new_block = be_transform_node(block); + cond = get_Psi_cond(node, 0); + flags = get_flags_node(cond, &pnc); + new_mem = be_transform_node(mem); + new_node = new_rd_ia32_SetMem(dbgi, irg, new_block, addr.base, + addr.index, addr.mem, flags, pnc, negated); + set_address(new_node, &addr); + set_ia32_op_type(new_node, ia32_AddrModeD); + set_ia32_ls_mode(new_node, mode); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + + return new_node; +} + static ir_node *try_create_dest_am(ir_node *node) { - ir_node *val = get_Store_value(node); - ir_node *mem = get_Store_mem(node); - ir_node *ptr = get_Store_ptr(node); - ir_mode *mode = get_irn_mode(val); + ir_node *val = get_Store_value(node); + ir_node *mem = get_Store_mem(node); + ir_node *ptr = get_Store_ptr(node); + ir_mode *mode = get_irn_mode(val); + int bits = get_mode_size_bits(mode); ir_node *op1; ir_node *op2; ir_node *new_node; @@ -1910,8 +2015,24 @@ static ir_node *try_create_dest_am(ir_node *node) { if(!mode_needs_gp_reg(mode)) return NULL; - /* store must be the only user of the val node */ - if(get_irn_n_edges(val) > 1) + while(1) { + /* store must be the only user of the val node */ + if(get_irn_n_edges(val) > 1) + return NULL; + /* skip pointless convs */ + 
if(is_Conv(val)) { + ir_node *conv_op = get_Conv_op(val); + ir_mode *pred_mode = get_irn_mode(conv_op); + if(pred_mode == mode_b || bits <= get_mode_size_bits(pred_mode)) { + val = conv_op; + continue; + } + } + break; + } + + /* value must be in the same block */ + if(get_nodes_block(node) != get_nodes_block(val)) return NULL; switch(get_irn_opcode(val)) { @@ -1928,7 +2049,9 @@ static ir_node *try_create_dest_am(ir_node *node) { break; } new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_AddMem, new_rd_ia32_AddMem8Bit, 1); + new_rd_ia32_AddMem, new_rd_ia32_AddMem8Bit, + match_dest_am | match_commutative | + match_immediate); break; case iro_Sub: op1 = get_Sub_left(val); @@ -1938,51 +2061,66 @@ static ir_node *try_create_dest_am(ir_node *node) { "found\n"); } new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_SubMem, new_rd_ia32_SubMem8Bit, 0); + new_rd_ia32_SubMem, new_rd_ia32_SubMem8Bit, + match_dest_am | match_immediate | + match_immediate); break; case iro_And: op1 = get_And_left(val); op2 = get_And_right(val); new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_AndMem, new_rd_ia32_AndMem8Bit, 1); + new_rd_ia32_AndMem, new_rd_ia32_AndMem8Bit, + match_dest_am | match_commutative | + match_immediate); break; case iro_Or: op1 = get_Or_left(val); op2 = get_Or_right(val); new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_OrMem, new_rd_ia32_OrMem8Bit, 1); + new_rd_ia32_OrMem, new_rd_ia32_OrMem8Bit, + match_dest_am | match_commutative | + match_immediate); break; case iro_Eor: op1 = get_Eor_left(val); op2 = get_Eor_right(val); new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_XorMem, new_rd_ia32_XorMem8Bit, 1); + new_rd_ia32_XorMem, new_rd_ia32_XorMem8Bit, + match_dest_am | match_commutative | + match_immediate); break; case iro_Shl: op1 = get_Shl_left(val); op2 = get_Shl_right(val); new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_ShlMem, new_rd_ia32_ShlMem, 0); + new_rd_ia32_ShlMem, new_rd_ia32_ShlMem, + match_dest_am | match_immediate); break; case iro_Shr: op1 = get_Shr_left(val); op2 = get_Shr_right(val); new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_ShrMem, new_rd_ia32_ShrMem, 0); + new_rd_ia32_ShrMem, new_rd_ia32_ShrMem, + match_dest_am | match_immediate); break; case iro_Shrs: op1 = get_Shrs_left(val); op2 = get_Shrs_right(val); new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_SarMem, new_rd_ia32_SarMem, 0); + new_rd_ia32_SarMem, new_rd_ia32_SarMem, + match_dest_am | match_immediate); break; case iro_Rot: op1 = get_Rot_left(val); op2 = get_Rot_right(val); new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, - new_rd_ia32_RolMem, new_rd_ia32_RolMem, 0); + new_rd_ia32_RolMem, new_rd_ia32_RolMem, + match_dest_am | match_immediate); break; /* TODO: match ROR patterns... 
*/ + case iro_Psi: + new_node = try_create_SetMem(val, ptr, mem); + break; case iro_Minus: op1 = get_Minus_op(val); new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_NegMem); @@ -1997,52 +2135,77 @@ static ir_node *try_create_dest_am(ir_node *node) { return NULL; } + if(new_node != NULL) { + if(get_irn_pinned(new_node) != op_pin_state_pinned && + get_irn_pinned(node) == op_pin_state_pinned) { + set_irn_pinned(new_node, op_pin_state_pinned); + } + } + return new_node; } +static int is_float_to_int32_conv(const ir_node *node) +{ + ir_mode *mode = get_irn_mode(node); + ir_node *conv_op; + ir_mode *conv_mode; + + if(get_mode_size_bits(mode) != 32 || !mode_needs_gp_reg(mode)) + return 0; + + if(!is_Conv(node)) + return 0; + conv_op = get_Conv_op(node); + conv_mode = get_irn_mode(conv_op); + + if(!mode_is_float(conv_mode)) + return 0; + + return 1; +} + /** * Transforms a Store. * * @return the created ia32 Store node */ -static ir_node *gen_Store(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *ptr = get_Store_ptr(node); - ir_node *base; - ir_node *index; - ir_node *val = get_Store_value(node); +static ir_node *gen_Store(ir_node *node) +{ + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *ptr = get_Store_ptr(node); + ir_node *val = get_Store_value(node); + ir_node *mem = get_Store_mem(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_mode *mode = get_irn_mode(val); ir_node *new_val; - ir_node *mem = get_Store_mem(node); - ir_node *new_mem = be_transform_node(mem); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_mode *mode = get_irn_mode(val); - ir_node *new_op; + ir_node *new_node; ia32_address_t addr; /* check for destination address mode */ - new_op = try_create_dest_am(node); - if(new_op != NULL) - return new_op; + new_node = try_create_dest_am(node); + if(new_node != NULL) + return new_node; /* construct store address */ memset(&addr, 0, sizeof(addr)); - ia32_create_address_mode(&addr, ptr, 0); - base = addr.base; - index = addr.index; + ia32_create_address_mode(&addr, ptr, /*force=*/0); - if(base == NULL) { - base = noreg; + if(addr.base == NULL) { + addr.base = noreg; } else { - base = be_transform_node(base); + addr.base = be_transform_node(addr.base); } - if(index == NULL) { - index = noreg; + if(addr.index == NULL) { + addr.index = noreg; } else { - index = be_transform_node(index); + addr.index = be_transform_node(addr.index); } + addr.mem = be_transform_node(mem); if (mode_is_float(mode)) { /* convs (and strict-convs) before stores are unnecessary if the mode @@ -2052,46 +2215,59 @@ static ir_node *gen_Store(ir_node *node) { } new_val = be_transform_node(val); if (USE_SSE2(env_cg)) { - new_op = new_rd_ia32_xStore(dbgi, irg, block, base, index, new_mem, - new_val); + new_node = new_rd_ia32_xStore(dbgi, irg, new_block, addr.base, + addr.index, addr.mem, new_val); } else { - new_op = new_rd_ia32_vfst(dbgi, irg, block, base, index, new_mem, new_val, - mode); + new_node = new_rd_ia32_vfst(dbgi, irg, new_block, addr.base, + addr.index, addr.mem, new_val, mode); + } + } else if(is_float_to_int32_conv(val)) { + ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg); + val = get_Conv_op(val); + + /* convs (and strict-convs) before stores are unnecessary if the mode + is the same */ + while(is_Conv(val) && mode == 
get_irn_mode(get_Conv_op(val))) { + val = get_Conv_op(val); } + new_val = be_transform_node(val); + + new_node = new_rd_ia32_vfist(dbgi, irg, new_block, addr.base, + addr.index, addr.mem, new_val, trunc_mode); } else { new_val = create_immediate_or_transform(val, 0); - if(mode == mode_b) - mode = mode_Iu; + assert(mode != mode_b); if (get_mode_size_bits(mode) == 8) { - new_op = new_rd_ia32_Store8Bit(dbgi, irg, block, base, index, new_mem, - new_val); + new_node = new_rd_ia32_Store8Bit(dbgi, irg, new_block, addr.base, + addr.index, addr.mem, new_val); } else { - new_op = new_rd_ia32_Store(dbgi, irg, block, base, index, new_mem, - new_val); + new_node = new_rd_ia32_Store(dbgi, irg, new_block, addr.base, + addr.index, addr.mem, new_val); } } - set_irn_pinned(new_op, get_irn_pinned(node)); - set_ia32_op_type(new_op, ia32_AddrModeD); - set_ia32_ls_mode(new_op, mode); + set_irn_pinned(new_node, get_irn_pinned(node)); + set_ia32_op_type(new_node, ia32_AddrModeD); + set_ia32_ls_mode(new_node, mode); - set_ia32_exc_label(new_op, be_get_Proj_for_pn(node, pn_Store_X_except) != NULL); - set_address(new_op, &addr); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + set_ia32_exc_label(new_node, + be_get_Proj_for_pn(node, pn_Store_X_except) != NULL); + set_address(new_node, &addr); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return new_op; + return new_node; } static ir_node *create_Switch(ir_node *node) { - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *sel = get_Cond_selector(node); - ir_node *new_sel = be_transform_node(sel); - ir_node *res; - int switch_min = INT_MAX; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *sel = get_Cond_selector(node); + ir_node *new_sel = be_transform_node(sel); + int switch_min = INT_MAX; + ir_node *new_node; const ir_edge_t *edge; assert(get_mode_size_bits(get_irn_mode(sel)) == 32); @@ -2115,44 +2291,11 @@ static ir_node *create_Switch(ir_node *node) SET_IA32_ORIG_NODE(new_sel, ia32_get_old_node_name(env_cg, node)); } - res = new_rd_ia32_SwitchJmp(dbgi, irg, block, new_sel); - set_ia32_pncode(res, get_Cond_defaultProj(node)); - - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - - return res; -} - -static ir_node *get_flags_node(ir_node *node, pn_Cmp *pnc_out) -{ - ir_graph *irg = current_ir_graph; - ir_node *flags; - ir_node *new_op; - ir_node *noreg; - ir_node *nomem; - ir_node *new_block; - dbg_info *dbgi; - - /* we have a Cmp as input */ - if(is_Proj(node)) { - ir_node *pred = get_Proj_pred(node); - if(is_Cmp(pred)) { - flags = be_transform_node(pred); - *pnc_out = get_Proj_proj(node); - return flags; - } - } + new_node = new_rd_ia32_SwitchJmp(dbgi, irg, block, new_sel, + get_Cond_defaultProj(node)); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - /* a mode_b value, we have to compare it against 0 */ - dbgi = get_irn_dbg_info(node); - new_block = be_transform_node(get_nodes_block(node)); - new_op = be_transform_node(node); - noreg = ia32_new_NoReg_gp(env_cg); - nomem = new_NoMem(); - flags = new_rd_ia32_Test(dbgi, irg, new_block, noreg, noreg, nomem, - new_op, new_op, 0, 0); - *pnc_out = pn_Cmp_Lg; - return flags; + return new_node; } static ir_node *gen_Cond(ir_node *node) { @@ -2162,8 +2305,8 @@ static ir_node *gen_Cond(ir_node *node) { dbg_info *dbgi = get_irn_dbg_info(node); 
ir_node *sel = get_Cond_selector(node); ir_mode *sel_mode = get_irn_mode(sel); - ir_node *res; ir_node *flags = NULL; + ir_node *new_node; pn_Cmp pnc; if (sel_mode != mode_b) { @@ -2173,10 +2316,10 @@ static ir_node *gen_Cond(ir_node *node) { /* we get flags from a cmp */ flags = get_flags_node(sel, &pnc); - res = new_rd_ia32_Jcc(dbgi, irg, new_block, flags, pnc); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + new_node = new_rd_ia32_Jcc(dbgi, irg, new_block, flags, pnc); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return res; + return new_node; } @@ -2207,19 +2350,15 @@ static ir_node *gen_CopyB(ir_node *node) { size >>= 2; res = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, size); + add_irn_dep(res, get_irg_frame(irg)); + + res = new_rd_ia32_CopyB(dbgi, irg, block, new_dst, new_src, res, new_mem, rem); + } else { if(size == 0) { ir_fprintf(stderr, "Optimisation warning copyb %+F with size <4\n", node); - set_ia32_flags(res, get_ia32_flags(res) | arch_irn_flags_modify_flags); } - add_irn_dep(res, get_irg_frame(irg)); - - res = new_rd_ia32_CopyB(dbgi, irg, block, new_dst, new_src, res, new_mem); - /* we misuse the pncode field for the copyb size */ - set_ia32_pncode(res, rem); - } else { - res = new_rd_ia32_CopyB_i(dbgi, irg, block, new_dst, new_src, new_mem); - set_ia32_pncode(res, size); + res = new_rd_ia32_CopyB_i(dbgi, irg, block, new_dst, new_src, new_mem, size); } SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); @@ -2229,92 +2368,14 @@ static ir_node *gen_CopyB(ir_node *node) { static ir_node *gen_be_Copy(ir_node *node) { - ir_node *result = be_duplicate_node(node); - ir_mode *mode = get_irn_mode(result); + ir_node *new_node = be_duplicate_node(node); + ir_mode *mode = get_irn_mode(new_node); if (mode_needs_gp_reg(mode)) { - set_irn_mode(result, mode_Iu); - } - - return result; -} - -/** - * helper function: checks wether all Cmp projs are Lg or Eq which is needed - * to fold an and into a test node - */ -static int can_fold_test_and(ir_node *node) -{ - const ir_edge_t *edge; - - /** we can only have eq and lg projs */ - foreach_out_edge(node, edge) { - ir_node *proj = get_edge_src_irn(edge); - pn_Cmp pnc = get_Proj_proj(proj); - if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) - return 0; - } - - return 1; -} - -static ir_node *try_create_Test(ir_node *node) -{ - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - ir_node *cmp_left = get_Cmp_left(node); - ir_node *cmp_right = get_Cmp_right(node); - ir_mode *mode; - ir_node *left; - ir_node *right; - ir_node *res; - ia32_address_mode_t am; - ia32_address_t *addr = &am.addr; - int cmp_unsigned; - - /* can we use a test instruction? 
*/ - if(!is_Const_0(cmp_right)) - return NULL; - - if(is_And(cmp_left) && get_irn_n_edges(cmp_left) == 1 && - can_fold_test_and(node)) { - ir_node *and_left = get_And_left(cmp_left); - ir_node *and_right = get_And_right(cmp_left); - - mode = get_irn_mode(and_left); - left = and_left; - right = and_right; - } else { - mode = get_irn_mode(cmp_left); - left = cmp_left; - right = cmp_left; - } - - assert(get_mode_size_bits(mode) <= 32); - - match_arguments(&am, block, left, right, match_commutative | - match_8_bit_am | match_16_bit_am | match_am_and_immediates); - - cmp_unsigned = !mode_is_signed(mode); - if(get_mode_size_bits(mode) == 8) { - res = new_rd_ia32_Test8Bit(dbgi, irg, new_block, addr->base, - addr->index, addr->mem, am.new_op1, - am.new_op2, am.ins_permuted, cmp_unsigned); - } else { - res = new_rd_ia32_Test(dbgi, irg, new_block, addr->base, addr->index, - addr->mem, am.new_op1, am.new_op2, - am.ins_permuted, cmp_unsigned); + set_irn_mode(new_node, mode_Iu); } - set_am_attributes(res, &am); - assert(mode != NULL); - set_ia32_ls_mode(res, mode); - - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - res = fix_mem_proj(res, &am); - return res; + return new_node; } static ir_node *create_Fucom(ir_node *node) @@ -2327,31 +2388,33 @@ static ir_node *create_Fucom(ir_node *node) ir_node *new_left = be_transform_node(left); ir_node *right = get_Cmp_right(node); ir_node *new_right; - ir_node *res; + ir_node *new_node; if(transform_config.use_fucomi) { new_right = be_transform_node(right); - res = new_rd_ia32_vFucomi(dbgi, irg, new_block, new_left, new_right, 0); - set_ia32_commutative(res); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + new_node = new_rd_ia32_vFucomi(dbgi, irg, new_block, new_left, + new_right, 0); + set_ia32_commutative(new_node); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); } else { if(transform_config.use_ftst && is_Const_null(right)) { - res = new_rd_ia32_vFtstFnstsw(dbgi, irg, new_block, new_left, 0); + new_node = new_rd_ia32_vFtstFnstsw(dbgi, irg, new_block, new_left, + 0); } else { new_right = be_transform_node(right); - res = new_rd_ia32_vFucomFnstsw(dbgi, irg, new_block, new_left, + new_node = new_rd_ia32_vFucomFnstsw(dbgi, irg, new_block, new_left, new_right, 0); } - set_ia32_commutative(res); + set_ia32_commutative(new_node); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - res = new_rd_ia32_Sahf(dbgi, irg, new_block, res); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + new_node = new_rd_ia32_Sahf(dbgi, irg, new_block, new_node); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); } - return res; + return new_node; } static ir_node *create_Ucomi(ir_node *node) @@ -2366,7 +2429,7 @@ static ir_node *create_Ucomi(ir_node *node) ia32_address_mode_t am; ia32_address_t *addr = &am.addr; - match_arguments(&am, src_block, left, right, match_commutative); + match_arguments(&am, src_block, left, right, match_commutative | match_am); new_node = new_rd_ia32_Ucomi(dbgi, irg, new_block, addr->base, addr->index, addr->mem, am.new_op1, am.new_op2, @@ -2380,6 +2443,25 @@ static ir_node *create_Ucomi(ir_node *node) return new_node; } +/** + * helper function: checks wether all Cmp projs are Lg or Eq which is needed + * to fold an and into a test node + */ +static int can_fold_test_and(ir_node *node) +{ + const ir_edge_t *edge; + + /** we can only have eq and lg projs */ + foreach_out_edge(node, 
edge) { + ir_node *proj = get_edge_src_irn(edge); + pn_Cmp pnc = get_Proj_proj(proj); + if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) + return 0; + } + + return 1; +} + static ir_node *gen_Cmp(ir_node *node) { ir_graph *irg = current_ir_graph; @@ -2389,7 +2471,7 @@ static ir_node *gen_Cmp(ir_node *node) ir_node *left = get_Cmp_left(node); ir_node *right = get_Cmp_right(node); ir_mode *cmp_mode = get_irn_mode(left); - ir_node *res; + ir_node *new_node; ia32_address_mode_t am; ia32_address_t *addr = &am.addr; int cmp_unsigned; @@ -2406,36 +2488,86 @@ static ir_node *gen_Cmp(ir_node *node) /* we prefer the Test instruction where possible except cases where * we can use SourceAM */ - if(!use_source_address_mode(block, left, right) && - !use_source_address_mode(block, right, left)) { - res = try_create_Test(node); - if(res != NULL) - return res; - } - - match_arguments(&am, block, left, right, - match_commutative | match_8_bit_am | match_16_bit_am | - match_am_and_immediates); - - cmp_unsigned = !mode_is_signed(get_irn_mode(left)); - if(get_mode_size_bits(cmp_mode) == 8) { - res = new_rd_ia32_Cmp8Bit(dbgi, irg, new_block, addr->base, addr->index, - addr->mem, am.new_op1, am.new_op2, - am.ins_permuted, cmp_unsigned); + cmp_unsigned = !mode_is_signed(cmp_mode); + if (is_Const_0(right)) { + if (is_And(left) && + get_irn_n_edges(left) == 1 && + can_fold_test_and(node)) { + /* Test(and_left, and_right) */ + ir_node *and_left = get_And_left(left); + ir_node *and_right = get_And_right(left); + ir_mode *mode = get_irn_mode(and_left); + + match_arguments(&am, block, and_left, and_right, match_commutative | + match_am | match_8bit_am | match_16bit_am | + match_am_and_immediates | match_immediate | + match_8bit | match_16bit); + if (get_mode_size_bits(mode) == 8) { + new_node = new_rd_ia32_Test8Bit(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op1, + am.new_op2, am.ins_permuted, + cmp_unsigned); + } else { + new_node = new_rd_ia32_Test(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op1, + am.new_op2, am.ins_permuted, cmp_unsigned); + } + } else { + match_arguments(&am, block, NULL, left, match_am | match_8bit_am | + match_16bit_am | match_8bit | match_16bit); + if (am.op_type == ia32_AddrModeS) { + /* Cmp(AM, 0) */ + ir_node *imm_zero = try_create_Immediate(right, 0); + if (get_mode_size_bits(cmp_mode) == 8) { + new_node = new_rd_ia32_Cmp8Bit(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op2, + imm_zero, am.ins_permuted, + cmp_unsigned); + } else { + new_node = new_rd_ia32_Cmp(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op2, + imm_zero, am.ins_permuted, cmp_unsigned); + } + } else { + /* Test(left, left) */ + if (get_mode_size_bits(cmp_mode) == 8) { + new_node = new_rd_ia32_Test8Bit(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op2, + am.new_op2, am.ins_permuted, + cmp_unsigned); + } else { + new_node = new_rd_ia32_Test(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op2, + am.new_op2, am.ins_permuted, + cmp_unsigned); + } + } + } } else { - res = new_rd_ia32_Cmp(dbgi, irg, new_block, addr->base, addr->index, - addr->mem, am.new_op1, am.new_op2, - am.ins_permuted, cmp_unsigned); + /* Cmp(left, right) */ + match_arguments(&am, block, left, right, match_commutative | match_am | + match_8bit_am | match_16bit_am | match_am_and_immediates | + match_immediate | match_8bit | match_16bit); + if (get_mode_size_bits(cmp_mode) == 8) { + new_node = new_rd_ia32_Cmp8Bit(dbgi, irg, new_block, 
addr->base, + addr->index, addr->mem, am.new_op1, + am.new_op2, am.ins_permuted, + cmp_unsigned); + } else { + new_node = new_rd_ia32_Cmp(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op1, + am.new_op2, am.ins_permuted, cmp_unsigned); + } } - set_am_attributes(res, &am); + set_am_attributes(new_node, &am); assert(cmp_mode != NULL); - set_ia32_ls_mode(res, cmp_mode); + set_ia32_ls_mode(new_node, cmp_mode); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - res = fix_mem_proj(res, &am); + new_node = fix_mem_proj(new_node, &am); - return res; + return new_node; } static ir_node *create_CMov(ir_node *node, ir_node *new_flags, pn_Cmp pnc) @@ -2456,8 +2588,8 @@ static ir_node *create_CMov(ir_node *node, ir_node *new_flags, pn_Cmp pnc) addr = &am.addr; - match_flags = match_commutative | match_no_immediate | match_16_bit_am - | match_force_32bit_op; + match_flags = match_commutative | match_am | match_16bit_am | + match_mode_neutral; match_arguments(&am, block, val_false, val_true, match_flags); @@ -2482,16 +2614,20 @@ static ir_node *create_set_32bit(dbg_info *dbgi, ir_node *new_block, ir_graph *irg = current_ir_graph; ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_node *nomem = new_NoMem(); - ir_node *res; + ir_mode *mode = get_irn_mode(orig_node); + ir_node *new_node; - res = new_rd_ia32_Set(dbgi, irg, new_block, flags, pnc, ins_permuted); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, orig_node)); - res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, noreg, noreg, - nomem, res, mode_Bu); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, orig_node)); - (void) orig_node; + new_node = new_rd_ia32_Set(dbgi, irg, new_block, flags, pnc, ins_permuted); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, orig_node)); - return res; + /* we might need to conv the result up */ + if(get_mode_size_bits(mode) > 8) { + new_node = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, noreg, noreg, + nomem, new_node, mode_Bu); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, orig_node)); + } + + return new_node; } /** @@ -2508,7 +2644,7 @@ static ir_node *gen_Psi(ir_node *node) ir_node *psi_default = get_Psi_default(node); ir_node *cond = get_Psi_cond(node, 0); ir_node *flags = NULL; - ir_node *res; + ir_node *new_node; pn_Cmp pnc; assert(get_Psi_n_conds(node) == 1); @@ -2518,13 +2654,13 @@ static ir_node *gen_Psi(ir_node *node) flags = get_flags_node(cond, &pnc); if(is_Const_1(psi_true) && is_Const_0(psi_default)) { - res = create_set_32bit(dbgi, new_block, flags, pnc, node, 0); + new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, 0); } else if(is_Const_0(psi_true) && is_Const_1(psi_default)) { - res = create_set_32bit(dbgi, new_block, flags, pnc, node, 1); + new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, 1); } else { - res = create_CMov(node, flags, pnc); + new_node = create_CMov(node, flags, pnc); } - return res; + return new_node; } @@ -2592,7 +2728,7 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) ir_node *nomem = new_NoMem(); ir_node *frame = get_irg_frame(irg); ir_node *store, *load; - ir_node *res; + ir_node *new_node; store = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, nomem, node, tgt_mode); @@ -2606,8 +2742,8 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) set_ia32_op_type(load, ia32_AddrModeS); SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node)); - res = 
new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res); - return res; + new_node = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res); + return new_node; } static ir_node *create_Immediate(ir_entity *symconst, int symconst_sign, long val) @@ -2625,41 +2761,43 @@ static ir_node *create_Immediate(ir_entity *symconst, int symconst_sign, long va * Create a conversion from general purpose to x87 register */ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { - ir_node *src_block = get_nodes_block(node); - ir_node *block = be_transform_node(src_block); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *op = get_Conv_op(node); - ir_node *new_op; + ir_node *src_block = get_nodes_block(node); + ir_node *block = be_transform_node(src_block); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *op = get_Conv_op(node); + ir_node *new_op = NULL; ir_node *noreg; ir_node *nomem; ir_mode *mode; ir_mode *store_mode; ir_node *fild; ir_node *store; - ir_node *res; + ir_node *new_node; int src_bits; /* fild can use source AM if the operand is a signed 32bit integer */ if (src_mode == mode_Is) { ia32_address_mode_t am; - match_arguments(&am, src_block, NULL, op, match_no_immediate); + match_arguments(&am, src_block, NULL, op, match_am | match_try_am); if (am.op_type == ia32_AddrModeS) { ia32_address_t *addr = &am.addr; - fild = new_rd_ia32_vfild(dbgi, irg, block, addr->base, addr->index, addr->mem); - res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); + fild = new_rd_ia32_vfild(dbgi, irg, block, addr->base, + addr->index, addr->mem); + new_node = new_r_Proj(irg, block, fild, mode_vfp, + pn_ia32_vfild_res); set_am_attributes(fild, &am); SET_IA32_ORIG_NODE(fild, ia32_get_old_node_name(env_cg, node)); fix_mem_proj(fild, &am); - return res; + return new_node; } - new_op = am.new_op2; - } else { + } + if(new_op == NULL) { new_op = be_transform_node(op); } @@ -2722,9 +2860,9 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { set_ia32_op_type(fild, ia32_AddrModeS); set_ia32_ls_mode(fild, store_mode); - res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); + new_node = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); - return res; + return new_node; } /** @@ -2738,9 +2876,7 @@ static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, int src_bits = get_mode_size_bits(src_mode); int tgt_bits = get_mode_size_bits(tgt_mode); ir_node *new_block = be_transform_node(block); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *new_op; - ir_node *res; + ir_node *new_node; ir_mode *smaller_mode; int smaller_bits; ia32_address_mode_t am; @@ -2754,40 +2890,31 @@ static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, smaller_bits = tgt_bits; } - memset(&am, 0, sizeof(am)); - if(use_source_address_mode(block, op, NULL)) { - build_address(&am, op); - new_op = noreg; - am.op_type = ia32_AddrModeS; - } else { - new_op = be_transform_node(op); - am.op_type = ia32_Normal; +#ifdef DEBUG_libfirm + if(is_Const(op)) { + ir_fprintf(stderr, "Optimisation warning: conv after constant %+F\n", + op); } - if(addr->base == NULL) - addr->base = noreg; - if(addr->index == NULL) - addr->index = noreg; - if(addr->mem == NULL) - addr->mem = new_NoMem(); +#endif - DB((dbg, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); + match_arguments(&am, block, NULL, op, match_8bit | match_16bit | match_am | + match_8bit_am | match_16bit_am); if (smaller_bits == 8) { - 
res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, addr->base, - addr->index, addr->mem, new_op, - smaller_mode); + new_node = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op2, + smaller_mode); } else { - res = new_rd_ia32_Conv_I2I(dbgi, irg, new_block, addr->base, - addr->index, addr->mem, new_op, - smaller_mode); + new_node = new_rd_ia32_Conv_I2I(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op2, + smaller_mode); } - - set_am_attributes(res, &am); - set_ia32_ls_mode(res, smaller_mode); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - (void) node; - res = fix_mem_proj(res, &am); - - return res; + set_am_attributes(new_node, &am); + /* match_arguments assume that out-mode = in-mode, this isn't true here + * so fix it */ + set_ia32_ls_mode(new_node, smaller_mode); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + new_node = fix_mem_proj(new_node, &am); + return new_node; } /** @@ -2944,7 +3071,7 @@ static ir_node *try_create_Immediate(ir_node *node, ir_mode *mode; ir_node *cnst = NULL; ir_node *symconst = NULL; - ir_node *res; + ir_node *new_node; mode = get_irn_mode(node); if(!mode_is_int(mode) && !mode_is_reference(mode)) { @@ -3030,9 +3157,9 @@ static ir_node *try_create_Immediate(ir_node *node, offset = tarval_neg(offset); } - res = create_Immediate(symconst_ent, symconst_sign, val); + new_node = create_Immediate(symconst_ent, symconst_sign, val); - return res; + return new_node; } static ir_node *create_immediate_or_transform(ir_node *node, @@ -3049,8 +3176,8 @@ static const arch_register_req_t no_register_req = { arch_register_req_type_none, NULL, /* regclass */ NULL, /* limit bitset */ - { -1, -1 }, /* same pos */ - -1 /* different pos */ + 0, /* same pos */ + 0 /* different pos */ }; /** @@ -3076,7 +3203,7 @@ static void parse_asm_constraint(int pos, constraint_t *constraint, const char * ir_graph *irg = current_ir_graph; struct obstack *obst = get_irg_obstack(irg); arch_register_req_t *req; - unsigned *limited_ptr; + unsigned *limited_ptr = NULL; int p; int same_as = -1; @@ -3268,9 +3395,8 @@ static void parse_asm_constraint(int pos, constraint_t *constraint, const char * req->cls = other_constr->cls; req->type = arch_register_req_type_should_be_same; req->limited = NULL; - req->other_same[0] = pos; - req->other_same[1] = -1; - req->other_different = -1; + req->other_same = 1U << pos; + req->other_different = 0; /* switch constraints. 
This is because in firm we have same_as * constraints on the output constraints while in the gcc asm syntax @@ -3350,7 +3476,7 @@ static ir_node *gen_ASM(ir_node *node) ir_node *new_block = be_transform_node(block); dbg_info *dbgi = get_irn_dbg_info(node); ir_node **in; - ir_node *res; + ir_node *new_node; int out_arity; int n_out_constraints; int n_clobbers; @@ -3464,15 +3590,15 @@ static ir_node *gen_ASM(ir_node *node) in[i] = transformed; } - res = new_rd_ia32_Asm(dbgi, irg, new_block, arity, in, out_arity, - get_ASM_text(node), register_map); + new_node = new_rd_ia32_Asm(dbgi, irg, new_block, arity, in, out_arity, + get_ASM_text(node), register_map); - set_ia32_out_req_all(res, out_reg_reqs); - set_ia32_in_req_all(res, in_reg_reqs); + set_ia32_out_req_all(new_node, out_reg_reqs); + set_ia32_in_req_all(new_node, in_reg_reqs); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return res; + return new_node; } /******************************************** @@ -3495,15 +3621,15 @@ static ir_node *gen_be_FrameAddr(ir_node *node) { ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *res; + ir_node *new_node; - res = new_rd_ia32_Lea(dbgi, irg, block, new_op, noreg); - set_ia32_frame_ent(res, arch_get_frame_entity(env_cg->arch_env, node)); - set_ia32_use_frame(res); + new_node = new_rd_ia32_Lea(dbgi, irg, block, new_op, noreg); + set_ia32_frame_ent(new_node, arch_get_frame_entity(env_cg->arch_env, node)); + set_ia32_use_frame(new_node); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return res; + return new_node; } /** @@ -3613,30 +3739,10 @@ static ir_node *gen_be_Return(ir_node *node) { */ static ir_node *gen_be_AddSP(ir_node *node) { - ir_node *src_block = get_nodes_block(node); - ir_node *new_block = be_transform_node(src_block); - ir_node *sz = get_irn_n(node, be_pos_AddSP_size); - ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_node; - ia32_address_mode_t am; - ia32_address_t *addr = &am.addr; - match_flags_t flags = 0; - - match_arguments(&am, src_block, sp, sz, flags); + ir_node *sz = get_irn_n(node, be_pos_AddSP_size); + ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp); - new_node = new_rd_ia32_SubSP(dbgi, irg, new_block, addr->base, addr->index, - addr->mem, am.new_op1, am.new_op2); - set_am_attributes(new_node, &am); - /* we can't use source address mode anymore when using immediates */ - if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) - set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); - SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - - new_node = fix_mem_proj(new_node, &am); - - return new_node; + return gen_binop(node, sp, sz, new_rd_ia32_SubSP, match_am); } /** @@ -3644,30 +3750,10 @@ static ir_node *gen_be_AddSP(ir_node *node) */ static ir_node *gen_be_SubSP(ir_node *node) { - ir_node *src_block = get_nodes_block(node); - ir_node *new_block = be_transform_node(src_block); - ir_node *sz = get_irn_n(node, be_pos_SubSP_size); - ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_node; - ia32_address_mode_t am; - ia32_address_t *addr = &am.addr; - match_flags_t flags = 0; + ir_node *sz = 
get_irn_n(node, be_pos_SubSP_size); + ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp); - match_arguments(&am, src_block, sp, sz, flags); - - new_node = new_rd_ia32_AddSP(dbgi, irg, new_block, addr->base, addr->index, - addr->mem, am.new_op1, am.new_op2); - set_am_attributes(new_node, &am); - /* we can't use source address mode anymore when using immediates */ - if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) - set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); - SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - - new_node = fix_mem_proj(new_node, &am); - - return new_node; + return gen_binop(node, sp, sz, new_rd_ia32_AddSP, match_am); } /** @@ -3691,9 +3777,8 @@ static ir_node *gen_Unknown(ir_node *node) { } else if (mode_needs_gp_reg(mode)) { return ia32_new_Unknown_gp(env_cg); } else { - assert(0 && "unsupported Unknown-Mode"); + panic("unsupported Unknown-Mode"); } - return NULL; } @@ -3746,11 +3831,12 @@ static ir_node *gen_IJmp(ir_node *node) ir_node *new_node; ia32_address_mode_t am; ia32_address_t *addr = &am.addr; - match_flags_t flags; - flags = match_force_32bit_op | match_no_immediate; + assert(get_irn_mode(op) == mode_P); - match_arguments(&am, block, NULL, op, flags); + match_arguments(&am, block, NULL, op, + match_am | match_8bit_am | match_16bit_am | + match_immediate | match_8bit | match_16bit); new_node = new_rd_ia32_IJmp(dbgi, irg, new_block, addr->base, addr->index, addr->mem, am.new_op2); @@ -3850,27 +3936,37 @@ static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func) return new_op; } +static ir_node *gen_ia32_l_ShlDep(ir_node *node) +{ + ir_node *left = get_irn_n(node, n_ia32_l_ShlDep_left); + ir_node *right = get_irn_n(node, n_ia32_l_ShlDep_right); -/** - * Transforms an ia32_l_XXX into a "real" XXX node - * - * @param node The node to transform - * @return the created ia32 XXX node - */ -#define GEN_LOWERED_SHIFT_OP(l_op, op) \ - static ir_node *gen_ia32_##l_op(ir_node *node) { \ - return gen_shift_binop(node, get_irn_n(node, 0), \ - get_irn_n(node, 1), new_rd_ia32_##op); \ - } + return gen_shift_binop(node, left, right, new_rd_ia32_Shl, + match_immediate | match_mode_neutral); +} + +static ir_node *gen_ia32_l_ShrDep(ir_node *node) +{ + ir_node *left = get_irn_n(node, n_ia32_l_ShrDep_left); + ir_node *right = get_irn_n(node, n_ia32_l_ShrDep_right); + return gen_shift_binop(node, left, right, new_rd_ia32_Shr, + match_immediate); +} -GEN_LOWERED_SHIFT_OP(l_ShlDep, Shl) -GEN_LOWERED_SHIFT_OP(l_ShrDep, Shr) -GEN_LOWERED_SHIFT_OP(l_SarDep, Sar) +static ir_node *gen_ia32_l_SarDep(ir_node *node) +{ + ir_node *left = get_irn_n(node, n_ia32_l_SarDep_left); + ir_node *right = get_irn_n(node, n_ia32_l_SarDep_right); + return gen_shift_binop(node, left, right, new_rd_ia32_Sar, + match_immediate); +} static ir_node *gen_ia32_l_Add(ir_node *node) { ir_node *left = get_irn_n(node, n_ia32_l_Add_left); ir_node *right = get_irn_n(node, n_ia32_l_Add_right); - ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Add, match_commutative); + ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Add, + match_commutative | match_am | match_immediate | + match_mode_neutral); if(is_Proj(lowered)) { lowered = get_Proj_pred(lowered); @@ -3884,17 +3980,9 @@ static ir_node *gen_ia32_l_Add(ir_node *node) { static ir_node *gen_ia32_l_Adc(ir_node *node) { - return gen_binop_flags(node, new_rd_ia32_Adc, match_commutative); -} - -/** - * Transforms an ia32_l_Neg into a "real" ia32_Neg node - * - * @param node The 
node to transform - * @return the created ia32 Neg node - */ -static ir_node *gen_ia32_l_Neg(ir_node *node) { - return gen_unop(node, get_unop_op(node), new_rd_ia32_Neg); + return gen_binop_flags(node, new_rd_ia32_Adc, + match_commutative | match_am | match_immediate | + match_mode_neutral); } /** @@ -3975,7 +4063,7 @@ static ir_node *gen_ia32_l_Mul(ir_node *node) { ir_node *right = get_binop_right(node); return gen_binop(node, left, right, new_rd_ia32_Mul, - match_commutative | match_no_immediate); + match_commutative | match_am | match_mode_neutral); } /** @@ -3988,13 +4076,14 @@ static ir_node *gen_ia32_l_IMul(ir_node *node) { ir_node *right = get_binop_right(node); return gen_binop(node, left, right, new_rd_ia32_IMul1OP, - match_commutative | match_no_immediate); + match_commutative | match_am | match_mode_neutral); } static ir_node *gen_ia32_l_Sub(ir_node *node) { ir_node *left = get_irn_n(node, n_ia32_l_Sub_left); ir_node *right = get_irn_n(node, n_ia32_l_Sub_right); - ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Sub, 0); + ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Sub, + match_am | match_immediate | match_mode_neutral); if(is_Proj(lowered)) { lowered = get_Proj_pred(lowered); @@ -4007,7 +4096,8 @@ static ir_node *gen_ia32_l_Sub(ir_node *node) { } static ir_node *gen_ia32_l_Sbb(ir_node *node) { - return gen_binop_flags(node, new_rd_ia32_Sbb, 0); + return gen_binop_flags(node, new_rd_ia32_Sbb, + match_am | match_immediate | match_mode_neutral); } /** @@ -4017,37 +4107,52 @@ static ir_node *gen_ia32_l_Sbb(ir_node *node) { * op3 - shift count * Only op3 can be an immediate. */ -static ir_node *gen_lowered_64bit_shifts(ir_node *node, ir_node *op1, - ir_node *op2, ir_node *count) +static ir_node *gen_lowered_64bit_shifts(ir_node *node, ir_node *high, + ir_node *low, ir_node *count) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *new_op = NULL; + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_op1 = be_transform_node(op1); - ir_node *new_op2 = be_transform_node(op2); - ir_node *new_count = create_immediate_or_transform(count, 'I'); - - /* TODO proper AM support */ + ir_node *new_high = be_transform_node(high); + ir_node *new_low = be_transform_node(low); + ir_node *new_count; + ir_node *new_node; - if (is_ia32_l_ShlD(node)) - new_op = new_rd_ia32_ShlD(dbgi, irg, block, new_op1, new_op2, new_count); - else - new_op = new_rd_ia32_ShrD(dbgi, irg, block, new_op1, new_op2, new_count); + /* the shift amount can be any mode that is bigger than 5 bits, since all + * other bits are ignored anyway */ + while (is_Conv(count) && get_irn_n_edges(count) == 1) { + assert(get_mode_size_bits(get_irn_mode(count)) >= 5); + count = get_Conv_op(count); + } + new_count = create_immediate_or_transform(count, 0); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + if (is_ia32_l_ShlD(node)) { + new_node = new_rd_ia32_ShlD(dbgi, irg, new_block, new_high, new_low, + new_count); + } else { + new_node = new_rd_ia32_ShrD(dbgi, irg, new_block, new_high, new_low, + new_count); + } + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return new_op; + return new_node; } -static ir_node *gen_ia32_l_ShlD(ir_node *node) { - return gen_lowered_64bit_shifts(node, get_irn_n(node, 0), - get_irn_n(node, 1), get_irn_n(node, 2)); +static ir_node *gen_ia32_l_ShlD(ir_node *node) +{ + ir_node *high = 
get_irn_n(node, n_ia32_l_ShlD_high); + ir_node *low = get_irn_n(node, n_ia32_l_ShlD_low); + ir_node *count = get_irn_n(node, n_ia32_l_ShlD_count); + return gen_lowered_64bit_shifts(node, high, low, count); } -static ir_node *gen_ia32_l_ShrD(ir_node *node) { - return gen_lowered_64bit_shifts(node, get_irn_n(node, 0), - get_irn_n(node, 1), get_irn_n(node, 2)); +static ir_node *gen_ia32_l_ShrD(ir_node *node) +{ + ir_node *high = get_irn_n(node, n_ia32_l_ShrD_high); + ir_node *low = get_irn_n(node, n_ia32_l_ShrD_low); + ir_node *count = get_irn_n(node, n_ia32_l_ShrD_count); + return gen_lowered_64bit_shifts(node, high, low, count); } /** @@ -4537,6 +4642,10 @@ static ir_node *gen_Proj_be_Call(ir_node *node) { */ static ir_node *gen_Proj_Cmp(ir_node *node) { + (void) node; + panic("not all mode_b nodes are lowered"); + +#if 0 /* normally Cmps are processed when looking at Cond nodes, but this case * can happen in complicated Psi conditions */ dbg_info *dbgi = get_irn_dbg_info(node); @@ -4550,6 +4659,7 @@ static ir_node *gen_Proj_Cmp(ir_node *node) res = create_set_32bit(dbgi, new_block, new_cmp, pnc, node, 0); return res; +#endif } /** @@ -4670,7 +4780,6 @@ static void register_transformers(void) /* transform ops from intrinsic lowering */ GEN(ia32_l_Add); GEN(ia32_l_Adc); - GEN(ia32_l_Neg); GEN(ia32_l_Mul); GEN(ia32_l_IMul); GEN(ia32_l_ShlDep); @@ -4820,22 +4929,24 @@ void ia32_add_missing_keeps(ia32_code_gen_t *cg) void ia32_transform_graph(ia32_code_gen_t *cg) { int cse_last; ir_graph *irg = cg->irg; + int opt_arch = cg->isa->opt_arch; + int arch = cg->isa->arch; /* TODO: look at cpu and fill transform config in with that... */ transform_config.use_incdec = 1; transform_config.use_sse2 = 0; - transform_config.use_ffreep = 0; + transform_config.use_ffreep = ARCH_ATHLON(opt_arch); transform_config.use_ftst = 0; - transform_config.use_femms = 0; + transform_config.use_femms = ARCH_ATHLON(opt_arch) && ARCH_MMX(arch) && ARCH_AMD(arch); transform_config.use_fucomi = 1; - transform_config.use_cmov = 1; + transform_config.use_cmov = IS_P6_ARCH(arch); register_transformers(); env_cg = cg; initial_fpcw = NULL; heights = heights_new(irg); - calculate_non_address_mode_nodes(irg); + ia32_calculate_non_address_mode_nodes(cg->birg); /* the transform phase is not safe for CSE (yet) because several nodes get * attributes set after their creation */ @@ -4846,7 +4957,7 @@ void ia32_transform_graph(ia32_code_gen_t *cg) { set_opt_cse(cse_last); - free_non_address_mode_nodes(); + ia32_free_non_address_mode_nodes(); heights_free(heights); heights = NULL; }
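
The recurring change in this patch is the move to positive match flags (match_am, match_immediate, match_mode_neutral, match_8bit_am/match_16bit_am) in place of the removed match_no_immediate/match_force_32bit_op style flags, with transformers funnelled through match_arguments()/gen_binop(). As a minimal sketch of what a transformer looks like after that change: this is an illustration only, not part of the commit; the function name gen_example_And and the exact flag combination are assumptions, and it presumes the backend context used throughout this file (env_cg, the generated new_rd_ia32_And constructor).

    /* Illustrative sketch, not part of this patch. */
    static ir_node *gen_example_And(ir_node *node)
    {
        ir_node *left  = get_And_left(node);
        ir_node *right = get_And_right(node);

        /* gen_binop() is expected to run match_arguments() with these flags,
         * build the ia32 node from addr->base/addr->index/addr->mem and
         * am.new_op1/am.new_op2, then finish with set_am_attributes() and
         * fix_mem_proj() -- the same sequence spelled out in gen_Cmp() above. */
        return gen_binop(node, left, right, new_rd_ia32_And,
                         match_commutative | match_mode_neutral
                         | match_am | match_immediate);
    }

With the positive flags, a transformer states what the node can consume (a source address mode operand, an immediate, a mode-neutral 32bit operation) instead of what it cannot, which is the pattern the rewritten gen_ia32_l_Add, gen_ia32_l_Sub and gen_be_AddSP/gen_be_SubSP hunks above follow.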