X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=3ad3e9d3af2b5b81312ddcc052e09c9c263169df;hb=0fbcef83aa6060534172bb13e71cdadb04428806;hp=c809001f3821909865b433307c35e93fbe5cdffc;hpb=cbcf8b93de62865b8564e6a47aca7a28790b65ad;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index c809001f3..3ad3e9d3a 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -24,11 +24,10 @@ * @author Christian Wuerdig, Matthias Braun * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include +#include #include "irargs_t.h" #include "irnode_t.h" @@ -47,6 +46,7 @@ #include "irdom.h" #include "archop.h" #include "error.h" +#include "array_t.h" #include "height.h" #include "../benode_t.h" @@ -131,46 +131,49 @@ static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, ir_node *op, ir_node *orig_node); /** Return non-zero is a node represents the 0 constant. */ -static int is_Const_0(ir_node *node) { +static bool is_Const_0(ir_node *node) +{ return is_Const(node) && is_Const_null(node); } /** Return non-zero is a node represents the 1 constant. */ -static int is_Const_1(ir_node *node) { +static bool is_Const_1(ir_node *node) +{ return is_Const(node) && is_Const_one(node); } /** Return non-zero is a node represents the -1 constant. */ -static int is_Const_Minus_1(ir_node *node) { +static bool is_Const_Minus_1(ir_node *node) +{ return is_Const(node) && is_Const_all_one(node); } /** * returns true if constant can be created with a simple float command */ -static int is_simple_x87_Const(ir_node *node) +static bool is_simple_x87_Const(ir_node *node) { tarval *tv = get_Const_tarval(node); if (tarval_is_null(tv) || tarval_is_one(tv)) - return 1; + return true; /* TODO: match all the other float constants */ - return 0; + return false; } /** * returns true if constant can be created with a simple float command */ -static int is_simple_sse_Const(ir_node *node) +static bool is_simple_sse_Const(ir_node *node) { tarval *tv = get_Const_tarval(node); ir_mode *mode = get_tarval_mode(tv); if (mode == mode_F) - return 1; + return true; if (tarval_is_null(tv) || tarval_is_one(tv)) - return 1; + return true; if (mode == mode_D) { unsigned val = get_tarval_sub_bits(tv, 0) | @@ -179,17 +182,18 @@ static int is_simple_sse_Const(ir_node *node) (get_tarval_sub_bits(tv, 3) << 24); if (val == 0) /* lower 32bit are zero, really a 32bit constant */ - return 1; + return true; } /* TODO: match all the other float constants */ - return 0; + return false; } /** * Transforms a Const. */ -static ir_node *gen_Const(ir_node *node) { +static ir_node *gen_Const(ir_node *node) +{ ir_graph *irg = current_ir_graph; ir_node *old_block = get_nodes_block(node); ir_node *block = be_transform_node(old_block); @@ -289,16 +293,9 @@ static ir_node *gen_Const(ir_node *node) { } } end: - /* Const Nodes before the initial IncSP are a bad idea, because - * they could be spilled and we have no SP ready at that point yet. - * So add a dependency to the initial frame pointer calculation to - * avoid that situation. 
- */ - if (get_irg_start_block(irg) == block) { - add_irn_dep(load, get_irg_frame(irg)); - } - SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node)); + + be_dep_on_frame(load); return res; } else { /* non-float mode */ ir_node *cnst; @@ -316,11 +313,7 @@ end: cnst = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, val); SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env_cg, node)); - /* see above */ - if (get_irg_start_block(irg) == block) { - add_irn_dep(cnst, get_irg_frame(irg)); - } - + be_dep_on_frame(cnst); return cnst; } } @@ -328,7 +321,8 @@ end: /** * Transforms a SymConst. */ -static ir_node *gen_SymConst(ir_node *node) { +static ir_node *gen_SymConst(ir_node *node) +{ ir_graph *irg = current_ir_graph; ir_node *old_block = get_nodes_block(node); ir_node *block = be_transform_node(old_block); @@ -349,27 +343,22 @@ static ir_node *gen_SymConst(ir_node *node) { } else { ir_entity *entity; - if(get_SymConst_kind(node) != symconst_addr_ent) { + if (get_SymConst_kind(node) != symconst_addr_ent) { panic("backend only support symconst_addr_ent (at %+F)", node); } entity = get_SymConst_entity(node); cnst = new_rd_ia32_Const(dbgi, irg, block, entity, 0, 0); } - /* Const Nodes before the initial IncSP are a bad idea, because - * they could be spilled and we have no SP ready at that point yet - */ - if (get_irg_start_block(irg) == block) { - add_irn_dep(cnst, get_irg_frame(irg)); - } - SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env_cg, node)); + be_dep_on_frame(cnst); return cnst; } /* Generates an entity for a known FP const (used for FP Neg + Abs) */ -ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct) { +ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct) +{ static const struct { const char *tp_name; const char *ent_name; @@ -431,44 +420,6 @@ ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct) { return ent_cache[kct]; } -static int prevents_AM(ir_node *const block, ir_node *const am_candidate, - ir_node *const other) -{ - if (get_nodes_block(other) != block) - return 0; - - if (is_Sync(other)) { - int i; - - for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) { - ir_node *const pred = get_Sync_pred(other, i); - - if (get_nodes_block(pred) != block) - continue; - - /* Do not block ourselves from getting eaten */ - if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate) - continue; - - if (!heights_reachable_in_block(heights, pred, am_candidate)) - continue; - - return 1; - } - - return 0; - } else { - /* Do not block ourselves from getting eaten */ - if (is_Proj(other) && get_Proj_pred(other) == am_candidate) - return 0; - - if (!heights_reachable_in_block(heights, other, am_candidate)) - return 0; - - return 1; - } -} - /** * return true if the node is a Proj(Load) and could be used in source address * mode for another node. 
Will return only true if the @p other node is not @@ -530,6 +481,7 @@ struct ia32_address_mode_t { ia32_address_t addr; ir_mode *ls_mode; ir_node *mem_proj; + ir_node *am_node; ia32_op_type_t op_type; ir_node *new_op1; ir_node *new_op2; @@ -580,6 +532,7 @@ static void build_address(ia32_address_mode_t *am, ir_node *node) am->pinned = get_irn_pinned(load); am->ls_mode = get_Load_mode(load); am->mem_proj = be_get_Proj_for_pn(load, pn_Load_M); + am->am_node = node; /* construct load address */ ia32_create_address_mode(addr, ptr, /*force=*/0); @@ -594,9 +547,9 @@ static void set_address(ir_node *node, const ia32_address_t *addr) set_ia32_am_scale(node, addr->scale); set_ia32_am_sc(node, addr->symconst_ent); set_ia32_am_offs_int(node, addr->offset); - if(addr->symconst_sign) + if (addr->symconst_sign) set_ia32_am_sc_sign(node); - if(addr->use_frame) + if (addr->use_frame) set_ia32_use_frame(node); set_ia32_frame_ent(node, addr->frame_entity); } @@ -632,13 +585,13 @@ static int is_downconv(const ir_node *node) ir_mode *src_mode; ir_mode *dest_mode; - if(!is_Conv(node)) + if (!is_Conv(node)) return 0; /* we only want to skip the conv when we're the only user * (not optimal but for now...) */ - if(get_irn_n_edges(node) > 1) + if (get_irn_n_edges(node) > 1) return 0; src_mode = get_irn_mode(get_Conv_op(node)); @@ -649,7 +602,8 @@ static int is_downconv(const ir_node *node) } /* Skip all Down-Conv's on a given node and return the resulting node. */ -ir_node *ia32_skip_downconv(ir_node *node) { +ir_node *ia32_skip_downconv(ir_node *node) +{ while (is_downconv(node)) node = get_Conv_op(node); @@ -663,7 +617,7 @@ static ir_node *create_upconv(ir_node *node, ir_node *orig_node) ir_mode *tgt_mode; dbg_info *dbgi; - if(mode_is_signed(mode)) { + if (mode_is_signed(mode)) { tgt_mode = mode_Is; } else { tgt_mode = mode_Iu; @@ -771,20 +725,19 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block, } am->op_type = ia32_AddrModeS; } else { + am->op_type = ia32_Normal; + if (flags & match_try_am) { am->new_op1 = NULL; am->new_op2 = NULL; - am->op_type = ia32_Normal; return; } new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); if (new_op2 == NULL) new_op2 = be_transform_node(op2); - am->op_type = ia32_Normal; - am->ls_mode = get_irn_mode(op2); - if (flags & match_mode_neutral) - am->ls_mode = mode_Iu; + am->ls_mode = + (flags & match_mode_neutral ? 
mode_Iu : get_irn_mode(op2)); } if (addr->base == NULL) addr->base = noreg_gp; @@ -810,7 +763,6 @@ static ir_node *fix_mem_proj(ir_node *node, ia32_address_mode_t *am) mode = get_irn_mode(node); load = get_Proj_pred(am->mem_proj); - mark_irn_visited(load); be_set_transformed_node(load, node); if (mode != mode_T) { @@ -848,8 +800,9 @@ static ir_node *gen_binop(ir_node *node, ir_node *op1, ir_node *op2, am.new_op1, am.new_op2); set_am_attributes(new_node, &am); /* we can't use source address mode anymore when using immediates */ - if (is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) - set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); + if (!(flags & match_am_and_immediates) && + (is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2))) + set_ia32_am_support(new_node, ia32_am_none); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); new_node = fix_mem_proj(new_node, &am); @@ -883,23 +836,24 @@ static ir_node *gen_binop_flags(ir_node *node, construct_binop_flags_func *func, ir_node *src_block = get_nodes_block(node); ir_node *op1 = get_irn_n(node, n_ia32_l_binop_left); ir_node *op2 = get_irn_n(node, n_ia32_l_binop_right); + ir_node *eflags = get_irn_n(node, n_ia32_l_binop_eflags); dbg_info *dbgi; - ir_node *block, *new_node, *eflags, *new_eflags; + ir_node *block, *new_node, *new_eflags; ia32_address_mode_t am; ia32_address_t *addr = &am.addr; - match_arguments(&am, src_block, op1, op2, NULL, flags); + match_arguments(&am, src_block, op1, op2, eflags, flags); dbgi = get_irn_dbg_info(node); block = be_transform_node(src_block); - eflags = get_irn_n(node, n_ia32_l_binop_eflags); new_eflags = be_transform_node(eflags); new_node = func(dbgi, current_ir_graph, block, addr->base, addr->index, addr->mem, am.new_op1, am.new_op2, new_eflags); set_am_attributes(new_node, &am); /* we can't use source address mode anymore when using immediates */ - if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) - set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); + if (!(flags & match_am_and_immediates) && + (is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2))) + set_ia32_am_support(new_node, ia32_am_none); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); new_node = fix_mem_proj(new_node, &am); @@ -929,18 +883,21 @@ static ir_node *get_fpcw(void) * @return The constructed ia32 node. 
*/ static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, - construct_binop_float_func *func, - match_flags_t flags) + construct_binop_float_func *func) { ir_mode *mode = get_irn_mode(node); dbg_info *dbgi; ir_node *block, *new_block, *new_node; ia32_address_mode_t am; ia32_address_t *addr = &am.addr; + ia32_x87_attr_t *attr; + /* All operations are considered commutative, because there are reverse + * variants */ + match_flags_t flags = match_commutative; /* cannot use address mode with long double on x87 */ - if (get_mode_size_bits(mode) > 64) - flags &= ~match_am; + if (get_mode_size_bits(mode) <= 64) + flags |= match_am; block = get_nodes_block(node); match_arguments(&am, block, op1, op2, NULL, flags); @@ -952,6 +909,9 @@ static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, am.new_op1, am.new_op2, get_fpcw()); set_am_attributes(new_node, &am); + attr = get_ia32_x87_attr(new_node); + attr->attr.data.ins_permuted = am.ins_permuted; + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); new_node = fix_mem_proj(new_node, &am); @@ -1084,7 +1044,8 @@ static int am_has_immediates(const ia32_address_t *addr) * * @return the created ia32 Add node */ -static ir_node *gen_Add(ir_node *node) { +static ir_node *gen_Add(ir_node *node) +{ ir_mode *mode = get_irn_mode(node); ir_node *op1 = get_Add_left(node); ir_node *op2 = get_Add_right(node); @@ -1098,8 +1059,7 @@ static ir_node *gen_Add(ir_node *node) { return gen_binop(node, op1, op2, new_rd_ia32_xAdd, match_commutative | match_am); else - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfadd, - match_commutative | match_am); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfadd); } ia32_mark_non_am(node); @@ -1123,23 +1083,23 @@ static ir_node *gen_Add(ir_node *node) { new_block = be_transform_node(block); /* a constant? */ - if(addr.base == NULL && addr.index == NULL) { + if (addr.base == NULL && addr.index == NULL) { ir_graph *irg = current_ir_graph; new_node = new_rd_ia32_Const(dbgi, irg, new_block, addr.symconst_ent, addr.symconst_sign, addr.offset); - add_irn_dep(new_node, get_irg_frame(irg)); + be_dep_on_frame(new_node); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); return new_node; } /* add with immediate? 
*/ - if(addr.index == NULL) { + if (addr.index == NULL) { add_immediate_op = addr.base; - } else if(addr.base == NULL && addr.scale == 0) { + } else if (addr.base == NULL && addr.scale == 0) { add_immediate_op = addr.index; } - if(add_immediate_op != NULL) { - if(!am_has_immediates(&addr)) { + if (add_immediate_op != NULL) { + if (!am_has_immediates(&addr)) { #ifdef DEBUG_libfirm ir_fprintf(stderr, "Optimisation warning Add x,0 (%+F) found\n", node); @@ -1182,7 +1142,8 @@ static ir_node *gen_Add(ir_node *node) { * * @return the created ia32 Mul node */ -static ir_node *gen_Mul(ir_node *node) { +static ir_node *gen_Mul(ir_node *node) +{ ir_node *op1 = get_Mul_left(node); ir_node *op2 = get_Mul_right(node); ir_mode *mode = get_irn_mode(node); @@ -1192,8 +1153,7 @@ static ir_node *gen_Mul(ir_node *node) { return gen_binop(node, op1, op2, new_rd_ia32_xMul, match_commutative | match_am); else - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfmul, - match_commutative | match_am); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfmul); } return gen_binop(node, op1, op2, new_rd_ia32_IMul, match_commutative | match_am | match_mode_neutral | @@ -1209,58 +1169,34 @@ static ir_node *gen_Mul(ir_node *node) { */ static ir_node *gen_Mulh(ir_node *node) { - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *op1 = get_Mulh_left(node); - ir_node *op2 = get_Mulh_right(node); - ir_node *proj_res_high; - ir_node *new_node; - ia32_address_mode_t am; - ia32_address_t *addr = &am.addr; - - assert(!mode_is_float(mode) && "Mulh with float not supported"); - assert(get_mode_size_bits(mode) == 32); - - match_arguments(&am, block, op1, op2, NULL, match_commutative | match_am); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *op1 = get_Mulh_left(node); + ir_node *op2 = get_Mulh_right(node); + ir_mode *mode = get_irn_mode(node); + ir_node *new_node; + ir_node *proj_res_high; if (mode_is_signed(mode)) { - new_node = new_rd_ia32_IMul1OP(dbgi, irg, new_block, addr->base, - addr->index, addr->mem, am.new_op1, - am.new_op2); + new_node = gen_binop(node, op1, op2, new_rd_ia32_IMul1OP, match_commutative | match_am); + proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node, + mode_Iu, pn_ia32_IMul1OP_res_high); } else { - new_node = new_rd_ia32_Mul(dbgi, irg, new_block, addr->base, - addr->index, addr->mem, am.new_op1, - am.new_op2); + new_node = gen_binop(node, op1, op2, new_rd_ia32_Mul, match_commutative | match_am); + proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node, + mode_Iu, pn_ia32_Mul_res_high); } - - set_am_attributes(new_node, &am); - /* we can't use source address mode anymore when using immediates */ - if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) - set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); - SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - - assert(get_irn_mode(new_node) == mode_T); - - fix_mem_proj(new_node, &am); - - assert(pn_ia32_IMul1OP_res_high == pn_ia32_Mul_res_high); - proj_res_high = new_rd_Proj(dbgi, irg, block, new_node, - mode_Iu, pn_ia32_IMul1OP_res_high); - return proj_res_high; } - - /** * Creates an ia32 And. 
* * @return The created ia32 And node */ -static ir_node *gen_And(ir_node *node) { +static ir_node *gen_And(ir_node *node) +{ ir_node *op1 = get_And_left(node); ir_node *op2 = get_And_right(node); assert(! mode_is_float(get_irn_mode(node))); @@ -1276,7 +1212,7 @@ static ir_node *gen_And(ir_node *node) { ir_mode *src_mode; ir_node *res; - if(v == 0xFF) { + if (v == 0xFF) { src_mode = mode_Bu; } else { assert(v == 0xFFFF); @@ -1299,7 +1235,8 @@ static ir_node *gen_And(ir_node *node) { * * @return The created ia32 Or node */ -static ir_node *gen_Or(ir_node *node) { +static ir_node *gen_Or(ir_node *node) +{ ir_node *op1 = get_Or_left(node); ir_node *op2 = get_Or_right(node); @@ -1315,7 +1252,8 @@ static ir_node *gen_Or(ir_node *node) { * * @return The created ia32 Eor node */ -static ir_node *gen_Eor(ir_node *node) { +static ir_node *gen_Eor(ir_node *node) +{ ir_node *op1 = get_Eor_left(node); ir_node *op2 = get_Eor_right(node); @@ -1330,7 +1268,8 @@ static ir_node *gen_Eor(ir_node *node) { * * @return The created ia32 Sub node */ -static ir_node *gen_Sub(ir_node *node) { +static ir_node *gen_Sub(ir_node *node) +{ ir_node *op1 = get_Sub_left(node); ir_node *op2 = get_Sub_right(node); ir_mode *mode = get_irn_mode(node); @@ -1339,8 +1278,7 @@ static ir_node *gen_Sub(ir_node *node) { if (ia32_cg_config.use_sse2) return gen_binop(node, op1, op2, new_rd_ia32_xSub, match_am); else - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfsub, - match_am); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfsub); } if (is_Const(op2)) { @@ -1448,7 +1386,7 @@ static ir_node *create_Div(ir_node *node) if (mode_is_signed(mode)) { ir_node *produceval = new_rd_ia32_ProduceVal(dbgi, irg, new_block); - add_irn_dep(produceval, get_irg_frame(irg)); + be_dep_on_frame(produceval); sign_extension = new_rd_ia32_Cltd(dbgi, irg, new_block, am.new_op1, produceval); @@ -1457,7 +1395,7 @@ static ir_node *create_Div(ir_node *node) am.new_op1, sign_extension); } else { sign_extension = new_rd_ia32_Const(dbgi, irg, new_block, NULL, 0, 0); - add_irn_dep(sign_extension, get_irg_frame(irg)); + be_dep_on_frame(sign_extension); new_node = new_rd_ia32_Div(dbgi, irg, new_block, addr->base, addr->index, new_mem, am.new_op2, @@ -1475,15 +1413,18 @@ static ir_node *create_Div(ir_node *node) } -static ir_node *gen_Mod(ir_node *node) { +static ir_node *gen_Mod(ir_node *node) +{ return create_Div(node); } -static ir_node *gen_Div(ir_node *node) { +static ir_node *gen_Div(ir_node *node) +{ return create_Div(node); } -static ir_node *gen_DivMod(ir_node *node) { +static ir_node *gen_DivMod(ir_node *node) +{ return create_Div(node); } @@ -1502,7 +1443,7 @@ static ir_node *gen_Quot(ir_node *node) if (ia32_cg_config.use_sse2) { return gen_binop(node, op1, op2, new_rd_ia32_xDiv, match_am); } else { - return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfdiv, match_am); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfdiv); } } @@ -1512,7 +1453,8 @@ static ir_node *gen_Quot(ir_node *node) * * @return The created ia32 Shl node */ -static ir_node *gen_Shl(ir_node *node) { +static ir_node *gen_Shl(ir_node *node) +{ ir_node *left = get_Shl_left(node); ir_node *right = get_Shl_right(node); @@ -1525,7 +1467,8 @@ static ir_node *gen_Shl(ir_node *node) { * * @return The created ia32 Shr node */ -static ir_node *gen_Shr(ir_node *node) { +static ir_node *gen_Shr(ir_node *node) +{ ir_node *left = get_Shr_left(node); ir_node *right = get_Shr_right(node); @@ -1539,15 +1482,16 @@ static ir_node *gen_Shr(ir_node *node) { * * @return The 
created ia32 Shrs node */ -static ir_node *gen_Shrs(ir_node *node) { +static ir_node *gen_Shrs(ir_node *node) +{ ir_node *left = get_Shrs_left(node); ir_node *right = get_Shrs_right(node); ir_mode *mode = get_irn_mode(node); - if(is_Const(right) && mode == mode_Is) { + if (is_Const(right) && mode == mode_Is) { tarval *tv = get_Const_tarval(right); long val = get_tarval_long(tv); - if(val == 31) { + if (val == 31) { /* this is a sign extension */ ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); @@ -1555,28 +1499,28 @@ static ir_node *gen_Shrs(ir_node *node) { ir_node *op = left; ir_node *new_op = be_transform_node(op); ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block); - add_irn_dep(pval, get_irg_frame(irg)); + be_dep_on_frame(pval); return new_rd_ia32_Cltd(dbgi, irg, block, new_op, pval); } } /* 8 or 16 bit sign extension? */ - if(is_Const(right) && is_Shl(left) && mode == mode_Is) { + if (is_Const(right) && is_Shl(left) && mode == mode_Is) { ir_node *shl_left = get_Shl_left(left); ir_node *shl_right = get_Shl_right(left); - if(is_Const(shl_right)) { + if (is_Const(shl_right)) { tarval *tv1 = get_Const_tarval(right); tarval *tv2 = get_Const_tarval(shl_right); - if(tv1 == tv2 && tarval_is_long(tv1)) { + if (tv1 == tv2 && tarval_is_long(tv1)) { long val = get_tarval_long(tv1); - if(val == 16 || val == 24) { + if (val == 16 || val == 24) { dbg_info *dbgi = get_irn_dbg_info(node); ir_node *block = get_nodes_block(node); ir_mode *src_mode; ir_node *res; - if(val == 24) { + if (val == 24) { src_mode = mode_Bs; } else { assert(val == 16); @@ -1603,7 +1547,8 @@ static ir_node *gen_Shrs(ir_node *node) { * @param op2 The second operator * @return The created ia32 RotL node */ -static ir_node *gen_Rol(ir_node *node, ir_node *op1, ir_node *op2) { +static ir_node *gen_Rol(ir_node *node, ir_node *op1, ir_node *op2) +{ return gen_shift_binop(node, op1, op2, new_rd_ia32_Rol, match_immediate); } @@ -1618,7 +1563,8 @@ static ir_node *gen_Rol(ir_node *node, ir_node *op1, ir_node *op2) { * @param op2 The second operator * @return The created ia32 RotR node */ -static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2) { +static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2) +{ return gen_shift_binop(node, op1, op2, new_rd_ia32_Ror, match_immediate); } @@ -1629,7 +1575,8 @@ static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2) { * * @return The created ia32 RotL or RotR node */ -static ir_node *gen_Rotl(ir_node *node) { +static ir_node *gen_Rotl(ir_node *node) +{ ir_node *rotate = NULL; ir_node *op1 = get_Rotl_left(node); ir_node *op2 = get_Rotl_right(node); @@ -1719,7 +1666,8 @@ static ir_node *gen_Minus(ir_node *node) * * @return The created ia32 Not node */ -static ir_node *gen_Not(ir_node *node) { +static ir_node *gen_Not(ir_node *node) +{ ir_node *op = get_Not_op(node); assert(get_irn_mode(node) != mode_b); /* should be lowered already */ @@ -1784,7 +1732,7 @@ static ir_node *gen_Abs(ir_node *node) sign_extension = new_rd_ia32_Cltd(dbgi, irg, new_block, new_op, pval); - add_irn_dep(pval, get_irg_frame(irg)); + be_dep_on_frame(pval); SET_IA32_ORIG_NODE(sign_extension,ia32_get_old_node_name(env_cg, node)); xor = new_rd_ia32_Xor(dbgi, irg, new_block, noreg_gp, noreg_gp, @@ -1802,7 +1750,8 @@ static ir_node *gen_Abs(ir_node *node) /** * Create a bt instruction for x & (1 << n) and place it into the block of cmp. 
*/ -static ir_node *gen_bt(ir_node *cmp, ir_node *x, ir_node *n) { +static ir_node *gen_bt(ir_node *cmp, ir_node *x, ir_node *n) +{ dbg_info *dbgi = get_irn_dbg_info(cmp); ir_node *block = get_nodes_block(cmp); ir_node *new_block = be_transform_node(block); @@ -1891,7 +1840,8 @@ static ir_node *get_flags_node(ir_node *node, pn_Cmp *pnc_out) * * @return the created ia32 Load node */ -static ir_node *gen_Load(ir_node *node) { +static ir_node *gen_Load(ir_node *node) +{ ir_node *old_block = get_nodes_block(node); ir_node *block = be_transform_node(old_block); ir_node *ptr = get_Load_ptr(node); @@ -1913,13 +1863,13 @@ static ir_node *gen_Load(ir_node *node) { base = addr.base; index = addr.index; - if(base == NULL) { + if (base == NULL) { base = noreg; } else { base = be_transform_node(base); } - if(index == NULL) { + if (index == NULL) { index = noreg; } else { index = be_transform_node(index); @@ -1939,7 +1889,7 @@ static ir_node *gen_Load(ir_node *node) { assert(mode != mode_b); /* create a conv node with address mode for smaller modes */ - if(get_mode_size_bits(mode) < 32) { + if (get_mode_size_bits(mode) < 32) { new_node = new_rd_ia32_Conv_I2I(dbgi, irg, block, base, index, new_mem, noreg, mode); } else { @@ -1953,19 +1903,13 @@ static ir_node *gen_Load(ir_node *node) { set_ia32_ls_mode(new_node, mode); set_address(new_node, &addr); - if(get_irn_pinned(node) == op_pin_state_floats) { + if (get_irn_pinned(node) == op_pin_state_floats) { add_ia32_flags(new_node, arch_irn_flags_rematerializable); } - /* make sure we are scheduled behind the initial IncSP/Barrier - * to avoid spills being placed before it - */ - if (block == get_irg_start_block(irg)) { - add_irn_dep(new_node, get_irg_frame(irg)); - } - SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + be_dep_on_frame(new_node); return new_node; } @@ -1974,38 +1918,51 @@ static int use_dest_am(ir_node *block, ir_node *node, ir_node *mem, { ir_node *load; - if(!is_Proj(node)) + if (!is_Proj(node)) return 0; /* we only use address mode if we're the only user of the load */ - if(get_irn_n_edges(node) > 1) + if (get_irn_n_edges(node) > 1) return 0; load = get_Proj_pred(node); - if(!is_Load(load)) + if (!is_Load(load)) return 0; - if(get_nodes_block(load) != block) + if (get_nodes_block(load) != block) return 0; - /* Store should be attached to the load */ - if(!is_Proj(mem) || get_Proj_pred(mem) != load) - return 0; /* store should have the same pointer as the load */ - if(get_Load_ptr(load) != ptr) + if (get_Load_ptr(load) != ptr) return 0; /* don't do AM if other node inputs depend on the load (via mem-proj) */ - if(other != NULL && get_nodes_block(other) == block - && heights_reachable_in_block(heights, other, load)) + if (other != NULL && + get_nodes_block(other) == block && + heights_reachable_in_block(heights, other, load)) { return 0; + } - return 1; -} + if (is_Sync(mem)) { + int i; -static void set_transformed_and_mark(ir_node *const old_node, ir_node *const new_node) -{ - mark_irn_visited(old_node); - be_set_transformed_node(old_node, new_node); + for (i = get_Sync_n_preds(mem) - 1; i >= 0; --i) { + ir_node *const pred = get_Sync_pred(mem, i); + + if (is_Proj(pred) && get_Proj_pred(pred) == load) + continue; + + if (get_nodes_block(pred) == block && + heights_reachable_in_block(heights, pred, load)) { + return 0; + } + } + } else { + /* Store should be attached to the load */ + if (!is_Proj(mem) || get_Proj_pred(mem) != load) + return 0; + } + + return 1; } static ir_node *dest_am_binop(ir_node *node, ir_node *op1, 
ir_node *op2, @@ -2019,6 +1976,7 @@ static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); ir_graph *irg = current_ir_graph; dbg_info *dbgi; + ir_node *new_mem; ir_node *new_node; ir_node *new_op; ir_node *mem_proj; @@ -2031,30 +1989,32 @@ static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, assert(flags & match_immediate); /* there is no destam node without... */ commutative = (flags & match_commutative) != 0; - if(use_dest_am(src_block, op1, mem, ptr, op2)) { + if (use_dest_am(src_block, op1, mem, ptr, op2)) { build_address(&am, op1); new_op = create_immediate_or_transform(op2, 0); - } else if(commutative && use_dest_am(src_block, op2, mem, ptr, op1)) { + } else if (commutative && use_dest_am(src_block, op2, mem, ptr, op1)) { build_address(&am, op2); new_op = create_immediate_or_transform(op1, 0); } else { return NULL; } - if(addr->base == NULL) + if (addr->base == NULL) addr->base = noreg_gp; - if(addr->index == NULL) + if (addr->index == NULL) addr->index = noreg_gp; - if(addr->mem == NULL) + if (addr->mem == NULL) addr->mem = new_NoMem(); - dbgi = get_irn_dbg_info(node); - block = be_transform_node(src_block); - if(get_mode_size_bits(mode) == 8) { + dbgi = get_irn_dbg_info(node); + block = be_transform_node(src_block); + new_mem = transform_AM_mem(irg, block, am.am_node, mem, addr->mem); + + if (get_mode_size_bits(mode) == 8) { new_node = func8bit(dbgi, irg, block, addr->base, addr->index, - addr->mem, new_op); + new_mem, new_op); } else { - new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem, + new_node = func(dbgi, irg, block, addr->base, addr->index, new_mem, new_op); } set_address(new_node, addr); @@ -2062,9 +2022,9 @@ static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, set_ia32_ls_mode(new_node, mode); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - set_transformed_and_mark(get_Proj_pred(am.mem_proj), new_node); + be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node); mem_proj = be_transform_node(am.mem_proj); - set_transformed_and_mark(mem_proj ? mem_proj : am.mem_proj, new_node); + be_set_transformed_node(mem_proj ? 
mem_proj : am.mem_proj, new_node); return new_node; } @@ -2073,37 +2033,40 @@ static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, ir_node *ptr, ir_mode *mode, construct_unop_dest_func *func) { - ir_graph *irg = current_ir_graph; - ir_node *src_block = get_nodes_block(node); - ir_node *block; + ir_graph *irg = current_ir_graph; + ir_node *src_block = get_nodes_block(node); + ir_node *block; dbg_info *dbgi; - ir_node *new_node; - ir_node *mem_proj; + ir_node *new_mem; + ir_node *new_node; + ir_node *mem_proj; ia32_address_mode_t am; ia32_address_t *addr = &am.addr; memset(&am, 0, sizeof(am)); - if(!use_dest_am(src_block, op, mem, ptr, NULL)) + if (!use_dest_am(src_block, op, mem, ptr, NULL)) return NULL; build_address(&am, op); dbgi = get_irn_dbg_info(node); block = be_transform_node(src_block); - new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem); + new_mem = transform_AM_mem(irg, block, am.am_node, mem, addr->mem); + new_node = func(dbgi, irg, block, addr->base, addr->index, new_mem); set_address(new_node, addr); set_ia32_op_type(new_node, ia32_AddrModeD); set_ia32_ls_mode(new_node, mode); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - set_transformed_and_mark(get_Proj_pred(am.mem_proj), new_node); + be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node); mem_proj = be_transform_node(am.mem_proj); - set_transformed_and_mark(mem_proj ? mem_proj : am.mem_proj, new_node); + be_set_transformed_node(mem_proj ? mem_proj : am.mem_proj, new_node); return new_node; } -static ir_node *try_create_SetMem(ir_node *node, ir_node *ptr, ir_node *mem) { +static ir_node *try_create_SetMem(ir_node *node, ir_node *ptr, ir_node *mem) +{ ir_mode *mode = get_irn_mode(node); ir_node *mux_true = get_Mux_true(node); ir_node *mux_false = get_Mux_false(node); @@ -2119,12 +2082,12 @@ static ir_node *try_create_SetMem(ir_node *node, ir_node *ptr, ir_node *mem) { pn_Cmp pnc; ia32_address_t addr; - if(get_mode_size_bits(mode) != 8) + if (get_mode_size_bits(mode) != 8) return NULL; - if(is_Const_1(mux_true) && is_Const_0(mux_false)) { + if (is_Const_1(mux_true) && is_Const_0(mux_false)) { negated = 0; - } else if(is_Const_0(mux_true) && is_Const_1(mux_false)) { + } else if (is_Const_0(mux_true) && is_Const_1(mux_false)) { negated = 1; } else { return NULL; @@ -2149,7 +2112,8 @@ static ir_node *try_create_SetMem(ir_node *node, ir_node *ptr, ir_node *mem) { return new_node; } -static ir_node *try_create_dest_am(ir_node *node) { +static ir_node *try_create_dest_am(ir_node *node) +{ ir_node *val = get_Store_value(node); ir_node *mem = get_Store_mem(node); ir_node *ptr = get_Store_ptr(node); @@ -2160,18 +2124,20 @@ static ir_node *try_create_dest_am(ir_node *node) { ir_node *new_node; /* handle only GP modes for now... 
*/ - if(!ia32_mode_needs_gp_reg(mode)) + if (!ia32_mode_needs_gp_reg(mode)) return NULL; - while(1) { + for (;;) { /* store must be the only user of the val node */ - if(get_irn_n_edges(val) > 1) + if (get_irn_n_edges(val) > 1) return NULL; /* skip pointless convs */ - if(is_Conv(val)) { + if (is_Conv(val)) { ir_node *conv_op = get_Conv_op(val); ir_mode *pred_mode = get_irn_mode(conv_op); - if(pred_mode == mode_b || bits <= get_mode_size_bits(pred_mode)) { + if (!ia32_mode_needs_gp_reg(pred_mode)) + break; + if (pred_mode == mode_b || bits <= get_mode_size_bits(pred_mode)) { val = conv_op; continue; } @@ -2180,18 +2146,18 @@ static ir_node *try_create_dest_am(ir_node *node) { } /* value must be in the same block */ - if(get_nodes_block(node) != get_nodes_block(val)) + if (get_nodes_block(node) != get_nodes_block(val)) return NULL; switch (get_irn_opcode(val)) { case iro_Add: op1 = get_Add_left(val); op2 = get_Add_right(val); - if(is_Const_1(op2)) { + if (is_Const_1(op2)) { new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_IncMem); break; - } else if(is_Const_Minus_1(op2)) { + } else if (is_Const_Minus_1(op2)) { new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_DecMem); break; @@ -2204,9 +2170,8 @@ static ir_node *try_create_dest_am(ir_node *node) { case iro_Sub: op1 = get_Sub_left(val); op2 = get_Sub_right(val); - if(is_Const(op2)) { - ir_fprintf(stderr, "Optimisation warning: not-normalize sub ,C" - "found\n"); + if (is_Const(op2)) { + ir_fprintf(stderr, "Optimisation warning: not-normalized sub ,C found\n"); } new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, new_rd_ia32_SubMem, new_rd_ia32_SubMem8Bit, @@ -2283,8 +2248,8 @@ static ir_node *try_create_dest_am(ir_node *node) { return NULL; } - if(new_node != NULL) { - if(get_irn_pinned(new_node) != op_pin_state_pinned && + if (new_node != NULL) { + if (get_irn_pinned(new_node) != op_pin_state_pinned && get_irn_pinned(node) == op_pin_state_pinned) { set_irn_pinned(new_node, op_pin_state_pinned); } @@ -2293,25 +2258,21 @@ static ir_node *try_create_dest_am(ir_node *node) { return new_node; } -static int is_float_to_int32_conv(const ir_node *node) +static int is_float_to_int_conv(const ir_node *node) { ir_mode *mode = get_irn_mode(node); ir_node *conv_op; ir_mode *conv_mode; - if(get_mode_size_bits(mode) != 32 || !ia32_mode_needs_gp_reg(mode)) - return 0; - /* don't report unsigned as conv to 32bit, because we really need to do - * a vfist with 64bit signed in this case */ - if(!mode_is_signed(mode)) + if (mode != mode_Is && mode != mode_Hs) return 0; - if(!is_Conv(node)) + if (!is_Conv(node)) return 0; conv_op = get_Conv_op(node); conv_mode = get_irn_mode(conv_op); - if(!mode_is_float(conv_mode)) + if (!mode_is_float(conv_mode)) return 0; return 1; @@ -2322,78 +2283,52 @@ static int is_float_to_int32_conv(const ir_node *node) * * @return the created ia32 Store node */ -static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) { - ir_mode *mode = get_irn_mode(cns); - int size = get_mode_size_bits(mode); - tarval *tv = get_Const_tarval(cns); - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - ir_node *ptr = get_Store_ptr(node); - ir_node *mem = get_Store_mem(node); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - int ofs = 4; - ir_node *new_node; - ia32_address_t addr; - - unsigned val = get_tarval_sub_bits(tv, 0) | - (get_tarval_sub_bits(tv, 1) << 8) | - (get_tarval_sub_bits(tv, 2) << 
16) | - (get_tarval_sub_bits(tv, 3) << 24); - ir_node *imm = create_Immediate(NULL, 0, val); - - /* construct store address */ - memset(&addr, 0, sizeof(addr)); - ia32_create_address_mode(&addr, ptr, /*force=*/0); - - if (addr.base == NULL) { - addr.base = noreg; - } else { - addr.base = be_transform_node(addr.base); - } - - if (addr.index == NULL) { - addr.index = noreg; - } else { - addr.index = be_transform_node(addr.index); - } - addr.mem = be_transform_node(mem); +static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) +{ + ir_mode *mode = get_irn_mode(cns); + unsigned size = get_mode_size_bytes(mode); + tarval *tv = get_Const_tarval(cns); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *ptr = get_Store_ptr(node); + ir_node *mem = get_Store_mem(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + int ofs = 0; + size_t i = 0; + ir_node *ins[4]; + ia32_address_t addr; - new_node = new_rd_ia32_Store(dbgi, irg, new_block, addr.base, - addr.index, addr.mem, imm); + assert(size % 4 == 0); + assert(size <= 16); - set_irn_pinned(new_node, get_irn_pinned(node)); - set_ia32_op_type(new_node, ia32_AddrModeD); - set_ia32_ls_mode(new_node, mode_Iu); - - set_address(new_node, &addr); + build_address_ptr(&addr, ptr, mem); - /** add more stores if needed */ - while (size > 32) { - unsigned val = get_tarval_sub_bits(tv, ofs) | - (get_tarval_sub_bits(tv, ofs + 1) << 8) | + do { + unsigned val = + get_tarval_sub_bits(tv, ofs) | + (get_tarval_sub_bits(tv, ofs + 1) << 8) | (get_tarval_sub_bits(tv, ofs + 2) << 16) | (get_tarval_sub_bits(tv, ofs + 3) << 24); ir_node *imm = create_Immediate(NULL, 0, val); - addr.offset += 4; - addr.mem = new_node; - - new_node = new_rd_ia32_Store(dbgi, irg, new_block, addr.base, + ir_node *new_node = new_rd_ia32_Store(dbgi, irg, new_block, addr.base, addr.index, addr.mem, imm); set_irn_pinned(new_node, get_irn_pinned(node)); set_ia32_op_type(new_node, ia32_AddrModeD); set_ia32_ls_mode(new_node, mode_Iu); - set_address(new_node, &addr); - size -= 32; - ofs += 4; - } + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return new_node; + ins[i++] = new_node; + + size -= 4; + ofs += 4; + addr.offset += 4; + } while (size != 0); + + return i == 1 ? ins[0] : new_rd_Sync(dbgi, irg, new_block, i, ins); } /** @@ -2482,7 +2417,7 @@ static ir_node *gen_normal_Store(ir_node *node) addr.index, addr.mem, new_val, mode); } store = new_node; - } else if (!ia32_cg_config.use_sse2 && is_float_to_int32_conv(val)) { + } else if (!ia32_cg_config.use_sse2 && is_float_to_int_conv(val)) { val = get_Conv_op(val); /* TODO: is this optimisation still necessary at all (middleend)? 
*/ @@ -2532,7 +2467,7 @@ static ir_node *gen_Store(ir_node *node) ir_mode *mode = get_irn_mode(val); if (mode_is_float(mode) && is_Const(val)) { - int transform = 1; + int transform; /* we are storing a floating point constant */ if (ia32_cg_config.use_sse2) { @@ -2570,16 +2505,16 @@ static ir_node *create_Switch(ir_node *node) foreach_out_edge(node, edge) { ir_node *proj = get_edge_src_irn(edge); long pn = get_Proj_proj(proj); - if(pn == default_pn) + if (pn == default_pn) continue; - if(pn < switch_min) + if (pn < switch_min) switch_min = pn; - if(pn > switch_max) + if (pn > switch_max) switch_max = pn; } - if((unsigned) (switch_max - switch_min) > 256000) { + if ((unsigned) (switch_max - switch_min) > 256000) { panic("Size of switch %+F bigger than 256000", node); } @@ -2603,7 +2538,8 @@ static ir_node *create_Switch(ir_node *node) /** * Transform a Cond node. */ -static ir_node *gen_Cond(ir_node *node) { +static ir_node *gen_Cond(ir_node *node) +{ ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); ir_graph *irg = current_ir_graph; @@ -2651,14 +2587,14 @@ static ir_node *create_Fucom(ir_node *node) ir_node *new_right; ir_node *new_node; - if(ia32_cg_config.use_fucomi) { + if (ia32_cg_config.use_fucomi) { new_right = be_transform_node(right); new_node = new_rd_ia32_vFucomi(dbgi, irg, new_block, new_left, new_right, 0); set_ia32_commutative(new_node); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); } else { - if(ia32_cg_config.use_ftst && is_Const_0(right)) { + if (ia32_cg_config.use_ftst && is_Const_0(right)) { new_node = new_rd_ia32_vFtstFnstsw(dbgi, irg, new_block, new_left, 0); } else { @@ -2706,10 +2642,10 @@ static ir_node *create_Ucomi(ir_node *node) } /** - * helper function: checks wether all Cmp projs are Lg or Eq which is needed + * helper function: checks whether all Cmp projs are Lg or Eq which is needed * to fold an and into a test node */ -static int can_fold_test_and(ir_node *node) +static bool can_fold_test_and(ir_node *node) { const ir_edge_t *edge; @@ -2717,11 +2653,85 @@ static int can_fold_test_and(ir_node *node) foreach_out_edge(node, edge) { ir_node *proj = get_edge_src_irn(edge); pn_Cmp pnc = get_Proj_proj(proj); - if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) - return 0; + if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) + return false; } - return 1; + return true; +} + +/** + * returns true if it is assured, that the upper bits of a node are "clean" + * which means for a 16 or 8 bit value, that the upper bits in the register + * are 0 for unsigned and a copy of the last significant bit for signed + * numbers. 
+ */ +static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode) +{ + assert(ia32_mode_needs_gp_reg(mode)); + if (get_mode_size_bits(mode) >= 32) + return true; + + if (is_Proj(transformed_node)) + return upper_bits_clean(get_Proj_pred(transformed_node), mode); + + if (is_ia32_Conv_I2I(transformed_node) + || is_ia32_Conv_I2I8Bit(transformed_node)) { + ir_mode *smaller_mode = get_ia32_ls_mode(transformed_node); + if (mode_is_signed(smaller_mode) != mode_is_signed(mode)) + return false; + if (get_mode_size_bits(smaller_mode) > get_mode_size_bits(mode)) + return false; + + return true; + } + + if (is_ia32_Shr(transformed_node) && !mode_is_signed(mode)) { + ir_node *right = get_irn_n(transformed_node, n_ia32_Shr_count); + if (is_ia32_Immediate(right) || is_ia32_Const(right)) { + const ia32_immediate_attr_t *attr + = get_ia32_immediate_attr_const(right); + if (attr->symconst == 0 + && (unsigned) attr->offset >= (32 - get_mode_size_bits(mode))) { + return true; + } + } + return upper_bits_clean(get_irn_n(transformed_node, n_ia32_Shr_val), mode); + } + + if (is_ia32_And(transformed_node) && !mode_is_signed(mode)) { + ir_node *right = get_irn_n(transformed_node, n_ia32_And_right); + if (is_ia32_Immediate(right) || is_ia32_Const(right)) { + const ia32_immediate_attr_t *attr + = get_ia32_immediate_attr_const(right); + if (attr->symconst == 0 + && (unsigned) attr->offset + <= (0xffffffff >> (32 - get_mode_size_bits(mode)))) { + return true; + } + } + /* TODO recurse? */ + } + + /* TODO recurse on Or, Xor, ... if appropriate? */ + + if (is_ia32_Immediate(transformed_node) + || is_ia32_Const(transformed_node)) { + const ia32_immediate_attr_t *attr + = get_ia32_immediate_attr_const(transformed_node); + if (mode_is_signed(mode)) { + long shifted = attr->offset >> (get_mode_size_bits(mode) - 1); + if (shifted == 0 || shifted == -1) + return true; + } else { + unsigned long shifted = (unsigned long) attr->offset; + shifted >>= get_mode_size_bits(mode); + if (shifted == 0) + return true; + } + } + + return false; } /** @@ -2741,7 +2751,7 @@ static ir_node *gen_Cmp(ir_node *node) ia32_address_t *addr = &am.addr; int cmp_unsigned; - if(mode_is_float(cmp_mode)) { + if (mode_is_float(cmp_mode)) { if (ia32_cg_config.use_sse2) { return create_Ucomi(node); } else { @@ -2760,22 +2770,35 @@ static ir_node *gen_Cmp(ir_node *node) /* Test(and_left, and_right) */ ir_node *and_left = get_And_left(left); ir_node *and_right = get_And_right(left); - ir_mode *mode = get_irn_mode(and_left); + + /* matze: code here used mode instead of cmd_mode, I think it is always + * the same as cmp_mode, but I leave this here to see if this is really + * true... + */ + assert(get_irn_mode(and_left) == cmp_mode); match_arguments(&am, block, and_left, and_right, NULL, match_commutative | match_am | match_8bit_am | match_16bit_am | match_am_and_immediates | match_immediate | match_8bit | match_16bit); - if (get_mode_size_bits(mode) == 8) { + + /* use 32bit compare mode if possible since the opcode is smaller */ + if (upper_bits_clean(am.new_op1, cmp_mode) && + upper_bits_clean(am.new_op2, cmp_mode)) { + cmp_mode = mode_is_signed(cmp_mode) ? 
mode_Is : mode_Iu; + } + + if (get_mode_size_bits(cmp_mode) == 8) { new_node = new_rd_ia32_Test8Bit(dbgi, irg, new_block, addr->base, - addr->index, addr->mem, am.new_op1, - am.new_op2, am.ins_permuted, - cmp_unsigned); + addr->index, addr->mem, am.new_op1, + am.new_op2, am.ins_permuted, + cmp_unsigned); } else { new_node = new_rd_ia32_Test(dbgi, irg, new_block, addr->base, - addr->index, addr->mem, am.new_op1, - am.new_op2, am.ins_permuted, cmp_unsigned); + addr->index, addr->mem, am.new_op1, + am.new_op2, am.ins_permuted, + cmp_unsigned); } } else { /* Cmp(left, right) */ @@ -2783,6 +2806,12 @@ static ir_node *gen_Cmp(ir_node *node) match_commutative | match_am | match_8bit_am | match_16bit_am | match_am_and_immediates | match_immediate | match_8bit | match_16bit); + /* use 32bit compare mode if possible since the opcode is smaller */ + if (upper_bits_clean(am.new_op1, cmp_mode) && + upper_bits_clean(am.new_op2, cmp_mode)) { + cmp_mode = mode_is_signed(cmp_mode) ? mode_Is : mode_Iu; + } + if (get_mode_size_bits(cmp_mode) == 8) { new_node = new_rd_ia32_Cmp8Bit(dbgi, irg, new_block, addr->base, addr->index, addr->mem, am.new_op1, @@ -2869,7 +2898,8 @@ static ir_node *create_set_32bit(dbg_info *dbgi, ir_node *new_block, /** * Create instruction for an unsigned Difference or Zero. */ -static ir_node *create_Doz(ir_node *psi, ir_node *a, ir_node *b) { +static ir_node *create_Doz(ir_node *psi, ir_node *a, ir_node *b) +{ ir_graph *irg = current_ir_graph; ir_mode *mode = get_irn_mode(psi); ir_node *new_node, *sub, *sbb, *eflags, *block, *noreg, *tmpreg, *nomem; @@ -3003,7 +3033,8 @@ need_cmov: /** * Create a conversion from x87 state register to general purpose. */ -static ir_node *gen_x87_fp_to_gp(ir_node *node) { +static ir_node *gen_x87_fp_to_gp(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *op = get_Conv_op(node); ir_node *new_op = be_transform_node(op); @@ -3022,7 +3053,7 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) { assert(get_mode_size_bits(mode) <= 32); /* exception we can only store signed 32 bit integers, so for unsigned we store a 64bit (signed) integer and load the lower bits */ - if(get_mode_size_bits(mode) == 32 && !mode_is_signed(mode)) { + if (get_mode_size_bits(mode) == 32 && !mode_is_signed(mode)) { set_ia32_ls_mode(fist, mode_Ls); } else { set_ia32_ls_mode(fist, mode_Is); @@ -3036,7 +3067,7 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) { set_ia32_use_frame(load); set_ia32_op_type(load, ia32_AddrModeS); set_ia32_ls_mode(load, mode_Is); - if(get_ia32_ls_mode(fist) == mode_Ls) { + if (get_ia32_ls_mode(fist) == mode_Ls) { ia32_attr_t *attr = get_ia32_attr(load); attr->data.need_64bit_stackent = 1; } else { @@ -3081,7 +3112,8 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) /** * Create a conversion from general purpose to x87 register */ -static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { +static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) +{ ir_node *src_block = get_nodes_block(node); ir_node *block = be_transform_node(src_block); ir_graph *irg = current_ir_graph; @@ -3097,12 +3129,12 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { ir_node *new_node; int src_bits; - /* fild can use source AM if the operand is a signed 32bit integer */ - if (src_mode == mode_Is) { + /* fild can use source AM if the operand is a signed 16bit or 32bit integer */ + if (src_mode == mode_Is || src_mode == mode_Hs) { ia32_address_mode_t am; match_arguments(&am, src_block, 
NULL, op, NULL, - match_am | match_try_am); + match_am | match_try_am | match_16bit | match_16bit_am); if (am.op_type == ia32_AddrModeS) { ia32_address_t *addr = &am.addr; @@ -3119,7 +3151,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { return new_node; } } - if(new_op == NULL) { + if (new_op == NULL) { new_op = be_transform_node(op); } @@ -3152,7 +3184,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { set_ia32_ls_mode(store, mode_Iu); /* exception for 32bit unsigned, do a 64bit spill+load */ - if(!mode_is_signed(mode)) { + if (!mode_is_signed(mode)) { ir_node *in[2]; /* store a zero */ ir_node *zero_const = create_Immediate(NULL, 0, 0); @@ -3214,7 +3246,7 @@ static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, } #ifdef DEBUG_libfirm - if(is_Const(op)) { + if (is_Const(op)) { ir_fprintf(stderr, "Optimisation warning: conv after constant %+F\n", op); } @@ -3223,6 +3255,17 @@ static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, match_arguments(&am, block, NULL, op, NULL, match_8bit | match_16bit | match_am | match_8bit_am | match_16bit_am); + + if (upper_bits_clean(am.new_op2, smaller_mode)) { + /* unnecessary conv. in theory it shouldn't have been AM */ + assert(is_ia32_NoReg_GP(addr->base)); + assert(is_ia32_NoReg_GP(addr->index)); + assert(is_NoMem(addr->mem)); + assert(am.addr.offset == 0); + assert(am.addr.symconst_ent == NULL); + return am.new_op2; + } + if (smaller_bits == 8) { new_node = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, addr->base, addr->index, addr->mem, am.new_op2, @@ -3246,7 +3289,8 @@ static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, * * @return The created ia32 Conv node */ -static ir_node *gen_Conv(ir_node *node) { +static ir_node *gen_Conv(ir_node *node) +{ ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); ir_node *op = get_Conv_op(node); @@ -3285,7 +3329,7 @@ static ir_node *gen_Conv(ir_node *node) { new_op = be_transform_node(op); /* we convert from float ... */ if (mode_is_float(tgt_mode)) { - if(src_mode == mode_E && tgt_mode == mode_D + if (src_mode == mode_E && tgt_mode == mode_D && !get_Conv_strict(node)) { DB((dbg, LEVEL_1, "killed Conv(mode, mode) ...")); return new_op; @@ -3298,7 +3342,7 @@ static ir_node *gen_Conv(ir_node *node) { nomem, new_op); set_ia32_ls_mode(res, tgt_mode); } else { - if(get_Conv_strict(node)) { + if (get_Conv_strict(node)) { res = gen_x87_strict_conv(tgt_mode, new_op); SET_IA32_ORIG_NODE(get_Proj_pred(res), ia32_get_old_node_name(env_cg, node)); return res; @@ -3329,7 +3373,7 @@ static ir_node *gen_Conv(ir_node *node) { set_ia32_ls_mode(res, tgt_mode); } else { res = gen_x87_gp_to_fp(node, src_mode); - if(get_Conv_strict(node)) { + if (get_Conv_strict(node)) { /* The strict-Conv is only necessary, if the int mode has more bits * than the float mantissa */ size_t int_mantissa = get_mode_size_bits(src_mode) - (mode_is_signed(src_mode) ? 
1 : 0); @@ -3338,7 +3382,8 @@ static ir_node *gen_Conv(ir_node *node) { switch (get_mode_size_bits(tgt_mode)) { case 32: float_mantissa = 23 + 1; break; // + 1 for implicit 1 case 64: float_mantissa = 52 + 1; break; - case 80: float_mantissa = 64 + 1; break; + case 80: + case 96: float_mantissa = 64; break; default: float_mantissa = 0; break; } if (float_mantissa < int_mantissa) { @@ -3348,7 +3393,7 @@ static ir_node *gen_Conv(ir_node *node) { } return res; } - } else if(tgt_mode == mode_b) { + } else if (tgt_mode == mode_b) { /* mode_b lowering already took care that we only have 0/1 values */ DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", src_mode, tgt_mode)); @@ -3382,7 +3427,8 @@ static ir_node *create_immediate_or_transform(ir_node *node, /** * Transforms a FrameAddr into an ia32 Add. */ -static ir_node *gen_be_FrameAddr(ir_node *node) { +static ir_node *gen_be_FrameAddr(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *op = be_get_FrameAddr_frame(node); ir_node *new_op = be_transform_node(op); @@ -3392,7 +3438,7 @@ static ir_node *gen_be_FrameAddr(ir_node *node) { ir_node *new_node; new_node = new_rd_ia32_Lea(dbgi, irg, block, new_op, noreg); - set_ia32_frame_ent(new_node, arch_get_frame_entity(env_cg->arch_env, node)); + set_ia32_frame_ent(new_node, arch_get_frame_entity(node)); set_ia32_use_frame(new_node); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); @@ -3403,7 +3449,8 @@ static ir_node *gen_be_FrameAddr(ir_node *node) { /** * In case SSE is used we need to copy the result from XMM0 to FPU TOS before return. */ -static ir_node *gen_be_Return(ir_node *node) { +static ir_node *gen_be_Return(ir_node *node) +{ ir_graph *irg = current_ir_graph; ir_node *ret_val = get_irn_n(node, be_pos_Return_val); ir_node *ret_mem = get_irn_n(node, be_pos_Return_mem); @@ -3496,7 +3543,6 @@ static ir_node *gen_be_Return(ir_node *node) { copy_node_attr(barrier, new_barrier); be_duplicate_deps(barrier, new_barrier); be_set_transformed_node(barrier, new_barrier); - mark_irn_visited(barrier); /* transform normally */ return be_duplicate_node(node); @@ -3510,7 +3556,8 @@ static ir_node *gen_be_AddSP(ir_node *node) ir_node *sz = get_irn_n(node, be_pos_AddSP_size); ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp); - return gen_binop(node, sp, sz, new_rd_ia32_SubSP, match_am); + return gen_binop(node, sp, sz, new_rd_ia32_SubSP, + match_am | match_immediate); } /** @@ -3521,25 +3568,27 @@ static ir_node *gen_be_SubSP(ir_node *node) ir_node *sz = get_irn_n(node, be_pos_SubSP_size); ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp); - return gen_binop(node, sp, sz, new_rd_ia32_AddSP, match_am); + return gen_binop(node, sp, sz, new_rd_ia32_AddSP, + match_am | match_immediate); } /** * Change some phi modes */ -static ir_node *gen_Phi(ir_node *node) { +static ir_node *gen_Phi(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_irn_mode(node); ir_node *phi; - if(ia32_mode_needs_gp_reg(mode)) { + if (ia32_mode_needs_gp_reg(mode)) { /* we shouldn't have any 64bit stuff around anymore */ assert(get_mode_size_bits(mode) <= 32); /* all integer operations are on 32bit registers now */ mode = mode_Iu; - } else if(mode_is_float(mode)) { + } else if (mode_is_float(mode)) { if (ia32_cg_config.use_sse2) { mode = mode_xmm; } else { @@ -3554,7 +3603,6 @@ static ir_node *gen_Phi(ir_node *node) { copy_node_attr(node, phi); 
be_duplicate_deps(node, phi); - be_set_transformed_node(node, phi); be_enqueue_preds(node); return phi; @@ -3625,81 +3673,6 @@ static ir_node *gen_Bound(ir_node *node) } -typedef ir_node *construct_load_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \ - ir_node *mem); - -typedef ir_node *construct_store_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \ - ir_node *val, ir_node *mem); - -/** - * Transforms a lowered Load into a "real" one. - */ -static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func) -{ - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *ptr = get_irn_n(node, 0); - ir_node *new_ptr = be_transform_node(ptr); - ir_node *mem = get_irn_n(node, 1); - ir_node *new_mem = be_transform_node(mem); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_ia32_ls_mode(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *new_op; - - new_op = func(dbgi, irg, block, new_ptr, noreg, new_mem); - - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_offs_int(new_op, get_ia32_am_offs_int(node)); - set_ia32_am_scale(new_op, get_ia32_am_scale(node)); - set_ia32_am_sc(new_op, get_ia32_am_sc(node)); - if (is_ia32_am_sc_sign(node)) - set_ia32_am_sc_sign(new_op); - set_ia32_ls_mode(new_op, mode); - if (is_ia32_use_frame(node)) { - set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); - set_ia32_use_frame(new_op); - } - - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - - return new_op; -} - -/** - * Transforms a lowered Store into a "real" one. - */ -static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func) -{ - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *ptr = get_irn_n(node, 0); - ir_node *new_ptr = be_transform_node(ptr); - ir_node *val = get_irn_n(node, 1); - ir_node *new_val = be_transform_node(val); - ir_node *mem = get_irn_n(node, 2); - ir_node *new_mem = be_transform_node(mem); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_mode *mode = get_ia32_ls_mode(node); - ir_node *new_op; - long am_offs; - - new_op = func(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); - - am_offs = get_ia32_am_offs_int(node); - add_ia32_am_offs_int(new_op, am_offs); - - set_ia32_op_type(new_op, ia32_AddrModeD); - set_ia32_ls_mode(new_op, mode); - set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); - set_ia32_use_frame(new_op); - - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - - return new_op; -} - static ir_node *gen_ia32_l_ShlDep(ir_node *node) { ir_node *left = get_irn_n(node, n_ia32_l_ShlDep_val); @@ -3725,14 +3698,15 @@ static ir_node *gen_ia32_l_SarDep(ir_node *node) match_immediate); } -static ir_node *gen_ia32_l_Add(ir_node *node) { +static ir_node *gen_ia32_l_Add(ir_node *node) +{ ir_node *left = get_irn_n(node, n_ia32_l_Add_left); ir_node *right = get_irn_n(node, n_ia32_l_Add_right); ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Add, match_commutative | match_am | match_immediate | match_mode_neutral); - if(is_Proj(lowered)) { + if (is_Proj(lowered)) { lowered = get_Proj_pred(lowered); } else { assert(is_ia32_Add(lowered)); @@ -3749,77 +3723,13 @@ static ir_node *gen_ia32_l_Adc(ir_node *node) match_mode_neutral); } -/** - * Transforms an ia32_l_vfild into a "real" ia32_vfild node - * - * @param node The node to transform - * @return the created ia32 vfild node - 
*/ -static ir_node *gen_ia32_l_vfild(ir_node *node) { - return gen_lowered_Load(node, new_rd_ia32_vfild); -} - -/** - * Transforms an ia32_l_Load into a "real" ia32_Load node - * - * @param node The node to transform - * @return the created ia32 Load node - */ -static ir_node *gen_ia32_l_Load(ir_node *node) { - return gen_lowered_Load(node, new_rd_ia32_Load); -} - -/** - * Transforms an ia32_l_Store into a "real" ia32_Store node - * - * @param node The node to transform - * @return the created ia32 Store node - */ -static ir_node *gen_ia32_l_Store(ir_node *node) { - return gen_lowered_Store(node, new_rd_ia32_Store); -} - -/** - * Transforms a l_vfist into a "real" vfist node. - * - * @param node The node to transform - * @return the created ia32 vfist node - */ -static ir_node *gen_ia32_l_vfist(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *ptr = get_irn_n(node, 0); - ir_node *new_ptr = be_transform_node(ptr); - ir_node *val = get_irn_n(node, 1); - ir_node *new_val = be_transform_node(val); - ir_node *mem = get_irn_n(node, 2); - ir_node *new_mem = be_transform_node(mem); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_mode *mode = get_ia32_ls_mode(node); - ir_node *memres, *fist; - long am_offs; - - memres = gen_vfist(dbgi, irg, block, new_ptr, noreg, new_mem, new_val, &fist); - am_offs = get_ia32_am_offs_int(node); - add_ia32_am_offs_int(fist, am_offs); - - set_ia32_op_type(fist, ia32_AddrModeD); - set_ia32_ls_mode(fist, mode); - set_ia32_frame_ent(fist, get_ia32_frame_ent(node)); - set_ia32_use_frame(fist); - - SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(env_cg, node)); - - return memres; -} - /** * Transforms a l_MulS into a "real" MulS node. 
* * @return the created ia32 Mul node */ -static ir_node *gen_ia32_l_Mul(ir_node *node) { +static ir_node *gen_ia32_l_Mul(ir_node *node) +{ ir_node *left = get_binop_left(node); ir_node *right = get_binop_right(node); @@ -3832,7 +3742,8 @@ static ir_node *gen_ia32_l_Mul(ir_node *node) { * * @return the created ia32 IMul1OP node */ -static ir_node *gen_ia32_l_IMul(ir_node *node) { +static ir_node *gen_ia32_l_IMul(ir_node *node) +{ ir_node *left = get_binop_left(node); ir_node *right = get_binop_right(node); @@ -3840,13 +3751,14 @@ static ir_node *gen_ia32_l_IMul(ir_node *node) { match_commutative | match_am | match_mode_neutral); } -static ir_node *gen_ia32_l_Sub(ir_node *node) { +static ir_node *gen_ia32_l_Sub(ir_node *node) +{ ir_node *left = get_irn_n(node, n_ia32_l_Sub_minuend); ir_node *right = get_irn_n(node, n_ia32_l_Sub_subtrahend); ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Sub, match_am | match_immediate | match_mode_neutral); - if(is_Proj(lowered)) { + if (is_Proj(lowered)) { lowered = get_Proj_pred(lowered); } else { assert(is_ia32_Sub(lowered)); @@ -3856,7 +3768,8 @@ static ir_node *gen_ia32_l_Sub(ir_node *node) { return lowered; } -static ir_node *gen_ia32_l_Sbb(ir_node *node) { +static ir_node *gen_ia32_l_Sbb(ir_node *node) +{ return gen_binop_flags(node, new_rd_ia32_Sbb, match_am | match_immediate | match_mode_neutral); } @@ -3882,7 +3795,9 @@ static ir_node *gen_lowered_64bit_shifts(ir_node *node, ir_node *high, /* the shift amount can be any mode that is bigger than 5 bits, since all * other bits are ignored anyway */ - while (is_Conv(count) && get_irn_n_edges(count) == 1) { + while (is_Conv(count) && + get_irn_n_edges(count) == 1 && + mode_is_int(get_irn_mode(count))) { assert(get_mode_size_bits(get_irn_mode(count)) >= 5); count = get_Conv_op(count); } @@ -3916,7 +3831,8 @@ static ir_node *gen_ia32_l_ShrD(ir_node *node) return gen_lowered_64bit_shifts(node, high, low, count); } -static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) { +static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) +{ ir_node *src_block = get_nodes_block(node); ir_node *block = be_transform_node(src_block); ir_graph *irg = current_ir_graph; @@ -3934,7 +3850,7 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) { ir_node *store_low; ir_node *store_high; - if(!mode_is_signed(get_irn_mode(val_high))) { + if (!mode_is_signed(get_irn_mode(val_high))) { panic("unsigned long long -> float not supported yet (%+F)", node); } @@ -3970,7 +3886,8 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) { return new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); } -static ir_node *gen_ia32_l_FloattoLL(ir_node *node) { +static ir_node *gen_ia32_l_FloattoLL(ir_node *node) +{ ir_node *src_block = get_nodes_block(node); ir_node *block = be_transform_node(src_block); ir_graph *irg = current_ir_graph; @@ -3994,12 +3911,14 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node) { /** * the BAD transformer. */ -static ir_node *bad_transform(ir_node *node) { +static ir_node *bad_transform(ir_node *node) +{ panic("No transform function for %+F available.", node); return NULL; } -static ir_node *gen_Proj_l_FloattoLL(ir_node *node) { +static ir_node *gen_Proj_l_FloattoLL(ir_node *node) +{ ir_graph *irg = current_ir_graph; ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); @@ -4036,7 +3955,8 @@ static ir_node *gen_Proj_l_FloattoLL(ir_node *node) { /** * Transform the Projs of an AddSP. 
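/* Plain-C illustration (an assumption for exposition, not patch content) of
 * the operation gen_lowered_64bit_shifts() above maps onto an ia32 ShlD: the
 * high word is shifted left while the vacated bits are filled from the low
 * word. Only the low five bits of the count matter, which is why skipping
 * Convs on integer modes of at least five bits is safe. */
static unsigned shld32(unsigned high, unsigned low, unsigned count)
{
	count &= 31;                 /* the hardware ignores the upper count bits */
	if (count == 0)
		return high;             /* also avoids the undefined low >> 32 */
	return (high << count) | (low >> (32 - count));
}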
*/ -static ir_node *gen_Proj_be_AddSP(ir_node *node) { +static ir_node *gen_Proj_be_AddSP(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); @@ -4047,23 +3967,23 @@ static ir_node *gen_Proj_be_AddSP(ir_node *node) { if (proj == pn_be_AddSP_sp) { ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_SubSP_stack); - arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_ESP]); + arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]); return res; - } else if(proj == pn_be_AddSP_res) { + } else if (proj == pn_be_AddSP_res) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_SubSP_addr); } else if (proj == pn_be_AddSP_M) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_SubSP_M); } - assert(0); - return new_rd_Unknown(irg, get_irn_mode(node)); + panic("No idea how to transform proj->AddSP"); } /** * Transform the Projs of a SubSP. */ -static ir_node *gen_Proj_be_SubSP(ir_node *node) { +static ir_node *gen_Proj_be_SubSP(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); @@ -4074,20 +3994,20 @@ static ir_node *gen_Proj_be_SubSP(ir_node *node) { if (proj == pn_be_SubSP_sp) { ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_AddSP_stack); - arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_ESP]); + arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]); return res; } else if (proj == pn_be_SubSP_M) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_AddSP_M); } - assert(0); - return new_rd_Unknown(irg, get_irn_mode(node)); + panic("No idea how to transform proj->SubSP"); } /** * Transform and renumber the Projs from a Load. */ -static ir_node *gen_Proj_Load(ir_node *node) { +static ir_node *gen_Proj_Load(ir_node *node) +{ ir_node *new_pred; ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); @@ -4179,20 +4099,19 @@ static ir_node *gen_Proj_Load(ir_node *node) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, 1); } - assert(0); - return new_rd_Unknown(irg, get_irn_mode(node)); + panic("No idea how to transform proj"); } /** * Transform and renumber the Projs from a DivMod like instruction. */ -static ir_node *gen_Proj_DivMod(ir_node *node) { +static ir_node *gen_Proj_DivMod(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); long proj = get_Proj_proj(node); assert(is_ia32_Div(new_pred) || is_ia32_IDiv(new_pred)); @@ -4247,23 +4166,22 @@ static ir_node *gen_Proj_DivMod(ir_node *node) { break; } - assert(0); - return new_rd_Unknown(irg, mode); + panic("No idea how to transform proj->DivMod"); } /** * Transform and renumber the Projs from a CopyB. 
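/* Plain-C sketch (exposition only, not part of the patch) of why Div and Mod
 * Projs can both be renumbered onto the same ia32 Div/IDiv node above: one
 * hardware divide yields quotient and remainder at once, so the Projs merely
 * select different results of the same new_pred. */
static void divmod32(unsigned a, unsigned b, unsigned *quot, unsigned *rem)
{
	/* b must be non-zero, as for the real instruction */
	*quot = a / b;   /* the "div" result of the node */
	*rem  = a % b;   /* the "mod" result of the node */
}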
*/ -static ir_node *gen_Proj_CopyB(ir_node *node) { +static ir_node *gen_Proj_CopyB(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); long proj = get_Proj_proj(node); - switch(proj) { + switch (proj) { case pn_CopyB_M_regular: if (is_ia32_CopyB_i(new_pred)) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_i_M); @@ -4275,23 +4193,22 @@ static ir_node *gen_Proj_CopyB(ir_node *node) { break; } - assert(0); - return new_rd_Unknown(irg, mode); + panic("No idea how to transform proj->CopyB"); } /** * Transform and renumber the Projs from a Quot. */ -static ir_node *gen_Proj_Quot(ir_node *node) { +static ir_node *gen_Proj_Quot(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); long proj = get_Proj_proj(node); - switch(proj) { + switch (proj) { case pn_Quot_M: if (is_ia32_xDiv(new_pred)) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xDiv_M); @@ -4312,18 +4229,33 @@ static ir_node *gen_Proj_Quot(ir_node *node) { break; } - assert(0); - return new_rd_Unknown(irg, mode); + panic("No idea how to transform proj->Quot"); } -static ir_node *gen_be_Call(ir_node *node) { - ir_node *res = be_duplicate_node(node); - ir_type *call_tp; - - be_node_add_flags(res, -1, arch_irn_flags_modify_flags); +static ir_node *gen_be_Call(ir_node *node) +{ + dbg_info *const dbgi = get_irn_dbg_info(node); + ir_graph *const irg = current_ir_graph; + ir_node *const src_block = get_nodes_block(node); + ir_node *const block = be_transform_node(src_block); + ir_node *const src_mem = get_irn_n(node, be_pos_Call_mem); + ir_node *const src_sp = get_irn_n(node, be_pos_Call_sp); + ir_node *const sp = be_transform_node(src_sp); + ir_node *const src_ptr = get_irn_n(node, be_pos_Call_ptr); + ir_node *const noreg = ia32_new_NoReg_gp(env_cg); + ia32_address_mode_t am; + ia32_address_t *const addr = &am.addr; + ir_node * mem; + ir_node * call; + int i; + ir_node * fpcw; + ir_node * eax = noreg; + ir_node * ecx = noreg; + ir_node * edx = noreg; + unsigned const pop = be_Call_get_pop(node); + ir_type *const call_tp = be_Call_get_type(node); /* Run the x87 simulator if the call returns a float value */ - call_tp = be_Call_get_type(node); if (get_method_n_ress(call_tp) > 0) { ir_type *const res_type = get_method_res_type(call_tp, 0); ir_mode *const res_mode = get_type_mode(res_type); @@ -4333,10 +4265,44 @@ static ir_node *gen_be_Call(ir_node *node) { } } - return res; + /* We do not want be_Call direct calls */ + assert(be_Call_get_entity(node) == NULL); + + match_arguments(&am, src_block, NULL, src_ptr, src_mem, + match_am | match_immediate); + + i = get_irn_arity(node) - 1; + fpcw = be_transform_node(get_irn_n(node, i--)); + for (; i >= be_pos_Call_first_arg; --i) { + arch_register_req_t const *const req = arch_get_register_req(node, i); + ir_node *const reg_parm = be_transform_node(get_irn_n(node, i)); + + assert(req->type == arch_register_req_type_limited); + assert(req->cls == &ia32_reg_classes[CLASS_ia32_gp]); + + switch (*req->limited) { + case 1 << REG_EAX: assert(eax == noreg); eax = reg_parm; break; + case 1 << REG_ECX: assert(ecx == noreg); ecx = reg_parm; 
break; + case 1 << REG_EDX: assert(edx == noreg); edx = reg_parm; break; + default: panic("Invalid GP register for register parameter"); + } + } + + mem = transform_AM_mem(irg, block, src_ptr, src_mem, addr->mem); + call = new_rd_ia32_Call(dbgi, irg, block, addr->base, addr->index, mem, + am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp); + set_am_attributes(call, &am); + call = fix_mem_proj(call, &am); + + if (get_irn_pinned(node) == op_pin_state_pinned) + set_irn_pinned(call, op_pin_state_pinned); + + SET_IA32_ORIG_NODE(call, ia32_get_old_node_name(env_cg, node)); + return call; } -static ir_node *gen_be_IncSP(ir_node *node) { +static ir_node *gen_be_IncSP(ir_node *node) +{ ir_node *res = be_duplicate_node(node); be_node_add_flags(res, -1, arch_irn_flags_modify_flags); @@ -4346,7 +4312,8 @@ static ir_node *gen_be_IncSP(ir_node *node) { /** * Transform the Projs from a be_Call. */ -static ir_node *gen_Proj_be_Call(ir_node *node) { +static ir_node *gen_Proj_be_Call(ir_node *node) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *call = get_Proj_pred(node); ir_node *new_call = be_transform_node(call); @@ -4358,6 +4325,7 @@ static ir_node *gen_Proj_be_Call(ir_node *node) { ir_mode *mode = get_irn_mode(node); ir_node *sse_load; const arch_register_class_t *cls; + ir_node *res; /* The following is kinda tricky: If we're using SSE, then we have to * move the result value of the call in floating point registers to an @@ -4376,9 +4344,9 @@ static ir_node *gen_Proj_be_Call(ir_node *node) { call_res_pred = get_Proj_pred(call_res_new); } - if (call_res_pred == NULL || be_is_Call(call_res_pred)) { + if (call_res_pred == NULL || is_ia32_Call(call_res_pred)) { return new_rd_Proj(dbgi, irg, block, new_call, mode_M, - pn_be_Call_M_regular); + n_ia32_Call_mem); } else { assert(is_ia32_xLoad(call_res_pred)); return new_rd_Proj(dbgi, irg, block, call_res_pred, mode_M, @@ -4421,11 +4389,51 @@ static ir_node *gen_Proj_be_Call(ir_node *node) { /* transform call modes */ if (mode_is_data(mode)) { - cls = arch_get_irn_reg_class(env_cg->arch_env, node, -1); + cls = arch_get_irn_reg_class(node, -1); mode = cls->mode; } - return new_rd_Proj(dbgi, irg, block, new_call, mode, proj); + /* Map from be_Call to ia32_Call proj number */ + if (proj == pn_be_Call_sp) { + proj = pn_ia32_Call_stack; + } else if (proj == pn_be_Call_M_regular) { + proj = pn_ia32_Call_M; + } else { + arch_register_req_t const *const req = arch_get_register_req(node, BE_OUT_POS(proj)); + int const n_outs = get_ia32_n_res(new_call); + int i; + + assert(proj >= pn_be_Call_first_res); + assert(req->type == arch_register_req_type_limited); + + for (i = 0; i < n_outs; ++i) { + arch_register_req_t const *const new_req = get_ia32_out_req(new_call, i); + + if (new_req->type != arch_register_req_type_limited || + new_req->cls != req->cls || + *new_req->limited != *req->limited) + continue; + + proj = i; + break; + } + assert(i < n_outs); + } + + res = new_rd_Proj(dbgi, irg, block, new_call, mode, proj); + + /* TODO arch_set_irn_register() only operates on Projs, need variant with index */ + switch (proj) { + case pn_ia32_Call_stack: + arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]); + break; + + case pn_ia32_Call_fpcw: + arch_set_irn_register(res, &ia32_fp_cw_regs[REG_FPCW]); + break; + } + + return res; } /** @@ -4483,7 +4491,8 @@ static ir_node *gen_Proj_ASM(ir_node *node) /** * Transform and potentially renumber Proj nodes. 
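/* Minimal sketch (the helper is hypothetical, not part of the patch) of the
 * bitmask convention both gen_be_Call and gen_Proj_be_Call rely on above: a
 * "limited" register requirement keeps one bit per admissible register, so a
 * requirement pinned to exactly one register compares equal to 1 << REG_X,
 * and two single-register requirements match when their masks are equal. */
static int limited_to_single_reg(unsigned limited, unsigned reg_index)
{
	return limited == (1u << reg_index);
}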
*/ -static ir_node *gen_Proj(ir_node *node) { +static ir_node *gen_Proj(ir_node *node) +{ ir_node *pred = get_Proj_pred(node); long proj; @@ -4493,8 +4502,7 @@ static ir_node *gen_Proj(ir_node *node) { if (proj == pn_Store_M) { return be_transform_node(pred); } else { - assert(0); - return new_r_Bad(current_ir_graph); + panic("No idea how to transform proj->Store"); } case iro_Load: return gen_Proj_Load(node); @@ -4520,18 +4528,19 @@ static ir_node *gen_Proj(ir_node *node) { return gen_Proj_Bound(node); case iro_Start: proj = get_Proj_proj(node); - if (proj == pn_Start_X_initial_exec) { - ir_node *block = get_nodes_block(pred); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *jump; - - /* we exchange the ProjX with a jump */ - block = be_transform_node(block); - jump = new_rd_Jmp(dbgi, current_ir_graph, block); - return jump; - } - if (node == be_get_old_anchor(anchor_tls)) { - return gen_Proj_tls(node); + switch (proj) { + case pn_Start_X_initial_exec: { + ir_node *block = get_nodes_block(pred); + ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + /* we exchange the ProjX with a jump */ + ir_node *jump = new_rd_Jmp(dbgi, current_ir_graph, new_block); + + return jump; + } + + case pn_Start_P_tls: + return gen_Proj_tls(node); } break; @@ -4620,10 +4629,6 @@ static void register_transformers(void) GEN(ia32_l_ShrD); GEN(ia32_l_Sub); GEN(ia32_l_Sbb); - GEN(ia32_l_vfild); - GEN(ia32_l_Load); - GEN(ia32_l_vfist); - GEN(ia32_l_Store); GEN(ia32_l_LLtoFloat); GEN(ia32_l_FloattoLL); @@ -4666,8 +4671,9 @@ static void register_transformers(void) /** * Pre-transform all unknown and noreg nodes. */ -static void ia32_pretransform_node(void *arch_cg) { - ia32_code_gen_t *cg = arch_cg; +static void ia32_pretransform_node(void) +{ + ia32_code_gen_t *cg = env_cg; cg->unknown_gp = be_pre_transform_node(cg->unknown_gp); cg->unknown_vfp = be_pre_transform_node(cg->unknown_vfp); @@ -4690,25 +4696,30 @@ static void add_missing_keep_walker(ir_node *node, void *data) ir_mode *mode = get_irn_mode(node); ir_node *last_keep; (void) data; - if(mode != mode_T) + if (mode != mode_T) return; - if(!is_ia32_irn(node)) + if (!is_ia32_irn(node)) return; n_outs = get_ia32_n_res(node); - if(n_outs <= 0) + if (n_outs <= 0) return; - if(is_ia32_SwitchJmp(node)) + if (is_ia32_SwitchJmp(node)) return; assert(n_outs < (int) sizeof(unsigned) * 8); foreach_out_edge(node, edge) { ir_node *proj = get_edge_src_irn(edge); - int pn = get_Proj_proj(proj); + int pn; + + /* The node could be kept */ + if (is_End(proj)) + continue; if (get_irn_mode(proj) == mode_M) continue; + pn = get_Proj_proj(proj); assert(pn < n_outs); found_projs |= 1 << pn; } @@ -4716,33 +4727,33 @@ static void add_missing_keep_walker(ir_node *node, void *data) /* are keeps missing? 
*/ last_keep = NULL; - for(i = 0; i < n_outs; ++i) { + for (i = 0; i < n_outs; ++i) { ir_node *block; ir_node *in[1]; const arch_register_req_t *req; const arch_register_class_t *cls; - if(found_projs & (1 << i)) { + if (found_projs & (1 << i)) { continue; } req = get_ia32_out_req(node, i); cls = req->cls; - if(cls == NULL) { + if (cls == NULL) { continue; } - if(cls == &ia32_reg_classes[CLASS_ia32_flags]) { + if (cls == &ia32_reg_classes[CLASS_ia32_flags]) { continue; } block = get_nodes_block(node); in[0] = new_r_Proj(current_ir_graph, block, node, arch_register_class_mode(cls), i); - if(last_keep != NULL) { + if (last_keep != NULL) { be_Keep_add_node(last_keep, cls, in[0]); } else { last_keep = be_new_Keep(cls, current_ir_graph, block, 1, in); - if(sched_is_scheduled(node)) { + if (sched_is_scheduled(node)) { sched_add_after(node, last_keep); } } @@ -4760,16 +4771,16 @@ void ia32_add_missing_keeps(ia32_code_gen_t *cg) } /* do the transformation */ -void ia32_transform_graph(ia32_code_gen_t *cg) { +void ia32_transform_graph(ia32_code_gen_t *cg) +{ int cse_last; - ir_graph *irg = cg->irg; register_transformers(); env_cg = cg; initial_fpcw = NULL; BE_TIMER_PUSH(t_heights); - heights = heights_new(irg); + heights = heights_new(cg->irg); BE_TIMER_POP(t_heights); ia32_calculate_non_address_mode_nodes(cg->birg); @@ -4778,7 +4789,7 @@ void ia32_transform_graph(ia32_code_gen_t *cg) { cse_last = get_opt_cse(); set_opt_cse(0); - be_transform_graph(cg->birg, ia32_pretransform_node, cg); + be_transform_graph(cg->birg, ia32_pretransform_node); set_opt_cse(cse_last);
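/* Sketch (simplified, helper name made up; not part of the patch) of the
 * bookkeeping add_missing_keep_walker() uses above: every result number that
 * already has a Proj sets one bit in found_projs, and a Keep only has to be
 * created for result numbers whose bit stays clear. */
static int out_needs_keep(unsigned found_projs, int pn)
{
	return (found_projs & (1u << pn)) == 0;
}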