diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c
index 8f0594cb3..5a3dfe128 100644
--- a/ir/be/ia32/bearch_ia32.c
+++ b/ir/be/ia32/bearch_ia32.c
@@ -21,6 +21,8 @@
 #include
 #endif /* WITH_LIBCORE */
 
+#include <math.h>
+
 #include "pseudo_irg.h"
 #include "irgwalk.h"
 #include "irprog.h"
@@ -29,8 +31,8 @@
 #include "ircons.h"
 #include "irgmod.h"
 #include "irgopt.h"
-
-#include "bitset.h"
+#include "irbitset.h"
+#include "pdeq.h"
 
 #include "debug.h"
 #include "../beabi.h"                 /* the general register allocator interface */
@@ -38,6 +40,10 @@
 #include "../belower.h"
 #include "../besched_t.h"
 #include "../be.h"
+#include "../be_t.h"
+#include "../beirgmod.h"
+#include "../be_dbgout.h"
+#include "../beblocksched.h"
 
 #include "bearch_ia32_t.h"
 #include "ia32_new_nodes.h"           /* ia32 nodes interface */
@@ -49,15 +55,14 @@
 #include "ia32_optimize.h"
 #include "ia32_x87.h"
 #include "ia32_dbg_stat.h"
+#include "ia32_finish.h"
+#include "ia32_util.h"
 
 #define DEBUG_MODULE "firm.be.ia32.isa"
 
 /* TODO: ugly */
 static set *cur_reg_set = NULL;
 
-#undef is_Start
-#define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
-
 /* Creates the unique per irg GP NoReg node. */
 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
 	return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
@@ -69,6 +74,21 @@ ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
 		USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
 }
 
+/**
+ * Returns gp_noreg or fp_noreg, depending on input requirements.
+ */
+ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
+	arch_register_req_t req;
+	const arch_register_req_t *p_req;
+
+	p_req = arch_get_register_req(cg->arch_env, &req, irn, pos);
+	assert(p_req && "Missing register requirements");
+	if (p_req->cls == &ia32_reg_classes[CLASS_ia32_gp])
+		return ia32_new_NoReg_gp(cg);
+	else
+		return ia32_new_NoReg_fp(cg);
+}
+
 /**************************************************
  *                 _ _              _  __
  *                | | |            (_)/ _|
@@ -80,13 +100,6 @@ ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
  *          |___/
  **************************************************/
 
-static ir_node *my_skip_proj(const ir_node *n) {
-	while (is_Proj(n))
-		n = get_Proj_pred(n);
-	return (ir_node *)n;
-}
-
-
 /**
  * Return register requirements for an ia32 node.
  * If the node returns a tuple (mode_T) then the proj's
@@ -112,25 +125,19 @@ static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_re
 	DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
 
 	if (is_Proj(irn)) {
-		if (pos == -1) {
-			node_pos = ia32_translate_proj_pos(irn);
-		}
-		else {
-			node_pos = pos;
+		if(pos >= 0) {
+			DBG((mod, LEVEL_1, "ignoring request IN requirements for node %+F\n", irn));
+			return NULL;
 		}
 
-		irn = my_skip_proj(irn);
+		node_pos = (pos == -1) ? get_Proj_proj(irn) : pos;
+		irn      = skip_Proj(irn);
 
 		DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
 	}
 
 	if (is_ia32_irn(irn)) {
-		if (pos >= 0) {
-			irn_req = get_ia32_in_req(irn, pos);
-		}
-		else {
-			irn_req = get_ia32_out_req(irn, node_pos);
-		}
+		irn_req = (pos >= 0) ?
get_ia32_in_req(irn, pos) : get_ia32_out_req(irn, node_pos); DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos)); @@ -185,8 +192,8 @@ static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn)); if (is_Proj(irn)) { - pos = ia32_translate_proj_pos(irn); - irn = my_skip_proj(irn); + pos = get_Proj_proj(irn); + irn = skip_Proj(irn); } if (is_ia32_irn(irn)) { @@ -210,14 +217,19 @@ static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node * return NULL; } - pos = ia32_translate_proj_pos(irn); - irn = my_skip_proj(irn); + pos = get_Proj_proj(irn); + irn = skip_Proj(irn); } if (is_ia32_irn(irn)) { - const arch_register_t **slots; - slots = get_ia32_slots(irn); - reg = slots[pos]; + /* retrieve "real" x87 register */ + if (ia32_has_x87_register(irn)) + reg = get_ia32_attr(irn)->x87[pos + 2]; + else { + const arch_register_t **slots; + slots = get_ia32_slots(irn); + reg = slots[pos]; + } } else { reg = ia32_get_firm_reg(irn, cur_reg_set); @@ -227,50 +239,89 @@ static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node * } static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) { - irn = my_skip_proj(irn); + arch_irn_class_t classification = arch_irn_class_normal; + + irn = skip_Proj(irn); + if (is_cfop(irn)) - return arch_irn_class_branch; - else if (is_ia32_Cnst(irn)) - return arch_irn_class_const; - else if (is_ia32_Ld(irn)) - return arch_irn_class_load; - else if (is_ia32_St(irn) || is_ia32_Store8Bit(irn)) - return arch_irn_class_store; - else if (is_ia32_irn(irn)) - return arch_irn_class_normal; - else - return 0; + classification |= arch_irn_class_branch; + + if (! is_ia32_irn(irn)) + return classification & ~arch_irn_class_normal; + + if (is_ia32_Cnst(irn)) + classification |= arch_irn_class_const; + + if (is_ia32_Ld(irn)) + classification |= arch_irn_class_load; + + if (is_ia32_St(irn) || is_ia32_Store8Bit(irn)) + classification |= arch_irn_class_store; + + if (is_ia32_got_reload(irn)) + classification |= arch_irn_class_reload; + + return classification; } static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) { - irn = my_skip_proj(irn); - if (is_ia32_irn(irn)) - return get_ia32_flags(irn); - else { - if (is_Unknown(irn)) - return arch_irn_flags_ignore; - return 0; + arch_irn_flags_t flags = arch_irn_flags_none; + + if (is_Proj(irn) && is_ia32_irn(get_Proj_pred(irn))) { + flags |= get_ia32_out_flags(irn, get_Proj_proj(irn)); } + + irn = skip_Proj(irn); + if (is_ia32_irn(irn)) + flags |= get_ia32_flags(irn); + else if (is_Unknown(irn)) + flags = arch_irn_flags_ignore; + + return flags; } +/** + * The IA32 ABI callback object. + */ +typedef struct { + be_abi_call_flags_bits_t flags; /**< The call flags. */ + const arch_isa_t *isa; /**< The ISA handle. */ + const arch_env_t *aenv; /**< The architecture environment. */ + ir_graph *irg; /**< The associated graph. */ +} ia32_abi_env_t; + static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) { return is_ia32_irn(irn) ? 
get_ia32_frame_ent(irn) : NULL; } -static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) { +static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) { + set_ia32_frame_ent(irn, ent); +} + +static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) { char buf[64]; const ia32_irn_ops_t *ops = self; if (get_ia32_frame_ent(irn)) { ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn); + if(is_ia32_Pop(irn)) { + int omit_fp = be_abi_omit_fp(ops->cg->birg->abi); + if (omit_fp) { + /* Pop nodes modify the stack pointer before calculating the destination + * address, so fix this here + */ + bias -= 4; + } + } + DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias)); + snprintf(buf, sizeof(buf), "%d", bias); if (get_ia32_op_type(irn) == ia32_Normal) { set_ia32_cnst(irn, buf); - } - else { + } else { add_ia32_am_offs(irn, buf); am_flav |= ia32_O; set_ia32_am_flavour(irn, am_flav); @@ -278,22 +329,18 @@ static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) { } } -typedef struct { - be_abi_call_flags_bits_t flags; - const arch_isa_t *isa; - const arch_env_t *aenv; - ir_graph *irg; -} ia32_abi_env_t; +static int ia32_get_sp_bias(const void *self, const ir_node *irn) { + if(is_Proj(irn)) { + long proj = get_Proj_proj(irn); + ir_node *pred = get_Proj_pred(irn); -static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg) -{ - ia32_abi_env_t *env = xmalloc(sizeof(env[0])); - be_abi_call_flags_t fl = be_abi_call_get_flags(call); - env->flags = fl.bits; - env->irg = irg; - env->aenv = aenv; - env->isa = aenv->isa; - return env; + if (proj == pn_ia32_Push_stack && is_ia32_Push(pred)) + return 4; + if (proj == pn_ia32_Pop_stack && is_ia32_Pop(pred)) + return -4; + } + + return 0; } /** @@ -322,17 +369,17 @@ static void ia32_abi_dont_save_regs(void *self, pset *s) */ static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map) { - ia32_abi_env_t *env = self; + ia32_abi_env_t *env = self; - if (!env->flags.try_omit_fp) { - int reg_size = get_mode_size_bytes(env->isa->bp->reg_class->mode); - ir_node *bl = get_irg_start_block(env->irg); - ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); - ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); + if (! 
env->flags.try_omit_fp) { + ir_node *bl = get_irg_start_block(env->irg); + ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); + ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); + ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]); ir_node *push; /* push ebp */ - push = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem); + push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem); curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack); *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M); @@ -373,38 +420,39 @@ static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap */ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map) { - ia32_abi_env_t *env = self; - ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); - ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); + ia32_abi_env_t *env = self; + ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); + ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); if (env->flags.try_omit_fp) { /* simply remove the stack frame here */ - curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink); + curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK); + add_irn_dep(curr_sp, *mem); } else { - const ia32_isa_t *isa = (ia32_isa_t *)env->isa; - ir_mode *mode_bp = env->isa->bp->reg_class->mode; - int reg_size = get_mode_size_bytes(env->isa->bp->reg_class->mode); + const ia32_isa_t *isa = (ia32_isa_t *)env->isa; + ir_mode *mode_bp = env->isa->bp->reg_class->mode; /* gcc always emits a leave at the end of a routine */ if (1 || ARCH_AMD(isa->opt_arch)) { ir_node *leave; /* leave */ - leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem); + leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp); set_ia32_flags(leave, arch_irn_flags_ignore); curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame); curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack); *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M); } else { + ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]); ir_node *pop; /* copy ebp to esp */ curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem); /* pop ebp */ - pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem); + pop = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem); set_ia32_flags(pop, arch_irn_flags_ignore); curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res); curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack); @@ -418,6 +466,32 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_ be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp); } +/** + * Initialize the callback object. + * @param call The call object. + * @param aenv The architecture environment. + * @param irg The graph with the method. + * @return Some pointer. This pointer is passed to all other callback functions as self object. 
+ */ +static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg) +{ + ia32_abi_env_t *env = xmalloc(sizeof(env[0])); + be_abi_call_flags_t fl = be_abi_call_get_flags(call); + env->flags = fl.bits; + env->irg = irg; + env->aenv = aenv; + env->isa = aenv->isa; + return env; +} + +/** + * Destroy the callback object. + * @param self The callback object. + */ +static void ia32_abi_done(void *self) { + free(self); +} + /** * Produces the type which sits between the stack args and the locals on the stack. * it will contain the return address and space to store the old base pointer. @@ -425,35 +499,84 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_ */ static ir_type *ia32_abi_get_between_type(void *self) { +#define IDENT(s) new_id_from_chars(s, sizeof(s)-1) static ir_type *omit_fp_between_type = NULL; static ir_type *between_type = NULL; ia32_abi_env_t *env = self; - if(!between_type) { + if ( !between_type) { entity *old_bp_ent; entity *ret_addr_ent; entity *omit_fp_ret_addr_ent; - ir_type *old_bp_type = new_type_primitive(new_id_from_str("bp"), mode_P); - ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P); + ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P); + ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P); - between_type = new_type_class(new_id_from_str("ia32_between_type")); - old_bp_ent = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type); - ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type); + between_type = new_type_struct(IDENT("ia32_between_type")); + old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type); + ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type); set_entity_offset_bytes(old_bp_ent, 0); set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type)); set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type)); + set_type_state(between_type, layout_fixed); - omit_fp_between_type = new_type_class(new_id_from_str("ia32_between_type_omit_fp")); - omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, new_id_from_str("ret_addr"), ret_addr_type); + omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp")); + omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type); set_entity_offset_bytes(omit_fp_ret_addr_ent, 0); set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type)); + set_type_state(omit_fp_between_type, layout_fixed); } return env->flags.try_omit_fp ? omit_fp_between_type : between_type; +#undef IDENT +} + +/** + * Get the estimated cycle count for @p irn. + * + * @param self The this pointer. + * @param irn The node. 
+ *
+ * @return The estimated cycle count for this operation
+ */
+static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
+{
+	int cost;
+	ia32_op_type_t op_tp;
+	const ia32_irn_ops_t *ops = self;
+
+	if (is_Proj(irn))
+		return 0;
+
+	assert(is_ia32_irn(irn));
+
+	cost  = get_ia32_latency(irn);
+	op_tp = get_ia32_op_type(irn);
+
+	if (is_ia32_CopyB(irn)) {
+		cost = 250;
+		if (ARCH_INTEL(ops->cg->arch))
+			cost += 150;
+	}
+	else if (is_ia32_CopyB_i(irn)) {
+		int size = get_tarval_long(get_ia32_Immop_tarval(irn));
+		cost     = 20 + (int)ceil((4.0 / 3.0) * size);
+		if (ARCH_INTEL(ops->cg->arch))
+			cost += 150;
+	}
+	/* in case of address mode operations add additional cycles */
+	else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
+		/*
+			In case of stack access add 5 cycles (we assume stack is in cache),
+			other memory operations cost 20 cycles.
+		*/
+		cost += is_ia32_use_frame(irn) ? 5 : 20;
+	}
+
+	return cost;
+}
+
 /**
@@ -511,14 +634,17 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 			/* inverse == sub with const */
 			inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
 			pnc               = pn_ia32_Sub_res;
-			inverse->costs   += 5;
+			inverse->costs   += 2;
 			copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
 		}
 		else {
 			/* normal add: inverse == sub */
-			inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i ^ 1), nomem);
+			ir_node *proj = ia32_get_res_proj(irn);
+			assert(proj);
+
+			inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
 			pnc               = pn_ia32_Sub_res;
-			inverse->costs   += 5;
+			inverse->costs   += 2;
 		}
 		break;
 	case iro_ia32_Sub:
@@ -532,11 +658,14 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 		}
 		else {
 			/* normal sub */
+			ir_node *proj = ia32_get_res_proj(irn);
+			assert(proj);
+
 			if (i == 2) {
-				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, 3), nomem);
+				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
 			}
 			else {
-				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node *)irn, nomem);
+				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
 			}
 			pnc = pn_ia32_Sub_res;
 			inverse->costs += 1;
@@ -557,29 +686,90 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 			inverse->costs += 1;
 		}
 		break;
-	case iro_ia32_Not:
-		inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
+	case iro_ia32_Not: {
+		ir_node *proj = ia32_get_res_proj(irn);
+		assert(proj);
+
+		inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
 		pnc = pn_ia32_Not_res;
 		inverse->costs += 1;
 		break;
-	case iro_ia32_Minus:
-		inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
-		pnc               = pn_ia32_Minus_res;
+	}
+	case iro_ia32_Minus: {
+		ir_node *proj = ia32_get_res_proj(irn);
+		assert(proj);
+
+		inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
+		pnc               = pn_ia32_Minus_res;
 		inverse->costs += 1;
 		break;
+	}
 	default:
 		/* inverse operation not supported */
 		return NULL;
 	}
 
+	set_ia32_res_mode(inverse->nodes[0], mode);
+
 	inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
 
 	return inverse;
 }
 
+/**
+ * Check if irn can load its operand at position i from memory
(source addressmode).
+ * @param self   Pointer to irn ops itself
+ * @param irn    The irn to be checked
+ * @param i      The operand's position
+ * @return Non-zero if the operand can be loaded
+ */
+static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
+	if (! is_ia32_irn(irn)                            ||  /* must be an ia32 irn */
+		get_irn_arity(irn) != 5                       ||  /* must be a binary operation */
+		get_ia32_op_type(irn) != ia32_Normal          ||  /* must not already be an addressmode irn */
+		! (get_ia32_am_support(irn) & ia32_am_Source) ||  /* must be capable of source addressmode */
+		(i != 2 && i != 3)                            ||  /* a "real" operand position must be requested */
+		(i == 2 && ! is_ia32_commutative(irn))        ||  /* if first operand requested irn must be commutative */
+		is_ia32_use_frame(irn))                           /* must not already use frame */
+		return 0;
+
+	return 1;
+}
+
+static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
+	const ia32_irn_ops_t *ops = self;
+	ia32_code_gen_t      *cg  = ops->cg;
+
+	assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
+
+	if (i == 2) {
+		ir_node *tmp = get_irn_n(irn, 3);
+		set_irn_n(irn, 3, get_irn_n(irn, 2));
+		set_irn_n(irn, 2, tmp);
+	}
+
+	set_ia32_am_support(irn, ia32_am_Source);
+	set_ia32_op_type(irn, ia32_AddrModeS);
+	set_ia32_am_flavour(irn, ia32_B);
+	set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
+	set_ia32_use_frame(irn);
+	set_ia32_got_reload(irn);
+
+	set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
+	set_irn_n(irn, 4, spill);
+
+	/*
+		The operand at position 3 is now fetched via address mode, so it
+		must be replaced by an admissible NoReg, which we get through the
+		code generator.
+	*/
+	set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
+
+	//FIXME DBG_OPT_AM_S(reload, irn);
+}
+
 static const be_abi_callbacks_t ia32_abi_callbacks = {
 	ia32_abi_init,
-	free,
+	ia32_abi_done,
 	ia32_abi_get_between_type,
 	ia32_abi_dont_save_regs,
 	ia32_abi_prologue,
@@ -595,8 +785,13 @@ static const arch_irn_ops_if_t ia32_irn_ops_if = {
 	ia32_classify,
 	ia32_get_flags,
 	ia32_get_frame_entity,
-	ia32_set_stack_bias,
-	ia32_get_inverse
+	ia32_set_frame_entity,
+	ia32_set_frame_offset,
+	ia32_get_sp_bias,
+	ia32_get_inverse,
+	ia32_get_op_estimated_cost,
+	ia32_possible_memory_operand,
+	ia32_perform_memory_operand,
 };
 
 ia32_irn_ops_t ia32_irn_ops = {
@@ -617,6 +812,32 @@ ia32_irn_ops_t ia32_irn_ops = {
  *          |___/
 **************************************************/
 
+static void ia32_kill_convs(ia32_code_gen_t *cg) {
+	ir_node *irn;
+
+	/* BEWARE: the Projs are inserted in the set */
+	foreach_nodeset(cg->kill_conv, irn) {
+		ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
+		edges_reroute(irn, in, cg->birg->irg);
+	}
+}
+
+/**
+ * Transform the Thread Local Store base.
+ */ +static void transform_tls(ir_graph *irg) { + ir_node *irn = get_irg_tls(irg); + + if (irn) { + dbg_info *dbg = get_irn_dbg_info(irn); + ir_node *blk = get_nodes_block(irn); + ir_node *newn; + newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn)); + + exchange(irn, newn); + } +} + /** * Transforms the standard firm graph into * an ia32 firm graph @@ -629,152 +850,108 @@ static void ia32_prepare_graph(void *self) { FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform"); /* 1st: transform constants and psi condition trees */ - irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_psi_cond_tree, cg); + ia32_pre_transform_phase(cg); /* 2nd: transform all remaining nodes */ ia32_register_transformers(); dom = be_compute_dominance_frontiers(cg->irg); + + cg->kill_conv = new_nodeset(5); + transform_tls(cg->irg); irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg); + ia32_kill_convs(cg); + del_nodeset(cg->kill_conv); + be_free_dominance_frontiers(dom); - be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched); + + if (cg->dump) + be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched); /* 3rd: optimize address mode */ FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am"); ia32_optimize_addressmode(cg); - be_dump(cg->irg, "-am", dump_ir_block_graph_sched); - DEBUG_ONLY(cg->mod = old_mod;) -} -static INLINE int need_constraint_copy(ir_node *irn) { - return \ - ! is_ia32_Lea(irn) && \ - ! is_ia32_Conv_I2I(irn) && \ - ! is_ia32_Conv_I2I8Bit(irn) && \ - ! is_ia32_CmpCMov(irn) && \ - ! is_ia32_CmpSet(irn); + if (cg->dump) + be_dump(cg->irg, "-am", dump_ir_block_graph_sched); + + DEBUG_ONLY(cg->mod = old_mod;) } /** - * Insert copies for all ia32 nodes where the should_be_same requirement - * is not fulfilled. - * Transform Sub into Neg -- Add if IN2 == OUT + * Dummy functions for hooks we don't need but which must be filled. */ -static void ia32_finish_node(ir_node *irn, void *env) { - ia32_code_gen_t *cg = env; - const ia32_register_req_t **reqs; - const arch_register_t *out_reg, *in_reg, *in2_reg; - int n_res, i; - ir_node *copy, *in_node, *block, *in2_node; - ia32_op_type_t op_tp; +static void ia32_before_sched(void *self) { +} - if (is_ia32_irn(irn)) { - /* AM Dest nodes don't produce any values */ - op_tp = get_ia32_op_type(irn); - if (op_tp == ia32_AddrModeD) - goto end; - - reqs = get_ia32_out_req_all(irn); - n_res = get_ia32_n_res(irn); - block = get_nodes_block(irn); - - /* check all OUT requirements, if there is a should_be_same */ - if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn)) - { - for (i = 0; i < n_res; i++) { - if (arch_register_req_is(&(reqs[i]->req), should_be_same)) { - /* get in and out register */ - out_reg = get_ia32_out_reg(irn, i); - in_node = get_irn_n(irn, reqs[i]->same_pos); - in_reg = arch_get_irn_register(cg->arch_env, in_node); - - /* don't copy ignore nodes */ - if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node)) - continue; - - /* check if in and out register are equal */ - if (! 
REGS_ARE_EQUAL(out_reg, in_reg)) { - /* in case of a commutative op: just exchange the in's */ - /* beware: the current op could be everything, so test for ia32 */ - /* commutativity first before getting the second in */ - if (is_ia32_commutative(irn)) { - in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1); - in2_reg = arch_get_irn_register(cg->arch_env, in2_node); - - if (REGS_ARE_EQUAL(out_reg, in2_reg)) { - set_irn_n(irn, reqs[i]->same_pos, in2_node); - set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node); - } - else - goto insert_copy; - } - else { -insert_copy: - DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos)); - /* create copy from in register */ - copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node); - - DBG_OPT_2ADDRCPY(copy); - - /* destination is the out register */ - arch_set_irn_register(cg->arch_env, copy, out_reg); - - /* insert copy before the node into the schedule */ - sched_add_before(irn, copy); - - /* set copy as in */ - set_irn_n(irn, reqs[i]->same_pos, copy); - } - } - } - } - } +static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) { + int i, arity; + ir_mode *mode; + ir_node *mem_proj = NULL; - /* If we have a CondJmp/CmpSet/xCmpSet with immediate, we need to */ - /* check if it's the right operand, otherwise we have */ - /* to change it, as CMP doesn't support immediate as */ - /* left operands. */ - if ((is_ia32_CondJmp(irn) || is_ia32_CmpSet(irn) || is_ia32_xCmpSet(irn)) && - (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) && - op_tp == ia32_AddrModeS) - { - set_ia32_op_type(irn, ia32_AddrModeD); - set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn))); - } + if (is_Block(irn)) + return; - /* check if there is a sub which need to be transformed */ - ia32_transform_sub_to_neg_add(irn, cg); + mode = get_irn_mode(irn); - /* transform a LEA into an Add if possible */ - ia32_transform_lea_to_add(irn, cg); - } -end: + /* check if we already saw this node or the node has more than one user */ + if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) { + return; + }; - /* check for peephole optimization */ - ia32_peephole_optimization(irn, cg); -} + /* mark irn visited */ + bitset_add_irn(already_visited, irn); -static void ia32_finish_irg_walker(ir_node *block, void *env) { - ir_node *irn, *next; + /* non-Tuple nodes with one user: ok, return */ + if (get_irn_n_edges(irn) >= 1 && mode != mode_T) { + return; + } - for (irn = sched_first(block); !sched_is_end(irn); irn = next) { - next = sched_next(irn); - ia32_finish_node(irn, env); + /* tuple node has one user which is not the mem proj-> ok */ + if (mode == mode_T && get_irn_n_edges(irn) == 1) { + mem_proj = ia32_get_proj_for_mode(irn, mode_M); + if (mem_proj == NULL) { + return; + } } -} -/** - * Add Copy nodes for not fulfilled should_be_equal constraints - */ -static void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) { - irg_block_walk_graph(irg, NULL, ia32_finish_irg_walker, cg); -} + arity = get_irn_arity(irn); + for (i = 0; i < arity; ++i) { + ir_node *pred = get_irn_n(irn, i); + /* do not follow memory edges or we will accidentally remove stores */ + if (get_irn_mode(pred) == mode_M) { + if(mem_proj != NULL) { + edges_reroute(mem_proj, pred, get_irn_irg(mem_proj)); + mem_proj = NULL; + } + continue; + } + set_irn_n(irn, i, new_Bad()); -/** - * Dummy functions for hooks we don't need but which must be filled. 
- */ -static void ia32_before_sched(void *self) { + /* + The current node is about to be removed: if the predecessor + has only this node as user, it need to be removed as well. + */ + if (get_irn_n_edges(pred) <= 1) + remove_unused_nodes(pred, already_visited); + } + + // we need to set the presd to Bad again to also get the memory edges + arity = get_irn_arity(irn); + for (i = 0; i < arity; ++i) { + set_irn_n(irn, i, new_Bad()); + } + + if (sched_is_scheduled(irn)) { + sched_remove(irn); + } +} + +static void remove_unused_loads_walker(ir_node *irn, void *env) { + bitset_t *already_visited = env; + if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn)) + remove_unused_nodes(irn, env); } /** @@ -783,9 +960,16 @@ static void ia32_before_sched(void *self) { * simulator and the emitter. */ static void ia32_before_ra(void *self) { - ia32_code_gen_t *cg = self; - - cg->blk_sched = sched_create_block_schedule(cg->irg); + ia32_code_gen_t *cg = self; + bitset_t *already_visited = bitset_irg_alloca(cg->irg); + + /* + Handle special case: + There are sometimes unused loads, only pinned by memory. + We need to remove those Loads and all other nodes which won't be used + after removing the Load from schedule. + */ + irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited); } @@ -814,9 +998,8 @@ static void transform_to_Load(ia32_transform_env_t *env) { else new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); } - else { + else new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem); - } set_ia32_am_support(new_op, ia32_am_Source); set_ia32_op_type(new_op, ia32_AddrModeS); @@ -827,7 +1010,7 @@ static void transform_to_Load(ia32_transform_env_t *env) { DBG_OPT_RELOAD2LD(irn, new_op); - proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res); + proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_ia32_Load_res); if (sched_point) { sched_add_after(sched_point, new_op); @@ -889,8 +1072,6 @@ static void transform_to_Store(ia32_transform_env_t *env) { if (sched_point) { sched_add_after(sched_point, new_op); - sched_add_after(new_op, proj); - sched_remove(irn); } @@ -899,6 +1080,130 @@ static void transform_to_Store(ia32_transform_env_t *env) { exchange(irn, proj); } +static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent) { + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *frame = get_irg_frame(env->irg); + + ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, frame, noreg, noreg, sp, mem); + + set_ia32_frame_ent(push, ent); + set_ia32_use_frame(push); + set_ia32_op_type(push, ia32_AddrModeS); + set_ia32_am_flavour(push, ia32_B); + set_ia32_ls_mode(push, mode_Is); + + sched_add_before(schedpoint, push); + return push; +} + +static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent) { + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *frame = get_irg_frame(env->irg); + + ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, frame, noreg, sp, new_NoMem()); + + set_ia32_frame_ent(pop, ent); + set_ia32_use_frame(pop); + set_ia32_op_type(pop, ia32_AddrModeD); + set_ia32_am_flavour(pop, ia32_B); + set_ia32_ls_mode(pop, mode_Is); + + sched_add_before(schedpoint, pop); + + return pop; +} + +static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint) { + ir_mode *spmode = mode_Iu; + const arch_register_t *spreg = 
&ia32_gp_regs[REG_ESP]; + ir_node *sp; + + sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos); + arch_set_irn_register(env->cg->arch_env, sp, spreg); + sched_add_before(schedpoint, sp); + + return sp; +} + +/** + * Transform memperm, currently we do this the ugly way and produce + * push/pop into/from memory cascades. This is possible without using + * any registers. + */ +static void transform_MemPerm(ia32_transform_env_t *env) { + ir_node *node = env->irn; + int i, arity; + ir_node *sp = be_abi_get_ignore_irn(env->cg->birg->abi, &ia32_gp_regs[REG_ESP]); + const ir_edge_t *edge; + const ir_edge_t *next; + ir_node **pops; + + arity = be_get_MemPerm_entity_arity(node); + pops = alloca(arity * sizeof(pops[0])); + + // create pushs + for(i = 0; i < arity; ++i) { + entity *ent = be_get_MemPerm_in_entity(node, i); + ir_type *enttype = get_entity_type(ent); + int entbits = get_type_size_bits(enttype); + ir_node *mem = get_irn_n(node, i + 1); + ir_node *push; + + assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); + + push = create_push(env, node, sp, mem, ent); + sp = create_spproj(env, push, 0, node); + if(entbits == 64) { + // add another push after the first one + push = create_push(env, node, sp, mem, ent); + add_ia32_am_offs_int(push, 4); + sp = create_spproj(env, push, 0, node); + } + + set_irn_n(node, i, new_Bad()); + } + + // create pops + for(i = arity - 1; i >= 0; --i) { + entity *ent = be_get_MemPerm_out_entity(node, i); + ir_type *enttype = get_entity_type(ent); + int entbits = get_type_size_bits(enttype); + + ir_node *pop; + + assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); + + pop = create_pop(env, node, sp, ent); + if(entbits == 64) { + // add another pop after the first one + sp = create_spproj(env, pop, 1, node); + pop = create_pop(env, node, sp, ent); + add_ia32_am_offs_int(pop, 4); + } + sp = create_spproj(env, pop, 1, node); + + pops[i] = pop; + } + + // exchange memprojs + foreach_out_edge_safe(node, edge, next) { + ir_node *proj = get_edge_src_irn(edge); + int p = get_Proj_proj(proj); + + assert(p < arity); + + set_Proj_pred(proj, pops[p]); + set_Proj_proj(proj, 3); + } + + // remove memperm + arity = get_irn_arity(node); + for(i = 0; i < arity; ++i) { + set_irn_n(node, i, new_Bad()); + } + sched_remove(node); +} + /** * Fix the mode of Spill/Reload */ @@ -939,12 +1244,18 @@ static void ia32_after_ra_walker(ir_node *block, void *env) { transform_to_Load(&tenv); } else if (be_is_Spill(node)) { + ir_node *spillval = get_irn_n(node, be_pos_Spill_val); /* we always spill the whole register */ tenv.dbg = get_irn_dbg_info(node); tenv.irn = node; - tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node))); + tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval)); transform_to_Store(&tenv); } + else if(be_is_MemPerm(node)) { + tenv.dbg = get_irn_dbg_info(node); + tenv.irn = node; + transform_MemPerm(&tenv); + } } } @@ -957,14 +1268,32 @@ static void ia32_after_ra_walker(ir_node *block, void *env) { */ static void ia32_after_ra(void *self) { ia32_code_gen_t *cg = self; - irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self); + ir_graph *irg = cg->irg; + + irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg); + + ia32_finish_irg(irg, cg); +} + +/** + * Last touchups for the graph before emit + */ +static void ia32_finish(void *self) { + ia32_code_gen_t *cg = self; + ir_graph *irg = cg->irg; + + //be_remove_empty_blocks(irg); + cg->blk_sched = 
be_create_block_schedule(irg, cg->birg->execfreqs); + + //cg->blk_sched = sched_create_block_schedule(cg->irg, cg->birg->execfreqs); /* if we do x87 code generation, rewrite all the virtual instructions and registers */ if (cg->used_fp == fp_x87 || cg->force_sim) { - x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched); + x87_simulate_graph(cg->arch_env, irg, cg->blk_sched); } -} + ia32_peephole_optimization(irg, cg); +} /** * Emits the code, closes the output file and frees @@ -974,8 +1303,6 @@ static void ia32_codegen(void *self) { ia32_code_gen_t *cg = self; ir_graph *irg = cg->irg; - ia32_finish_irg(irg, cg); - be_dump(irg, "-finished", dump_ir_block_graph_sched); ia32_gen_routine(cg->isa->out, irg, cg); cur_reg_set = NULL; @@ -985,8 +1312,7 @@ static void ia32_codegen(void *self) { /* de-allocate code generator */ del_set(cg->reg_set); - free(self); - + free(cg); } static void *ia32_cg_init(const be_irg_t *birg); @@ -998,6 +1324,7 @@ static const arch_code_generator_if_t ia32_code_gen_if = { ia32_before_sched, /* before scheduling hook */ ia32_before_ra, /* before register allocation hook */ ia32_after_ra, /* after register allocation hook */ + ia32_finish, /* called before codegen */ ia32_codegen /* emit && done */ }; @@ -1019,11 +1346,14 @@ static void *ia32_cg_init(const be_irg_t *birg) { cg->gp_to_fp = NULL; cg->fp_kind = isa->fp_kind; cg->used_fp = fp_none; + cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0; FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg"); /* copy optimizations from isa for easier access */ - cg->opt = isa->opt; + cg->opt = isa->opt; + cg->arch = isa->arch; + cg->opt_arch = isa->opt_arch; /* enter it */ isa->cg = cg; @@ -1056,6 +1386,31 @@ static void *ia32_cg_init(const be_irg_t *birg) { * *****************************************************************/ +/** + * Set output modes for GCC + */ +static const tarval_mode_info mo_integer = { + TVO_DECIMAL, + NULL, + NULL, +}; + +/* + * set the tarval output mode of all integer modes to decimal + */ +static void set_tarval_output_modes(void) +{ + int i; + + for (i = get_irp_n_modes() - 1; i >= 0; --i) { + ir_mode *mode = get_irp_mode(i); + + if (mode_is_int(mode)) + set_tarval_mode_output_option(mode, &mo_integer); + } +} + + /** * The template that generates a new ISA object. 
* Note that this template can be changed by command line @@ -1067,6 +1422,7 @@ static ia32_isa_t ia32_isa_template = { &ia32_gp_regs[REG_ESP], /* stack pointer register */ &ia32_gp_regs[REG_EBP], /* base pointer register */ -1, /* stack direction */ + NULL, /* main environment */ }, NULL, /* 16bit register names */ NULL, /* 8bit register names */ @@ -1083,6 +1439,7 @@ static ia32_isa_t ia32_isa_template = { arch_pentium_4, /* optimize for architecture */ fp_sse2, /* use sse2 unit */ NULL, /* current code generator */ + NULL, /* output file */ #ifndef NDEBUG NULL, /* name obstack */ 0 /* name obst size */ @@ -1099,6 +1456,8 @@ static void *ia32_init(FILE *file_handle) { if (inited) return NULL; + set_tarval_output_modes(); + isa = xmalloc(sizeof(*isa)); memcpy(isa, &ia32_isa_template, sizeof(*isa)); @@ -1126,14 +1485,14 @@ static void *ia32_init(FILE *file_handle) { /* patch register names of x87 registers */ if (USE_x87(isa)) { - ia32_st_regs[0].name = "st"; - ia32_st_regs[1].name = "st(1)"; - ia32_st_regs[2].name = "st(2)"; - ia32_st_regs[3].name = "st(3)"; - ia32_st_regs[4].name = "st(4)"; - ia32_st_regs[5].name = "st(5)"; - ia32_st_regs[6].name = "st(6)"; - ia32_st_regs[7].name = "st(7)"; + ia32_st_regs[0].name = "st"; + ia32_st_regs[1].name = "st(1)"; + ia32_st_regs[2].name = "st(2)"; + ia32_st_regs[3].name = "st(3)"; + ia32_st_regs[4].name = "st(4)"; + ia32_st_regs[5].name = "st(5)"; + ia32_st_regs[6].name = "st(6)"; + ia32_st_regs[7].name = "st(7)"; } #ifndef NDEBUG @@ -1143,9 +1502,13 @@ static void *ia32_init(FILE *file_handle) { #endif /* NDEBUG */ ia32_handle_intrinsics(); - ia32_switch_section(NULL, NO_SECTION); + ia32_switch_section(isa->out, NO_SECTION); fprintf(isa->out, "\t.intel_syntax\n"); + /* needed for the debug support */ + ia32_switch_section(isa->out, SECTION_TEXT); + fprintf(isa->out, ".Ltext0:\n"); + inited = 1; return isa; @@ -1160,7 +1523,7 @@ static void ia32_done(void *self) { ia32_isa_t *isa = self; /* emit now all global declarations */ - ia32_gen_decls(isa->out); + ia32_gen_decls(isa->out, isa->arch_isa.main_env); pmap_destroy(isa->regs_16bit); pmap_destroy(isa->regs_8bit); @@ -1191,7 +1554,6 @@ static int ia32_get_n_reg_class(const void *self) { * Return the register class for index i. */ static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) { - const ia32_isa_t *isa = self; assert(i >= 0 && i < 3 && "Invalid ia32 register class requested."); if (i == 0) return &ia32_reg_classes[CLASS_ia32_gp]; @@ -1273,7 +1635,8 @@ static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_cal /* set stack parameters */ for (i = stack_idx; i < n; i++) { - be_abi_call_param_stack(abi, i, 1, 0, 0); + /* parameters on the stack are 32 bit aligned */ + be_abi_call_param_stack(abi, i, 4, 0, 0); } @@ -1292,7 +1655,7 @@ static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_cal tp = get_method_res_type(method_type, 1); mode = get_type_mode(tp); - assert(!mode_is_float(mode) && "two FP results not supported"); + assert(!mode_is_float(mode) && "mixed INT, FP results not supported"); be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]); be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]); @@ -1304,9 +1667,7 @@ static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_cal assert(is_atomic_type(tp)); mode = get_type_mode(tp); - reg = mode_is_float(mode) ? - (USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) : - &ia32_gp_regs[REG_EAX]; + reg = mode_is_float(mode) ? 
&ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX]; be_abi_call_res_reg(abi, 0, reg); } @@ -1326,7 +1687,7 @@ const arch_irn_handler_t *ia32_get_irn_handler(const void *self) { } int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) { - return is_ia32_irn(irn); + return is_ia32_irn(irn) ? 1 : -1; } /** @@ -1336,14 +1697,22 @@ static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) { return &ia32_code_gen_if; } +/** + * Returns the estimated execution time of an ia32 irn. + */ +static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) { + const arch_env_t *arch_env = env; + return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1; +} + list_sched_selector_t ia32_sched_selector; /** * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded */ -static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) { -// memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t)); - memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t)); +static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) { + memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector)); + ia32_sched_selector.exectime = ia32_sched_exectime; ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule; return &ia32_sched_selector; }
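For reference, the CopyB_i estimate in ia32_get_op_estimated_cost above works out as below. This is a standalone sketch, not patch content (copyb_i_cost is a hypothetical name); note the division must stay in floating point, since an integer 4/3 would truncate to 1 before ceil() ever sees it:

#include <math.h>
#include <stdio.h>

/* Mirrors the immediate-CopyB estimate above: a 20-cycle base plus
 * roughly 4/3 cycles per byte, plus a flat penalty on Intel cores. */
static int copyb_i_cost(int size, int is_intel)
{
	int cost = 20 + (int)ceil((4.0 / 3.0) * size);
	if (is_intel)
		cost += 150;
	return cost;
}

int main(void)
{
	int sizes[] = { 4, 16, 64, 256 };
	int i;

	for (i = 0; i < 4; ++i)
		printf("CopyB_i of %3d bytes: ~%d cycles\n", sizes[i], copyb_i_cost(sizes[i], 0));
	return 0;
}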
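transform_MemPerm above permutes spill slots without any scratch register by pushing all sources and then popping into all destinations in reverse order. The same trick in miniature, on a plain array standing in for the frame (a sketch with hypothetical names, not libfirm code):

#include <stdio.h>

static int stack[8];
static int sp = 0;

static void push(int v) { stack[sp++] = v; }
static int  pop(void)   { return stack[--sp]; }

int main(void)
{
	int slot[2] = { 11, 22 };  /* spill-slot contents in the frame */
	int perm[2] = { 1, 0 };    /* slot i receives the old slot perm[i] */
	int i;

	for (i = 0; i < 2; ++i)    /* "create pushes" */
		push(slot[perm[i]]);
	for (i = 1; i >= 0; --i)   /* "create pops", reverse order */
		slot[i] = pop();

	printf("%d %d\n", slot[0], slot[1]);  /* prints: 22 11 */
	return 0;
}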
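The interplay of ia32_get_sp_bias and the Pop fixup in ia32_set_frame_offset can be modelled in isolation. A toy simulation (hypothetical ops, not libfirm nodes): each Push biases frame offsets by another 4 bytes, each Pop removes 4, and a Pop computes its own address against the already-incremented esp, hence the extra -4 it receives:

#include <stdio.h>

enum op { PUSH, POP, OTHER };

/* Analogue of ia32_get_sp_bias(): stack-pointer change per operation. */
static int sp_bias(enum op o)
{
	switch (o) {
	case PUSH: return 4;   /* like pn_ia32_Push_stack */
	case POP:  return -4;  /* like pn_ia32_Pop_stack */
	default:   return 0;
	}
}

int main(void)
{
	enum op sched[] = { PUSH, PUSH, OTHER, POP, POP };
	int bias = 0;
	int i;

	for (i = 0; i < 5; ++i) {
		/* Pop modifies esp before using the address: the bias -= 4 fixup */
		int effective = bias - (sched[i] == POP ? 4 : 0);
		printf("op %d sees frame offsets biased by %d\n", i, effective);
		bias += sp_bias(sched[i]);
	}
	printf("final bias: %d (must return to 0)\n", bias);
	return 0;
}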
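Finally, ia32_get_list_sched_selector at the end of the diff uses a common C idiom: copy a vtable of function pointers wholesale, then overwrite individual slots. A self-contained sketch of the pattern with toy types (not libfirm's selector interface):

#include <stdio.h>
#include <string.h>

/* A toy vtable with the two hooks the ia32 backend overrides. */
typedef struct selector {
	int (*exectime)(const char *op);
	int (*to_appear_in_schedule)(const char *op);
} selector_t;

static int generic_exectime(const char *op) { (void)op; return 1; }
static int generic_appear(const char *op)   { (void)op; return 1; }

/* Backend-specific hook, standing in for ia32_sched_exectime. */
static int my_exectime(const char *op) { return op[0] == 'L' ? 3 : 1; }

static selector_t my_selector;

/* Copy the generic vtable, then patch single slots -- the same idiom
 * as ia32_get_list_sched_selector() above. */
static const selector_t *get_selector(const selector_t *base)
{
	memcpy(&my_selector, base, sizeof(my_selector));
	my_selector.exectime = my_exectime;
	return &my_selector;
}

int main(void)
{
	selector_t generic = { generic_exectime, generic_appear };
	const selector_t *s = get_selector(&generic);

	printf("Load: %d cycles, appears: %d\n",
	       s->exectime("Load"), s->to_appear_in_schedule("Load"));
	return 0;
}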