X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fbearch_ia32.c;h=6ad60e21a82bb5159d5fd6ce51d34c04cbd67ab1;hb=c47d58635db2e401ec79ef179faf986f22ea0a31;hp=c83eb297effbfd9971d41631114189d6987e40ce;hpb=fd636a3ed524e0dd797ca0c952d366d7695f3772;p=libfirm diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c index c83eb297e..6ad60e21a 100644 --- a/ir/be/ia32/bearch_ia32.c +++ b/ir/be/ia32/bearch_ia32.c @@ -33,6 +33,7 @@ #include "irgopt.h" #include "irbitset.h" #include "pdeq.h" +#include "pset.h" #include "debug.h" #include "../beabi.h" /* the general register allocator interface */ @@ -43,10 +44,17 @@ #include "../be_t.h" #include "../beirgmod.h" #include "../be_dbgout.h" +#include "../beblocksched.h" +#include "../bemachine.h" +#include "../beilpsched.h" +#include "../bespillslots.h" +#include "../bemodule.h" + #include "bearch_ia32_t.h" #include "ia32_new_nodes.h" /* ia32 nodes interface */ #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */ +#include "gen_ia32_machine.h" #include "ia32_gen_decls.h" /* interface declaration emitter */ #include "ia32_transform.h" #include "ia32_emitter.h" @@ -62,9 +70,6 @@ /* TODO: ugly */ static set *cur_reg_set = NULL; -#undef is_Start -#define is_Start(irn) (get_irn_opcode(irn) == iro_Start) - /* Creates the unique per irg GP NoReg node. */ ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) { return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]); @@ -76,6 +81,21 @@ ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) { USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]); } +/** + * Returns gp_noreg or fp_noreg, depending in input requirements. + */ +ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) { + arch_register_req_t req; + const arch_register_req_t *p_req; + + p_req = arch_get_register_req(cg->arch_env, &req, irn, pos); + assert(p_req && "Missing register requirements"); + if (p_req->cls == &ia32_reg_classes[CLASS_ia32_gp]) + return ia32_new_NoReg_gp(cg); + else + return ia32_new_NoReg_fp(cg); +} + /************************************************** * _ _ _ __ * | | | (_)/ _| @@ -87,13 +107,6 @@ ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) { * |___/ **************************************************/ -static ir_node *my_skip_proj(const ir_node *n) { - while (is_Proj(n)) - n = get_Proj_pred(n); - return (ir_node *)n; -} - - /** * Return register requirements for an ia32 node. * If the node returns a tuple (mode_T) then the proj's @@ -106,7 +119,7 @@ static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_re ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn); FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE); - if (is_Block(irn) || mode == mode_M || mode == mode_X) { + if (is_Block(irn) || mode == mode_X) { DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn)); return NULL; } @@ -119,24 +132,25 @@ static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_re DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn)); if (is_Proj(irn)) { - if (pos == -1) { - node_pos = ia32_translate_proj_pos(irn); - } - else { - node_pos = pos; + if(mode == mode_M) + return NULL; + + if(pos >= 0) { + DBG((mod, LEVEL_1, "ignoring request IN requirements for node %+F\n", irn)); + return NULL; } - irn = my_skip_proj(irn); + node_pos = (pos == -1) ? 
get_Proj_proj(irn) : pos; + irn = skip_Proj_const(irn); DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos)); } if (is_ia32_irn(irn)) { - if (pos >= 0) { - irn_req = get_ia32_in_req(irn, pos); - } - else { - irn_req = get_ia32_out_req(irn, node_pos); + irn_req = (pos >= 0) ? get_ia32_in_req(irn, pos) : get_ia32_out_req(irn, node_pos); + if (irn_req == NULL) { + /* no requirements */ + return NULL; } DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos)); @@ -192,8 +206,8 @@ static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn)); if (is_Proj(irn)) { - pos = ia32_translate_proj_pos(irn); - irn = my_skip_proj(irn); + pos = get_Proj_proj(irn); + irn = skip_Proj(irn); } if (is_ia32_irn(irn)) { @@ -217,8 +231,8 @@ static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node * return NULL; } - pos = ia32_translate_proj_pos(irn); - irn = my_skip_proj(irn); + pos = get_Proj_proj(irn); + irn = skip_Proj_const(irn); } if (is_ia32_irn(irn)) { @@ -236,7 +250,7 @@ static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node * static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) { arch_irn_class_t classification = arch_irn_class_normal; - irn = my_skip_proj(irn); + irn = skip_Proj_const(irn); if (is_cfop(irn)) classification |= arch_irn_class_branch; @@ -260,100 +274,80 @@ static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) { } static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) { + arch_irn_flags_t flags; + ir_node *pred = is_Proj(irn) && mode_is_datab(get_irn_mode(irn)) ? get_Proj_pred(irn) : NULL; - if(is_Proj(irn)) { - ir_node *pred = get_Proj_pred(irn); - if(is_ia32_Push(pred) && get_Proj_proj(irn) == pn_ia32_Push_stack) { - /* Push modifies always ESP, this cannot be changed */ - return arch_irn_flags_modify_sp | arch_irn_flags_ignore; - } - if(is_ia32_Pop(pred) && get_Proj_proj(irn) == pn_ia32_Pop_stack) { - return arch_irn_flags_modify_sp | arch_irn_flags_ignore; - } - if(is_ia32_AddSP(pred) && get_Proj_proj(irn) == pn_ia32_AddSP_stack) { - /* AddSP modifies always ESP, this cannot be changed */ - return arch_irn_flags_modify_sp | arch_irn_flags_ignore; - } - if(is_ia32_SubSP(pred) && get_Proj_proj(irn) == pn_ia32_SubSP_stack) { - /* SubSP modifies always ESP, this cannot be changed */ - return arch_irn_flags_modify_sp | arch_irn_flags_ignore; - } - } - - irn = my_skip_proj(irn); - if (is_ia32_irn(irn)) - return get_ia32_flags(irn); + if (is_Unknown(irn)) + flags = arch_irn_flags_ignore; else { - if (is_Unknown(irn)) - return arch_irn_flags_ignore; - return 0; + /* pred is only set, if we have a Proj */ + flags = pred && is_ia32_irn(pred) ? get_ia32_out_flags(pred, get_Proj_proj(irn)) : arch_irn_flags_none; + + irn = skip_Proj_const(irn); + if (is_ia32_irn(irn)) + flags |= get_ia32_flags(irn); } + + return flags; } +/** + * The IA32 ABI callback object. + */ typedef struct { - be_abi_call_flags_bits_t flags; - const arch_isa_t *isa; - const arch_env_t *aenv; - ir_graph *irg; + be_abi_call_flags_bits_t flags; /**< The call flags. */ + const arch_isa_t *isa; /**< The ISA handle. */ + const arch_env_t *aenv; /**< The architecture environment. */ + ir_graph *irg; /**< The associated graph. 
*/ } ia32_abi_env_t; -static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg) -{ - ia32_abi_env_t *env = xmalloc(sizeof(env[0])); - be_abi_call_flags_t fl = be_abi_call_get_flags(call); - env->flags = fl.bits; - env->irg = irg; - env->aenv = aenv; - env->isa = aenv->isa; - return env; -} - -static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) { +static ir_entity *ia32_get_frame_entity(const void *self, const ir_node *irn) { return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL; } -static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) { +static void ia32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) { set_ia32_frame_ent(irn, ent); } static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) { - char buf[64]; const ia32_irn_ops_t *ops = self; if (get_ia32_frame_ent(irn)) { - ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn); - - /* Pop nodes modify the stack pointer before calculating the destination - * address, so fix this here - */ if(is_ia32_Pop(irn)) { - ia32_abi_env_t *cb_env = get_abi_cb(ops->cg->birg->abi); - if (cb_env->flags.try_omit_fp) + int omit_fp = be_abi_omit_fp(ops->cg->birg->abi); + if (omit_fp) { + /* Pop nodes modify the stack pointer before calculating the destination + * address, so fix this here + */ bias -= 4; + } } DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias)); - snprintf(buf, sizeof(buf), "%d", bias); - if (get_ia32_op_type(irn) == ia32_Normal) { + // Matze: When does this case happen? + char buf[64]; + snprintf(buf, sizeof(buf), "%d", bias); set_ia32_cnst(irn, buf); } else { - add_ia32_am_offs(irn, buf); + ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn); am_flav |= ia32_O; set_ia32_am_flavour(irn, am_flav); + + add_ia32_am_offs_int(irn, bias); } } } static int ia32_get_sp_bias(const void *self, const ir_node *irn) { if(is_Proj(irn)) { - int proj = get_Proj_proj(irn); + long proj = get_Proj_proj(irn); ir_node *pred = get_Proj_pred(irn); - if(is_ia32_Push(pred) && proj == 0) + if (is_ia32_Push(pred) && proj == pn_ia32_Push_stack) return 4; - if(is_ia32_Pop(pred) && proj == 1) + if (is_ia32_Pop(pred) && proj == pn_ia32_Pop_stack) return -4; } @@ -392,10 +386,11 @@ static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap ir_node *bl = get_irg_start_block(env->irg); ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); + ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]); ir_node *push; /* push ebp */ - push = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem); + push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem); curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack); *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M); @@ -461,13 +456,14 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_ *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M); } else { + ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]); ir_node *pop; /* copy ebp to esp */ curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem); /* pop ebp */ - pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem); + pop = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem); set_ia32_flags(pop, arch_irn_flags_ignore); curr_bp = new_r_Proj(current_ir_graph, bl, pop, 
mode_bp, pn_ia32_Pop_res); curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack); @@ -481,6 +477,32 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_ be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp); } +/** + * Initialize the callback object. + * @param call The call object. + * @param aenv The architecture environment. + * @param irg The graph with the method. + * @return Some pointer. This pointer is passed to all other callback functions as self object. + */ +static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg) +{ + ia32_abi_env_t *env = xmalloc(sizeof(env[0])); + be_abi_call_flags_t fl = be_abi_call_get_flags(call); + env->flags = fl.bits; + env->irg = irg; + env->aenv = aenv; + env->isa = aenv->isa; + return env; +} + +/** + * Destroy the callback object. + * @param self The callback object. + */ +static void ia32_abi_done(void *self) { + free(self); +} + /** * Produces the type which sits between the stack args and the locals on the stack. * it will contain the return address and space to store the old base pointer. @@ -494,10 +516,10 @@ static ir_type *ia32_abi_get_between_type(void *self) ia32_abi_env_t *env = self; - if ( !between_type) { - entity *old_bp_ent; - entity *ret_addr_ent; - entity *omit_fp_ret_addr_ent; + if (! between_type) { + ir_entity *old_bp_ent; + ir_entity *ret_addr_ent; + ir_entity *omit_fp_ret_addr_ent; ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P); ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P); @@ -506,15 +528,15 @@ static ir_type *ia32_abi_get_between_type(void *self) old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type); ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type); - set_entity_offset_bytes(old_bp_ent, 0); - set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type)); + set_entity_offset(old_bp_ent, 0); + set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type)); set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type)); set_type_state(between_type, layout_fixed); omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp")); omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type); - set_entity_offset_bytes(omit_fp_ret_addr_ent, 0); + set_entity_offset(omit_fp_ret_addr_ent, 0); set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type)); set_type_state(omit_fp_between_type, layout_fixed); } @@ -538,7 +560,9 @@ static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn) const ia32_irn_ops_t *ops = self; if (is_Proj(irn)) - return 0; + return 0; + if (!is_ia32_irn(irn)) + return 0; assert(is_ia32_irn(irn)); @@ -580,8 +604,9 @@ static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn) static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) { ir_graph *irg; ir_mode *mode; + ir_mode *irn_mode; ir_node *block, *noreg, *nomem; - int pnc; + dbg_info *dbg; /* we cannot invert non-ia32 irns */ if (! 
is_ia32_irn(irn)) @@ -595,24 +620,25 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in if (get_ia32_op_type(irn) != ia32_Normal) return NULL; - irg = get_irn_irg(irn); - block = get_nodes_block(irn); - mode = get_ia32_res_mode(irn); - noreg = get_irn_n(irn, 0); - nomem = new_r_NoMem(irg); + irg = get_irn_irg(irn); + block = get_nodes_block(irn); + mode = get_ia32_res_mode(irn); + irn_mode = get_irn_mode(irn); + noreg = get_irn_n(irn, 0); + nomem = new_r_NoMem(irg); + dbg = get_irn_dbg_info(irn); /* initialize structure */ inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0])); inverse->costs = 0; - inverse->n = 2; + inverse->n = 1; switch (get_ia32_irn_opcode(irn)) { case iro_ia32_Add: if (get_ia32_immop_type(irn) == ia32_ImmConst) { /* we have an add with a const here */ /* invers == add with negated const */ - inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem); - pnc = pn_ia32_Add_res; + inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode); inverse->costs += 1; copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn); set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn))); @@ -621,18 +647,13 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) { /* we have an add with a symconst here */ /* invers == sub with const */ - inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem); - pnc = pn_ia32_Sub_res; + inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode); inverse->costs += 2; copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn); } else { /* normal add: inverse == sub */ - ir_node *proj = ia32_get_res_proj(irn); - assert(proj); - - inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem); - pnc = pn_ia32_Sub_res; + inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem, irn_mode); inverse->costs += 2; } break; @@ -640,56 +661,41 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in if (get_ia32_immop_type(irn) != ia32_ImmNone) { /* we have a sub with a const/symconst here */ /* invers == add with this const */ - inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem); - pnc = pn_ia32_Add_res; + inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode); inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 
5 : 1;
 			copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
 		}
 		else {
 			/* normal sub */
-			ir_node *proj = ia32_get_res_proj(irn);
-			assert(proj);
-
 			if (i == 2) {
-				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
+				inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem, irn_mode);
 			}
 			else {
-				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
+				inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem, irn_mode);
 			}
-			pnc = pn_ia32_Sub_res;
 			inverse->costs += 1;
 		}
 		break;
 	case iro_ia32_Eor:
 		if (get_ia32_immop_type(irn) != ia32_ImmNone) {
 			/* xor with const: inverse = xor */
-			inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
-			pnc = pn_ia32_Eor_res;
+			inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
 			inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
 			copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
 		}
 		else {
 			/* normal xor */
-			inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
-			pnc = pn_ia32_Eor_res;
+			inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem, irn_mode);
 			inverse->costs += 1;
 		}
 		break;
 	case iro_ia32_Not: {
-		ir_node *proj = ia32_get_res_proj(irn);
-		assert(proj);
-
-		inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
-		pnc = pn_ia32_Not_res;
+		inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem, irn_mode);
 		inverse->costs += 1;
 		break;
 	}
 	case iro_ia32_Minus: {
-		ir_node *proj = ia32_get_res_proj(irn);
-		assert(proj);
-
-		inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
-		pnc = pn_ia32_Minus_res;
+		inverse->nodes[0] = new_rd_ia32_Minus(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem, irn_mode);
 		inverse->costs += 1;
 		break;
 	}
@@ -699,11 +705,47 @@
 	}
 
 	set_ia32_res_mode(inverse->nodes[0], mode);
-	inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
 
 	return inverse;
 }
 
+/**
+ * Get the mode that should be used for spilling a value node.
+ */
+static ir_mode *get_spill_mode(ia32_code_gen_t *cg, const ir_node *node)
+{
+	ir_mode *mode = get_irn_mode(node);
+	if (mode_is_float(mode)) {
+#if 0
+		// super exact spilling...
+		if (USE_SSE2(cg))
+			return mode_D;
+		else
+			return mode_E;
+#else
+		return mode;
+#endif
+	}
+	else
+		return mode_Is;

+	assert(0);
+	return mode;
+}
+
+/**
+ * Checks whether an addressmode reload for a node with mode mode is compatible
+ * with a spillslot of mode spill_mode.
+ */
+static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
+{
+	if(mode_is_float(mode)) {
+		return mode == spillmode;
+	} else {
+		return 1;
+	}
+}
+
 /**
  * Check if irn can load its operand at position i from memory (source addressmode).
* @param self Pointer to irn ops itself @@ -712,10 +754,17 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in * @return Non-Zero if operand can be loaded */ static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) { + const ia32_irn_ops_t *ops = self; + ia32_code_gen_t *cg = ops->cg; + ir_node *op = get_irn_n(irn, i); + const ir_mode *mode = get_irn_mode(op); + const ir_mode *spillmode = get_spill_mode(cg, op); + if (! is_ia32_irn(irn) || /* must be an ia32 irn */ get_irn_arity(irn) != 5 || /* must be a binary operation */ get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */ ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */ + ! ia32_is_spillmode_compatible(mode, spillmode) || (i != 2 && i != 3) || /* a "real" operand position must be requested */ (i == 2 && ! is_ia32_commutative(irn)) || /* if first operand requested irn must be commutative */ is_ia32_use_frame(irn)) /* must not already use frame */ @@ -725,6 +774,9 @@ static int ia32_possible_memory_operand(const void *self, const ir_node *irn, un } static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) { + const ia32_irn_ops_t *ops = self; + ia32_code_gen_t *cg = ops->cg; + assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change"); if (i == 2) { @@ -737,27 +789,19 @@ static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node set_ia32_op_type(irn, ia32_AddrModeS); set_ia32_am_flavour(irn, ia32_B); set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i))); - //TODO this will fail, if spill is a PhiM (give PhiMs entities?) - set_ia32_frame_ent(irn, be_get_frame_entity(spill)); set_ia32_use_frame(irn); set_ia32_got_reload(irn); set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn))); + set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3)); set_irn_n(irn, 4, spill); - /* - Input at position one is index register, which is NoReg. - We would need cg object to get a real noreg, but we cannot - access it from here. 
- */ - set_irn_n(irn, 3, get_irn_n(irn, 1)); - //FIXME DBG_OPT_AM_S(reload, irn); } static const be_abi_callbacks_t ia32_abi_callbacks = { ia32_abi_init, - free, + ia32_abi_done, ia32_abi_get_between_type, ia32_abi_dont_save_regs, ia32_abi_prologue, @@ -803,9 +847,8 @@ ia32_irn_ops_t ia32_irn_ops = { static void ia32_kill_convs(ia32_code_gen_t *cg) { ir_node *irn; - /* BEWARE: the Projs are inserted in the set */ foreach_nodeset(cg->kill_conv, irn) { - ir_node *in = get_irn_n(get_Proj_pred(irn), 2); + ir_node *in = get_irn_n(irn, 2); edges_reroute(irn, in, cg->birg->irg); } } @@ -823,6 +866,7 @@ static void transform_tls(ir_graph *irg) { newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn)); exchange(irn, newn); + set_irg_tls(irg, newn); } } @@ -832,7 +876,6 @@ static void transform_tls(ir_graph *irg) { */ static void ia32_prepare_graph(void *self) { ia32_code_gen_t *cg = self; - dom_front_info_t *dom; DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;) FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform"); @@ -842,16 +885,15 @@ static void ia32_prepare_graph(void *self) { /* 2nd: transform all remaining nodes */ ia32_register_transformers(); - dom = be_compute_dominance_frontiers(cg->irg); cg->kill_conv = new_nodeset(5); transform_tls(cg->irg); + edges_deactivate(cg->irg); + edges_activate(cg->irg); irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg); ia32_kill_convs(cg); del_nodeset(cg->kill_conv); - be_free_dominance_frontiers(dom); - if (cg->dump) be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched); @@ -962,17 +1004,17 @@ static void ia32_before_ra(void *self) { /** - * Transforms a be node into a Load. + * Transforms a be_Reload into a ia32 Load. */ static void transform_to_Load(ia32_transform_env_t *env) { ir_node *irn = env->irn; - entity *ent = be_get_frame_entity(irn); - ir_mode *mode = env->mode; + ir_entity *ent = be_get_frame_entity(irn); + ir_mode *mode = get_irn_mode(irn); + ir_mode *spillmode = get_spill_mode(env->cg, irn); ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_rd_NoMem(env->irg); ir_node *sched_point = NULL; - ir_node *ptr = get_irn_n(irn, 0); - ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem; + ir_node *ptr = get_irg_frame(env->irg); + ir_node *mem = get_irn_n(irn, be_pos_Reload_mem); ir_node *new_op, *proj; const arch_register_t *reg; @@ -980,26 +1022,25 @@ static void transform_to_Load(ia32_transform_env_t *env) { sched_point = sched_prev(irn); } - if (mode_is_float(mode)) { + if (mode_is_float(spillmode)) { if (USE_SSE2(env->cg)) new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem); else new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); } - else { + else new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem); - } set_ia32_am_support(new_op, ia32_am_Source); set_ia32_op_type(new_op, ia32_AddrModeS); set_ia32_am_flavour(new_op, ia32_B); - set_ia32_ls_mode(new_op, mode); + set_ia32_ls_mode(new_op, spillmode); set_ia32_frame_ent(new_op, ent); set_ia32_use_frame(new_op); DBG_OPT_RELOAD2LD(irn, new_op); - proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res); + proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_ia32_Load_res); if (sched_point) { sched_add_after(sched_point, new_op); @@ -1018,17 +1059,18 @@ static void transform_to_Load(ia32_transform_env_t *env) { } /** - * Transforms a be node into a Store. + * Transforms a be_Spill node into a ia32 Store. 
*/ static void transform_to_Store(ia32_transform_env_t *env) { ir_node *irn = env->irn; - entity *ent = be_get_frame_entity(irn); - ir_mode *mode = env->mode; + ir_entity *ent = be_get_frame_entity(irn); + const ir_node *spillval = get_irn_n(irn, be_pos_Spill_val); + ir_mode *mode = get_spill_mode(env->cg, spillval); ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_node *nomem = new_rd_NoMem(env->irg); - ir_node *ptr = get_irn_n(irn, 0); - ir_node *val = get_irn_n(irn, 1); - ir_node *new_op, *proj; + ir_node *ptr = get_irg_frame(env->irg); + ir_node *val = get_irn_n(irn, be_pos_Spill_val); + ir_node *store; ir_node *sched_point = NULL; if (sched_is_scheduled(irn)) { @@ -1037,74 +1079,71 @@ static void transform_to_Store(ia32_transform_env_t *env) { if (mode_is_float(mode)) { if (USE_SSE2(env->cg)) - new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + store = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); else - new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + store = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); } else if (get_mode_size_bits(mode) == 8) { - new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + store = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); } else { - new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + store = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); } - set_ia32_am_support(new_op, ia32_am_Dest); - set_ia32_op_type(new_op, ia32_AddrModeD); - set_ia32_am_flavour(new_op, ia32_B); - set_ia32_ls_mode(new_op, mode); - set_ia32_frame_ent(new_op, ent); - set_ia32_use_frame(new_op); - - DBG_OPT_SPILL2ST(irn, new_op); + set_ia32_am_support(store, ia32_am_Dest); + set_ia32_op_type(store, ia32_AddrModeD); + set_ia32_am_flavour(store, ia32_B); + set_ia32_ls_mode(store, mode); + set_ia32_frame_ent(store, ent); + set_ia32_use_frame(store); - proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M); + DBG_OPT_SPILL2ST(irn, store); + SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(env->cg, irn)); if (sched_point) { - sched_add_after(sched_point, new_op); + sched_add_after(sched_point, store); sched_remove(irn); } - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn)); - - exchange(irn, proj); + exchange(irn, store); } -static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) { +static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) { ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *frame = get_irg_frame(env->irg); - ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem); + ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, frame, noreg, noreg, sp, mem); set_ia32_frame_ent(push, ent); set_ia32_use_frame(push); set_ia32_op_type(push, ia32_AddrModeS); set_ia32_am_flavour(push, ia32_B); set_ia32_ls_mode(push, mode_Is); - if(offset != NULL) - add_ia32_am_offs(push, offset); sched_add_before(schedpoint, push); return push; } -static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) { - ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem()); +static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, 
ir_node *sp, ir_entity *ent) { + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *frame = get_irg_frame(env->irg); + + ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, frame, noreg, sp, new_NoMem()); set_ia32_frame_ent(pop, ent); set_ia32_use_frame(pop); set_ia32_op_type(pop, ia32_AddrModeD); - set_ia32_am_flavour(pop, ia32_B); + set_ia32_am_flavour(pop, ia32_am_OB); set_ia32_ls_mode(pop, mode_Is); - if(offset != NULL) - add_ia32_am_offs(pop, offset); sched_add_before(schedpoint, pop); return pop; } -static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint, const ir_node *oldsp) { - ir_mode *spmode = get_irn_mode(oldsp); - const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp); +static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint) { + ir_mode *spmode = mode_Iu; + const arch_register_t *spreg = &ia32_gp_regs[REG_ESP]; ir_node *sp; sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos); @@ -1114,15 +1153,15 @@ static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, return sp; } +/** + * Transform memperm, currently we do this the ugly way and produce + * push/pop into/from memory cascades. This is possible without using + * any registers. + */ static void transform_MemPerm(ia32_transform_env_t *env) { - /* - * Transform memperm, currently we do this the ugly way and produce - * push/pop into/from memory cascades. This is possible without using - * any registers. - */ ir_node *node = env->irn; int i, arity; - ir_node *sp = get_irn_n(node, 0); + ir_node *sp = be_abi_get_ignore_irn(env->cg->birg->abi, &ia32_gp_regs[REG_ESP]); const ir_edge_t *edge; const ir_edge_t *next; ir_node **pops; @@ -1132,7 +1171,7 @@ static void transform_MemPerm(ia32_transform_env_t *env) { // create pushs for(i = 0; i < arity; ++i) { - entity *ent = be_get_MemPerm_in_entity(node, i); + ir_entity *ent = be_get_MemPerm_in_entity(node, i); ir_type *enttype = get_entity_type(ent); int entbits = get_type_size_bits(enttype); ir_node *mem = get_irn_n(node, i + 1); @@ -1140,12 +1179,13 @@ static void transform_MemPerm(ia32_transform_env_t *env) { assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); - push = create_push(env, node, sp, mem, ent, NULL); - sp = create_spproj(env, push, 0, node, sp); + push = create_push(env, node, sp, mem, ent); + sp = create_spproj(env, push, 0, node); if(entbits == 64) { // add another push after the first one - push = create_push(env, node, sp, mem, ent, "4"); - sp = create_spproj(env, push, 0, node, sp); + push = create_push(env, node, sp, mem, ent); + add_ia32_am_offs_int(push, 4); + sp = create_spproj(env, push, 0, node); } set_irn_n(node, i, new_Bad()); @@ -1153,7 +1193,7 @@ static void transform_MemPerm(ia32_transform_env_t *env) { // create pops for(i = arity - 1; i >= 0; --i) { - entity *ent = be_get_MemPerm_out_entity(node, i); + ir_entity *ent = be_get_MemPerm_out_entity(node, i); ir_type *enttype = get_entity_type(ent); int entbits = get_type_size_bits(enttype); @@ -1161,15 +1201,14 @@ static void transform_MemPerm(ia32_transform_env_t *env) { assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); - pop = create_pop(env, node, sp, ent, NULL); + pop = create_pop(env, node, sp, ent); if(entbits == 64) { // add another pop after the first one - sp = create_spproj(env, pop, 1, node, sp); - pop = create_pop(env, node, sp, ent, "4"); + sp = 
create_spproj(env, pop, 1, node); + pop = create_pop(env, node, sp, ent); + add_ia32_am_offs_int(pop, 4); } - //if(i != 0) { - sp = create_spproj(env, pop, 1, node, sp); - //} + sp = create_spproj(env, pop, 1, node); pops[i] = pop; } @@ -1193,22 +1232,6 @@ static void transform_MemPerm(ia32_transform_env_t *env) { sched_remove(node); } -/** - * Fix the mode of Spill/Reload - */ -static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode) -{ - if (mode_is_float(mode)) { - if (USE_SSE2(cg)) - mode = mode_D; - else - mode = mode_E; - } - else - mode = mode_Is; - return mode; -} - /** * Block-Walker: Calls the transform functions Spill and Reload. */ @@ -1225,39 +1248,58 @@ static void ia32_after_ra_walker(ir_node *block, void *env) { /* beware: the schedule is changed here */ for (node = sched_last(block); !sched_is_begin(node); node = prev) { prev = sched_prev(node); + tenv.dbg = get_irn_dbg_info(node); + tenv.irn = node; + tenv.mode = get_irn_mode(node); + if (be_is_Reload(node)) { - /* we always reload the whole register */ - tenv.dbg = get_irn_dbg_info(node); - tenv.irn = node; - tenv.mode = fix_spill_mode(cg, get_irn_mode(node)); transform_to_Load(&tenv); - } - else if (be_is_Spill(node)) { - ir_node *spillval = get_irn_n(node, be_pos_Spill_val); - /* we always spill the whole register */ - tenv.dbg = get_irn_dbg_info(node); - tenv.irn = node; - tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval)); + } else if (be_is_Spill(node)) { transform_to_Store(&tenv); - } - else if(be_is_MemPerm(node)) { - tenv.dbg = get_irn_dbg_info(node); - tenv.irn = node; + } else if(be_is_MemPerm(node)) { transform_MemPerm(&tenv); } } } +/** + * Collects nodes that need frame entities assigned. + */ +static void ia32_collect_frame_entity_nodes(ir_node *node, void *data) +{ + be_fec_env_t *env = data; + + if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) { + const ir_mode *mode = get_irn_mode(node); + int align = get_mode_size_bytes(mode); + be_node_needs_frame_entity(env, node, mode, align); + } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL + && is_ia32_use_frame(node)) { + if (is_ia32_Load(node)) { + const ir_mode *mode = get_ia32_ls_mode(node); + int align = get_mode_size_bytes(mode); + be_node_needs_frame_entity(env, node, mode, align); + } else if (is_ia32_vfild(node)) { + const ir_mode *mode = get_ia32_ls_mode(node); + int align = 4; + be_node_needs_frame_entity(env, node, mode, align); + } + } +} + /** * We transform Spill and Reload here. This needs to be done before * stack biasing otherwise we would miss the corrected offset for these nodes. - * - * If x87 instruction should be emitted, run the x87 simulator and patch - * the virtual instructions. This must obviously be done after register allocation. */ static void ia32_after_ra(void *self) { ia32_code_gen_t *cg = self; ir_graph *irg = cg->irg; + be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg); + + /* create and coalesce frame entities */ + irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env); + be_assign_entities(fec_env); + be_free_frame_entity_coalescer(fec_env); irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg); @@ -1265,23 +1307,24 @@ static void ia32_after_ra(void *self) { } /** - * Last touchups for the graph before emit + * Last touchups for the graph before emit: x87 simulation to replace the + * virtual with real x87 instructions, creating a block schedule and peephole + * optimisations. 
*/ static void ia32_finish(void *self) { ia32_code_gen_t *cg = self; ir_graph *irg = cg->irg; - // Matze: disabled for now, as the irextbb algo sometimes returns extbb in - // the wrong order if the graph has critical edges - be_remove_empty_blocks(irg); - - cg->blk_sched = sched_create_block_schedule(cg->irg, cg->birg->execfreqs); - /* if we do x87 code generation, rewrite all the virtual instructions and registers */ if (cg->used_fp == fp_x87 || cg->force_sim) { - x87_simulate_graph(cg->arch_env, irg, cg->blk_sched); + x87_simulate_graph(cg->arch_env, cg->birg); } + /* create block schedule, this also removes empty blocks which might + * produce critical edges */ + cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq); + + /* do peephole optimisations */ ia32_peephole_optimization(irg, cg); } @@ -1305,12 +1348,13 @@ static void ia32_codegen(void *self) { free(cg); } -static void *ia32_cg_init(const be_irg_t *birg); +static void *ia32_cg_init(be_irg_t *birg); static const arch_code_generator_if_t ia32_code_gen_if = { ia32_cg_init, NULL, /* before abi introduce hook */ ia32_prepare_graph, + NULL, /* spill */ ia32_before_sched, /* before scheduling hook */ ia32_before_ra, /* before register allocation hook */ ia32_after_ra, /* after register allocation hook */ @@ -1321,7 +1365,7 @@ static const arch_code_generator_if_t ia32_code_gen_if = { /** * Initializes a IA32 code generator. */ -static void *ia32_cg_init(const be_irg_t *birg) { +static void *ia32_cg_init(be_irg_t *birg) { ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa; ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg)); @@ -1332,8 +1376,6 @@ static void *ia32_cg_init(const be_irg_t *birg) { cg->isa = isa; cg->birg = birg; cg->blk_sched = NULL; - cg->fp_to_gp = NULL; - cg->gp_to_fp = NULL; cg->fp_kind = isa->fp_kind; cg->used_fp = fp_none; cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0; @@ -1349,9 +1391,7 @@ static void *ia32_cg_init(const be_irg_t *birg) { isa->cg = cg; #ifndef NDEBUG - if (isa->name_obst_size) { - //printf("freed %d bytes from name obst\n", isa->name_obst_size); - isa->name_obst_size = 0; + if (isa->name_obst) { obstack_free(isa->name_obst, NULL); obstack_init(isa->name_obst); } @@ -1400,6 +1440,7 @@ static void set_tarval_output_modes(void) } } +const arch_isa_if_t ia32_isa_if; /** * The template that generates a new ISA object. 
@@ -1419,12 +1460,13 @@ static ia32_isa_t ia32_isa_template = { NULL, /* types */ NULL, /* tv_ents */ (0 | - IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */ - IA32_OPT_DOAM | /* optimize address mode default: on */ - IA32_OPT_LEA | /* optimize for LEAs default: on */ - IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */ - IA32_OPT_IMMOPS | /* operations can use immediates, default: on */ - IA32_OPT_EXTBB), /* use extended basic block scheduling, default: on */ + IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */ + IA32_OPT_DOAM | /* optimize address mode default: on */ + IA32_OPT_LEA | /* optimize for LEAs default: on */ + IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */ + IA32_OPT_IMMOPS | /* operations can use immediates, default: on */ + IA32_OPT_EXTBB | /* use extended basic block scheduling, default: on */ + IA32_OPT_PUSHARGS), /* create pushs for function argument passing, default: on */ arch_pentium_4, /* instruction architecture */ arch_pentium_4, /* optimize for architecture */ fp_sse2, /* use sse2 unit */ @@ -1453,6 +1495,7 @@ static void *ia32_init(FILE *file_handle) { ia32_register_init(isa); ia32_create_opcodes(); + ia32_register_copy_attr_func(); if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) || (ARCH_AMD(isa->arch) && isa->arch < arch_athlon)) @@ -1469,26 +1512,24 @@ static void *ia32_init(FILE *file_handle) { isa->types = pmap_create(); isa->tv_ent = pmap_create(); isa->out = file_handle; + isa->cpu = ia32_init_machine_description(); ia32_build_16bit_reg_map(isa->regs_16bit); ia32_build_8bit_reg_map(isa->regs_8bit); /* patch register names of x87 registers */ - if (USE_x87(isa)) { - ia32_st_regs[0].name = "st"; - ia32_st_regs[1].name = "st(1)"; - ia32_st_regs[2].name = "st(2)"; - ia32_st_regs[3].name = "st(3)"; - ia32_st_regs[4].name = "st(4)"; - ia32_st_regs[5].name = "st(5)"; - ia32_st_regs[6].name = "st(6)"; - ia32_st_regs[7].name = "st(7)"; - } + ia32_st_regs[0].name = "st"; + ia32_st_regs[1].name = "st(1)"; + ia32_st_regs[2].name = "st(2)"; + ia32_st_regs[3].name = "st(3)"; + ia32_st_regs[4].name = "st(4)"; + ia32_st_regs[5].name = "st(5)"; + ia32_st_regs[6].name = "st(6)"; + ia32_st_regs[7].name = "st(7)"; #ifndef NDEBUG isa->name_obst = xmalloc(sizeof(*isa->name_obst)); obstack_init(isa->name_obst); - isa->name_obst_size = 0; #endif /* NDEBUG */ ia32_handle_intrinsics(); @@ -1521,7 +1562,6 @@ static void ia32_done(void *self) { pmap_destroy(isa->types); #ifndef NDEBUG - //printf("name obst size = %d bytes\n", isa->name_obst_size); obstack_free(isa->name_obst, NULL); #endif /* NDEBUG */ @@ -1645,7 +1685,7 @@ static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_cal tp = get_method_res_type(method_type, 1); mode = get_type_mode(tp); - assert(!mode_is_float(mode) && "two FP results not supported"); + assert(!mode_is_float(mode) && "mixed INT, FP results not supported"); be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]); be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]); @@ -1657,9 +1697,7 @@ static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_cal assert(is_atomic_type(tp)); mode = get_type_mode(tp); - reg = mode_is_float(mode) ? - (USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) : - &ia32_gp_regs[REG_EAX]; + reg = mode_is_float(mode) ? 
&ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX]; be_abi_call_res_reg(abi, 0, reg); } @@ -1709,6 +1747,10 @@ static const list_sched_selector_t *ia32_get_list_sched_selector(const void *sel return &ia32_sched_selector; } +static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self) { + return NULL; +} + /** * Returns the necessary byte alignment for storing a register of given class. */ @@ -1721,19 +1763,130 @@ static int ia32_get_reg_class_alignment(const void *self, const arch_register_cl return bytes; } -static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL }; +static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void *self, const ir_node *irn) { + static const be_execution_unit_t *_allowed_units_BRANCH[] = { + &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1], + &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2], + NULL, + }; + static const be_execution_unit_t *_allowed_units_ALU[] = { + &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU1], + &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU2], + &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU3], + &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU4], + NULL, + }; + static const be_execution_unit_t *_allowed_units_DUMMY[] = { + &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY1], + &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY2], + &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY3], + &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY4], + NULL, + }; + static const be_execution_unit_t **_units_callret[] = { + _allowed_units_BRANCH, + NULL + }; + static const be_execution_unit_t **_units_other[] = { + _allowed_units_ALU, + NULL + }; + static const be_execution_unit_t **_units_dummy[] = { + _allowed_units_DUMMY, + NULL + }; + const be_execution_unit_t ***ret; + + if (is_ia32_irn(irn)) { + ret = get_ia32_exec_units(irn); + } + else if (is_be_node(irn)) { + if (be_is_Call(irn) || be_is_Return(irn)) { + ret = _units_callret; + } + else if (be_is_Barrier(irn)) { + ret = _units_dummy; + } + else { + ret = _units_other; + } + } + else { + ret = _units_dummy; + } + + return ret; +} + +/** + * Return the abstract ia32 machine. + */ +static const be_machine_t *ia32_get_machine(const void *self) { + const ia32_isa_t *isa = self; + return isa->cpu; +} + +/** + * Allows or disallows the creation of Psi nodes for the given Phi nodes. 
+ * @return 1 if allowed, 0 otherwise
+ */
+static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
+{
+	ir_node *cmp, *cmp_a, *phi;
+	ir_mode *mode;
+
+/* we don't want long long and floating point Psi */
+#define IS_BAD_PSI_MODE(mode) (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
+
+	if (get_irn_mode(sel) != mode_b)
+		return 0;
+
+	cmp   = get_Proj_pred(sel);
+	cmp_a = get_Cmp_left(cmp);
+	mode  = get_irn_mode(cmp_a);
+
+	if (IS_BAD_PSI_MODE(mode))
+		return 0;
+
+	/* check the Phi nodes */
+	for (phi = phi_list; phi; phi = get_irn_link(phi)) {
+		ir_node *pred_i = get_irn_n(phi, i);
+		ir_node *pred_j = get_irn_n(phi, j);
+		ir_mode *mode_i = get_irn_mode(pred_i);
+		ir_mode *mode_j = get_irn_mode(pred_j);
+
+		if (IS_BAD_PSI_MODE(mode_i) || IS_BAD_PSI_MODE(mode_j))
+			return 0;
+	}
+
+#undef IS_BAD_PSI_MODE
+
+	return 1;
+}
+
+static ia32_intrinsic_env_t intrinsic_env = {
+	NULL,    /**< the irg these entities belong to */
+	NULL,    /**< entity for first div operand (move into FPU) */
+	NULL,    /**< entity for second div operand (move into FPU) */
+	NULL,    /**< entity for converts ll -> d */
+	NULL,    /**< entity for converts d -> ll */
+};
 
 /**
  * Returns the libFirm configuration parameter for this backend.
  */
 static const backend_params *ia32_get_libfirm_params(void) {
+	static const opt_if_conv_info_t ifconv = {
+		4,                    /* maxdepth, doesn't matter for Psi-conversion */
+		ia32_is_psi_allowed   /* allows or disallows Psi creation for the given selector */
+	};
 	static const arch_dep_params_t ad = {
-		1, /* also use subs */
-		4, /* maximum shifts */
+		1,  /* also use subs */
+		4,  /* maximum shifts */
 		31, /* maximum shift amount */
-		1, /* allow Mulhs */
-		1, /* allow Mulus */
+		1,  /* allow Mulhs */
+		1,  /* allow Mulus */
 		32  /* Mulh allowed up to 32 bit */
 	};
 	static backend_params p = {
@@ -1742,9 +1895,11 @@ static const backend_params *ia32_get_libfirm_params(void) {
 		1,  /* need dword lowering */
 		ia32_create_intrinsic_fkt,
 		&intrinsic_env,  /* context for ia32_create_intrinsic_fkt */
+		NULL,  /* will be set later */
 	};
 
-	p.dep_param = &ad;
+	p.dep_param    = &ad;
+	p.if_conv_info = &ifconv;
 	return &p;
 }
 #ifdef WITH_LIBCORE
@@ -1811,31 +1966,10 @@ static const lc_opt_table_entry_t ia32_options[] = {
 	LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
 	LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
 	LC_OPT_ENT_NEGBIT("noextbb", "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
+	LC_OPT_ENT_NEGBIT("nopushargs", "do not create pushes for function arguments", &ia32_isa_template.opt, IA32_OPT_PUSHARGS),
 	LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
 	{ NULL }
 };
-
-/**
- * Register command line options for the ia32 backend. 
- * - * Options so far: - * - * ia32-arch=arch create instruction for arch - * ia32-opt=arch optimize for run on arch - * ia32-fpunit=unit select floating point unit (x87 or SSE2) - * ia32-incdec optimize for inc/dec - * ia32-noaddrmode do not use address mode - * ia32-nolea do not optimize for LEAs - * ia32-noplacecnst do not place constants, - * ia32-noimmop no operations with immediates - * ia32-noextbb do not use extended basic block scheduling - * ia32-gasmode set the GAS compatibility mode - */ -static void ia32_register_options(lc_opt_entry_t *ent) -{ - lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32"); - lc_opt_add_table(be_grp_ia32, ia32_options); -} #endif /* WITH_LIBCORE */ const arch_isa_if_t ia32_isa_if = { @@ -1848,9 +1982,20 @@ const arch_isa_if_t ia32_isa_if = { ia32_get_irn_handler, ia32_get_code_generator_if, ia32_get_list_sched_selector, + ia32_get_ilp_sched_selector, ia32_get_reg_class_alignment, ia32_get_libfirm_params, -#ifdef WITH_LIBCORE - ia32_register_options -#endif + ia32_get_allowed_execution_units, + ia32_get_machine, }; + +void be_init_arch_ia32(void) +{ + lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be"); + lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32"); + + lc_opt_add_table(ia32_grp, ia32_options); + be_register_isa_if("ia32", &ia32_isa_if); +} + +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);
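
The be_init_arch_ia32()/BE_REGISTER_MODULE_CONSTRUCTOR pair at the end of this
patch replaces the old WITH_LIBCORE-only ia32_register_options() hook: the
backend now hangs its option table into the "be" option group and registers its
arch_isa_if_t by name when the module constructor runs. A minimal sketch of the
same registration pattern for some other backend would look like the following
(the backend name "foo", its foo_options table and foo_isa_if are hypothetical
placeholders; only lc_opt_get_grp(), lc_opt_add_table(), be_register_isa_if()
and BE_REGISTER_MODULE_CONSTRUCTOR are taken from the patch itself):

	/* sketch only: "foo" stands for any backend following the ia32 example */
	static const lc_opt_table_entry_t foo_options[] = {
		/* backend-specific option entries would go here */
		{ NULL }
	};

	extern const arch_isa_if_t foo_isa_if;  /* hypothetical ISA interface */

	void be_init_arch_foo(void)
	{
		/* hang the option group under "be", as be_init_arch_ia32 does */
		lc_opt_entry_t *be_grp  = lc_opt_get_grp(firm_opt_get_root(), "be");
		lc_opt_entry_t *foo_grp = lc_opt_get_grp(be_grp, "foo");

		lc_opt_add_table(foo_grp, foo_options);  /* expose be.foo options */
		be_register_isa_if("foo", &foo_isa_if);  /* make the ISA selectable */
	}

	BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_foo);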