X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fbearch_ia32.c;h=699df5cded667aab7b1718d0b61bf31228a8259f;hb=6981dd3274e6753e50f66c8cbe17b37bd41708e5;hp=3148224d7af0965038d32d9b60691852c45c79e5;hpb=719c4b4724a3f7deaeac0e4b484bdb504e5293b7;p=libfirm

diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c
index 3148224d7..699df5cde 100644
--- a/ir/be/ia32/bearch_ia32.c
+++ b/ir/be/ia32/bearch_ia32.c
@@ -3,9 +3,8 @@
  * @author Christian Wuerdig
  * $Id$
  */
-
 #ifdef HAVE_CONFIG_H
-#include "config.h"
+#include <config.h>
 #endif
 
 #ifdef HAVE_MALLOC_H
@@ -16,10 +15,8 @@
 #include <alloca.h>
 #endif
 
-#ifdef WITH_LIBCORE
 #include <libcore/lc_opts.h>
 #include <libcore/lc_opts_enum.h>
-#endif /* WITH_LIBCORE */
 
 #include <math.h>
 
@@ -48,6 +45,7 @@
 #include "../bemachine.h"
 #include "../beilpsched.h"
 #include "../bespillslots.h"
+#include "../bemodule.h"
 
 #include "bearch_ia32_t.h"
 
@@ -69,17 +67,75 @@
 /* TODO: ugly */
 static set *cur_reg_set = NULL;
 
+typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
+
+static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
+                                    create_const_node_func func, arch_register_t* reg)
+{
+	ir_node *block, *res;
+	ir_node *in[1];
+	ir_node *startnode;
+	ir_node *keep;
+
+	if(*place != NULL)
+		return *place;
+
+	block = get_irg_start_block(cg->irg);
+	res = func(NULL, cg->irg, block);
+	arch_set_irn_register(cg->arch_env, res, reg);
+	*place = res;
+
+	/* keep the node so it isn't accidentally removed when unused ... */
+	in[0] = res;
+	keep = be_new_Keep(arch_register_get_class(reg), cg->irg, block, 1, in);
+
+	/* schedule the node if we already have a scheduled program */
+	startnode = get_irg_start(cg->irg);
+	if(sched_is_scheduled(startnode)) {
+		sched_add_after(startnode, res);
+		sched_add_after(res, keep);
+	}
+
+	return res;
+}
+
 /* Creates the unique per irg GP NoReg node. */
 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
-	return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
+	return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
+	                    &ia32_gp_regs[REG_GP_NOREG]);
+}
+
+ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
+	return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
+	                    &ia32_vfp_regs[REG_VFP_NOREG]);
+}
+
+ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
+	return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
+	                    &ia32_xmm_regs[REG_XMM_NOREG]);
 }
 
 /* Creates the unique per irg FP NoReg node. */
 ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
-	return be_abi_get_callee_save_irn(cg->birg->abi,
-		USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
+	return USE_SSE2(cg) ? ia32_new_NoReg_xmm(cg) : ia32_new_NoReg_vfp(cg);
 }
 
+ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
+	return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
+	                    &ia32_gp_regs[REG_GP_UKNWN]);
+}
+
+ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
+	return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
+	                    &ia32_vfp_regs[REG_VFP_UKNWN]);
+}
+
+ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
+	return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
+	                    &ia32_xmm_regs[REG_XMM_UKNWN]);
+}
+
+
 /**
  * Returns gp_noreg or fp_noreg, depending on input requirements.
 */
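The hunk above replaces the old be_abi_get_callee_save_irn() lookups with lazily created per-irg constants: create_const() caches each NoReg/Unknown node in its slot in ia32_code_gen_t, attaches a be_Keep so the otherwise unused node survives dead-node elimination, and, if a schedule already exists, places both the node and its Keep right after the Start node. The caching half of that pattern, reduced to a standalone C sketch (all names here, such as context_t and get_or_create, are hypothetical and not libfirm API):

#include <stdio.h>

typedef struct node_t { int id; } node_t;
typedef node_t *(*factory_fn)(void);

/* stands in for ia32_code_gen_t: one cache slot per distinct constant */
typedef struct context_t {
    node_t *noreg_gp;
} context_t;

static node_t *get_or_create(context_t *ctx, node_t **slot, factory_fn make)
{
    (void)ctx;             /* a real version would also pin and schedule here */
    if (*slot != NULL)     /* already built for this context: reuse it */
        return *slot;
    *slot = make();        /* build exactly once per context */
    return *slot;
}

static node_t *make_noreg(void)
{
    static node_t n = { 42 };
    return &n;
}

int main(void)
{
    context_t ctx = { NULL };
    node_t *a = get_or_create(&ctx, &ctx.noreg_gp, make_noreg);
    node_t *b = get_or_create(&ctx, &ctx.noreg_gp, make_noreg);
    printf("same node: %s\n", a == b ? "yes" : "no"); /* prints "yes" */
    return 0;
}

The Keep edge in the real code matters because a NoReg constant usually has no data users at creation time; without an artificial user, the next dead-node sweep could delete the node while cg->noreg_gp still points at it.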
@@ -266,25 +322,30 @@ static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
 	if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
 		classification |= arch_irn_class_store;
 
-	if (is_ia32_got_reload(irn))
+	if (is_ia32_need_stackent(irn))
 		classification |= arch_irn_class_reload;
 
 	return classification;
 }
 
 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
-	arch_irn_flags_t flags;
-	ir_node          *pred = is_Proj(irn) && mode_is_datab(get_irn_mode(irn)) ? get_Proj_pred(irn) : NULL;
+	arch_irn_flags_t flags = arch_irn_flags_none;
 
 	if (is_Unknown(irn))
-		flags = arch_irn_flags_ignore;
-	else {
-		/* pred is only set, if we have a Proj */
-		flags = pred && is_ia32_irn(pred) ? get_ia32_out_flags(pred, get_Proj_proj(irn)) : arch_irn_flags_none;
+		return arch_irn_flags_ignore;
 
-		irn = skip_Proj_const(irn);
-		if (is_ia32_irn(irn))
-			flags |= get_ia32_flags(irn);
+	if(is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
+		ir_node *pred = get_Proj_pred(irn);
+
+		if(is_ia32_irn(pred)) {
+			flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
+		}
+
+		irn = pred;
+	}
+
+	if (is_ia32_irn(irn)) {
+		flags |= get_ia32_flags(irn);
 	}
 
 	return flags;
@@ -312,7 +373,9 @@ static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
 	const ia32_irn_ops_t *ops = self;
 
 	if (get_ia32_frame_ent(irn)) {
-		if(is_ia32_Pop(irn)) {
+		ia32_am_flavour_t am_flav;
+
+		if (is_ia32_Pop(irn)) {
 			int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
 			if (omit_fp) {
 				/* Pop nodes modify the stack pointer before calculating the destination
@@ -324,18 +387,11 @@ static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
 
 		DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
 
-		if (get_ia32_op_type(irn) == ia32_Normal) {
-			// Matze: When does this case happen?
- char buf[64]; - snprintf(buf, sizeof(buf), "%d", bias); - set_ia32_cnst(irn, buf); - } else { - ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn); - am_flav |= ia32_O; - set_ia32_am_flavour(irn, am_flav); + am_flav = get_ia32_am_flavour(irn); + am_flav |= ia32_O; + set_ia32_am_flavour(irn, am_flav); - add_ia32_am_offs_int(irn, bias); - } + add_ia32_am_offs_int(irn, bias); } } @@ -366,6 +422,161 @@ static void ia32_abi_dont_save_regs(void *self, pset *s) pset_insert_ptr(s, env->isa->bp); } +#if 0 +static unsigned count_callee_saves(ia32_code_gen_t *cg) +{ + unsigned callee_saves = 0; + int c, num_reg_classes; + arch_isa_if_t *isa; + + num_reg_classes = arch_isa_get_n_reg_class(isa); + for(c = 0; c < num_reg_classes; ++c) { + int r, num_registers; + arch_register_class_t *regclass = arch_isa_get_reg_class(isa, c); + + num_registers = arch_register_class_n_regs(regclass); + for(r = 0; r < num_registers; ++r) { + arch_register_t *reg = arch_register_for_index(regclass, r); + if(arch_register_type_is(reg, callee_save)) + callee_saves++; + } + } + + return callee_saves; +} + +static void create_callee_save_regprojs(ia32_code_gen_t *cg, ir_node *regparams) +{ + int c, num_reg_classes; + arch_isa_if_t *isa; + long n = 0; + + num_reg_classes = arch_isa_get_n_reg_class(isa); + cg->initial_regs = obstack_alloc(cg->obst, + num_reg_classes * sizeof(cg->initial_regs[0])); + + for(c = 0; c < num_reg_classes; ++c) { + int r, num_registers; + ir_node **initial_regclass; + arch_register_class_t *regclass = arch_isa_get_reg_class(isa, c); + + num_registers = arch_register_class_n_regs(regclass); + initial_regclass = obstack_alloc(num_registers * sizeof(initial_regclass[0])); + for(r = 0; r < num_registers; ++r) { + ir_node *proj; + arch_register_t *reg = arch_register_for_index(regclass, r); + if(!arch_register_type_is(reg, callee_save)) + continue; + + proj = new_r_Proj(irg, start_block, regparams, n); + be_set_constr_single_reg(regparams, n, reg); + arch_set_irn_register(cg->arch_env, proj, reg); + + initial_regclass[r] = proj; + n++; + } + cg->initial_regs[c] = initial_regclass; + } +} + +static void callee_saves_obstack_grow(ia32_code_gen_t *cg) +{ + int c, num_reg_classes; + arch_isa_if_t *isa; + + for(c = 0; c < num_reg_classes; ++c) { + int r, num_registers; + + num_registers = arch_register_class_n_regs(regclass); + for(r = 0; r < num_registers; ++r) { + ir_node *proj; + arch_register_t *reg = arch_register_for_index(regclass, r); + if(!arch_register_type_is(reg, callee_save)) + continue; + + proj = cg->initial_regs[c][r]; + obstack_ptr_grow(cg->obst, proj); + } + } +} + +static unsigned count_parameters_in_regs(ia32_code_gen_t *cg) +{ + return 0; +} + +static void ia32_gen_prologue(ia32_code_gen_t *cg) +{ + ir_graph *irg = cg->irg; + ir_node *start_block = get_irg_start_block(irg); + ir_node *sp; + ir_node *regparams; + int n_regparams_out; + + /* Create the regparams node */ + n_regparams_out = count_callee_saves(cg) + count_parameters_in_regs(cg); + regparams = be_new_RegParams(irg, start_block, n_regparams_out); + + create_callee_save_regprojs(cg, regparams); + + /* Setup the stack */ + if(!omit_fp) { + ir_node *bl = get_irg_start_block(env->irg); + ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); + ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *push; + + /* push ebp */ + push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem); + curr_sp = new_r_Proj(env->irg, bl, push, 
get_irn_mode(curr_sp), pn_ia32_Push_stack); + *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M); + + /* the push must have SP out register */ + arch_set_irn_register(env->aenv, curr_sp, env->isa->sp); + set_ia32_flags(push, arch_irn_flags_ignore); + + /* move esp to ebp */ + curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp); + be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp); + arch_set_irn_register(env->aenv, curr_bp, env->isa->bp); + be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore); + + /* beware: the copy must be done before any other sp use */ + curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp)); + be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp); + arch_set_irn_register(env->aenv, curr_sp, env->isa->sp); + be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore); + + be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp); + be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp); + } + + sp = be_new_IncSP(sp, irg, start_block, initialsp, BE_STACK_FRAME_SIZE_EXPAND); + set_irg_frame(irg, sp); +} + +static void ia32_gen_epilogue(ia32_code_gen_t *cg) +{ + int n_callee_saves = count_callee_saves(cg); + int n_results_regs = 0; + int barrier_size; + ir_node *barrier; + ir_node *end_block = get_irg_end_block(irg); + ir_node **in; + + /* We have to make sure that all reloads occur before the stack frame + gets destroyed, so we create a barrier for all callee-save and return + values here */ + barrier_size = n_callee_saves + n_results_regs; + barrier = be_new_Barrier(irg, end_block, barrier_size, + + /* simply remove the stack frame here */ + curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK); + add_irn_dep(curr_sp, *mem); +} +#endif + /** * Generate the routine prologue. * @@ -380,12 +591,14 @@ static void ia32_abi_dont_save_regs(void *self, pset *s) static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map) { ia32_abi_env_t *env = self; + const ia32_isa_t *isa = (ia32_isa_t *)env->isa; + ia32_code_gen_t *cg = isa->cg; if (! 
env->flags.try_omit_fp) {
 		ir_node *bl      = get_irg_start_block(env->irg);
 		ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
 		ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
-		ir_node *noreg   = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
+		ir_node *noreg = ia32_new_NoReg_gp(cg);
 		ir_node *push;
 
 		/* push ebp */
@@ -438,9 +651,9 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_
 		/* simply remove the stack frame here */
 		curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
 		add_irn_dep(curr_sp, *mem);
-	}
-	else {
+	} else {
 		const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
+		ia32_code_gen_t *cg = isa->cg;
 		ir_mode *mode_bp = env->isa->bp->reg_class->mode;
 
 		/* gcc always emits a leave at the end of a routine */
@@ -452,10 +665,8 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_
 			set_ia32_flags(leave, arch_irn_flags_ignore);
 			curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
 			curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
-			*mem    = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
-		}
-		else {
-			ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
+		} else {
+			ir_node *noreg = ia32_new_NoReg_gp(cg);
 			ir_node *pop;
 
 			/* copy ebp to esp */
@@ -466,7 +677,8 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_
 			set_ia32_flags(pop, arch_irn_flags_ignore);
 			curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
 			curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
-			*mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
+
+			*mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
 		}
 		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
 		arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
@@ -520,8 +732,8 @@ static ir_type *ia32_abi_get_between_type(void *self)
 		ir_entity *ret_addr_ent;
 		ir_entity *omit_fp_ret_addr_ent;
 
-		ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_P);
-		ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
+		ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_Iu);
+		ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
 
 		between_type = new_type_struct(IDENT("ia32_between_type"));
 		old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
@@ -621,7 +833,7 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 	irg   = get_irn_irg(irn);
 	block = get_nodes_block(irn);
 
-	mode     = get_ia32_res_mode(irn);
+	mode     = get_irn_mode(irn);
 	irn_mode = get_irn_mode(irn);
 	noreg    = get_irn_n(irn, 0);
 	nomem    = new_r_NoMem(irg);
@@ -637,7 +849,7 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 			if (get_ia32_immop_type(irn) == ia32_ImmConst) {
 				/* we have an add with a const here */
 				/* inverse == add with negated const */
-				inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
+				inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
 				inverse->costs   += 1;
 				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
 				set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
@@ -646,16 +858,13 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 			else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
 				/* we have an add with a symconst here */
 				/* inverse == sub with const */
-				inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
+				inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
 				inverse->costs   += 2;
 				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
 			}
 			else {
 				/* normal add: inverse == sub */
-				ir_node *proj = ia32_get_res_proj(irn);
-				assert(proj);
-
-				inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem, irn_mode);
+				inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem);
 				inverse->costs   += 2;
 			}
 			break;
@@ -663,50 +872,41 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 		case iro_ia32_Sub:
 			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
 				/* we have a sub with a const/symconst here */
 				/* inverse == add with this const */
-				inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
+				inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
 				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
 				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
 			}
 			else {
 				/* normal sub */
-				ir_node *proj = ia32_get_res_proj(irn);
-				assert(proj);
-
 				if (i == 2) {
-					inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem, irn_mode);
+					inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem);
 				} else {
-					inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem, irn_mode);
+					inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem);
 				}
 				inverse->costs += 1;
 			}
 			break;
-		case iro_ia32_Eor:
+		case iro_ia32_Xor:
 			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
 				/* xor with const: inverse = xor */
-				inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
+				inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
 				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 
5 : 1; copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn); } else { /* normal xor */ - inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem, irn_mode); + inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem); inverse->costs += 1; } break; case iro_ia32_Not: { - ir_node *proj = ia32_get_res_proj(irn); - assert(proj); - - inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, proj, nomem, irn_mode); + inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem); inverse->costs += 1; break; } - case iro_ia32_Minus: { - ir_node *proj = ia32_get_res_proj(irn); - assert(proj); - - inverse->nodes[0] = new_rd_ia32_Minus(dbg, irg, block, noreg, noreg, proj, nomem, irn_mode); + case iro_ia32_Neg: { + inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem); inverse->costs += 1; break; } @@ -715,33 +915,24 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in return NULL; } - set_ia32_res_mode(inverse->nodes[0], mode); - return inverse; } +static ir_mode *get_spill_mode_mode(const ir_mode *mode) +{ + if(mode_is_float(mode)) + return mode_D; + + return mode_Iu; +} + /** * Get the mode that should be used for spilling value node */ -static ir_mode *get_spill_mode(ia32_code_gen_t *cg, const ir_node *node) +static ir_mode *get_spill_mode(const ir_node *node) { ir_mode *mode = get_irn_mode(node); - if (mode_is_float(mode)) { -#if 0 - // super exact spilling... - if (USE_SSE2(cg)) - return mode_D; - else - return mode_E; -#else - return mode; -#endif - } - else - return mode_Is; - - assert(0); - return mode; + return get_spill_mode_mode(mode); } /** @@ -765,11 +956,9 @@ static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spil * @return Non-Zero if operand can be loaded */ static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) { - const ia32_irn_ops_t *ops = self; - ia32_code_gen_t *cg = ops->cg; ir_node *op = get_irn_n(irn, i); const ir_mode *mode = get_irn_mode(op); - const ir_mode *spillmode = get_spill_mode(cg, op); + const ir_mode *spillmode = get_spill_mode(op); if (! is_ia32_irn(irn) || /* must be an ia32 irn */ get_irn_arity(irn) != 5 || /* must be a binary operation */ @@ -801,7 +990,7 @@ static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node set_ia32_am_flavour(irn, ia32_B); set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i))); set_ia32_use_frame(irn); - set_ia32_got_reload(irn); + set_ia32_need_stackent(irn); set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn))); set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3)); @@ -855,31 +1044,6 @@ ia32_irn_ops_t ia32_irn_ops = { * |___/ **************************************************/ -static void ia32_kill_convs(ia32_code_gen_t *cg) { - ir_node *irn; - - foreach_nodeset(cg->kill_conv, irn) { - ir_node *in = get_irn_n(irn, 2); - edges_reroute(irn, in, cg->birg->irg); - } -} - -/** - * Transform the Thread Local Store base. 
- */ -static void transform_tls(ir_graph *irg) { - ir_node *irn = get_irg_tls(irg); - - if (irn) { - dbg_info *dbg = get_irn_dbg_info(irn); - ir_node *blk = get_nodes_block(irn); - ir_node *newn; - newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn)); - - exchange(irn, newn); - } -} - /** * Transforms the standard firm graph into * an ia32 firm graph @@ -890,19 +1054,16 @@ static void ia32_prepare_graph(void *self) { FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform"); - /* 1st: transform constants and psi condition trees */ + /* 1st: transform psi condition trees */ ia32_pre_transform_phase(cg); /* 2nd: transform all remaining nodes */ - ia32_register_transformers(); - - cg->kill_conv = new_nodeset(5); - transform_tls(cg->irg); - edges_deactivate(cg->irg); - edges_activate(cg->irg); - irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg); - ia32_kill_convs(cg); - del_nodeset(cg->kill_conv); + ia32_transform_graph(cg); + // Matze: disabled for now. Because after transformation start block has no + // self-loop anymore so it might be merged with its successor block. This + // will bring several nodes to the startblock which sometimes get scheduled + // before the initial IncSP/Barrier + //local_optimize_graph(cg->irg); if (cg->dump) be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched); @@ -1016,30 +1177,32 @@ static void ia32_before_ra(void *self) { /** * Transforms a be_Reload into a ia32 Load. */ -static void transform_to_Load(ia32_transform_env_t *env) { - ir_node *irn = env->irn; - ir_entity *ent = be_get_frame_entity(irn); - ir_mode *mode = get_irn_mode(irn); - ir_mode *spillmode = get_spill_mode(env->cg, irn); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); +static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) { + ir_graph *irg = get_irn_irg(node); + dbg_info *dbg = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_entity *ent = be_get_frame_entity(node); + ir_mode *mode = get_irn_mode(node); + ir_mode *spillmode = get_spill_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(cg); ir_node *sched_point = NULL; - ir_node *ptr = get_irg_frame(env->irg); - ir_node *mem = get_irn_n(irn, be_pos_Reload_mem); + ir_node *ptr = get_irg_frame(irg); + ir_node *mem = get_irn_n(node, be_pos_Reload_mem); ir_node *new_op, *proj; const arch_register_t *reg; - if (sched_is_scheduled(irn)) { - sched_point = sched_prev(irn); + if (sched_is_scheduled(node)) { + sched_point = sched_prev(node); } if (mode_is_float(spillmode)) { - if (USE_SSE2(env->cg)) - new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem); + if (USE_SSE2(cg)) + new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem); else - new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); + new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem); } else - new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem); + new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem); set_ia32_am_support(new_op, ia32_am_Source); set_ia32_op_type(new_op, ia32_AddrModeS); @@ -1048,56 +1211,58 @@ static void transform_to_Load(ia32_transform_env_t *env) { set_ia32_frame_ent(new_op, ent); set_ia32_use_frame(new_op); - DBG_OPT_RELOAD2LD(irn, new_op); + DBG_OPT_RELOAD2LD(node, new_op); - proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_ia32_Load_res); + proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res); if (sched_point) { sched_add_after(sched_point, new_op); sched_add_after(new_op, proj); - sched_remove(irn); 
+ sched_remove(node); } /* copy the register from the old node to the new Load */ - reg = arch_get_irn_register(env->cg->arch_env, irn); - arch_set_irn_register(env->cg->arch_env, new_op, reg); + reg = arch_get_irn_register(cg->arch_env, node); + arch_set_irn_register(cg->arch_env, new_op, reg); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn)); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); - exchange(irn, proj); + exchange(node, proj); } /** * Transforms a be_Spill node into a ia32 Store. */ -static void transform_to_Store(ia32_transform_env_t *env) { - ir_node *irn = env->irn; - ir_entity *ent = be_get_frame_entity(irn); - const ir_node *spillval = get_irn_n(irn, be_pos_Spill_val); - ir_mode *mode = get_spill_mode(env->cg, spillval); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_rd_NoMem(env->irg); - ir_node *ptr = get_irg_frame(env->irg); - ir_node *val = get_irn_n(irn, be_pos_Spill_val); +static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) { + ir_graph *irg = get_irn_irg(node); + dbg_info *dbg = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_entity *ent = be_get_frame_entity(node); + const ir_node *spillval = get_irn_n(node, be_pos_Spill_val); + ir_mode *mode = get_spill_mode(spillval); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *nomem = new_rd_NoMem(irg); + ir_node *ptr = get_irg_frame(irg); + ir_node *val = get_irn_n(node, be_pos_Spill_val); ir_node *store; ir_node *sched_point = NULL; - if (sched_is_scheduled(irn)) { - sched_point = sched_prev(irn); + if (sched_is_scheduled(node)) { + sched_point = sched_prev(node); } if (mode_is_float(mode)) { - if (USE_SSE2(env->cg)) - store = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + if (USE_SSE2(cg)) + store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, val, nomem); else - store = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, val, nomem); } else if (get_mode_size_bits(mode) == 8) { - store = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, val, nomem); } else { - store = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem); + store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, val, nomem); } set_ia32_am_support(store, ia32_am_Dest); @@ -1107,22 +1272,25 @@ static void transform_to_Store(ia32_transform_env_t *env) { set_ia32_frame_ent(store, ent); set_ia32_use_frame(store); - DBG_OPT_SPILL2ST(irn, store); - SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(env->cg, irn)); + DBG_OPT_SPILL2ST(node, store); + SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node)); if (sched_point) { sched_add_after(sched_point, store); - sched_remove(irn); + sched_remove(node); } - exchange(irn, store); + exchange(node, store); } -static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) { - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *frame = get_irg_frame(env->irg); +static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) { + ir_graph *irg = get_irn_irg(node); + dbg_info *dbg = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *frame = get_irg_frame(irg); - ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, 
env->block, frame, noreg, noreg, sp, mem); + ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, noreg, sp, mem); set_ia32_frame_ent(push, ent); set_ia32_use_frame(push); @@ -1134,11 +1302,14 @@ static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_n return push; } -static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_entity *ent) { - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *frame = get_irg_frame(env->irg); +static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) { + ir_graph *irg = get_irn_irg(node); + dbg_info *dbg = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *frame = get_irg_frame(irg); - ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, frame, noreg, sp, new_NoMem()); + ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, sp, new_NoMem()); set_ia32_frame_ent(pop, ent); set_ia32_use_frame(pop); @@ -1151,13 +1322,16 @@ static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_no return pop; } -static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint) { +static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos, ir_node *schedpoint) { + ir_graph *irg = get_irn_irg(node); + dbg_info *dbg = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); ir_mode *spmode = mode_Iu; const arch_register_t *spreg = &ia32_gp_regs[REG_ESP]; ir_node *sp; - sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos); - arch_set_irn_register(env->cg->arch_env, sp, spreg); + sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos); + arch_set_irn_register(cg->arch_env, sp, spreg); sched_add_before(schedpoint, sp); return sp; @@ -1168,10 +1342,13 @@ static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, * push/pop into/from memory cascades. This is possible without using * any registers. 
*/ -static void transform_MemPerm(ia32_transform_env_t *env) { - ir_node *node = env->irn; +static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) { + ir_graph *irg = get_irn_irg(node); + ir_node *block = get_nodes_block(node); + ir_node *in[1]; + ir_node *keep; int i, arity; - ir_node *sp = be_abi_get_ignore_irn(env->cg->birg->abi, &ia32_gp_regs[REG_ESP]); + ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]); const ir_edge_t *edge; const ir_edge_t *next; ir_node **pops; @@ -1189,13 +1366,13 @@ static void transform_MemPerm(ia32_transform_env_t *env) { assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); - push = create_push(env, node, sp, mem, ent); - sp = create_spproj(env, push, 0, node); + push = create_push(cg, node, node, sp, mem, ent); + sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node); if(entbits == 64) { // add another push after the first one - push = create_push(env, node, sp, mem, ent); + push = create_push(cg, node, node, sp, mem, ent); add_ia32_am_offs_int(push, 4); - sp = create_spproj(env, push, 0, node); + sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node); } set_irn_n(node, i, new_Bad()); @@ -1211,18 +1388,23 @@ static void transform_MemPerm(ia32_transform_env_t *env) { assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); - pop = create_pop(env, node, sp, ent); + pop = create_pop(cg, node, node, sp, ent); + sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node); if(entbits == 64) { - // add another pop after the first one - sp = create_spproj(env, pop, 1, node); - pop = create_pop(env, node, sp, ent); add_ia32_am_offs_int(pop, 4); + + // add another pop after the first one + pop = create_pop(cg, node, node, sp, ent); + sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node); } - sp = create_spproj(env, pop, 1, node); pops[i] = pop; } + in[0] = sp; + keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in); + sched_add_before(node, keep); + // exchange memprojs foreach_out_edge_safe(node, edge, next) { ir_node *proj = get_edge_src_irn(edge); @@ -1248,26 +1430,17 @@ static void transform_MemPerm(ia32_transform_env_t *env) { static void ia32_after_ra_walker(ir_node *block, void *env) { ir_node *node, *prev; ia32_code_gen_t *cg = env; - ia32_transform_env_t tenv; - - tenv.block = block; - tenv.irg = current_ir_graph; - tenv.cg = cg; - DEBUG_ONLY(tenv.mod = cg->mod;) /* beware: the schedule is changed here */ for (node = sched_last(block); !sched_is_begin(node); node = prev) { prev = sched_prev(node); - tenv.dbg = get_irn_dbg_info(node); - tenv.irn = node; - tenv.mode = get_irn_mode(node); if (be_is_Reload(node)) { - transform_to_Load(&tenv); + transform_to_Load(cg, node); } else if (be_is_Spill(node)) { - transform_to_Store(&tenv); + transform_to_Store(cg, node); } else if(be_is_MemPerm(node)) { - transform_MemPerm(&tenv); + transform_MemPerm(cg, node); } } } @@ -1280,18 +1453,33 @@ static void ia32_collect_frame_entity_nodes(ir_node *node, void *data) be_fec_env_t *env = data; if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) { - const ir_mode *mode = get_irn_mode(node); + const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node)); int align = get_mode_size_bytes(mode); be_node_needs_frame_entity(env, node, mode, align); - } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL) { - if (is_ia32_Load(node)) { + } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL + && is_ia32_use_frame(node)) { + 
if (is_ia32_need_stackent(node) || is_ia32_Load(node)) { const ir_mode *mode = get_ia32_ls_mode(node); int align = get_mode_size_bytes(mode); be_node_needs_frame_entity(env, node, mode, align); - } else if (is_ia32_vfild(node)) { + } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)) { const ir_mode *mode = get_ia32_ls_mode(node); int align = 4; be_node_needs_frame_entity(env, node, mode, align); + } else if (is_ia32_SetST0(node)) { + const ir_mode *mode = get_ia32_ls_mode(node); + int align = 4; + be_node_needs_frame_entity(env, node, mode, align); + } else { +#ifndef NDEBUG + if(!is_ia32_Store(node) + && !is_ia32_xStore(node) + && !is_ia32_xStoreSimple(node) + && !is_ia32_vfist(node) + && !is_ia32_GetST0(node)) { + assert(0); + } +#endif } } } @@ -1345,7 +1533,7 @@ static void ia32_codegen(void *self) { ia32_code_gen_t *cg = self; ir_graph *irg = cg->irg; - ia32_gen_routine(cg->isa->out, irg, cg); + ia32_gen_routine(cg, cg->isa->out, irg); cur_reg_set = NULL; @@ -1449,6 +1637,7 @@ static void set_tarval_output_modes(void) } } +const arch_isa_if_t ia32_isa_if; /** * The template that generates a new ISA object. @@ -1473,7 +1662,6 @@ static ia32_isa_t ia32_isa_template = { IA32_OPT_LEA | /* optimize for LEAs default: on */ IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */ IA32_OPT_IMMOPS | /* operations can use immediates, default: on */ - IA32_OPT_EXTBB | /* use extended basic block scheduling, default: on */ IA32_OPT_PUSHARGS), /* create pushs for function argument passing, default: on */ arch_pentium_4, /* instruction architecture */ arch_pentium_4, /* optimize for architecture */ @@ -1542,7 +1730,6 @@ static void *ia32_init(FILE *file_handle) { ia32_handle_intrinsics(); ia32_switch_section(isa->out, NO_SECTION); - fprintf(isa->out, "\t.intel_syntax\n"); /* needed for the debug support */ ia32_switch_section(isa->out, SECTION_TEXT); @@ -1777,18 +1964,18 @@ static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2], NULL, }; - static const be_execution_unit_t *_allowed_units_ALU[] = { - &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU1], - &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU2], - &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU3], - &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU4], + static const be_execution_unit_t *_allowed_units_GP[] = { + &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX], + &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX], + &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX], + &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX], + &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI], + &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI], + &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP], NULL, }; static const be_execution_unit_t *_allowed_units_DUMMY[] = { - &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY1], - &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY2], - &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY3], - &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY4], + &be_machine_execution_units_DUMMY[0], NULL, }; static const be_execution_unit_t **_units_callret[] = { @@ -1796,7 +1983,7 @@ static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void NULL }; static const be_execution_unit_t **_units_other[] = { - _allowed_units_ALU, + _allowed_units_GP, NULL }; static const be_execution_unit_t **_units_dummy[] = { @@ -1834,6 +2021,13 @@ static const 
be_machine_t *ia32_get_machine(const void *self) { return isa->cpu; } +/** + * Return irp irgs in the desired order. + */ +static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list) { + return NULL; +} + /** * Allows or disallows the creation of Psi nodes for the given Phi nodes. * @return 1 if allowed, 0 otherwise @@ -1910,7 +2104,6 @@ static const backend_params *ia32_get_libfirm_params(void) { p.if_conv_info = &ifconv; return &p; } -#ifdef WITH_LIBCORE /* instruction set architectures. */ static const lc_opt_enum_int_items_t arch_items[] = { @@ -1973,36 +2166,11 @@ static const lc_opt_table_entry_t ia32_options[] = { LC_OPT_ENT_NEGBIT("nolea", "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA), LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST), LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS), - LC_OPT_ENT_NEGBIT("noextbb", "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB), LC_OPT_ENT_NEGBIT("nopushargs", "do not create pushs for function arguments", &ia32_isa_template.opt, IA32_OPT_PUSHARGS), LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var), { NULL } }; -/** - * Register command line options for the ia32 backend. - * - * Options so far: - * - * ia32-arch=arch create instruction for arch - * ia32-opt=arch optimize for run on arch - * ia32-fpunit=unit select floating point unit (x87 or SSE2) - * ia32-incdec optimize for inc/dec - * ia32-noaddrmode do not use address mode - * ia32-nolea do not optimize for LEAs - * ia32-noplacecnst do not place constants, - * ia32-noimmop no operations with immediates - * ia32-noextbb do not use extended basic block scheduling - * ia32-nopushargs do not create pushs for function argument passing - * ia32-gasmode set the GAS compatibility mode - */ -static void ia32_register_options(lc_opt_entry_t *ent) -{ - lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32"); - lc_opt_add_table(be_grp_ia32, ia32_options); -} -#endif /* WITH_LIBCORE */ - const arch_isa_if_t ia32_isa_if = { ia32_init, ia32_done, @@ -2018,7 +2186,16 @@ const arch_isa_if_t ia32_isa_if = { ia32_get_libfirm_params, ia32_get_allowed_execution_units, ia32_get_machine, -#ifdef WITH_LIBCORE - ia32_register_options -#endif + ia32_get_irg_list, }; + +void be_init_arch_ia32(void) +{ + lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be"); + lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32"); + + lc_opt_add_table(ia32_grp, ia32_options); + be_register_isa_if("ia32", &ia32_isa_if); +} + +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);
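The tail of the patch converts the backend from the WITH_LIBCORE-guarded ia32_register_options() callback to the new bemodule registration scheme: be_init_arch_ia32() hooks the option table into the be/ia32 option groups, registers the ISA interface under the name "ia32" via be_register_isa_if(), and BE_REGISTER_MODULE_CONSTRUCTOR (from the newly included ../bemodule.h) arranges for that initializer to run at startup. A standalone sketch of this self-registration style, assuming the constructor macro simply collects initializers to be run once at startup; every name in it is hypothetical rather than the real bemodule API:

#include <stdio.h>

/* a chained registry entry, standing in for be_register_isa_if() */
typedef struct isa_entry {
    const char       *name;
    struct isa_entry *next;
} isa_entry;

static isa_entry *isa_registry = NULL;

static void register_isa(isa_entry *e, const char *name)
{
    e->name = name;
    e->next = isa_registry;
    isa_registry = e;
}

/* one init function per backend module, like be_init_arch_ia32 above */
static isa_entry demo_entry;
static void init_demo_backend(void)
{
    /* a real module would also add its option table to the
       "be"/"ia32" option groups here */
    register_isa(&demo_entry, "demo");
}

/* portable stand-in for BE_REGISTER_MODULE_CONSTRUCTOR: collect the
   initializers and run them once at startup */
typedef void (*module_init_fn)(void);
static module_init_fn module_inits[] = { init_demo_backend };

int main(void)
{
    size_t i;
    isa_entry *e;

    for (i = 0; i < sizeof(module_inits) / sizeof(module_inits[0]); ++i)
        module_inits[i]();

    for (e = isa_registry; e != NULL; e = e->next)
        printf("registered backend: %s\n", e->name);
    return 0;
}

Compared with the removed #ifdef WITH_LIBCORE hook in the arch_isa_if_t vtable, this style lets each backend register itself by name, so the core no longer needs a compile-time list of all backends.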