X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fbearch_ia32.c;h=14d8fc55b4b3eae04ece26711041a7b0afa87482;hb=48071aea23fabc99044488d12757f274bc956fae;hp=73c8d785a8d00bc3bfb2744e63e32ff91e078286;hpb=29681e70b073ec2ecf9d6dd8cd37f05439ada3cc;p=libfirm diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c index 73c8d785a..14d8fc55b 100644 --- a/ir/be/ia32/bearch_ia32.c +++ b/ir/be/ia32/bearch_ia32.c @@ -21,6 +21,8 @@ #include #endif /* WITH_LIBCORE */ +#include + #include "pseudo_irg.h" #include "irgwalk.h" #include "irprog.h" @@ -38,6 +40,7 @@ #include "../belower.h" #include "../besched_t.h" #include "../be.h" +#include "../be_t.h" #include "bearch_ia32_t.h" #include "ia32_new_nodes.h" /* ia32 nodes interface */ @@ -229,19 +232,29 @@ static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node * } static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) { + arch_irn_class_t classification = arch_irn_class_normal; + irn = my_skip_proj(irn); + if (is_cfop(irn)) - return arch_irn_class_branch; - else if (is_ia32_Cnst(irn)) - return arch_irn_class_const; - else if (is_ia32_Ld(irn)) - return arch_irn_class_load; - else if (is_ia32_St(irn) || is_ia32_Store8Bit(irn)) - return arch_irn_class_store; - else if (is_ia32_irn(irn)) - return arch_irn_class_normal; - else - return 0; + classification |= arch_irn_class_branch; + + if (! is_ia32_irn(irn)) + return classification & ~arch_irn_class_normal; + + if (is_ia32_Cnst(irn)) + classification |= arch_irn_class_const; + + if (is_ia32_Ld(irn)) + classification |= arch_irn_class_load; + + if (is_ia32_St(irn) || is_ia32_Store8Bit(irn)) + classification |= arch_irn_class_store; + + if (is_ia32_got_reload(irn)) + classification |= arch_irn_class_reload; + + return classification; } static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) { @@ -259,6 +272,10 @@ static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) { return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL; } +static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) { + set_ia32_frame_ent(irn, ent); +} + static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) { char buf[64]; const ia32_irn_ops_t *ops = self; @@ -324,12 +341,12 @@ static void ia32_abi_dont_save_regs(void *self, pset *s) */ static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map) { - ia32_abi_env_t *env = self; + ia32_abi_env_t *env = self; - if (!env->flags.try_omit_fp) { - ir_node *bl = get_irg_start_block(env->irg); - ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); - ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); + if (! 
env->flags.try_omit_fp) { + ir_node *bl = get_irg_start_block(env->irg); + ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); + ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); ir_node *push; /* push ebp */ @@ -374,24 +391,24 @@ static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap */ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map) { - ia32_abi_env_t *env = self; - ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); - ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); + ia32_abi_env_t *env = self; + ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp); + ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp); if (env->flags.try_omit_fp) { /* simply remove the stack frame here */ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink); } else { - const ia32_isa_t *isa = (ia32_isa_t *)env->isa; - ir_mode *mode_bp = env->isa->bp->reg_class->mode; + const ia32_isa_t *isa = (ia32_isa_t *)env->isa; + ir_mode *mode_bp = env->isa->bp->reg_class->mode; /* gcc always emits a leave at the end of a routine */ if (1 || ARCH_AMD(isa->opt_arch)) { ir_node *leave; /* leave */ - leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem); + leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem); set_ia32_flags(leave, arch_irn_flags_ignore); curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame); curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack); @@ -404,7 +421,7 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_ curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem); /* pop ebp */ - pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem); + pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem); set_ia32_flags(pop, arch_irn_flags_ignore); curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res); curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack); @@ -471,45 +488,35 @@ static ir_type *ia32_abi_get_between_type(void *self) static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn) { int cost; + ia32_op_type_t op_tp; + const ia32_irn_ops_t *ops = self; - if(is_Proj(irn)) + if (is_Proj(irn)) return 0; - switch (get_ia32_irn_opcode(irn)) { - case iro_ia32_xDiv: - case iro_ia32_DivMod: - cost = 8; - break; - - case iro_ia32_xLoad: - case iro_ia32_l_Load: - case iro_ia32_Load: - cost = 25; - break; - - case iro_ia32_Push: - case iro_ia32_Pop: - cost = 5; - break; - - case iro_ia32_xStore: - case iro_ia32_l_Store: - case iro_ia32_Store: - case iro_ia32_Store8Bit: - cost = 50; - break; - - case iro_ia32_MulS: - case iro_ia32_Mul: - case iro_ia32_Mulh: - case iro_ia32_xMul: - case iro_ia32_l_MulS: - case iro_ia32_l_Mul: - cost = 2; - break; - - default: - cost = 1; + assert(is_ia32_irn(irn)); + + cost = get_ia32_latency(irn); + op_tp = get_ia32_op_type(irn); + + if (is_ia32_CopyB(irn)) { + cost = 250; + if (ARCH_INTEL(ops->cg->arch)) + cost += 150; + } + else if (is_ia32_CopyB_i(irn)) { + int size = get_tarval_long(get_ia32_Immop_tarval(irn)); + cost = 20 + (int)ceil((4/3) * size); + if (ARCH_INTEL(ops->cg->arch)) + cost += 150; + } + /* in case of address mode operations add additional cycles */ + else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) { + /* + In case of stack access add 5 cycles (we assume stack is in cache), + other 
memory operations cost 20 cycles. + */ + cost += is_ia32_use_frame(irn) ? 5 : 20; } return cost; @@ -690,6 +697,7 @@ static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node set_ia32_ls_mode(irn, get_irn_mode(reload)); set_ia32_frame_ent(irn, be_get_frame_entity(reload)); set_ia32_use_frame(irn); + set_ia32_got_reload(irn); set_irn_n(irn, 0, be_get_Reload_frame(reload)); set_irn_n(irn, 4, be_get_Reload_mem(reload)); @@ -722,6 +730,7 @@ static const arch_irn_ops_if_t ia32_irn_ops_if = { ia32_classify, ia32_get_flags, ia32_get_frame_entity, + ia32_set_frame_entity, ia32_set_stack_bias, ia32_get_inverse, ia32_get_op_estimated_cost, @@ -747,6 +756,16 @@ ia32_irn_ops_t ia32_irn_ops = { * |___/ **************************************************/ +static void ia32_kill_convs(ia32_code_gen_t *cg) { + ir_node *irn; + + /* BEWARE: the Projs are inserted in the set */ + foreach_nodeset(cg->kill_conv, irn) { + ir_node *in = get_irn_n(get_Proj_pred(irn), 2); + edges_reroute(irn, in, cg->birg->irg); + } +} + /** * Transforms the standard firm graph into * an ia32 firm graph @@ -764,7 +783,12 @@ static void ia32_prepare_graph(void *self) { /* 2nd: transform all remaining nodes */ ia32_register_transformers(); dom = be_compute_dominance_frontiers(cg->irg); + + cg->kill_conv = new_nodeset(5); irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg); + ia32_kill_convs(cg); + del_nodeset(cg->kill_conv); + be_free_dominance_frontiers(dom); if (cg->dump) @@ -971,12 +995,10 @@ static void transform_to_Store(ia32_transform_env_t *env) { exchange(irn, proj); } -static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node **sp, ir_node *mem, entity *ent, const char *offset) { +static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) { ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_mode *spmode = get_irn_mode(*sp); - const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, *sp); - ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, *sp, noreg, mem); + ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem); set_ia32_frame_ent(push, ent); set_ia32_use_frame(push); @@ -987,19 +1009,11 @@ static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_n add_ia32_am_offs(push, offset); sched_add_before(schedpoint, push); - - *sp = new_rd_Proj(env->dbg, env->irg, env->block, push, spmode, 0); - sched_add_before(schedpoint, *sp); - arch_set_irn_register(env->cg->arch_env, *sp, spreg); - return push; } -static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node **sp, entity *ent, const char *offset) { - ir_mode *spmode = get_irn_mode(*sp); - const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, *sp); - - ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, *sp, new_NoMem()); +static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) { + ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem()); set_ia32_frame_ent(pop, ent); set_ia32_use_frame(pop); @@ -1011,13 +1025,21 @@ static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_no sched_add_before(schedpoint, pop); - *sp = new_rd_Proj(env->dbg, env->irg, env->block, pop, spmode, 0); - arch_set_irn_register(env->cg->arch_env, *sp, spreg); - sched_add_before(schedpoint, *sp); - return pop; } +static ir_node* 
create_spproj(ia32_transform_env_t *env, ir_node *pred, ir_node *schedpoint, const ir_node *oldsp) { + ir_mode *spmode = get_irn_mode(oldsp); + const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp); + ir_node *sp; + + sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, 0); + arch_set_irn_register(env->cg->arch_env, sp, spreg); + sched_add_before(schedpoint, sp); + + return sp; +} + static void transform_MemPerm(ia32_transform_env_t *env) { /* * Transform memperm, currently we do this the ugly way and produce @@ -1026,13 +1048,10 @@ static void transform_MemPerm(ia32_transform_env_t *env) { */ ir_node *node = env->irn; int i, arity; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_node *sp = get_irn_n(node, 0); - const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, sp); const ir_edge_t *edge; const ir_edge_t *next; ir_node **pops; - ir_mode *spmode = get_irn_mode(sp); arity = be_get_MemPerm_entity_arity(node); pops = alloca(arity * sizeof(pops[0])); @@ -1043,13 +1062,16 @@ static void transform_MemPerm(ia32_transform_env_t *env) { ir_type *enttype = get_entity_type(ent); int entbits = get_type_size_bits(enttype); ir_node *mem = get_irn_n(node, i + 1); + ir_node *push; assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); - create_push(env, node, &sp, mem, ent, NULL); + push = create_push(env, node, sp, mem, ent, NULL); + sp = create_spproj(env, push, node, sp); if(entbits == 64) { // add another push after the first one - create_push(env, node, &sp, mem, ent, "4"); + push = create_push(env, node, sp, mem, ent, "4"); + sp = create_spproj(env, push, node, sp); } set_irn_n(node, i, new_Bad()); @@ -1065,10 +1087,14 @@ static void transform_MemPerm(ia32_transform_env_t *env) { assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); - pop = create_pop(env, node, &sp, ent, NULL); + pop = create_pop(env, node, sp, ent, NULL); if(entbits == 64) { - // add another push after the first one - pop = create_pop(env, node, &sp, ent, "4"); + // add another pop after the first one + sp = create_spproj(env, pop, node, sp); + pop = create_pop(env, node, sp, ent, "4"); + } + if(i != 0) { + sp = create_spproj(env, pop, node, sp); } pops[i] = pop; @@ -1571,6 +1597,14 @@ static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) { return &ia32_code_gen_if; } +/** + * Returns the estimated execution time of an ia32 irn. + */ +static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) { + const arch_env_t *arch_env = env; + return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1; +} + list_sched_selector_t ia32_sched_selector; /** @@ -1579,6 +1613,7 @@ list_sched_selector_t ia32_sched_selector; static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) { // memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t)); memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t)); + ia32_sched_selector.exectime = ia32_sched_exectime; ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule; return &ia32_sched_selector; }
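
For readers following the ia32_classify rework in the patch above: the function now builds its result as a bitmask, OR-ing one arch_irn_class_* flag per property instead of returning a single exclusive value, so a node that ia32_perform_memory_operand has marked via set_ia32_got_reload can report itself as both a load and a reload. The self-contained C sketch below only illustrates that flag-combination pattern; the typedef, the flag values and the example node are assumptions made for this illustration and are not the real libfirm definitions.

/* Standalone illustration of bitmask-style node classification.
 * The flag values below are assumed for this example only; they are
 * not the actual libfirm arch_irn_class_t definitions. */
#include <stdio.h>

typedef unsigned arch_irn_class_t;

enum {
    arch_irn_class_normal = 1u << 0,
    arch_irn_class_branch = 1u << 1,
    arch_irn_class_const  = 1u << 2,
    arch_irn_class_load   = 1u << 3,
    arch_irn_class_store  = 1u << 4,
    arch_irn_class_reload = 1u << 5
};

int main(void)
{
    /* A backend node starts out as a normal ia32 node ... */
    arch_irn_class_t classification = arch_irn_class_normal;

    /* ... and accumulates further properties instead of being forced
     * into exactly one class: here a load that was folded from a Reload. */
    classification |= arch_irn_class_load;
    classification |= arch_irn_class_reload;

    /* Callers query individual properties with a bit test. */
    if (classification & arch_irn_class_load)
        printf("node performs a load\n");
    if (classification & arch_irn_class_reload)
        printf("node also counts as a reload\n");

    return 0;
}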