X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fbearch_ia32.c;h=14d8fc55b4b3eae04ece26711041a7b0afa87482;hb=0e64f62835b97abadd567f673957fe049211a183;hp=e3eca751a8676e820a9e9f407126308b9a96f15d;hpb=afbef33b94c8e5a687bbdd40f6f342b4b949e8e4;p=libfirm

diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c
index e3eca751a..14d8fc55b 100644
--- a/ir/be/ia32/bearch_ia32.c
+++ b/ir/be/ia32/bearch_ia32.c
@@ -21,6 +21,8 @@
 #include
 #endif /* WITH_LIBCORE */
 
+#include <math.h>
+
 #include "pseudo_irg.h"
 #include "irgwalk.h"
 #include "irprog.h"
@@ -38,6 +40,7 @@
 #include "../belower.h"
 #include "../besched_t.h"
 #include "../be.h"
+#include "../be_t.h"
 
 #include "bearch_ia32_t.h"
 #include "ia32_new_nodes.h"           /* ia32 nodes interface */
@@ -49,6 +52,8 @@
 #include "ia32_optimize.h"
 #include "ia32_x87.h"
 #include "ia32_dbg_stat.h"
+#include "ia32_finish.h"
+#include "ia32_util.h"
 
 #define DEBUG_MODULE "firm.be.ia32.isa"
 
@@ -69,21 +74,6 @@ ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
 		USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
 }
 
-/* returns the first Proj with given mode from mode_T node */
-static ir_node *get_proj_for_mode(ir_node *node, ir_mode *mode) {
-	const ir_edge_t *edge;
-
-	assert(get_irn_mode(node) == mode_T && "Need mode_T node.");
-
-	foreach_out_edge(node, edge) {
-		ir_node *proj = get_edge_src_irn(edge);
-		if (get_irn_mode(proj) == mode)
-			return proj;
-	}
-
-	return NULL;
-}
-
 /**************************************************
  *                         _ _              _  __
  *                        | | |            (_)/ _|
@@ -242,19 +232,29 @@ static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *
 }
 
 static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
+	arch_irn_class_t classification = arch_irn_class_normal;
+
 	irn = my_skip_proj(irn);
+
 	if (is_cfop(irn))
-		return arch_irn_class_branch;
-	else if (is_ia32_Cnst(irn))
-		return arch_irn_class_const;
-	else if (is_ia32_Ld(irn))
-		return arch_irn_class_load;
-	else if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
-		return arch_irn_class_store;
-	else if (is_ia32_irn(irn))
-		return arch_irn_class_normal;
-	else
-		return 0;
+		classification |= arch_irn_class_branch;
+
+	if (! is_ia32_irn(irn))
+		return classification & ~arch_irn_class_normal;
+
+	if (is_ia32_Cnst(irn))
+		classification |= arch_irn_class_const;
+
+	if (is_ia32_Ld(irn))
+		classification |= arch_irn_class_load;
+
+	if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
+		classification |= arch_irn_class_store;
+
+	if (is_ia32_got_reload(irn))
+		classification |= arch_irn_class_reload;
+
+	return classification;
 }
 
 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
@@ -272,6 +272,10 @@ static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
 	return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
 }
 
+static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
+	set_ia32_frame_ent(irn, ent);
+}
+
 static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
 	char buf[64];
 	const ia32_irn_ops_t *ops = self;
@@ -337,13 +341,12 @@ static void ia32_abi_dont_save_regs(void *self, pset *s)
  */
 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
 {
-	ia32_abi_env_t *env              = self;
+	ia32_abi_env_t *env = self;
 
-	if (!env->flags.try_omit_fp) {
-		int reg_size         = get_mode_size_bytes(env->isa->bp->reg_class->mode);
-		ir_node *bl          = get_irg_start_block(env->irg);
-		ir_node *curr_sp     = be_abi_reg_map_get(reg_map, env->isa->sp);
-		ir_node *curr_bp     = be_abi_reg_map_get(reg_map, env->isa->bp);
+	if (! env->flags.try_omit_fp) {
+		ir_node *bl      = get_irg_start_block(env->irg);
+		ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
+		ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
 		ir_node *push;
 
 		/* push ebp */
@@ -388,25 +391,24 @@ static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap
  */
 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
 {
-	ia32_abi_env_t *env   = self;
-	ir_node *curr_sp      = be_abi_reg_map_get(reg_map, env->isa->sp);
-	ir_node *curr_bp      = be_abi_reg_map_get(reg_map, env->isa->bp);
+	ia32_abi_env_t *env = self;
+	ir_node *curr_sp    = be_abi_reg_map_get(reg_map, env->isa->sp);
+	ir_node *curr_bp    = be_abi_reg_map_get(reg_map, env->isa->bp);
 
 	if (env->flags.try_omit_fp) {
 		/* simply remove the stack frame here */
 		curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
 	}
 	else {
-		const ia32_isa_t *isa   = (ia32_isa_t *)env->isa;
-		ir_mode *mode_bp        = env->isa->bp->reg_class->mode;
-		int reg_size            = get_mode_size_bytes(env->isa->bp->reg_class->mode);
+		const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
+		ir_mode *mode_bp      = env->isa->bp->reg_class->mode;
 
 		/* gcc always emits a leave at the end of a routine */
 		if (1 || ARCH_AMD(isa->opt_arch)) {
 			ir_node *leave;
 
 			/* leave */
-			leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
+			leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
 			set_ia32_flags(leave, arch_irn_flags_ignore);
 			curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
 			curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
@@ -419,7 +421,7 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_
 			curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
 
 			/* pop ebp */
-			pop     = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
+			pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
 			set_ia32_flags(pop, arch_irn_flags_ignore);
 			curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
 			curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
@@ -446,7 +448,7 @@ static ir_type *ia32_abi_get_between_type(void *self)
 
 	ia32_abi_env_t *env = self;
 
-	if(!between_type) {
+	if (! between_type) {
 		entity *old_bp_ent;
 		entity *ret_addr_ent;
 		entity *omit_fp_ret_addr_ent;
@@ -463,8 +465,8 @@ static ir_type *ia32_abi_get_between_type(void *self)
 		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
 		set_type_state(between_type, layout_fixed);
 
-		omit_fp_between_type   = new_type_struct(IDENT("ia32_between_type_omit_fp"));
-		omit_fp_ret_addr_ent   = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
+		omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
+		omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
 
 		set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
 		set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
@@ -485,42 +487,39 @@ static ir_type *ia32_abi_get_between_type(void *self)
  */
 static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
 {
-	int cost;
-	switch (get_ia32_irn_opcode(irn)) {
-		case iro_ia32_xDiv:
-		case iro_ia32_DivMod:
-			cost = 8;
-			break;
-
-		case iro_ia32_xLoad:
-		case iro_ia32_l_Load:
-		case iro_ia32_Load:
-		case iro_ia32_Push:
-		case iro_ia32_Pop:
-			cost = 10;
-			break;
-
-		case iro_ia32_xStore:
-		case iro_ia32_l_Store:
-		case iro_ia32_Store:
-		case iro_ia32_Store8Bit:
-			cost = 50;
-			break;
-
-		case iro_ia32_MulS:
-		case iro_ia32_Mul:
-		case iro_ia32_Mulh:
-		case iro_ia32_xMul:
-		case iro_ia32_l_MulS:
-		case iro_ia32_l_Mul:
-			cost = 2;
-			break;
-
-		default:
-			cost = 1;
-	}
-
-	return cost;
+	int cost;
+	ia32_op_type_t op_tp;
+	const ia32_irn_ops_t *ops = self;
+
+	if (is_Proj(irn))
+		return 0;
+
+	assert(is_ia32_irn(irn));
+
+	cost  = get_ia32_latency(irn);
+	op_tp = get_ia32_op_type(irn);
+
+	if (is_ia32_CopyB(irn)) {
+		cost = 250;
+		if (ARCH_INTEL(ops->cg->arch))
+			cost += 150;
+	}
+	else if (is_ia32_CopyB_i(irn)) {
+		int size = get_tarval_long(get_ia32_Immop_tarval(irn));
+		cost = 20 + (int)ceil((4.0 / 3.0) * size);
+		if (ARCH_INTEL(ops->cg->arch))
+			cost += 150;
+	}
+	/* in case of address mode operations add additional cycles */
+	else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
+		/*
+			In case of stack access, add 5 cycles (we assume the stack is in cache),
+			other memory operations cost 20 cycles.
+		*/
+		cost += is_ia32_use_frame(irn) ? 5 : 20;
+	}
+
+	return cost;
 }
 
 /**
@@ -583,8 +582,8 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 			}
 			else {
 				/* normal add: inverse == sub */
-				ir_node *proj = get_irn_out_edge_first(irn)->src;
-				assert(proj && is_Proj(proj));
+				ir_node *proj = ia32_get_res_proj(irn);
+				assert(proj);
 
 				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
 				pnc = pn_ia32_Sub_res;
@@ -602,8 +601,8 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 			}
 			else {
 				/* normal sub */
-				ir_node *proj = get_irn_out_edge_first(irn)->src;
-				assert(proj && is_Proj(proj));
+				ir_node *proj = ia32_get_res_proj(irn);
+				assert(proj);
 
 				if (i == 2) {
 					inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
@@ -630,16 +629,24 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 				inverse->costs   += 1;
 			}
 			break;
-		case iro_ia32_Not:
-			inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
+		case iro_ia32_Not: {
+			ir_node *proj = ia32_get_res_proj(irn);
+			assert(proj);
+
+			inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
 			pnc               = pn_ia32_Not_res;
 			inverse->costs   += 1;
 			break;
-		case iro_ia32_Minus:
-			inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
-			pnc               = pn_ia32_Minus_res;
+		}
+		case iro_ia32_Minus: {
+			ir_node *proj = ia32_get_res_proj(irn);
+			assert(proj);
+
+			inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
+			pnc               = pn_ia32_Minus_res;
 			inverse->costs   += 1;
 			break;
+		}
 		default:
 			/* inverse operation not supported */
 			return NULL;
@@ -651,6 +658,60 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
 	return inverse;
 }
 
+/**
+ * Check if irn can load its operand at position i from memory (source address mode).
+ * @param self   Pointer to irn ops itself
+ * @param irn    The irn to be checked
+ * @param i      The operand's position
+ * @return Non-zero if the operand can be loaded
+ */
+static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
+	if (! is_ia32_irn(irn)                            ||  /* must be an ia32 irn */
+		get_irn_arity(irn) != 5                       ||  /* must be a binary operation */
+		get_ia32_op_type(irn) != ia32_Normal          ||  /* must not already be an address mode irn */
+		! (get_ia32_am_support(irn) & ia32_am_Source) ||  /* must be capable of source address mode */
+		(i != 2 && i != 3)                            ||  /* a "real" operand position must be requested */
+		(i == 2 && ! is_ia32_commutative(irn))        ||  /* if the first operand is requested, irn must be commutative */
+		is_ia32_use_frame(irn))                           /* must not already use the frame */
+		return 0;
+
+	return 1;
+}
+
+static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *reload, unsigned int i) {
+	assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
+	assert(get_nodes_block(reload) == get_nodes_block(irn) && "Reload must be in same block as irn.");
+
+	if (get_irn_n_edges(reload) > 1)
+		return;
+
+	if (i == 2) {
+		ir_node *tmp = get_irn_n(irn, 3);
+		set_irn_n(irn, 3, get_irn_n(irn, 2));
+		set_irn_n(irn, 2, tmp);
+	}
+
+	set_ia32_am_support(irn, ia32_am_Source);
+	set_ia32_op_type(irn, ia32_AddrModeS);
+	set_ia32_am_flavour(irn, ia32_B);
+	set_ia32_ls_mode(irn, get_irn_mode(reload));
+	set_ia32_frame_ent(irn, be_get_frame_entity(reload));
+	set_ia32_use_frame(irn);
+	set_ia32_got_reload(irn);
+
+	set_irn_n(irn, 0, be_get_Reload_frame(reload));
+	set_irn_n(irn, 4, be_get_Reload_mem(reload));
+
+	/*
+		The input at position one is the index register, which is NoReg.
+		We would need the cg object to get a real noreg, but we cannot
+		access it from here.
+	*/
+	set_irn_n(irn, 3, get_irn_n(irn, 1));
+
+	DBG_OPT_AM_S(reload, irn);
+}
+
 static const be_abi_callbacks_t ia32_abi_callbacks = {
 	ia32_abi_init,
 	free,
@@ -669,9 +730,12 @@ static const arch_irn_ops_if_t ia32_irn_ops_if = {
 	ia32_classify,
 	ia32_get_flags,
 	ia32_get_frame_entity,
+	ia32_set_frame_entity,
 	ia32_set_stack_bias,
 	ia32_get_inverse,
-	ia32_get_op_estimated_cost
+	ia32_get_op_estimated_cost,
+	ia32_possible_memory_operand,
+	ia32_perform_memory_operand,
 };
 
 ia32_irn_ops_t ia32_irn_ops = {
@@ -692,6 +756,16 @@ ia32_irn_ops_t ia32_irn_ops = {
  *                       |___/
  **************************************************/
 
+static void ia32_kill_convs(ia32_code_gen_t *cg) {
+	ir_node *irn;
+
+	/* BEWARE: the Projs are inserted in the set */
+	foreach_nodeset(cg->kill_conv, irn) {
+		ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
+		edges_reroute(irn, in, cg->birg->irg);
+	}
+}
+
 /**
  * Transforms the standard firm graph into
  * an ia32 firm graph
@@ -704,12 +778,17 @@ static void ia32_prepare_graph(void *self) {
 	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
 
 	/* 1st: transform constants and psi condition trees */
-	irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_psi_cond_tree, cg);
+	ia32_pre_transform_phase(cg);
 
 	/* 2nd: transform all remaining nodes */
 	ia32_register_transformers();
 	dom = be_compute_dominance_frontiers(cg->irg);
+
+	cg->kill_conv = new_nodeset(5);
 	irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
+	ia32_kill_convs(cg);
+	del_nodeset(cg->kill_conv);
+
 	be_free_dominance_frontiers(dom);
 
 	if (cg->dump)
@@ -725,147 +804,6 @@ static void ia32_prepare_graph(void *self) {
 	DEBUG_ONLY(cg->mod = old_mod;)
 }
 
-static INLINE int need_constraint_copy(ir_node *irn) {
-	return \
-		! is_ia32_Lea(irn)          && \
-		! is_ia32_Conv_I2I(irn)     && \
-		! is_ia32_Conv_I2I8Bit(irn) && \
-		! is_ia32_CmpCMov(irn)      && \
-		! is_ia32_CmpSet(irn);
-}
-
-/**
- * Insert copies for all ia32 nodes where the should_be_same requirement
- * is not fulfilled.
- * Transform Sub into Neg -- Add if IN2 == OUT
- */
-static void ia32_finish_node(ir_node *irn, void *env) {
-	ia32_code_gen_t *cg = env;
-	const ia32_register_req_t **reqs;
-	const arch_register_t *out_reg, *in_reg, *in2_reg;
-	int n_res, i;
-	ir_node *copy, *in_node, *block, *in2_node;
-	ia32_op_type_t op_tp;
-
-	if (is_ia32_irn(irn)) {
-		/* AM Dest nodes don't produce any values */
-		op_tp = get_ia32_op_type(irn);
-		if (op_tp == ia32_AddrModeD)
-			goto end;
-
-		reqs  = get_ia32_out_req_all(irn);
-		n_res = get_ia32_n_res(irn);
-		block = get_nodes_block(irn);
-
-		/* check all OUT requirements, if there is a should_be_same */
-		if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn))
-		{
-			for (i = 0; i < n_res; i++) {
-				if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
-					/* get in and out register */
-					out_reg = get_ia32_out_reg(irn, i);
-					in_node = get_irn_n(irn, reqs[i]->same_pos);
-					in_reg  = arch_get_irn_register(cg->arch_env, in_node);
-
-					/* don't copy ignore nodes */
-					if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
-						continue;
-
-					/* check if in and out register are equal */
-					if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
-						/* in case of a commutative op: just exchange the in's */
-						/* beware: the current op could be everything, so test for ia32 */
-						/* commutativity first before getting the second in            */
-						if (is_ia32_commutative(irn)) {
-							in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1);
-							in2_reg  = arch_get_irn_register(cg->arch_env, in2_node);
-
-							if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
-								set_irn_n(irn, reqs[i]->same_pos, in2_node);
-								set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node);
-							}
-							else
-								goto insert_copy;
-						}
-						else {
-insert_copy:
-							DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
-							/* create copy from in register */
-							copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
-
-							DBG_OPT_2ADDRCPY(copy);
-
-							/* destination is the out register */
-							arch_set_irn_register(cg->arch_env, copy, out_reg);
-
-							/* insert copy before the node into the schedule */
-							sched_add_before(irn, copy);
-
-							/* set copy as in */
-							set_irn_n(irn, reqs[i]->same_pos, copy);
-						}
-					}
-				}
-			}
-		}
-
-		/* If we have a CondJmp/CmpSet/xCmpSet with immediate, we need to */
-		/* check if it's the right operand, otherwise we have             */
-		/* to change it, as CMP doesn't support immediate as              */
-		/* left operands.                                                 */
-		if ((is_ia32_CondJmp(irn) || is_ia32_CmpSet(irn) || is_ia32_xCmpSet(irn)) &&
-			(is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) &&
-			op_tp == ia32_AddrModeS)
-		{
-			set_ia32_op_type(irn, ia32_AddrModeD);
-			set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
-		}
-
-		/* check if there is a sub which need to be transformed */
-		ia32_transform_sub_to_neg_add(irn, cg);
-
-		/* transform a LEA into an Add if possible */
-		ia32_transform_lea_to_add(irn, cg);
-	}
-end:
-
-	/* check for peephole optimization */
-	ia32_peephole_optimization(irn, cg);
-}
-
-static void ia32_finish_irg_walker(ir_node *block, void *env) {
-	ir_node *irn, *next;
-
-	for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
-		next = sched_next(irn);
-		ia32_finish_node(irn, env);
-	}
-}
-
-static void ia32_push_on_queue_walker(ir_node *block, void *env) {
-	waitq *wq = env;
-	waitq_put(wq, block);
-}
-
-/**
- * Add Copy nodes for not fulfilled should_be_equal constraints
- */
-static void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
-	waitq *wq = new_waitq();
-
-	/* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
-	irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
-
-	while (! waitq_empty(wq)) {
-		ir_node *block = waitq_get(wq);
-		ia32_finish_irg_walker(block, cg);
-	}
-	del_waitq(wq);
-}
-
-
 /**
  * Dummy functions for hooks we don't need but which must be filled.
  */
@@ -895,7 +833,7 @@ static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
 
 	/* tuple node has one user which is not the mem proj-> ok */
 	if (mode == mode_T && get_irn_n_edges(irn) == 1) {
-		mem_proj = get_proj_for_mode(irn, mode_M);
+		mem_proj = ia32_get_proj_for_mode(irn, mode_M);
 		if (! mem_proj)
 			return;
 	}
@@ -1057,6 +995,125 @@ static void transform_to_Store(ia32_transform_env_t *env) {
 	exchange(irn, proj);
 }
 
+static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) {
+	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+
+	ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem);
+
+	set_ia32_frame_ent(push, ent);
+	set_ia32_use_frame(push);
+	set_ia32_op_type(push, ia32_AddrModeS);
+	set_ia32_am_flavour(push, ia32_B);
+	set_ia32_ls_mode(push, mode_Is);
+	if (offset != NULL)
+		add_ia32_am_offs(push, offset);
+
+	sched_add_before(schedpoint, push);
+	return push;
+}
+
+static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) {
+	ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem());
+
+	set_ia32_frame_ent(pop, ent);
+	set_ia32_use_frame(pop);
+	set_ia32_op_type(pop, ia32_AddrModeD);
+	set_ia32_am_flavour(pop, ia32_B);
+	set_ia32_ls_mode(pop, mode_Is);
+	if (offset != NULL)
+		add_ia32_am_offs(pop, offset);
+
+	sched_add_before(schedpoint, pop);
+
+	return pop;
+}
+
+static ir_node *create_spproj(ia32_transform_env_t *env, ir_node *pred, ir_node *schedpoint, const ir_node *oldsp) {
+	ir_mode *spmode = get_irn_mode(oldsp);
+	const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp);
+	ir_node *sp;
+
+	sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, 0);
+	arch_set_irn_register(env->cg->arch_env, sp, spreg);
+	sched_add_before(schedpoint, sp);
+
+	return sp;
+}
+
+static void transform_MemPerm(ia32_transform_env_t *env) {
+	/*
+	 * Transform a MemPerm. Currently we do this the ugly way and produce
+	 * push/pop cascades into/from memory. This is possible without using
+	 * any registers.
+ */ + ir_node *node = env->irn; + int i, arity; + ir_node *sp = get_irn_n(node, 0); + const ir_edge_t *edge; + const ir_edge_t *next; + ir_node **pops; + + arity = be_get_MemPerm_entity_arity(node); + pops = alloca(arity * sizeof(pops[0])); + + // create pushs + for(i = 0; i < arity; ++i) { + entity *ent = be_get_MemPerm_in_entity(node, i); + ir_type *enttype = get_entity_type(ent); + int entbits = get_type_size_bits(enttype); + ir_node *mem = get_irn_n(node, i + 1); + ir_node *push; + + assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); + + push = create_push(env, node, sp, mem, ent, NULL); + sp = create_spproj(env, push, node, sp); + if(entbits == 64) { + // add another push after the first one + push = create_push(env, node, sp, mem, ent, "4"); + sp = create_spproj(env, push, node, sp); + } + + set_irn_n(node, i, new_Bad()); + } + + // create pops + for(i = arity - 1; i >= 0; --i) { + entity *ent = be_get_MemPerm_out_entity(node, i); + ir_type *enttype = get_entity_type(ent); + int entbits = get_type_size_bits(enttype); + + ir_node *pop; + + assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit"); + + pop = create_pop(env, node, sp, ent, NULL); + if(entbits == 64) { + // add another pop after the first one + sp = create_spproj(env, pop, node, sp); + pop = create_pop(env, node, sp, ent, "4"); + } + if(i != 0) { + sp = create_spproj(env, pop, node, sp); + } + + pops[i] = pop; + } + + // exchange memprojs + foreach_out_edge_safe(node, edge, next) { + ir_node *proj = get_edge_src_irn(edge); + int p = get_Proj_proj(proj); + + assert(p < arity); + + set_Proj_pred(proj, pops[p]); + set_Proj_proj(proj, 3); + } + + sched_remove(node); +} + /** * Fix the mode of Spill/Reload */ @@ -1097,12 +1154,18 @@ static void ia32_after_ra_walker(ir_node *block, void *env) { transform_to_Load(&tenv); } else if (be_is_Spill(node)) { + ir_node *spillval = get_irn_n(node, be_pos_Spill_val); /* we always spill the whole register */ tenv.dbg = get_irn_dbg_info(node); tenv.irn = node; - tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node))); + tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval)); transform_to_Store(&tenv); } + else if(be_is_MemPerm(node)) { + tenv.dbg = get_irn_dbg_info(node); + tenv.irn = node; + transform_MemPerm(&tenv); + } } } @@ -1115,6 +1178,7 @@ static void ia32_after_ra_walker(ir_node *block, void *env) { */ static void ia32_after_ra(void *self) { ia32_code_gen_t *cg = self; + irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self); /* if we do x87 code generation, rewrite all the virtual instructions and registers */ @@ -1123,6 +1187,15 @@ static void ia32_after_ra(void *self) { } } +/** + * Last touchups for the graph before emit + */ +static void ia32_finish(void *self) { + ia32_code_gen_t *cg = self; + ir_graph *irg = cg->irg; + + ia32_finish_irg(irg, cg); +} /** * Emits the code, closes the output file and frees @@ -1132,9 +1205,6 @@ static void ia32_codegen(void *self) { ia32_code_gen_t *cg = self; ir_graph *irg = cg->irg; - ia32_finish_irg(irg, cg); - if (cg->dump) - be_dump(irg, "-finished", dump_ir_block_graph_sched); ia32_gen_routine(cg->isa->out, irg, cg); cur_reg_set = NULL; @@ -1145,7 +1215,6 @@ static void ia32_codegen(void *self) { /* de-allocate code generator */ del_set(cg->reg_set); free(self); - } static void *ia32_cg_init(const be_irg_t *birg); @@ -1157,6 +1226,7 @@ static const arch_code_generator_if_t ia32_code_gen_if = { ia32_before_sched, /* before scheduling hook */ 
 	ia32_before_ra,      /* before register allocation hook */
 	ia32_after_ra,       /* after register allocation hook */
+	ia32_finish,         /* called before codegen */
 	ia32_codegen         /* emit && done */
 };
@@ -1183,7 +1253,9 @@ static void *ia32_cg_init(const be_irg_t *birg) {
 	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
 
 	/* copy optimizations from isa for easier access */
-	cg->opt = isa->opt;
+	cg->opt      = isa->opt;
+	cg->arch     = isa->arch;
+	cg->opt_arch = isa->opt_arch;
 
 	/* enter it */
 	isa->cg = cg;
@@ -1381,7 +1453,6 @@ static int ia32_get_n_reg_class(const void *self) {
  * Return the register class for index i.
  */
 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
-	const ia32_isa_t *isa = self;
 	assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
 	if (i == 0)
 		return &ia32_reg_classes[CLASS_ia32_gp];
@@ -1526,6 +1597,14 @@ static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
 	return &ia32_code_gen_if;
 }
 
+/**
+ * Returns the estimated execution time of an ia32 irn.
+ */
+static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
+	const arch_env_t *arch_env = env;
+	return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
+}
+
 list_sched_selector_t ia32_sched_selector;
 
 /**
@@ -1534,6 +1613,7 @@ list_sched_selector_t ia32_sched_selector;
 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
 //	memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
 	memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
+	ia32_sched_selector.exectime              = ia32_sched_exectime;
 	ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
 	return &ia32_sched_selector;
 }
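
Remark on the reworked ia32_classify() above: the hook now accumulates an OR-able
bitmask instead of returning one exclusive class, so a single node can carry
several classifications at once, e.g. a node marked via set_ia32_got_reload()
additionally reports arch_irn_class_reload. A minimal sketch of how a caller
could test the combined flags -- assuming the usual arch_irn_classify() dispatch
declared in bearch.h; the counter variable is purely illustrative and not part
of this patch:

	/* hypothetical caller: count nodes that had a Reload folded into
	 * their address mode by ia32_perform_memory_operand() */
	arch_irn_class_t cls = arch_irn_classify(arch_env, irn);

	if (cls & arch_irn_class_reload)
		++folded_reloads;  /* illustrative counter */

Because the flags compose, such queries no longer need an if/else cascade over
mutually exclusive class values.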