X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeabi.c;h=9ab4e696fef6ba20c5d85d90c60e4cf89a79b0b9;hb=f7bb0868bf4510296dc85c5dc3dab4227638720e;hp=5dcb8dd6a7cb61c2a34733bb076194d3976a1969;hpb=d60e46b8da3aec0fec718da439811942360a6a20;p=libfirm

diff --git a/ir/be/beabi.c b/ir/be/beabi.c
index 5dcb8dd6a..9ab4e696f 100644
--- a/ir/be/beabi.c
+++ b/ir/be/beabi.c
@@ -383,7 +383,7 @@ static INLINE int is_on_stack(be_abi_call_t *call, int pos)
  * @param curr_sp The stack pointer node to use.
  * @return The stack pointer after the call.
  */
-static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
+static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
 {
 	ir_graph *irg = env->birg->irg;
 	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
@@ -400,7 +400,6 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	const arch_register_t *sp = arch_isa_sp(isa);
 	ir_mode *mach_mode = sp->reg_class->mode;
 	struct obstack *obst = &env->obst;
-	ir_node *no_mem = get_irg_no_mem(irg);
 	int no_alloc = call->flags.bits.frame_is_setup_on_call;
 
 	ir_node *res_proj = NULL;
@@ -471,7 +470,18 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	 * moving the stack pointer along the stack's direction.
 	 */
 	if(stack_dir < 0 && !do_seq && !no_alloc) {
-		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
+		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
+		if(alloca_copy) {
+			add_irn_dep(curr_sp, alloca_copy);
+			alloca_copy = NULL;
+		}
+	}
+
+	if(!do_seq) {
+		obstack_ptr_grow(obst, get_Call_mem(irn));
+		curr_mem = new_NoMem();
+	} else {
+		curr_mem = get_Call_mem(irn);
 	}
 
 	assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
@@ -491,8 +501,12 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 		 */
 		if (do_seq) {
 			curr_ofs = 0;
-			addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
-				param_size + arg->space_before, be_stack_dir_expand);
+			addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
+			if(alloca_copy) {
+				add_irn_dep(curr_sp, alloca_copy);
+				alloca_copy = NULL;
+			}
+			add_irn_dep(curr_sp, curr_mem);
 		}
 		else {
 			curr_ofs += arg->space_before;
@@ -507,15 +521,18 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 
 			/* Insert a store for primitive arguments. */
 			if (is_atomic_type(param_type)) {
-				mem = new_r_Store(irg, bl, curr_mem, addr, param);
-				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
+				ir_node *store;
+				store = new_r_Store(irg, bl, curr_mem, addr, param);
+				mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
 			}
 
 			/* Make a mem copy for compound arguments. */
 			else {
+				ir_node *copy;
+
 				assert(mode_is_reference(get_irn_mode(param)));
-				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
-				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
+				copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
+				mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
 			}
 
 			curr_ofs += param_size;
@@ -529,8 +546,13 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 		in = (ir_node **) obstack_finish(obst);
 
 		/* We only need the sync if we didn't build the stores sequentially.
 		*/
-		if(!do_seq)
-			curr_mem = new_r_Sync(irg, bl, n_pos, in);
+		if(!do_seq) {
+			if(n_pos >= 1) {
+				curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);
+			} else {
+				curr_mem = get_Call_mem(irn);
+			}
+		}
 
 		obstack_free(obst, in);
 	}
@@ -686,12 +708,20 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 		}
 	}
 
-	if(!mem_proj)
+	if(!mem_proj) {
 		mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
+		keep_alive(mem_proj);
+	}
 
 	/* Clean up the stack frame if we allocated it */
-	if(!no_alloc)
-		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
+	if(!no_alloc) {
+		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
+		add_irn_dep(curr_sp, mem_proj);
+		if(alloca_copy) {
+			add_irn_dep(curr_sp, alloca_copy);
+			alloca_copy = NULL;
+		}
+	}
 	}
 
 	be_abi_call_free(call);
@@ -706,7 +736,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
  * Adjust an alloca.
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
-static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
 {
 	if (get_Alloc_where(alloc) == stack_alloc) {
 		ir_node *bl = get_nodes_block(alloc);
@@ -716,6 +746,8 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
 		const ir_edge_t *edge;
 		ir_node *new_alloc;
+		ir_node *addr;
+		ir_node *copy;
 
 		foreach_out_edge(alloc, edge) {
 			ir_node *irn = get_edge_src_irn(edge);
@@ -736,7 +768,7 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
 		/* Beware: currently Alloc nodes without a result might happen;
 		   only escape analysis kills them, and this phase runs only for
 		   object-oriented source. We kill the Alloc here. */
-		if (alloc_res == NULL) {
+		if (alloc_res == NULL && alloc_mem) {
 			exchange(alloc_mem, get_Alloc_mem(alloc));
 			return curr_sp;
 		}
@@ -746,12 +778,27 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
 		env->call->flags.bits.try_omit_fp = 0;
 		new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
 
-		exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
+		exchange(alloc, new_alloc);
 		if(alloc_mem != NULL)
-			exchange(alloc_mem, new_r_NoMem(irg));
+			set_Proj_proj(alloc_mem, pn_be_AddSP_M);
+
+		/* Fix the proj number of the alloca result. */
+		set_Proj_proj(alloc_res, pn_be_AddSP_res);
 
-		curr_sp = new_alloc;
+		addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+		/* Copy the address away, since it could be used after further stack pointer modifications. */
+		/* Let it point to curr_sp just for the moment; it is rerouted right below. */
+		*result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+		/* Let all users of the Alloc() result now point to the copy. */
+		edges_reroute(alloc_res, copy, irg);
+
+		/* Rewire the copy appropriately. 
+		*/
+		set_irn_n(copy, be_pos_Copy_op, addr);
+
+		curr_sp = alloc_res;
 	}
 
 	return curr_sp;
@@ -829,12 +876,13 @@ static int cmp_call_dependecy(const void *c1, const void *c2)
  */
 static void link_calls_in_block_walker(ir_node *irn, void *data)
 {
-	if(is_Call(irn)) {
+	if(is_Call(irn) || (get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)) {
 		be_abi_irg_t *env = data;
 		ir_node *bl = get_nodes_block(irn);
 		void *save = get_irn_link(bl);
 
-		env->call->flags.bits.irg_is_leaf = 0;
+		if (is_Call(irn))
+			env->call->flags.bits.irg_is_leaf = 0;
 
 		set_irn_link(irn, save);
 		set_irn_link(bl, irn);
@@ -862,6 +910,7 @@ static void process_calls_in_block(ir_node *bl, void *data)
 	if(n > 0) {
 		ir_node *keep;
 		ir_node **nodes;
+		ir_node *copy = NULL;
 		int i;
 
 		nodes = obstack_finish(&env->obst);
@@ -875,10 +924,10 @@ static void process_calls_in_block(ir_node *bl, void *data)
 			DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
 			switch(get_irn_opcode(irn)) {
 			case iro_Call:
-				curr_sp = adjust_call(env, irn, curr_sp);
+				curr_sp = adjust_call(env, irn, curr_sp, copy);
 				break;
 			case iro_Alloc:
-				curr_sp = adjust_alloc(env, irn, curr_sp);
+				curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
 				break;
 			default:
 				break;
@@ -936,7 +985,7 @@ static ir_node *setup_frame(be_abi_irg_t *env)
 	int stack_nr = get_Proj_proj(stack);
 
 	if(flags.try_omit_fp) {
-		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
 		frame = stack;
 	}
 
@@ -950,7 +999,7 @@ static ir_node *setup_frame(be_abi_irg_t *env)
 			arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
 	}
 
-		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
 	}
 
 	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
@@ -975,7 +1024,7 @@ static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct
 	pmap_entry *ent;
 
 	if(env->call->flags.bits.try_omit_fp) {
-		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
 	}
 
 	else {
@@ -1454,7 +1503,6 @@ static void modify_irg(be_abi_irg_t *env)
 	ir_graph *irg = env->birg->irg;
 	ir_node *bl = get_irg_start_block(irg);
 	ir_node *end = get_irg_end_block(irg);
-	ir_node *no_mem = get_irg_no_mem(irg);
 	ir_node *mem = get_irg_initial_mem(irg);
 	ir_type *method_type = get_entity_type(get_irg_entity(irg));
 	pset *dont_save = pset_new_ptr(8);
@@ -1593,7 +1641,7 @@ static void modify_irg(be_abi_irg_t *env)
 	/* do the stack allocation BEFORE the barrier, or spill code
 	   might be added before it */
 	env->init_sp = be_abi_reg_map_get(env->regs, sp);
-	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
 	be_abi_reg_map_set(env->regs, sp, env->init_sp);
 
 	barrier = create_barrier(env, bl, &mem, env->regs, 0);
@@ -1700,14 +1748,18 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
 	restore_optimization_state(&state);
 	FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
 
-	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
-
 	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
 	env->irn_ops.impl = &abi_irn_ops;
 
 	/* Lower all call nodes in the IRG. 
 	*/
 	process_calls(env);
 
+	/*
+		Beware: initialize the backend ABI call object only after processing
+		calls; otherwise some information might not be available yet.
+	*/
+	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+
 	/* Process the IRG */
 	modify_irg(env);
 
@@ -1772,8 +1824,13 @@ static void collect_stack_nodes_walker(ir_node *irn, void *data)
 {
 	struct fix_stack_walker_info *info = data;
 
-	if(arch_irn_is(info->aenv, irn, modify_sp))
+	if (is_Block(irn))
+		return;
+
+	if (arch_irn_is(info->aenv, irn, modify_sp)) {
+		assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T);
 		pset_insert_ptr(info->nodes, irn);
+	}
 }
 
 void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
@@ -1796,54 +1853,45 @@ void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
 	be_free_dominance_frontiers(df);
 }
 
-/**
- * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
- * into -1 or 1, respectively.
- * @param irn The node.
- * @return 1, if the direction of the IncSP was along, -1 if against.
- */
-static int get_dir(ir_node *irn)
-{
-	return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
-}
-
 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
 {
-	const arch_env_t *aenv = env->birg->main_env->arch_env;
+	const arch_env_t *arch_env = env->birg->main_env->arch_env;
 	int omit_fp = env->call->flags.bits.try_omit_fp;
 	ir_node *irn;
 
 	sched_foreach(bl, irn) {
 
 		/*
-			If the node modifies the stack pointer by a constant offset,
-			record that in the bias.
-		*/
-		if(be_is_IncSP(irn)) {
-			int ofs = be_get_IncSP_offset(irn);
-			int dir = get_dir(irn);
-
-			if(ofs == BE_STACK_FRAME_SIZE) {
-				ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
-				be_set_IncSP_offset(irn, ofs);
-			}
-
-			if(omit_fp)
-				bias += dir * ofs;
+			Check whether the node relates to an entity on the stack frame.
+			If so, set the true offset (including the bias) for that
+			node.
+		*/
+		entity *ent = arch_get_frame_entity(arch_env, irn);
+		if(ent) {
+			int offset = get_stack_entity_offset(env->frame, ent, bias);
+			arch_set_frame_offset(arch_env, irn, offset);
+			DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
 		}
 
 		/*
-			Else check, if the node relates to an entity on the stack frame.
-			If so, set the true offset (including the bias) for that
-			node.
-		*/
-		else {
-			entity *ent = arch_get_frame_entity(aenv, irn);
-			if(ent) {
-				int offset = get_stack_entity_offset(env->frame, ent, bias);
-				arch_set_frame_offset(aenv, irn, offset);
-				DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
+			If the node modifies the stack pointer by a constant offset,
+			record that in the bias.
+ */ + if(arch_irn_is(arch_env, irn, modify_sp)) { + int ofs = arch_get_sp_bias(arch_env, irn); + + if(be_is_IncSP(irn)) { + if(ofs == BE_STACK_FRAME_SIZE_EXPAND) { + ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg)); + be_set_IncSP_offset(irn, ofs); + } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) { + ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg)); + be_set_IncSP_offset(irn, ofs); + } } + + if(omit_fp) + bias += ofs; } } @@ -1974,10 +2022,19 @@ static entity *abi_get_frame_entity(const void *_self, const ir_node *irn) return NULL; } -static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias) +static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent) +{ +} + +static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias) { } +static int abi_get_sp_bias(const void *self, const ir_node *irn) +{ + return 0; +} + static const arch_irn_ops_if_t abi_irn_ops = { abi_get_irn_reg_req, abi_set_irn_reg, @@ -1985,7 +2042,13 @@ static const arch_irn_ops_if_t abi_irn_ops = { abi_classify, abi_get_flags, abi_get_frame_entity, - abi_set_stack_bias + abi_set_frame_entity, + abi_set_frame_offset, + abi_get_sp_bias, + NULL, /* get_inverse */ + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ }; static const arch_irn_handler_t abi_irn_handler = {