X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeabi.c;h=9ab4e696fef6ba20c5d85d90c60e4cf89a79b0b9;hb=f7bb0868bf4510296dc85c5dc3dab4227638720e;hp=96d777cfd7958824567469b0676c753b1548f592;hpb=aaa55e60e91116d79c76cf8e083bf3ce2c9453ac;p=libfirm

diff --git a/ir/be/beabi.c b/ir/be/beabi.c
index 96d777cfd..9ab4e696f 100644
--- a/ir/be/beabi.c
+++ b/ir/be/beabi.c
@@ -25,6 +25,9 @@
 #include "irprintf_t.h"
 #include "irgopt.h"
 #include "irbitset.h"
+#include "height.h"
+#include "pdeq.h"
+#include "irtools.h"
 #include "be.h"
 #include "beabi.h"
@@ -33,9 +36,6 @@
 #include "belive_t.h"
 #include "besched_t.h"
-#define MAX(x, y) ((x) > (y) ? (x) : (y))
-#define MIN(x, y) ((x) < (y) ? (x) : (y))
-
 typedef struct _be_abi_call_arg_t {
 	unsigned is_res : 1;	/**< 1: the call argument is a return value. 0: it's a call parameter. */
 	unsigned in_reg : 1;	/**< 1: this argument is transmitted in registers. */
@@ -115,6 +115,7 @@ struct _be_abi_irg_t {
 /* Forward, since be need it in be_abi_introduce(). */
 static const arch_irn_ops_if_t abi_irn_ops;
 static const arch_irn_handler_t abi_irn_handler;
+static heights_t *ir_heights;
 /* Flag: if set, try to omit the frame pointer if called by the backend */
 int be_omit_fp = 1;
@@ -217,7 +218,7 @@ be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
 *
 * @return the new ABI call object
 */
-static be_abi_call_t *be_abi_call_new()
+static be_abi_call_t *be_abi_call_new(void)
 {
 	be_abi_call_t *call = xmalloc(sizeof(call[0]));
 	call->flags.val = 0;
@@ -382,7 +383,7 @@ static INLINE int is_on_stack(be_abi_call_t *call, int pos)
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
-static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
+static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
 {
 	ir_graph *irg = env->birg->irg;
 	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
@@ -399,7 +400,6 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	const arch_register_t *sp = arch_isa_sp(isa);
 	ir_mode *mach_mode = sp->reg_class->mode;
 	struct obstack *obst = &env->obst;
-	ir_node *no_mem = get_irg_no_mem(irg);
 	int no_alloc = call->flags.bits.frame_is_setup_on_call;
 	ir_node *res_proj = NULL;
@@ -470,7 +470,18 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
	 * moving the stack pointer along the stack's direction.
	 */
	if(stack_dir < 0 && !do_seq && !no_alloc) {
-		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
+		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
+		if(alloca_copy) {
+			add_irn_dep(curr_sp, alloca_copy);
+			alloca_copy = NULL;
+		}
+	}
+
+	if(!do_seq) {
+		obstack_ptr_grow(obst, get_Call_mem(irn));
+		curr_mem = new_NoMem();
+	} else {
+		curr_mem = get_Call_mem(irn);
 	}
 	assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
@@ -490,8 +501,12 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
		*/
		if (do_seq) {
			curr_ofs = 0;
-			addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
-				param_size + arg->space_before, be_stack_dir_expand);
+			addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
+			if(alloca_copy) {
+				add_irn_dep(curr_sp, alloca_copy);
+				alloca_copy = NULL;
+			}
+			add_irn_dep(curr_sp, curr_mem);
		}
		else {
			curr_ofs += arg->space_before;
@@ -506,15 +521,18 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
			/* Insert a store for primitive arguments. */
			if (is_atomic_type(param_type)) {
-				mem = new_r_Store(irg, bl, curr_mem, addr, param);
-				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
+				ir_node *store;
+				store = new_r_Store(irg, bl, curr_mem, addr, param);
+				mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
			}
			/* Make a mem copy for compound arguments. */
			else {
+				ir_node *copy;
+
				assert(mode_is_reference(get_irn_mode(param)));
-				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
-				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
+				copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
+				mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
			}
			curr_ofs += param_size;
@@ -528,8 +546,13 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
		in = (ir_node **) obstack_finish(obst);
		/* We need the sync only, if we didn't build the stores sequentially. */
-		if(!do_seq)
-			curr_mem = new_r_Sync(irg, bl, n_pos, in);
+		if(!do_seq) {
+			if(n_pos >= 1) {
+				curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);
+			} else {
+				curr_mem = get_Call_mem(irn);
+			}
+		}
		obstack_free(obst, in);
	}
@@ -685,12 +708,20 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
		}
	}
-	if(!mem_proj)
+	if(!mem_proj) {
		mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
+		keep_alive(mem_proj);
+	}
	/* Clean up the stack frame if we allocated it */
-	if(!no_alloc)
-		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
+	if(!no_alloc) {
+		curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
+		add_irn_dep(curr_sp, mem_proj);
+		if(alloca_copy) {
+			add_irn_dep(curr_sp, alloca_copy);
+			alloca_copy = NULL;
+		}
+	}
	}
	be_abi_call_free(call);
@@ -705,7 +736,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 * Adjust an alloca.
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
-static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
 {
	if (get_Alloc_where(alloc) == stack_alloc) {
		ir_node *bl = get_nodes_block(alloc);
@@ -715,6 +746,8 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
		const ir_edge_t *edge;
		ir_node *new_alloc;
+		ir_node *addr;
+		ir_node *copy;
		foreach_out_edge(alloc, edge) {
			ir_node *irn = get_edge_src_irn(edge);
@@ -735,7 +768,7 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
		/* Beware: currently Alloc nodes without a result might happen,
		   only escape analysis kills them and this phase runs only for object
		   oriented source. We kill the Alloc here. */
-		if (alloc_res == NULL) {
+		if (alloc_res == NULL && alloc_mem) {
			exchange(alloc_mem, get_Alloc_mem(alloc));
			return curr_sp;
		}
@@ -745,17 +778,34 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
		env->call->flags.bits.try_omit_fp = 0;
		new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
-		exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
+		exchange(alloc, new_alloc);
		if(alloc_mem != NULL)
-			exchange(alloc_mem, new_r_NoMem(irg));
+			set_Proj_proj(alloc_mem, pn_be_AddSP_M);
+
+		/* fix projnum of alloca res */
+		set_Proj_proj(alloc_res, pn_be_AddSP_res);
-		curr_sp = new_alloc;
+		addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+		/* copy the address away, since it could be used after further stack pointer modifications. */
+		/* Let it point to curr_sp just for the moment, I'll reroute it in a second. */
+		*result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+		/* Let all users of the Alloc() result now point to the copy. */
+		edges_reroute(alloc_res, copy, irg);
+
+		/* Rewire the copy appropriately. */
+		set_irn_n(copy, be_pos_Copy_op, addr);
+
+		curr_sp = alloc_res;
	}
	return curr_sp;
}
+/* the following function is replaced by the heights module */
+#if 0
 /**
 * Walker for dependent_on().
 * This function searches a node tgt recursively from a given node
@@ -775,13 +825,14 @@ static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
	/* Phi functions stop the recursion inside a basic block */
	if (! is_Phi(curr)) {
		for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
-			if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
+			if (check_dependence(get_irn_n(curr, i), tgt, bl))
				return 1;
		}
	}
	return 0;
}
+#endif /* if 0 */
 /**
 * Check if a node is somehow data dependent on another one.
@@ -793,10 +844,11 @@ static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
 static int dependent_on(ir_node *n1, ir_node *n2)
 {
	ir_node *bl = get_nodes_block(n1);
-	ir_graph *irg = get_irn_irg(bl);
	assert(bl == get_nodes_block(n2));
-	return check_dependence(n1, n2, bl);
+
+	return heights_reachable_in_block(ir_heights, n1, n2);
+	//return check_dependence(n1, n2, bl);
 }
 static int cmp_call_dependecy(const void *c1, const void *c2)
@@ -824,12 +876,13 @@ static int cmp_call_dependecy(const void *c1, const void *c2)
 */
 static void link_calls_in_block_walker(ir_node *irn, void *data)
 {
-	if(is_Call(irn)) {
+	if(is_Call(irn) || (get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)) {
		be_abi_irg_t *env = data;
		ir_node *bl = get_nodes_block(irn);
		void *save = get_irn_link(bl);
-		env->call->flags.bits.irg_is_leaf = 0;
+		if (is_Call(irn))
+			env->call->flags.bits.irg_is_leaf = 0;
		set_irn_link(irn, save);
		set_irn_link(bl, irn);
@@ -857,6 +910,7 @@ static void process_calls_in_block(ir_node *bl, void *data)
	if(n > 0) {
		ir_node *keep;
		ir_node **nodes;
+		ir_node *copy = NULL;
		int i;
		nodes = obstack_finish(&env->obst);
@@ -870,10 +924,10 @@ static void process_calls_in_block(ir_node *bl, void *data)
			DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
			switch(get_irn_opcode(irn)) {
			case iro_Call:
-				curr_sp = adjust_call(env, irn, curr_sp);
+				curr_sp = adjust_call(env, irn, curr_sp, copy);
				break;
			case iro_Alloc:
-				curr_sp = adjust_alloc(env, irn, curr_sp);
+				curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
				break;
			default:
				break;
@@ -900,7 +954,10 @@ static void process_calls(be_abi_irg_t *env)
	env->call->flags.bits.irg_is_leaf = 1;
	irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+
+	ir_heights = heights_new(env->birg->irg);
	irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+	heights_free(ir_heights);
 }
 static void collect_return_walker(ir_node *irn, void *data)
@@ -928,7 +985,7 @@ static ir_node *setup_frame(be_abi_irg_t *env)
	int stack_nr = get_Proj_proj(stack);
	if(flags.try_omit_fp) {
-		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
		frame = stack;
	}
@@ -942,7 +999,7 @@ static ir_node *setup_frame(be_abi_irg_t *env)
			arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
		}
-		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
	}
	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
@@ -967,7 +1024,7 @@ static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct
	pmap_entry *ent;
	if(env->call->flags.bits.try_omit_fp) {
-		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
	}
	else {
@@ -1260,7 +1317,6 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, i
	/* clear SP entry, since it has already been grown.
 */
	pmap_insert(reg_map, (void *) isa->sp, NULL);
	for(i = 0; i < n_res; ++i) {
-		ir_node *res = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
		in[n] = be_abi_reg_map_get(reg_map, arg->reg);
@@ -1447,7 +1503,6 @@ static void modify_irg(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *end = get_irg_end_block(irg);
-	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *mem = get_irg_initial_mem(irg);
	ir_type *method_type = get_entity_type(get_irg_entity(irg));
	pset *dont_save = pset_new_ptr(8);
@@ -1586,7 +1641,7 @@ static void modify_irg(be_abi_irg_t *env)
	/* do the stack allocation BEFORE the barrier, or spill code
	   might be added before it */
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
-	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
	be_abi_reg_map_set(env->regs, sp, env->init_sp);
	barrier = create_barrier(env, bl, &mem, env->regs, 0);
@@ -1693,14 +1748,18 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	restore_optimization_state(&state);
	FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
-	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
-
	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
	env->irn_ops.impl = &abi_irn_ops;
	/* Lower all call nodes in the IRG. */
	process_calls(env);
+	/*
+		Beware: initialize the backend ABI call object after processing calls,
+		otherwise some information might not be available yet.
+	*/
+	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+
	/* Process the IRG */
	modify_irg(env);
@@ -1719,7 +1778,6 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
	env->call->cb->done(env->cb);
-	be_liveness(irg);
	return env;
 }
@@ -1766,11 +1824,16 @@ static void collect_stack_nodes_walker(ir_node *irn, void *data)
 {
	struct fix_stack_walker_info *info = data;
-	if(arch_irn_is(info->aenv, irn, modify_sp))
+	if (is_Block(irn))
+		return;
+
+	if (arch_irn_is(info->aenv, irn, modify_sp)) {
+		assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T);
		pset_insert_ptr(info->nodes, irn);
+	}
 }
-void be_abi_fix_stack_nodes(be_abi_irg_t *env)
+void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
 {
	dom_front_info_t *df;
	pset *stack_nodes = pset_new_ptr(16);
@@ -1783,64 +1846,52 @@ void be_abi_fix_stack_nodes(be_abi_irg_t *env)
	df = be_compute_dominance_frontiers(env->birg->irg);
	irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
	pset_insert_ptr(stack_nodes, env->init_sp);
-	be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
+	be_ssa_constr_set_phis(df, lv, stack_nodes, env->stack_phis);
	del_pset(stack_nodes);
-	/* Liveness could have changed due to Phi nodes. */
-	be_liveness(env->birg->irg);
-
	/* free these dominance frontiers */
	be_free_dominance_frontiers(df);
 }
-/**
- * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
- * into -1 or 1, respectively.
- * @param irn The node.
- * @return 1, if the direction of the IncSP was along, -1 if against.
- */ -static int get_dir(ir_node *irn) -{ - return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink); -} - static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias) { - const arch_env_t *aenv = env->birg->main_env->arch_env; + const arch_env_t *arch_env = env->birg->main_env->arch_env; int omit_fp = env->call->flags.bits.try_omit_fp; ir_node *irn; sched_foreach(bl, irn) { /* - If the node modifies the stack pointer by a constant offset, - record that in the bias. - */ - if(be_is_IncSP(irn)) { - int ofs = be_get_IncSP_offset(irn); - int dir = get_dir(irn); - - if(ofs == BE_STACK_FRAME_SIZE) { - ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg)); - be_set_IncSP_offset(irn, ofs); - } - - if(omit_fp) - bias += dir * ofs; + Check, if the node relates to an entity on the stack frame. + If so, set the true offset (including the bias) for that + node. + */ + entity *ent = arch_get_frame_entity(arch_env, irn); + if(ent) { + int offset = get_stack_entity_offset(env->frame, ent, bias); + arch_set_frame_offset(arch_env, irn, offset); + DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias)); } /* - Else check, if the node relates to an entity on the stack frame. - If so, set the true offset (including the bias) for that - node. - */ - else { - entity *ent = arch_get_frame_entity(aenv, irn); - if(ent) { - int offset = get_stack_entity_offset(env->frame, ent, bias); - arch_set_frame_offset(aenv, irn, offset); - DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset)); + If the node modifies the stack pointer by a constant offset, + record that in the bias. + */ + if(arch_irn_is(arch_env, irn, modify_sp)) { + int ofs = arch_get_sp_bias(arch_env, irn); + + if(be_is_IncSP(irn)) { + if(ofs == BE_STACK_FRAME_SIZE_EXPAND) { + ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg)); + be_set_IncSP_offset(irn, ofs); + } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) { + ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg)); + be_set_IncSP_offset(irn, ofs); + } } + + if(omit_fp) + bias += ofs; } } @@ -1971,8 +2022,17 @@ static entity *abi_get_frame_entity(const void *_self, const ir_node *irn) return NULL; } -static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias) +static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent) +{ +} + +static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias) +{ +} + +static int abi_get_sp_bias(const void *self, const ir_node *irn) { + return 0; } static const arch_irn_ops_if_t abi_irn_ops = { @@ -1982,7 +2042,13 @@ static const arch_irn_ops_if_t abi_irn_ops = { abi_classify, abi_get_flags, abi_get_frame_entity, - abi_set_stack_bias + abi_set_frame_entity, + abi_set_frame_offset, + abi_get_sp_bias, + NULL, /* get_inverse */ + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ }; static const arch_irn_handler_t abi_irn_handler = {