X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeabi.c;h=9bc4ca4d4f8a81fc1a3f3a8fa7d7b38981d932a4;hb=4764ebb82834c3370640980c9299f0dbb1ac598d;hp=492f8285ca6d0034bc10c5f8b6444e31dd2cd816;hpb=39cb52264857d7c21c7141ba82bb55adaa78064d;p=libfirm diff --git a/ir/be/beabi.c b/ir/be/beabi.c index 492f8285c..9bc4ca4d4 100644 --- a/ir/be/beabi.c +++ b/ir/be/beabi.c @@ -26,7 +26,6 @@ #include "config.h" #include "obst.h" -#include "offset.h" #include "irgopt.h" @@ -39,6 +38,7 @@ #include "irprintf_t.h" #include "irgopt.h" #include "irbitset.h" +#include "iropt_t.h" #include "height.h" #include "pdeq.h" #include "irtools.h" @@ -54,11 +54,15 @@ #include "besched.h" #include "beirg.h" #include "bessaconstr.h" +#include "bemodule.h" + +DEBUG_ONLY(static firm_dbg_module_t *dbg;) typedef struct _be_abi_call_arg_t { unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */ unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */ unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */ + unsigned callee : 1; /**< 1: someone called us. 0: We call another function */ int pos; const arch_register_t *reg; @@ -79,21 +83,17 @@ struct _be_abi_call_t { }; /** - * The ABI information for the current birg. + * The ABI information for the current graph. */ struct _be_abi_irg_t { - struct obstack obst; - be_irg_t *birg; /**< The back end IRG. */ - const arch_env_t *arch_env; survive_dce_t *dce_survivor; be_abi_call_t *call; /**< The ABI call information. */ - ir_type *method_type; /**< The type of the method of the IRG. */ ir_node *init_sp; /**< The node representing the stack pointer at the start of the function. */ - ir_node *reg_params; /**< The reg params node. */ + ir_node *start; /**< The be_Start params node. */ pmap *regs; /**< A map of all callee-save and ignore regs to their Projs to the RegParams node. */ @@ -107,10 +107,6 @@ struct _be_abi_irg_t { ir_node **calls; /**< flexible array containing all be_Call nodes */ arch_register_req_t *sp_req; - - be_stack_layout_t frame; /**< The stack frame model. */ - - DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */ }; static heights_t *ir_heights; @@ -139,7 +135,7 @@ static int cmp_call_arg(const void *a, const void *b, size_t n) { const be_abi_call_arg_t *p = a, *q = b; (void) n; - return !(p->is_res == q->is_res && p->pos == q->pos); + return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee); } /** @@ -148,8 +144,9 @@ static int cmp_call_arg(const void *a, const void *b, size_t n) * @param call the abi call * @param is_res true for call results, false for call arguments * @param pos position of the argument + * @param callee context type - if we are callee or caller */ -static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos) +static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee) { be_abi_call_arg_t arg; unsigned hash; @@ -157,6 +154,7 @@ static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos) memset(&arg, 0, sizeof(arg)); arg.is_res = is_res; arg.pos = pos; + arg.callee = callee; hash = is_res * 128 + pos; @@ -165,23 +163,18 @@ static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos) /** * Set an ABI call object argument. 
- * - * @param call the abi call - * @param is_res true for call results, false for call arguments - * @param pos position of the argument */ -static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos) +static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context) { - be_abi_call_arg_t arg; - unsigned hash; - - memset(&arg, 0, sizeof(arg)); - arg.is_res = is_res; - arg.pos = pos; - - hash = is_res * 128 + pos; - - return set_insert(call->params, &arg, sizeof(arg), hash); + unsigned hash = arg->is_res * 128 + arg->pos; + if (context & ABI_CONTEXT_CALLEE) { + arg->callee = 1; + set_insert(call->params, arg, sizeof(*arg), hash); + } + if (context & ABI_CONTEXT_CALLER) { + arg->callee = 0; + set_insert(call->params, arg, sizeof(*arg), hash); + } } /* Set the flags for a call. */ @@ -205,29 +198,49 @@ void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_regi } -void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after) +void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, + ir_mode *load_mode, unsigned alignment, + unsigned space_before, unsigned space_after, + be_abi_context_t context) { - be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos); - arg->on_stack = 1; - arg->load_mode = load_mode; - arg->alignment = alignment; - arg->space_before = space_before; - arg->space_after = space_after; + be_abi_call_arg_t arg; + memset(&arg, 0, sizeof(arg)); assert(alignment > 0 && "Alignment must be greater than 0"); + arg.on_stack = 1; + arg.load_mode = load_mode; + arg.alignment = alignment; + arg.space_before = space_before; + arg.space_after = space_after; + arg.is_res = 0; + arg.pos = arg_pos; + + remember_call_arg(&arg, call, context); } -void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg) +void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context) { - be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos); - arg->in_reg = 1; - arg->reg = reg; + be_abi_call_arg_t arg; + memset(&arg, 0, sizeof(arg)); + + arg.in_reg = 1; + arg.reg = reg; + arg.is_res = 0; + arg.pos = arg_pos; + + remember_call_arg(&arg, call, context); } -void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg) +void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context) { - be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos); - arg->in_reg = 1; - arg->reg = reg; + be_abi_call_arg_t arg; + memset(&arg, 0, sizeof(arg)); + + arg.in_reg = 1; + arg.reg = reg; + arg.is_res = 1; + arg.pos = arg_pos; + + remember_call_arg(&arg, call, context); } /* Get the flags of a ABI call object. */ @@ -329,7 +342,12 @@ static int stack_frame_compute_initial_offset(be_stack_layout_t *frame) ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type; ir_entity *ent = search_ent_with_offset(base, 0); - frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0; + if (ent == NULL) { + frame->initial_offset + = frame->stack_dir < 0 ? 
get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type); + } else { + frame->initial_offset = get_stack_entity_offset(frame, ent, 0); + } return frame->initial_offset; } @@ -362,8 +380,7 @@ static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *ar if (stack_dir > 0) { frame->order[0] = args; frame->order[2] = locals; - } - else { + } else { /* typical decreasing stack: locals have the * lowest addresses, arguments the highest */ frame->order[0] = locals; @@ -372,35 +389,6 @@ static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *ar return frame; } -#if 0 -/** Dumps the stack layout to file. */ -static void stack_layout_dump(FILE *file, be_stack_layout_t *frame) -{ - int i, j, n; - - ir_fprintf(file, "initial offset: %d\n", frame->initial_offset); - for (j = 0; j < N_FRAME_TYPES; ++j) { - ir_type *t = frame->order[j]; - - ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t)); - for (i = 0, n = get_compound_n_members(t); i < n; ++i) { - ir_entity *ent = get_compound_member(t, i); - ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0)); - } - } -} -#endif - -/** - * Returns non-zero if the call argument at given position - * is transfered on the stack. - */ -static inline int is_on_stack(be_abi_call_t *call, int pos) -{ - be_abi_call_arg_t *arg = get_call_arg(call, 0, pos); - return arg && !arg->in_reg; -} - /* ____ _ _ / ___|__ _| | |___ @@ -422,8 +410,8 @@ static inline int is_on_stack(be_abi_call_t *call, int pos) */ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) { - ir_graph *irg = env->birg->irg; - const arch_env_t *arch_env = env->birg->main_env->arch_env; + ir_graph *irg = get_irn_irg(irn); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); ir_type *call_tp = get_Call_type(irn); ir_node *call_ptr = get_Call_ptr(irn); int n_params = get_method_n_params(call_tp); @@ -434,7 +422,6 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) const arch_register_t *sp = arch_env->sp; be_abi_call_t *call = be_abi_call_new(sp->reg_class); ir_mode *mach_mode = sp->reg_class->mode; - struct obstack *obst = &env->obst; int no_alloc = call->flags.bits.frame_is_setup_on_call; int n_res = get_method_n_ress(call_tp); int do_seq = call->flags.bits.store_args_sequential && !no_alloc; @@ -465,8 +452,9 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) /* Insert code to put the stack arguments on the stack. */ assert(get_Call_n_params(irn) == n_params); + stack_param_idx = ALLOCAN(int, n_params); for (i = 0; i < n_params; ++i) { - be_abi_call_arg_t *arg = get_call_arg(call, 0, i); + be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0); assert(arg); if (arg->on_stack) { int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i)); @@ -474,21 +462,19 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) stack_size += round_up2(arg->space_before, arg->alignment); stack_size += round_up2(arg_size, arg->alignment); stack_size += round_up2(arg->space_after, arg->alignment); - obstack_int_grow(obst, i); - ++n_stack_params; + + stack_param_idx[n_stack_params++] = i; } } - stack_param_idx = obstack_finish(obst); /* Collect all arguments which are passed in registers. 
*/ + reg_param_idxs = ALLOCAN(int, n_params); for (i = 0; i < n_params; ++i) { - be_abi_call_arg_t *arg = get_call_arg(call, 0, i); + be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0); if (arg && arg->in_reg) { - obstack_int_grow(obst, i); - ++n_reg_params; + reg_param_idxs[n_reg_params++] = i; } } - reg_param_idxs = obstack_finish(obst); /* * If the stack is decreasing and we do not want to store sequentially, @@ -506,7 +492,9 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) dbgi = get_irn_dbg_info(irn); /* If there are some parameters which shall be passed on the stack. */ if (n_stack_params > 0) { - int curr_ofs = 0; + int curr_ofs = 0; + ir_node **in = ALLOCAN(ir_node*, n_stack_params+1); + unsigned n_in = 0; /* * Reverse list of stack parameters if call arguments are from left to right. @@ -524,12 +512,12 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) curr_mem = get_Call_mem(irn); if (! do_seq) { - obstack_ptr_grow(obst, curr_mem); + in[n_in++] = curr_mem; } for (i = 0; i < n_stack_params; ++i) { int p = stack_param_idx[i]; - be_abi_call_arg_t *arg = get_call_arg(call, 0, p); + be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0); ir_node *param = get_Call_param(irn, p); ir_node *addr = curr_sp; ir_node *mem = NULL; @@ -543,10 +531,10 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) */ if (do_seq) { curr_ofs = 0; - addr = curr_sp = be_new_IncSP(sp, bl, curr_sp, param_size + arg->space_before, 0); + addr = curr_sp = be_new_IncSP(sp, bl, curr_sp, + param_size + arg->space_before, 0); add_irn_dep(curr_sp, curr_mem); - } - else { + } else { curr_ofs += arg->space_before; curr_ofs = round_up2(curr_ofs, arg->alignment); @@ -566,16 +554,14 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) ir_node *store; ir_node *mem_input = do_seq ? curr_mem : new_NoMem(); store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0); - mem = new_r_Proj(bl, store, mode_M, pn_Store_M); - } - - /* Make a mem copy for compound arguments. */ - else { + mem = new_r_Proj(store, mode_M, pn_Store_M); + } else { + /* Make a mem copy for compound arguments. */ ir_node *copy; assert(mode_is_reference(get_irn_mode(param))); copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type); - mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular); + mem = new_r_Proj(copy, mode_M, pn_CopyB_M_regular); } curr_ofs += param_size; @@ -583,20 +569,17 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) if (do_seq) curr_mem = mem; else - obstack_ptr_grow(obst, mem); + in[n_in++] = mem; } - in = (ir_node **) obstack_finish(obst); - /* We need the sync only, if we didn't build the stores sequentially. */ if (! do_seq) { if (n_stack_params >= 1) { - curr_mem = new_r_Sync(bl, n_stack_params + 1, in); + curr_mem = new_r_Sync(bl, n_in, in); } else { curr_mem = get_Call_mem(irn); } } - obstack_free(obst, in); } /* check for the return_twice property */ @@ -667,9 +650,12 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) */ n_reg_results = n_res; + n_ins = 0; + in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states)); + /* make the back end call node and set its register requirements. 
*/ for (i = 0; i < n_reg_params; ++i) { - obstack_ptr_grow(obst, get_Call_param(irn, reg_param_idxs[i])); + in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]); } /* add state registers ins */ @@ -680,11 +666,9 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) ir_fprintf(stderr, "Adding %+F\n", regnode); #endif ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls)); - obstack_ptr_grow(obst, regnode); + in[n_ins++] = regnode; } - n_ins = n_reg_params + pset_new_size(&states); - - in = obstack_finish(obst); + assert(n_ins == (int) (n_reg_params + pset_new_size(&states))); /* ins collected, build the call */ if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) { @@ -705,7 +689,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) ARR_APP1(ir_node *, env->calls, low_call); /* create new stack pointer */ - curr_sp = new_r_Proj(bl, low_call, get_irn_mode(curr_sp), pn_be_Call_sp); + curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp); be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp, arch_register_req_type_ignore | arch_register_req_type_produces_sp); arch_set_irn_register(curr_sp, sp); @@ -714,7 +698,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) for (i = 0; i < n_res; ++i) { int pn; ir_node *proj = res_projs[i]; - be_abi_call_arg_t *arg = get_call_arg(call, 1, i); + be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0); /* returns values on stack not supported yet */ assert(arg->in_reg); @@ -729,7 +713,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) if (proj == NULL) { ir_type *res_type = get_method_res_type(call_tp, i); ir_mode *mode = get_type_mode(res_type); - proj = new_r_Proj(bl, low_call, mode, pn); + proj = new_r_Proj(low_call, mode, pn); res_projs[i] = proj; } else { set_Proj_pred(proj, low_call); @@ -747,12 +731,12 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) */ be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr); - DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call)); + DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call)); /* Set the register classes and constraints of the Call parameters. */ for (i = 0; i < n_reg_params; ++i) { int index = reg_param_idxs[i]; - be_abi_call_arg_t *arg = get_call_arg(call, 0, index); + be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0); assert(arg->reg != NULL); be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i, @@ -762,14 +746,13 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) /* Set the register constraints of the results. 
*/ for (i = 0; i < n_res; ++i) { ir_node *proj = res_projs[i]; - const be_abi_call_arg_t *arg = get_call_arg(call, 1, i); + const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0); int pn = get_Proj_proj(proj); assert(arg->in_reg); be_set_constr_single_reg_out(low_call, pn, arg->reg, 0); arch_set_irn_register(proj, arg->reg); } - obstack_free(obst, in); exchange(irn, low_call); /* kill the ProjT node */ @@ -786,41 +769,41 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) int n = 0; int curr_res_proj = pn_be_Call_first_res + n_reg_results; pset_new_iterator_t iter; + int n_ins; + + n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1; + in = ALLOCAN(ir_node *, n_ins); /* also keep the stack pointer */ - ++n; set_irn_link(curr_sp, (void*) sp); - obstack_ptr_grow(obst, curr_sp); + in[n++] = curr_sp; foreach_pset_new(&destroyed_regs, reg, iter) { - ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj); + ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj); /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */ be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0); arch_set_irn_register(proj, reg); set_irn_link(proj, (void*) reg); - obstack_ptr_grow(obst, proj); + in[n++] = proj; ++curr_res_proj; - ++n; } for (i = 0; i < n_reg_results; ++i) { ir_node *proj = res_projs[i]; const arch_register_t *reg = arch_get_irn_register(proj); set_irn_link(proj, (void*) reg); - obstack_ptr_grow(obst, proj); + in[n++] = proj; } - n += n_reg_results; + assert(n <= n_ins); /* create the Keep for the caller save registers */ - in = (ir_node **) obstack_finish(obst); keep = be_new_Keep(bl, n, in); for (i = 0; i < n; ++i) { const arch_register_t *reg = get_irn_link(in[i]); be_node_set_reg_class_in(keep, i, reg->reg_class); } - obstack_free(obst, in); } /* Clean up the stack. */ @@ -839,7 +822,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) } if (! 
mem_proj) { - mem_proj = new_r_Proj(bl, low_call, mode_M, pn_be_Call_M_regular); + mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular); keep_alive(mem_proj); } } @@ -849,7 +832,6 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) } be_abi_call_free(call); - obstack_free(obst, stack_param_idx); pset_new_destroy(&states); pset_new_destroy(&destroyed_regs); @@ -896,25 +878,24 @@ static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size, */ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp) { - ir_node *block; - ir_graph *irg; - ir_node *alloc_mem; - ir_node *alloc_res; - ir_type *type; - dbg_info *dbg; + ir_node *block = get_nodes_block(alloc); + ir_graph *irg = get_Block_irg(block); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); + ir_node *alloc_mem = NULL; + ir_node *alloc_res = NULL; + ir_type *type = get_Alloc_type(alloc); + dbg_info *dbg; const ir_edge_t *edge; - ir_node *new_alloc, *size, *addr, *ins[2]; + ir_node *new_alloc; + ir_node *count; + ir_node *size; + ir_node *ins[2]; unsigned stack_alignment; + /* all non-stack Alloc nodes should already be lowered before the backend */ assert(get_Alloc_where(alloc) == stack_alloc); - block = get_nodes_block(alloc); - irg = get_Block_irg(block); - alloc_mem = NULL; - alloc_res = NULL; - type = get_Alloc_type(alloc); - foreach_out_edge(alloc, edge) { ir_node *irn = get_edge_src_irn(edge); @@ -939,32 +920,34 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp return curr_sp; } - dbg = get_irn_dbg_info(alloc); - size = get_Alloc_size(alloc); + dbg = get_irn_dbg_info(alloc); + count = get_Alloc_count(alloc); - /* we might need to multiply the size with the element size */ + /* we might need to multiply the count with the element size */ if (type != firm_unknown_type && get_type_size_bytes(type) != 1) { - ir_mode *mode = get_irn_mode(size); + ir_mode *mode = get_irn_mode(count); tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode); ir_node *cnst = new_rd_Const(dbg, irg, tv); - size = new_rd_Mul(dbg, block, size, cnst, mode); + size = new_rd_Mul(dbg, block, count, cnst, mode); + } else { + size = count; } /* The stack pointer will be modified in an unknown manner. We cannot omit it. */ env->call->flags.bits.try_omit_fp = 0; - stack_alignment = 1 << env->arch_env->stack_alignment; + stack_alignment = 1 << arch_env->stack_alignment; size = adjust_alloc_size(stack_alignment, size, block, dbg); - new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size); + new_alloc = be_new_AddSP(arch_env->sp, block, curr_sp, size); set_irn_dbg_info(new_alloc, dbg); if (alloc_mem != NULL) { ir_node *addsp_mem; ir_node *sync; - addsp_mem = new_r_Proj(block, new_alloc, mode_M, pn_be_AddSP_M); + addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M); /* We need to sync the output mem of the AddSP with the input mem edge into the alloc node. */ @@ -980,12 +963,10 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp /* fix projnum of alloca res */ set_Proj_proj(alloc_res, pn_be_AddSP_res); - addr = alloc_res; - curr_sp = new_r_Proj(block, new_alloc, get_irn_mode(curr_sp), - pn_be_AddSP_sp); + curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp); return curr_sp; -} /* adjust_alloc */ +} /** * Adjust a Free. 
@@ -993,23 +974,20 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp */ static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp) { - ir_node *block; - ir_graph *irg; - ir_node *subsp, *mem, *res, *size, *sync; - ir_type *type; + ir_node *block = get_nodes_block(free); + ir_graph *irg = get_irn_irg(free); + ir_type *type = get_Free_type(free); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); + ir_mode *sp_mode = arch_env->sp->reg_class->mode; + dbg_info *dbg = get_irn_dbg_info(free); + ir_node *subsp, *mem, *res, *size, *sync; ir_node *in[2]; - ir_mode *sp_mode; unsigned stack_alignment; - dbg_info *dbg; + /* all non-stack-alloc Free nodes should already be lowered before the + * backend phase */ assert(get_Free_where(free) == stack_alloc); - block = get_nodes_block(free); - irg = get_irn_irg(block); - type = get_Free_type(free); - sp_mode = env->arch_env->sp->reg_class->mode; - dbg = get_irn_dbg_info(free); - /* we might need to multiply the size with the element size */ if (type != firm_unknown_type && get_type_size_bytes(type) != 1) { tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu); @@ -1021,17 +999,17 @@ static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp) size = get_Free_size(free); } - stack_alignment = 1 << env->arch_env->stack_alignment; + stack_alignment = 1 << arch_env->stack_alignment; size = adjust_alloc_size(stack_alignment, size, block, dbg); /* The stack pointer will be modified in an unknown manner. We cannot omit it. */ env->call->flags.bits.try_omit_fp = 0; - subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size); + subsp = be_new_SubSP(arch_env->sp, block, curr_sp, size); set_irn_dbg_info(subsp, dbg); - mem = new_r_Proj(block, subsp, mode_M, pn_be_SubSP_M); - res = new_r_Proj(block, subsp, sp_mode, pn_be_SubSP_sp); + mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M); + res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp); /* we need to sync the memory */ in[0] = get_Free_mem(free); @@ -1046,37 +1024,7 @@ static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp) curr_sp = res; return curr_sp; -} /* adjust_free */ - -/* the following function is replaced by the usage of the heights module */ -#if 0 -/** - * Walker for dependent_on(). - * This function searches a node tgt recursively from a given node - * but is restricted to the given block. - * @return 1 if tgt was reachable from curr, 0 if not. - */ -static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl) -{ - int n, i; - - if (get_nodes_block(curr) != bl) - return 0; - - if (curr == tgt) - return 1; - - /* Phi functions stop the recursion inside a basic block */ - if (! is_Phi(curr)) { - for (i = 0, n = get_irn_arity(curr); i < n; ++i) { - if (check_dependence(get_irn_n(curr, i), tgt, bl)) - return 1; - } - } - - return 0; } -#endif /* if 0 */ /** * Check if a node is somehow data dependent on another one. 
@@ -1156,29 +1104,35 @@ static void link_ops_in_block_walker(ir_node *irn, void *data) */ static void process_ops_in_block(ir_node *bl, void *data) { - be_abi_irg_t *env = data; - ir_node *curr_sp = env->init_sp; - ir_node *irn; - int n; + be_abi_irg_t *env = data; + ir_node *curr_sp = env->init_sp; + ir_node *irn; + ir_node **nodes; + int n; + int n_nodes; - for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) - obstack_ptr_grow(&env->obst, irn); + n_nodes = 0; + for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) { + ++n_nodes; + } + + nodes = ALLOCAN(ir_node*, n_nodes); + for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) { + nodes[n] = irn; + } /* If there were call nodes in the block. */ if (n > 0) { ir_node *keep; - ir_node **nodes; int i; - nodes = obstack_finish(&env->obst); - /* order the call nodes according to data dependency */ - qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency); + qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency); - for (i = n - 1; i >= 0; --i) { + for (i = n_nodes - 1; i >= 0; --i) { ir_node *irn = nodes[i]; - DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn)); + DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn)); switch (get_irn_opcode(irn)) { case iro_Call: if (! be_omit_fp) { @@ -1197,12 +1151,9 @@ static void process_ops_in_block(ir_node *bl, void *data) break; default: panic("invalid call"); - break; } } - obstack_free(&env->obst, nodes); - /* Keep the last stack state in the block by tying it to Keep node, * the proj from calls is already kept */ if (curr_sp != env->init_sp && @@ -1214,20 +1165,20 @@ static void process_ops_in_block(ir_node *bl, void *data) } set_irn_link(bl, curr_sp); -} /* process_ops_in_block */ +} /** * Adjust all call nodes in the graph to the ABI conventions. */ -static void process_calls(be_abi_irg_t *env) +static void process_calls(ir_graph *irg) { - ir_graph *irg = env->birg->irg; + be_abi_irg_t *abi = be_get_irg_abi(irg); - env->call->flags.bits.irg_is_leaf = 1; - irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env); + abi->call->flags.bits.irg_is_leaf = 1; + irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi); - ir_heights = heights_new(env->birg->irg); - irg_block_walk_graph(irg, NULL, process_ops_in_block, env); + ir_heights = heights_new(irg); + irg_block_walk_graph(irg, NULL, process_ops_in_block, abi); heights_free(ir_heights); } @@ -1244,27 +1195,30 @@ static void process_calls(be_abi_irg_t *env) * * @return the stack argument layout type */ -static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, +static ir_type *compute_arg_type(be_abi_irg_t *env, ir_graph *irg, + be_abi_call_t *call, ir_type *method_type, ir_type *val_param_tp, ir_entity ***param_map) { + const arch_env_t *arch_env = be_get_irg_arch_env(irg); int dir = env->call->flags.bits.left_to_right ? 1 : -1; - int inc = env->birg->main_env->arch_env->stack_dir * dir; + int inc = arch_env->stack_dir * dir; int n = get_method_n_params(method_type); int curr = inc > 0 ? 
0 : n - 1; + struct obstack *obst = be_get_be_obst(irg); int ofs = 0; char buf[128]; ir_type *res; int i; - ident *id = get_entity_ident(get_irg_entity(env->birg->irg)); + ident *id = get_entity_ident(get_irg_entity(irg)); ir_entity **map; - *param_map = map = OALLOCN(&env->obst, ir_entity*, n); + *param_map = map = OALLOCN(obst, ir_entity*, n); res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8))); for (i = 0; i < n; ++i, curr += inc) { ir_type *param_type = get_method_param_type(method_type, curr); - be_abi_call_arg_t *arg = get_call_arg(call, 0, curr); + be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1); map[i] = NULL; if (arg->on_stack) { @@ -1274,8 +1228,6 @@ static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, arg->stack_ent = copy_entity_own(val_ent, res); set_entity_link(val_ent, arg->stack_ent); set_entity_link(arg->stack_ent, NULL); - /* must be automatic to set a fixed layout */ - set_entity_allocation(arg->stack_ent, allocation_automatic); } else { /* create a new entity */ snprintf(buf, sizeof(buf), "param_%d", i); @@ -1310,12 +1262,11 @@ static int cmp_regs(const void *a, const void *b) return p->reg->reg_class - q->reg->reg_class; } -static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map) +static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map) { pmap_entry *ent; int n = pmap_count(reg_map); int i = 0; - reg_node_map_t *res = OALLOCN(obst, reg_node_map_t, n); foreach_pmap(reg_map, ent) { res[i].reg = ent->key; @@ -1324,43 +1275,44 @@ static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map) } qsort(res, n, sizeof(res[0]), cmp_regs); - return res; } /** * Creates a barrier. */ -static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req) +static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs, + int in_req) { - int n_regs = pmap_count(regs); - int n; - ir_node *irn; - ir_node **in; + int n_regs = pmap_count(regs); + int n; + ir_node *irn; + ir_node **in; reg_node_map_t *rm; - rm = reg_map_to_arr(&env->obst, regs); - - for (n = 0; n < n_regs; ++n) - obstack_ptr_grow(&env->obst, rm[n].irn); + in = ALLOCAN(ir_node*, n_regs+1); + rm = ALLOCAN(reg_node_map_t, n_regs); + reg_map_to_arr(rm, regs); + for (n = 0; n < n_regs; ++n) { + in[n] = rm[n].irn; + } if (mem) { - obstack_ptr_grow(&env->obst, *mem); - n++; + in[n++] = *mem; } - in = (ir_node **) obstack_finish(&env->obst); irn = be_new_Barrier(bl, n, in); - obstack_free(&env->obst, in); for (n = 0; n < n_regs; ++n) { ir_node *pred = rm[n].irn; const arch_register_t *reg = rm[n].reg; arch_register_type_t add_type = 0; ir_node *proj; + const backend_info_t *info; /* stupid workaround for now... as not all nodes report register * requirements. 
*/ - if (!is_Phi(pred)) { + info = be_get_info(skip_Proj(pred)); + if (info != NULL && info->out_infos != NULL) { const arch_register_req_t *ireq = arch_get_register_req_out(pred); if (ireq->type & arch_register_req_type_ignore) add_type |= arch_register_req_type_ignore; @@ -1368,7 +1320,7 @@ static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pm add_type |= arch_register_req_type_produces_sp; } - proj = new_r_Proj(bl, irn, get_irn_mode(pred), n); + proj = new_r_Proj(irn, get_irn_mode(pred), n); be_node_set_reg_class_in(irn, n, reg->reg_class); if (in_req) be_set_constr_single_reg_in(irn, n, reg, 0); @@ -1379,10 +1331,9 @@ static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pm } if (mem) { - *mem = new_r_Proj(bl, irn, mode_M, n); + *mem = new_r_Proj(irn, mode_M, n); } - obstack_free(&env->obst, rm); return irn; } @@ -1399,7 +1350,8 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) { be_abi_call_t *call = env->call; - const arch_env_t *arch_env = env->birg->main_env->arch_env; + ir_graph *irg = get_Block_irg(bl); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); dbg_info *dbgi; pmap *reg_map = pmap_create(); ir_node *keep = pmap_get(env->keep_map, bl); @@ -1423,13 +1375,13 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, if (keep) { stack = get_irn_n(keep, 0); kill_node(keep); - remove_End_keepalive(get_irg_end(env->birg->irg), keep); + remove_End_keepalive(get_irg_end(irg), keep); } /* Insert results for Return into the register map. */ for (i = 0; i < n_res; ++i) { ir_node *res = get_Return_res(irn, i); - be_abi_call_arg_t *arg = get_call_arg(call, 1, i); + be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1); assert(arg->in_reg && "return value must be passed in register"); pmap_insert(reg_map, (void *) arg->reg, res); } @@ -1444,7 +1396,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, be_abi_reg_map_set(reg_map, arch_env->sp, stack); /* Make the Epilogue node and call the arch's epilogue maker. */ - create_barrier(env, bl, &mem, reg_map, 1); + create_barrier(bl, &mem, reg_map, 1); call->cb->epilogue(env->cb, bl, &mem, reg_map); /* @@ -1453,8 +1405,8 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, */ in_max = pmap_count(reg_map) + n_res + 2; - in = OALLOCN(&env->obst, ir_node*, in_max); - regs = OALLOCN(&env->obst, arch_register_t const*, in_max); + in = ALLOCAN(ir_node*, in_max); + regs = ALLOCAN(arch_register_t const*, in_max); in[0] = mem; in[1] = be_abi_reg_map_get(reg_map, arch_env->sp); @@ -1465,7 +1417,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, /* clear SP entry, since it has already been grown. */ pmap_insert(reg_map, (void *) arch_env->sp, NULL); for (i = 0; i < n_res; ++i) { - be_abi_call_arg_t *arg = get_call_arg(call, 1, i); + be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1); in[n] = be_abi_reg_map_get(reg_map, arg->reg); regs[n++] = arg->reg; @@ -1490,7 +1442,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, } /* we have to pop the shadow parameter in in case of struct returns */ pop = call->pop; - ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in); + ret = be_new_Return(dbgi, irg, bl, n_res, pop, n, in); /* Set the register classes of the return's parameter accordingly. 
*/ for (i = 0; i < n; ++i) { @@ -1501,7 +1453,6 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, } /* Free the space of the Epilog's in array and the register <-> proj map. */ - obstack_free(&env->obst, in); pmap_destroy(reg_map); return ret; @@ -1550,7 +1501,6 @@ static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ct argument_ent = copy_entity_own(ent, frame_tp); /* must be automatic to set a fixed layout */ - set_entity_allocation(argument_ent, allocation_automatic); set_entity_offset(argument_ent, offset); offset += get_type_size_bytes(tp); @@ -1615,19 +1565,19 @@ static void lower_frame_sels_walker(ir_node *irn, void *data) * In the default case we move the entity to the frame type and create * a backing store into the first block. */ -static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list) +static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_graph *irg, + ent_pos_pair *value_param_list) { - be_abi_call_t *call = env->call; - ir_graph *irg = env->birg->irg; + be_abi_call_t *call = env->call; + const arch_env_t *arch_env = be_get_irg_arch_env(irg); ent_pos_pair *entry, *new_list; ir_type *frame_tp; int i, n = ARR_LEN(value_param_list); - DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;) new_list = NULL; for (i = 0; i < n; ++i) { int pos = value_param_list[i].pos; - be_abi_call_arg_t *arg = get_call_arg(call, 0, pos); + be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1); if (arg->in_reg) { DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos)); @@ -1638,16 +1588,11 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *val if (new_list != NULL) { /* ok, change the graph */ ir_node *start_bl = get_irg_start_block(irg); - ir_node *first_bl = NULL; - ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl; - const ir_edge_t *edge; + ir_node *first_bl = get_first_block_succ(start_bl); + ir_node *frame, *imem, *nmem, *store, *mem, *args; optimization_state_t state; unsigned offset; - foreach_block_succ(start_bl, edge) { - first_bl = get_edge_src_irn(edge); - break; - } assert(first_bl && first_bl != start_bl); /* we had already removed critical edges, so the following assertion should be always true. 
*/ @@ -1659,7 +1604,7 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *val save_optimization_state(&state); set_optimize(0); - nmem = new_r_Proj(start_bl, get_irg_start(irg), mode_M, pn_Start_M); + nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M); restore_optimization_state(&state); /* reroute all edges to the new memory source */ @@ -1668,7 +1613,6 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *val store = NULL; mem = imem; args = get_irg_args(irg); - args_bl = get_nodes_block(args); for (entry = new_list; entry != NULL; entry = entry->next) { int i = entry->pos; ir_type *tp = get_entity_type(entry->ent); @@ -1676,14 +1620,14 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *val ir_node *addr; /* address for the backing store */ - addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent); + addr = be_new_FrameAddr(arch_env->sp->reg_class, first_bl, frame, entry->ent); if (store) - mem = new_r_Proj(first_bl, store, mode_M, pn_Store_M); + mem = new_r_Proj(store, mode_M, pn_Store_M); /* the backing store itself */ store = new_r_Store(first_bl, mem, addr, - new_r_Proj(args_bl, args, mode, i), 0); + new_r_Proj(args, mode, i), 0); } /* the new memory Proj gets the last Proj from store */ set_Proj_pred(nmem, store); @@ -1699,9 +1643,10 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *val for (entry = new_list; entry != NULL; entry = entry->next) { ir_entity *ent = entry->ent; - /* If the entity is still on the argument type, move it to the frame type. - This happens if the value_param type was build due to compound - params. */ + /* If the entity is still on the argument type, move it to the + * frame type. + * This happens if the value_param type was build due to compound + * params. */ if (get_entity_owner(ent) != frame_tp) { ir_type *tp = get_entity_type(ent); unsigned align = get_type_alignment_bytes(tp); @@ -1709,9 +1654,7 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *val offset += align - 1; offset &= ~(align - 1); set_entity_owner(ent, frame_tp); - add_class_member(frame_tp, ent); /* must be automatic to set a fixed layout */ - set_entity_allocation(ent, allocation_automatic); set_entity_offset(ent, offset); offset += get_type_size_bytes(tp); } @@ -1742,8 +1685,8 @@ static void fix_start_block(ir_graph *irg) continue; if (block != start_block) { ir_node *jmp = new_r_Jmp(start_block); - set_Block_cfgpred(block, get_edge_src_pos(edge), jmp); + set_irg_initial_exec(irg, jmp); return; } } @@ -1753,7 +1696,8 @@ static void fix_start_block(ir_graph *irg) /** * Update the entity of Sels to the outer value parameters. */ -static void update_outer_frame_sels(ir_node *irn, void *env) { +static void update_outer_frame_sels(ir_node *irn, void *env) +{ lower_frame_sels_env_t *ctx = env; ir_node *ptr; ir_entity *ent; @@ -1803,7 +1747,9 @@ static void fix_outer_variable_access(be_abi_irg_t *env, if (! is_method_entity(ent)) continue; - if (get_entity_peculiarity(ent) == peculiarity_description) + + irg = get_entity_irg(ent); + if (irg == NULL) continue; /* @@ -1812,7 +1758,6 @@ static void fix_outer_variable_access(be_abi_irg_t *env, */ ctx->static_link_pos = 0; - irg = get_entity_irg(ent); irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx); } } @@ -1820,18 +1765,19 @@ static void fix_outer_variable_access(be_abi_irg_t *env, /** * Modify the irg itself and the frame type. 
*/ -static void modify_irg(be_abi_irg_t *env) +static void modify_irg(ir_graph *irg) { - be_abi_call_t *call = env->call; - const arch_env_t *arch_env= env->birg->main_env->arch_env; - const arch_register_t *sp = arch_env->sp; - ir_graph *irg = env->birg->irg; - ir_node *start_bl; + be_abi_irg_t *env = be_get_irg_abi(irg); + be_abi_call_t *call = env->call; + const arch_env_t *arch_env = be_get_irg_arch_env(irg); + const arch_register_t *sp = arch_env->sp; + ir_type *method_type = get_entity_type(get_irg_entity(irg)); + struct obstack *obst = be_get_be_obst(irg); + be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg); ir_node *end; ir_node *old_mem; ir_node *new_mem_proj; ir_node *mem; - ir_type *method_type = get_entity_type(get_irg_entity(irg)); int n_params; int i, n; @@ -1841,7 +1787,7 @@ static void modify_irg(be_abi_irg_t *env) reg_node_map_t *rm; const arch_register_t *fp_reg; ir_node *frame_pointer; - ir_node *reg_params_bl; + ir_node *start_bl; ir_node **args; ir_node *arg_tuple; const ir_edge_t *edge; @@ -1849,8 +1795,6 @@ static void modify_irg(be_abi_irg_t *env) lower_frame_sels_env_t ctx; ir_entity **param_map; - DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;) - DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg)); /* Must fetch memory here, otherwise the start Barrier gets the wrong @@ -1872,15 +1816,20 @@ static void modify_irg(be_abi_irg_t *env) } } - arg_type = compute_arg_type(env, call, method_type, tp, ¶m_map); + arg_type = compute_arg_type(env, irg, call, method_type, tp, ¶m_map); /* Convert the Sel nodes in the irg to frame addr nodes: */ ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0); ctx.frame = get_irg_frame(irg); - ctx.sp_class = env->arch_env->sp->reg_class; - ctx.link_class = env->arch_env->link_class; + ctx.sp_class = arch_env->sp->reg_class; + ctx.link_class = arch_env->link_class; ctx.frame_tp = get_irg_frame_type(irg); + /* layout the stackframe now */ + if (get_type_state(ctx.frame_tp) == layout_undefined) { + default_layout_compound_type(ctx.frame_tp); + } + /* we will possible add new entities to the frame: set the layout to undefined */ assert(get_type_state(ctx.frame_tp) == layout_fixed); set_type_state(ctx.frame_tp, layout_undefined); @@ -1898,7 +1847,7 @@ static void modify_irg(be_abi_irg_t *env) env->regs = pmap_create(); n_params = get_method_n_params(method_type); - args = OALLOCNZ(&env->obst, ir_node*, n_params); + args = OALLOCNZ(obst, ir_node*, n_params); /* * for inner function we must now fix access to outer frame entities. @@ -1915,7 +1864,7 @@ static void modify_irg(be_abi_irg_t *env) * In the default case we move the entity to the frame type and create * a backing store into the first block. 
*/ - fix_address_of_parameter_access(env, ctx.value_param_list); + fix_address_of_parameter_access(env, irg, ctx.value_param_list); DEL_ARR_F(ctx.value_param_list); irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK); @@ -1932,11 +1881,13 @@ static void modify_irg(be_abi_irg_t *env) } bet_type = call->cb->get_between_type(env->cb); - stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map); + stack_frame_init(stack_layout, arg_type, bet_type, + get_irg_frame_type(irg), arch_env->stack_dir, param_map); + stack_layout->sp_relative = call->flags.bits.try_omit_fp; /* Count the register params and add them to the number of Projs for the RegParams node */ for (i = 0; i < n_params; ++i) { - be_abi_call_arg_t *arg = get_call_arg(call, 0, i); + be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1); if (arg->in_reg && args[i]) { assert(arg->reg != sp && "cannot use stack pointer as parameter register"); assert(i == get_Proj_proj(args[i])); @@ -1959,11 +1910,13 @@ static void modify_irg(be_abi_irg_t *env) } } + /* handle start block here (place a jump in the block) */ + fix_start_block(irg); + pmap_insert(env->regs, (void *) sp, NULL); pmap_insert(env->regs, (void *) arch_env->bp, NULL); - reg_params_bl = get_irg_start_block(irg); - env->reg_params = be_new_RegParams(reg_params_bl, pmap_count(env->regs)); - add_irn_dep(env->reg_params, get_irg_start(irg)); + start_bl = get_irg_start_block(irg); + env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1); /* * make proj nodes for the callee save registers. @@ -1973,7 +1926,8 @@ static void modify_irg(be_abi_irg_t *env) * the old Proj from start for that argument. */ - rm = reg_map_to_arr(&env->obst, env->regs); + rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs)); + reg_map_to_arr(rm, env->regs); for (i = 0, n = pmap_count(env->regs); i < n; ++i) { arch_register_t *reg = (void *) rm[i].reg; ir_mode *mode = reg->reg_class->mode; @@ -1985,33 +1939,31 @@ static void modify_irg(be_abi_irg_t *env) add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore; assert(nr >= 0); - proj = new_r_Proj(reg_params_bl, env->reg_params, mode, nr); + proj = new_r_Proj(env->start, mode, nr + 1); pmap_insert(env->regs, (void *) reg, proj); - be_set_constr_single_reg_out(env->reg_params, nr, reg, add_type); + be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type); arch_set_irn_register(proj, reg); DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name)); } - obstack_free(&env->obst, rm); /* create a new initial memory proj */ assert(is_Proj(old_mem)); - new_mem_proj = new_r_Proj(get_nodes_block(old_mem), - new_r_Unknown(irg, mode_T), mode_M, - get_Proj_proj(old_mem)); + arch_set_out_register_req(env->start, 0, arch_no_register_req); + new_mem_proj = new_r_Proj(env->start, mode_M, 0); mem = new_mem_proj; + set_irg_initial_mem(irg, mem); /* Generate the Prologue */ - fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias); + fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &stack_layout->initial_bias); /* do the stack allocation BEFORE the barrier, or spill code might be added before it */ env->init_sp = be_abi_reg_map_get(env->regs, sp); - start_bl = get_irg_start_block(irg); env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0); be_abi_reg_map_set(env->regs, sp, env->init_sp); - create_barrier(env, start_bl, &mem, env->regs, 0); + create_barrier(start_bl, &mem, env->regs, 0); env->init_sp = 
be_abi_reg_map_get(env->regs, sp); arch_set_irn_register(env->init_sp, sp); @@ -2021,9 +1973,11 @@ static void modify_irg(be_abi_irg_t *env) pset_insert_ptr(env->ignore_regs, fp_reg); /* rewire old mem users to new mem */ - set_Proj_pred(new_mem_proj, get_Proj_pred(old_mem)); exchange(old_mem, mem); + /* keep the mem (for functions with an endless loop = no return) */ + keep_alive(mem); + set_irg_initial_mem(irg, mem); /* Now, introduce stack param nodes for all parameters passed on the stack */ @@ -2038,24 +1992,24 @@ static void modify_irg(be_abi_irg_t *env) ir_mode *mode; nr = MIN(nr, n_params); - arg = get_call_arg(call, 0, nr); + arg = get_call_arg(call, 0, nr, 1); param_type = get_method_param_type(method_type, nr); if (arg->in_reg) { repl = pmap_get(env->regs, (void *) arg->reg); } else if (arg->on_stack) { - ir_node *addr = be_new_FrameAddr(sp->reg_class, reg_params_bl, frame_pointer, arg->stack_ent); + ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent); /* For atomic parameters which are actually used, we create a Load node. */ if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) { ir_mode *mode = get_type_mode(param_type); ir_mode *load_mode = arg->load_mode; - ir_node *load = new_r_Load(reg_params_bl, new_NoMem(), addr, load_mode, cons_floats); - repl = new_r_Proj(reg_params_bl, load, load_mode, pn_Load_res); + ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats); + repl = new_r_Proj(load, load_mode, pn_Load_res); if (mode != load_mode) { - repl = new_r_Conv(reg_params_bl, repl, mode); + repl = new_r_Conv(start_bl, repl, mode); } } else { /* The stack parameter is not primitive (it is a struct or array), @@ -2094,20 +2048,16 @@ static void modify_irg(be_abi_irg_t *env) exchange(irn, ret); } } + /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then, the code is dead and will never be executed. 
*/ - - obstack_free(&env->obst, args); - - /* handle start block here (place a jump in the block) */ - fix_start_block(irg); } /** Fix the state inputs of calls that still hang on unknowns */ -static -void fix_call_state_inputs(be_abi_irg_t *env) +static void fix_call_state_inputs(ir_graph *irg) { - const arch_env_t *arch_env = env->arch_env; + be_abi_irg_t *env = be_get_irg_abi(irg); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); int i, n, n_states; arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0); @@ -2152,12 +2102,11 @@ static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method) { ir_type *type = get_entity_type(method); ident *old_id = get_entity_ld_ident(method); - ident *id = id_mangle3("L", old_id, "$stub"); + ident *id = id_mangle3("", old_id, "$stub"); ir_type *parent = be->pic_trampolines_type; ir_entity *ent = new_entity(parent, old_id, type); set_entity_ld_ident(ent, id); - set_entity_visibility(ent, visibility_local); - set_entity_variability(ent, variability_uninitialized); + set_entity_visibility(ent, ir_visibility_private); return ent; } @@ -2179,14 +2128,13 @@ static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method) static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity) { ident *old_id = get_entity_ld_ident(entity); - ident *id = id_mangle3("L", old_id, "$non_lazy_ptr"); + ident *id = id_mangle3("", old_id, "$non_lazy_ptr"); ir_type *e_type = get_entity_type(entity); - ir_type *type = new_type_pointer(id, e_type, mode_P_data); + ir_type *type = new_type_pointer(e_type); ir_type *parent = be->pic_symbols_type; ir_entity *ent = new_entity(parent, old_id, type); set_entity_ld_ident(ent, id); - set_entity_visibility(ent, visibility_local); - set_entity_variability(ent, variability_uninitialized); + set_entity_visibility(ent, ir_visibility_private); return ent; } @@ -2209,23 +2157,23 @@ static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity) */ static int can_address_relative(ir_entity *entity) { - return get_entity_visibility(entity) != visibility_external_allocated; + return get_entity_visibility(entity) != ir_visibility_external + && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE); } /** patches SymConsts to work in position independent code */ static void fix_pic_symconsts(ir_node *node, void *data) { - ir_graph *irg; ir_node *pic_base; ir_node *add; ir_node *block; - ir_node *unknown; ir_mode *mode; ir_node *load; ir_node *load_res; - be_abi_irg_t *env = data; + ir_graph *irg = get_irn_irg(node); int arity, i; - be_main_env_t *be = env->birg->main_env; + be_main_env_t *be = be_get_irg_main_env(irg); + (void) data; arity = get_irn_arity(node); for (i = 0; i < arity; ++i) { @@ -2240,7 +2188,6 @@ static void fix_pic_symconsts(ir_node *node, void *data) entity = get_SymConst_entity(pred); block = get_nodes_block(pred); - irg = get_irn_irg(pred); /* calls can jump to relative addresses, so we can directly jump to the (relatively) known call address or the trampoline */ @@ -2261,8 +2208,7 @@ static void fix_pic_symconsts(ir_node *node, void *data) /* everything else is accessed relative to EIP */ mode = get_irn_mode(pred); - unknown = new_r_Unknown(irg, mode); - pic_base = arch_code_generator_get_pic_base(env->birg->cg); + pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(irg)); /* all ok now for locally constructed stuff */ if (can_address_relative(entity)) { @@ -2278,7 +2224,7 @@ static void fix_pic_symconsts(ir_node *node, void *data) dbgi = get_irn_dbg_info(pred); pic_symbol = 
get_pic_symbol(be, entity); pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code, - pic_symbol, NULL); + pic_symbol, NULL); add = new_r_Add(block, pic_base, pic_symconst, mode); mark_irn_visited(add); @@ -2286,82 +2232,82 @@ static void fix_pic_symconsts(ir_node *node, void *data) module. The loads are always safe and can therefore float and need no memory input */ load = new_r_Load(block, new_NoMem(), add, mode, cons_floats); - load_res = new_r_Proj(block, load, mode, pn_Load_res); + load_res = new_r_Proj(load, mode, pn_Load_res); set_irn_n(node, i, load_res); } } -be_abi_irg_t *be_abi_introduce(be_irg_t *birg) +be_abi_irg_t *be_abi_introduce(ir_graph *irg) { - be_abi_irg_t *env = XMALLOC(be_abi_irg_t); - ir_node *old_frame = get_irg_frame(birg->irg); - ir_graph *irg = birg->irg; + be_abi_irg_t *env = XMALLOCZ(be_abi_irg_t); + ir_node *old_frame = get_irg_frame(irg); + struct obstack *obst = be_get_be_obst(irg); + be_options_t *options = be_get_irg_options(irg); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); + ir_entity *entity = get_irg_entity(irg); + ir_type *method_type = get_entity_type(entity); pmap_entry *ent; ir_node *dummy; - optimization_state_t state; unsigned *limited_bitset; arch_register_req_t *sp_req; - be_omit_fp = birg->main_env->options->omit_fp; - be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp; - - obstack_init(&env->obst); + be_omit_fp = options->omit_fp; + be_omit_leaf_fp = options->omit_leaf_fp; - env->arch_env = birg->main_env->arch_env; - env->method_type = get_entity_type(get_irg_entity(irg)); - env->call = be_abi_call_new(env->arch_env->sp->reg_class); - arch_env_get_call_abi(env->arch_env, env->method_type, env->call); + obstack_init(obst); env->ignore_regs = pset_new_ptr_default(); env->keep_map = pmap_create(); env->dce_survivor = new_survive_dce(); - env->birg = birg; - sp_req = OALLOCZ(&env->obst, arch_register_req_t); + sp_req = OALLOCZ(obst, arch_register_req_t); env->sp_req = sp_req; sp_req->type = arch_register_req_type_limited | arch_register_req_type_produces_sp; - sp_req->cls = arch_register_get_class(env->arch_env->sp); + sp_req->cls = arch_register_get_class(arch_env->sp); - limited_bitset = rbitset_obstack_alloc(&env->obst, sp_req->cls->n_regs); - rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp)); + limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs); + rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp)); sp_req->limited = limited_bitset; - if (env->arch_env->sp->type & arch_register_type_ignore) { + if (arch_env->sp->type & arch_register_type_ignore) { sp_req->type |= arch_register_req_type_ignore; } - /* Beware: later we replace this node by the real one, ensure it is not CSE'd - to another Unknown or the stack pointer gets used */ - save_optimization_state(&state); - set_optimize(0); - env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode); - restore_optimization_state(&state); + /* break here if backend provides a custom API. 
+ * Note: we shouldn't have to setup any be_abi_irg_t* stuff at all, + * but need more cleanup to make this work + */ + be_set_irg_abi(irg, env); + if (arch_env->custom_abi) + return env; - FIRM_DBG_REGISTER(env->dbg, "firm.be.abi"); + env->call = be_abi_call_new(arch_env->sp->reg_class); + arch_env_get_call_abi(arch_env, method_type, env->call); - env->calls = NEW_ARR_F(ir_node*, 0); + env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode); + env->calls = NEW_ARR_F(ir_node*, 0); - if (birg->main_env->options->pic) { + if (options->pic) { irg_walk_graph(irg, fix_pic_symconsts, NULL, env); } /* Lower all call nodes in the IRG. */ - process_calls(env); + process_calls(irg); /* Beware: init backend abi call object after processing calls, otherwise some information might be not yet available. */ - env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg); + env->cb = env->call->cb->init(env->call, irg); /* Process the IRG */ - modify_irg(env); + modify_irg(irg); /* fix call inputs for state registers */ - fix_call_state_inputs(env); + fix_call_state_inputs(irg); /* We don't need the keep map anymore. */ pmap_destroy(env->keep_map); @@ -2386,14 +2332,20 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg) return env; } -void be_abi_free(be_abi_irg_t *env) +void be_abi_free(ir_graph *irg) { - be_abi_call_free(env->call); + be_abi_irg_t *env = be_get_irg_abi(irg); + + if (env->call != NULL) + be_abi_call_free(env->call); free_survive_dce(env->dce_survivor); - del_pset(env->ignore_regs); - pmap_destroy(env->regs); - obstack_free(&env->obst, NULL); + if (env->ignore_regs != NULL) + del_pset(env->ignore_regs); + if (env->regs != NULL) + pmap_destroy(env->regs); free(env); + + be_set_irg_abi(irg, NULL); } void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs) @@ -2426,14 +2378,7 @@ void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t * } } -/* Returns the stack layout from a abi environment. 
*/ -const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) -{ - return &abi->frame; -} - /* - _____ _ ____ _ _ | ___(_)_ __ / ___|| |_ __ _ ___| | __ | |_ | \ \/ / \___ \| __/ _` |/ __| |/ / @@ -2453,9 +2398,16 @@ typedef struct fix_stack_walker_env_t { */ static void collect_stack_nodes_walker(ir_node *node, void *data) { + ir_node *insn = node; fix_stack_walker_env_t *env = data; const arch_register_req_t *req; + if (is_Proj(node)) { + insn = get_Proj_pred(node); + } + + if (arch_irn_get_n_outs(insn) == 0) + return; if (get_irn_mode(node) == mode_T) return; @@ -2466,18 +2418,19 @@ static void collect_stack_nodes_walker(ir_node *node, void *data) ARR_APP1(ir_node*, env->sp_nodes, node); } -void be_abi_fix_stack_nodes(be_abi_irg_t *env) +void be_abi_fix_stack_nodes(ir_graph *irg) { + be_abi_irg_t *abi = be_get_irg_abi(irg); + be_lv_t *lv = be_get_irg_liveness(irg); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); be_ssa_construction_env_t senv; int i, len; ir_node **phis; - be_irg_t *birg = env->birg; - be_lv_t *lv = be_get_birg_liveness(birg); fix_stack_walker_env_t walker_env; walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0); - irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env); + irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env); /* nothing to be done if we didn't find any node, in fact we mustn't * continue, as for endless loops incsp might have had no users and is bad @@ -2489,7 +2442,7 @@ void be_abi_fix_stack_nodes(be_abi_irg_t *env) return; } - be_ssa_construction_init(&senv, birg); + be_ssa_construction_init(&senv, irg); be_ssa_construction_add_copies(&senv, walker_env.sp_nodes, ARR_LEN(walker_env.sp_nodes)); be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes, @@ -2509,8 +2462,8 @@ void be_abi_fix_stack_nodes(be_abi_irg_t *env) len = ARR_LEN(phis); for (i = 0; i < len; ++i) { ir_node *phi = phis[i]; - be_set_phi_reg_req(phi, env->sp_req); - arch_set_irn_register(phi, env->arch_env->sp); + be_set_phi_reg_req(phi, abi->sp_req); + arch_set_irn_register(phi, arch_env->sp); } be_ssa_construction_destroy(&senv); @@ -2520,17 +2473,19 @@ void be_abi_fix_stack_nodes(be_abi_irg_t *env) /** * Fix all stack accessing operations in the block bl. * - * @param env the abi environment * @param bl the block to process * @param real_bias the bias value * * @return the bias at the end of this block */ -static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias) +static int process_stack_bias(ir_node *bl, int real_bias) { - int omit_fp = env->call->flags.bits.try_omit_fp; - ir_node *irn; - int wanted_bias = real_bias; + int wanted_bias = real_bias; + ir_graph *irg = get_Block_irg(bl); + be_stack_layout_t *layout = be_get_irg_stack_layout(irg); + bool sp_relative = layout->sp_relative; + const arch_env_t *arch_env = be_get_irg_arch_env(irg); + ir_node *irn; sched_foreach(bl, irn) { int ofs; @@ -2542,10 +2497,10 @@ static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias) */ ir_entity *ent = arch_get_frame_entity(irn); if (ent != NULL) { - int bias = omit_fp ? real_bias : 0; - int offset = get_stack_entity_offset(&env->frame, ent, bias); + int bias = sp_relative ? 
real_bias : 0; + int offset = get_stack_entity_offset(layout, ent, bias); arch_set_frame_offset(irn, offset); - DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", + DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias)); } @@ -2558,19 +2513,19 @@ static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias) if (be_is_IncSP(irn)) { /* fill in real stack frame size */ if (ofs == BE_STACK_FRAME_SIZE_EXPAND) { - ir_type *frame_type = get_irg_frame_type(env->birg->irg); + ir_type *frame_type = get_irg_frame_type(irg); ofs = (int) get_type_size_bytes(frame_type); be_set_IncSP_offset(irn, ofs); } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) { - ir_type *frame_type = get_irg_frame_type(env->birg->irg); + ir_type *frame_type = get_irg_frame_type(irg); ofs = - (int)get_type_size_bytes(frame_type); be_set_IncSP_offset(irn, ofs); } else { if (be_get_IncSP_align(irn)) { /* patch IncSP to produce an aligned stack pointer */ - ir_type *between_type = env->frame.between_type; + ir_type *between_type = layout->between_type; int between_size = get_type_size_bytes(between_type); - int alignment = 1 << env->arch_env->stack_alignment; + int alignment = 1 << arch_env->stack_alignment; int delta = (real_bias + ofs + between_size) & (alignment - 1); assert(ofs >= 0); if (delta > 0) { @@ -2601,7 +2556,6 @@ static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias) * A helper struct for the bias walker. */ struct bias_walk { - be_abi_irg_t *env; /**< The ABI irg environment. */ int start_block_bias; /**< The bias at the end of the start block. */ int between_size; ir_node *start_block; /**< The start block of the current graph. */ @@ -2615,7 +2569,7 @@ static void stack_bias_walker(ir_node *bl, void *data) { struct bias_walk *bw = data; if (bl != bw->start_block) { - process_stack_bias(bw->env, bl, bw->start_block_bias); + process_stack_bias(bl, bw->start_block_bias); } } @@ -2623,22 +2577,27 @@ static void stack_bias_walker(ir_node *bl, void *data) * Walker: finally lower all Sels of outer frame or parameter * entities. */ -static void lower_outer_frame_sels(ir_node *sel, void *ctx) { - be_abi_irg_t *env = ctx; - ir_node *ptr; - ir_entity *ent; - ir_type *owner; +static void lower_outer_frame_sels(ir_node *sel, void *ctx) +{ + ir_node *ptr; + ir_entity *ent; + ir_type *owner; + be_stack_layout_t *layout; + ir_graph *irg; + (void) ctx; if (! 
is_Sel(sel)) return; - ent = get_Sel_entity(sel); - owner = get_entity_owner(ent); - ptr = get_Sel_ptr(sel); + ent = get_Sel_entity(sel); + owner = get_entity_owner(ent); + ptr = get_Sel_ptr(sel); + irg = get_irn_irg(sel); + layout = be_get_irg_stack_layout(irg); - if (owner == env->frame.frame_type || owner == env->frame.arg_type) { + if (owner == layout->frame_type || owner == layout->arg_type) { /* found access to outer frame or arguments */ - int offset = get_stack_entity_offset(&env->frame, ent, 0); + int offset = get_stack_entity_offset(layout, ent, 0); if (offset != 0) { ir_node *bl = get_nodes_block(sel); @@ -2653,22 +2612,22 @@ static void lower_outer_frame_sels(ir_node *sel, void *ctx) { } } -void be_abi_fix_stack_bias(be_abi_irg_t *env) +void be_abi_fix_stack_bias(ir_graph *irg) { - ir_graph *irg = env->birg->irg; + be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg); ir_type *frame_tp; int i; struct bias_walk bw; - stack_frame_compute_initial_offset(&env->frame); - // stack_layout_dump(stdout, frame); + stack_frame_compute_initial_offset(stack_layout); + // stack_layout_dump(stdout, stack_layout); /* Determine the stack bias at the end of the start block. */ - bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias); - bw.between_size = get_type_size_bytes(env->frame.between_type); + bw.start_block_bias = process_stack_bias(get_irg_start_block(irg), + stack_layout->initial_bias); + bw.between_size = get_type_size_bytes(stack_layout->between_type); /* fix the bias is all other blocks */ - bw.env = env; bw.start_block = get_irg_start_block(irg); irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw); @@ -2677,11 +2636,10 @@ void be_abi_fix_stack_bias(be_abi_irg_t *env) frame_tp = get_irg_frame_type(irg); for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) { ir_entity *ent = get_class_member(frame_tp, i); + ir_graph *irg = get_entity_irg(ent); - if (is_method_entity(ent) && get_entity_peculiarity(ent) != peculiarity_description) { - ir_graph *irg = get_entity_irg(ent); - - irg_walk_graph(irg, NULL, lower_outer_frame_sels, env); + if (irg != NULL) { + irg_walk_graph(irg, NULL, lower_outer_frame_sels, NULL); } } } @@ -2700,11 +2658,8 @@ ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg) return pmap_get(abi->regs, (void *) reg); } -/** - * Returns non-zero if the ABI has omitted the frame pointer in - * the current graph. - */ -int be_abi_omit_fp(const be_abi_irg_t *abi) +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi); +void be_init_abi(void) { - return abi->call->flags.bits.try_omit_fp; + FIRM_DBG_REGISTER(dbg, "firm.be.abi"); }
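
The stack-bias pass in the hunks above (process_stack_bias driven by stack_bias_walker) is easiest to see in isolation from the libfirm node and scheduling machinery. Below is a minimal standalone C sketch of the idea: walk a block's instructions in schedule order, resolve each frame-entity access to its entity offset plus the current bias when addressing is stack-pointer relative, and fold every stack-pointer adjustment into the running bias so later accesses in the same block see the updated value. All types and names here (insn_t, process_block) are hypothetical simplifications for illustration, not libfirm's API.

```c
/* Standalone sketch of per-block stack-bias accounting.
 * Hypothetical simplified types; not the libfirm API. */
#include <stdio.h>

typedef struct insn_t {
    int frame_ent_offset;  /* offset of the accessed frame entity, or -1 if none */
    int incsp;             /* bytes this instruction adds to the stack pointer, 0 if none */
    int resolved_offset;   /* filled in by the walk */
} insn_t;

/* Walk one block in schedule order and return the bias at its end.
 * real_bias is the bias at block entry; for non-start blocks this would be
 * the bias at the end of the start block, as in stack_bias_walker. */
static int process_block(insn_t *insns, int n, int real_bias, int sp_relative)
{
    int i;
    for (i = 0; i < n; ++i) {
        insn_t *in = &insns[i];

        /* frame accesses: entity offset plus the current bias when the
         * address is computed relative to the stack pointer */
        if (in->frame_ent_offset >= 0) {
            int bias = sp_relative ? real_bias : 0;
            in->resolved_offset = in->frame_ent_offset + bias;
        }

        /* stack-pointer adjustments change the bias for everything below */
        real_bias += in->incsp;
    }
    return real_bias;
}

int main(void)
{
    /* push 8 bytes of outgoing arguments, then access a local at offset 4 */
    insn_t block[] = {
        { -1, 8, 0 },   /* IncSP-like adjustment, no frame entity */
        {  4, 0, 0 },   /* load/store of a frame entity at offset 4 */
    };
    int end_bias = process_block(block, 2, /*entry bias*/ 0, /*sp_relative*/ 1);

    printf("resolved offset: %d, bias at block end: %d\n",
           block[1].resolved_offset, end_bias);
    return 0;
}
```

With sp_relative set, the second instruction resolves to offset 12 (4 plus the 8-byte adjustment), and the returned end-of-block bias is what the walker would hand on as the starting bias of the remaining blocks; with a frame pointer the bias is ignored and the plain entity offset is used, mirroring the `sp_relative ? real_bias : 0` choice in the patch.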