X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeabi.c;h=0aeb25548ada8696ed567831da4aa024bb601fa6;hb=80a6158fdd766f42ee6c508a773bc114ff1b61f3;hp=ed6aeb13a54e1713df59ae5a0a4372adebb32b57;hpb=f78f725b468fab99d9cb4cb6cf146a706709c2ad;p=libfirm diff --git a/ir/be/beabi.c b/ir/be/beabi.c index ed6aeb13a..0aeb25548 100644 --- a/ir/be/beabi.c +++ b/ir/be/beabi.c @@ -2,7 +2,8 @@ * ABI lowering. * * @author Sebastian Hack - * @date 7.3.2005 + * @date 7.3.2005 + * @cvsid $Id$ */ #ifdef HAVE_CONFIG_H @@ -23,6 +24,10 @@ #include "irgwalk.h" #include "irprintf_t.h" #include "irgopt.h" +#include "irbitset.h" +#include "height.h" +#include "pdeq.h" +#include "irtools.h" #include "be.h" #include "beabi.h" @@ -30,61 +35,44 @@ #include "benode_t.h" #include "belive_t.h" #include "besched_t.h" - -#define MAX(x, y) ((x) > (y) ? (x) : (y)) -#define MIN(x, y) ((x) < (y) ? (x) : (y)) +#include "beirg.h" typedef struct _be_abi_call_arg_t { - unsigned is_res : 1; - unsigned in_reg : 1; - unsigned on_stack : 1; + unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */ + unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */ + unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */ int pos; const arch_register_t *reg; - entity *stack_ent; + ir_entity *stack_ent; unsigned alignment; unsigned space_before; unsigned space_after; } be_abi_call_arg_t; struct _be_abi_call_t { - be_abi_call_flags_t flags; - const be_abi_callbacks_t *cb; - type *between_type; - set *params; -}; - -#define N_FRAME_TYPES 3 - -typedef struct _be_stack_frame_t { - type *arg_type; - type *between_type; - type *frame_type; - - type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */ - - int initial_offset; - int stack_dir; -} be_stack_frame_t; - -struct _be_stack_slot_t { - struct _be_stack_frame_t *frame; - entity *ent; + be_abi_call_flags_t flags; + const be_abi_callbacks_t *cb; + ir_type *between_type; + set *params; + const arch_register_class_t *cls_addr; }; struct _be_abi_irg_t { struct obstack obst; - be_stack_frame_t *frame; /**< The stack frame model. */ - const be_irg_t *birg; /**< The back end IRG. */ + be_stack_layout_t *frame; /**< The stack frame model. */ + be_irg_t *birg; /**< The back end IRG. */ const arch_isa_t *isa; /**< The isa. */ survive_dce_t *dce_survivor; be_abi_call_t *call; /**< The ABI call information. */ - type *method_type; /**< The type of the method of the IRG. */ + ir_type *method_type; /**< The type of the method of the IRG. */ ir_node *init_sp; /**< The node representing the stack pointer at the start of the function. */ + ir_node *start_barrier; /**< The barrier of the start block */ + ir_node *reg_params; /**< The reg params node. */ pmap *regs; /**< A map of all callee-save and ignore regs to their Projs to the RegParams node. */ @@ -110,9 +98,10 @@ struct _be_abi_irg_t { /* Forward, since be need it in be_abi_introduce(). */ static const arch_irn_ops_if_t abi_irn_ops; static const arch_irn_handler_t abi_irn_handler; +static heights_t *ir_heights; /* Flag: if set, try to omit the frame pointer if called by the backend */ -int be_omit_fp = 1; +static int be_omit_fp = 1; /* _ ____ ___ ____ _ _ _ _ @@ -173,10 +162,18 @@ static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, i /* Set the flags for a call. 
*/ void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb) { - call->flags = flags; - call->cb = cb; + call->flags = flags; + call->cb = cb; } + +/* Set register class for call address */ +void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls) +{ + call->cls_addr = cls; +} + + void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after) { be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1); @@ -212,14 +209,17 @@ be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call) * * @return the new ABI call object */ -static be_abi_call_t *be_abi_call_new() +static be_abi_call_t *be_abi_call_new(void) { be_abi_call_t *call = xmalloc(sizeof(call[0])); + call->flags.val = 0; call->params = new_set(cmp_call_arg, 16); call->cb = NULL; + call->cls_addr = NULL; call->flags.bits.try_omit_fp = be_omit_fp; + return call; } @@ -249,10 +249,10 @@ static void be_abi_call_free(be_abi_call_t *call) and the spills. */ -static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias) +static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent, int bias) { - type *t = get_entity_owner(ent); - int ofs = get_entity_offset_bytes(ent); + ir_type *t = get_entity_owner(ent); + int ofs = get_entity_offset(ent); int i, index; @@ -278,29 +278,44 @@ static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bia /** * Retrieve the entity with given offset from a frame type. */ -static entity *search_ent_with_offset(type *t, int offset) +static ir_entity *search_ent_with_offset(ir_type *t, int offset) { int i, n; - for(i = 0, n = get_class_n_members(t); i < n; ++i) { - entity *ent = get_class_member(t, i); - if(get_entity_offset_bytes(ent) == offset) + for(i = 0, n = get_compound_n_members(t); i < n; ++i) { + ir_entity *ent = get_compound_member(t, i); + if(get_entity_offset(ent) == offset) return ent; } return NULL; } -static int stack_frame_compute_initial_offset(be_stack_frame_t *frame) +static int stack_frame_compute_initial_offset(be_stack_layout_t *frame) { - type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type; - entity *ent = search_ent_with_offset(base, 0); - frame->initial_offset = 0; - frame->initial_offset = get_stack_entity_offset(frame, ent, 0); + ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type; + ir_entity *ent = search_ent_with_offset(base, 0); + + frame->initial_offset = ent ? 
get_stack_entity_offset(frame, ent, 0) : 0; + return frame->initial_offset; } -static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir) +/** + * Initializes the frame layout from parts + * + * @param frame the stack layout that will be initialized + * @param args the stack argument layout type + * @param between the between layout type + * @param locals the method frame type + * @param stack_dir the stack direction + * @param param_map an array mapping method argument positions to the stack argument type + * + * @return the initialized stack layout + */ +static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args, + ir_type *between, ir_type *locals, int stack_dir, + ir_entity *param_map[]) { frame->arg_type = args; frame->between_type = between; @@ -308,68 +323,37 @@ static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, t frame->initial_offset = 0; frame->stack_dir = stack_dir; frame->order[1] = between; + frame->param_map = param_map; if(stack_dir > 0) { frame->order[0] = args; frame->order[2] = locals; } - else { frame->order[0] = locals; frame->order[2] = args; } - return frame; } -static void stack_frame_dump(FILE *file, be_stack_frame_t *frame) +#if 0 +/** Dumps the stack layout to file. */ +static void stack_layout_dump(FILE *file, be_stack_layout_t *frame) { int i, j, n; ir_fprintf(file, "initial offset: %d\n", frame->initial_offset); - for(j = 0; j < N_FRAME_TYPES; ++j) { - type *t = frame->order[j]; + for (j = 0; j < N_FRAME_TYPES; ++j) { + ir_type *t = frame->order[j]; - ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t)); - for(i = 0, n = get_class_n_members(t); i < n; ++i) { - entity *ent = get_class_member(t, i); + ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t)); + for (i = 0, n = get_compound_n_members(t); i < n; ++i) { + ir_entity *ent = get_compound_member(t, i); ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0)); } } } - -/** - * If irn is a Sel node computes the address of an entity - * on the frame type return the entity, else NULL. - */ -static INLINE entity *get_sel_ent(ir_node *irn) -{ - if(is_Sel(irn) && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) { - return get_Sel_entity(irn); - } - - return NULL; -} - -/** - * Walker: Replaces Loads, Stores and Sels of frame type entities - * by FrameLoad, FrameStore and FrameAdress. - */ -static void lower_frame_sels_walker(ir_node *irn, void *data) -{ - ir_node *nw = NULL; - entity *ent = get_sel_ent(irn); - - if(ent != NULL) { - be_abi_irg_t *env = data; - ir_node *bl = get_nodes_block(irn); - ir_graph *irg = get_irn_irg(bl); - ir_node *frame = get_irg_frame(irg); - - nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent); - exchange(irn, nw); - } -} +#endif /** * Returns non-zero if the call argument at given position @@ -399,7 +383,7 @@ static INLINE int is_on_stack(be_abi_call_t *call, int pos) * @param curr_sp The stack pointer node to use. * @return The stack pointer after the call. 
*/ -static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) +static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy) { ir_graph *irg = env->birg->irg; const arch_isa_t *isa = env->birg->main_env->arch_env->isa; @@ -416,7 +400,6 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) const arch_register_t *sp = arch_isa_sp(isa); ir_mode *mach_mode = sp->reg_class->mode; struct obstack *obst = &env->obst; - ir_node *no_mem = get_irg_no_mem(irg); int no_alloc = call->flags.bits.frame_is_setup_on_call; ir_node *res_proj = NULL; @@ -440,11 +423,12 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) for(i = 0; i < n_params; ++i) { be_abi_call_arg_t *arg = get_call_arg(call, 0, i); assert(arg); - if(arg->on_stack) { - stack_size += arg->space_before; - stack_size = round_up2(stack_size, arg->alignment); - stack_size += get_type_size_bytes(get_method_param_type(mt, i)); - stack_size += arg->space_after; + if (arg->on_stack) { + int arg_size = get_type_size_bytes(get_method_param_type(mt, i)); + + stack_size += round_up2(arg->space_before, arg->alignment); + stack_size += round_up2(arg_size, arg->alignment); + stack_size += round_up2(arg->space_after, arg->alignment); obstack_int_grow(obst, i); n_pos++; } @@ -487,7 +471,18 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) * moving the stack pointer along the stack's direction. */ if(stack_dir < 0 && !do_seq && !no_alloc) { - curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand); + curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size); + if(alloca_copy) { + add_irn_dep(curr_sp, alloca_copy); + alloca_copy = NULL; + } + } + + if(!do_seq) { + obstack_ptr_grow(obst, get_Call_mem(irn)); + curr_mem = new_NoMem(); + } else { + curr_mem = get_Call_mem(irn); } assert(mode_is_reference(mach_mode) && "machine mode must be pointer"); @@ -497,52 +492,68 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) ir_node *param = get_Call_param(irn, p); ir_node *addr = curr_sp; ir_node *mem = NULL; - type *param_type = get_method_param_type(mt, p); + ir_type *param_type = get_method_param_type(mt, p); int param_size = get_type_size_bytes(param_type) + arg->space_after; - curr_ofs += arg->space_before; - curr_ofs = round_up2(curr_ofs, arg->alignment); + /* + * If we wanted to build the arguments sequentially, + * the stack pointer for the next must be incremented, + * and the memory value propagated. + */ + if (do_seq) { + curr_ofs = 0; + addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before); + if(alloca_copy) { + add_irn_dep(curr_sp, alloca_copy); + alloca_copy = NULL; + } + add_irn_dep(curr_sp, curr_mem); + } + else { + curr_ofs += arg->space_before; + curr_ofs = round_up2(curr_ofs, arg->alignment); - /* Make the expression to compute the argument's offset. */ - if(curr_ofs > 0) { - addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs); - addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode); + /* Make the expression to compute the argument's offset. */ + if(curr_ofs > 0) { + addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs); + addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode); + } } /* Insert a store for primitive arguments. 
*/ - if(is_atomic_type(param_type)) { - mem = new_r_Store(irg, bl, curr_mem, addr, param); - mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M); + if (is_atomic_type(param_type)) { + ir_node *store; + store = new_r_Store(irg, bl, curr_mem, addr, param); + mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M); } /* Make a mem copy for compound arguments. */ else { + ir_node *copy; + assert(mode_is_reference(get_irn_mode(param))); - mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type); - mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular); + copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type); + mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular); } - obstack_ptr_grow(obst, mem); - curr_ofs += param_size; - /* - * If we wanted to build the arguments sequentially, - * the stack pointer for the next must be incremented, - * and the memory value propagated. - */ - if(do_seq) { - curr_ofs = 0; - curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem, param_size, be_stack_dir_expand); + if (do_seq) curr_mem = mem; - } + else + obstack_ptr_grow(obst, mem); } in = (ir_node **) obstack_finish(obst); /* We need the sync only, if we didn't build the stores sequentially. */ - if(!do_seq) - curr_mem = new_r_Sync(irg, bl, n_pos, in); + if(!do_seq) { + if(n_pos >= 1) { + curr_mem = new_r_Sync(irg, bl, n_pos + 1, in); + } else { + curr_mem = get_Call_mem(irn); + } + } obstack_free(obst, in); } @@ -625,11 +636,15 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) get_Call_type(irn)); /* - TODO: - Set the register class of the call address to the same as the stack pointer's. - That' probably buggy for some architectures. + Set the register class of the call address to the same as the stack pointer's + if it's not set by the backend in the abi callback. */ - be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class); + be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr ? call->cls_addr : sp->reg_class); + + /* Set input requirement for stack pointer. */ + be_node_set_reg_class(low_call, be_pos_Call_sp, arch_get_irn_reg_class(isa->main_env->arch_env, curr_sp, -1)); + + DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call)); /* Set the register classes and constraints of the Call parameters. */ for(i = 0; i < n_low_args; ++i) { @@ -696,12 +711,20 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) } } - if(!mem_proj) + if(!mem_proj) { mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M); + keep_alive(mem_proj); + } /* Clean up the stack frame if we allocated it */ - if(!no_alloc) - curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink); + if(!no_alloc) { + curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size); + add_irn_dep(curr_sp, mem_proj); + if(alloca_copy) { + add_irn_dep(curr_sp, alloca_copy); + alloca_copy = NULL; + } + } } be_abi_call_free(call); @@ -716,7 +739,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) * Adjust an alloca. * The alloca is transformed into a back end alloca node and connected to the stack nodes. 
*/ -static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp) +static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy) { if (get_Alloc_where(alloc) == stack_alloc) { ir_node *bl = get_nodes_block(alloc); @@ -726,6 +749,9 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp const ir_edge_t *edge; ir_node *new_alloc; + ir_node *addr; + ir_node *copy; + ir_node *ins[2]; foreach_out_edge(alloc, edge) { ir_node *irn = get_edge_src_irn(edge); @@ -746,7 +772,7 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp /* Beware: currently Alloc nodes without a result might happen, only escape analysis kills them and this phase runs only for object oriented source. We kill the Alloc here. */ - if (alloc_res == NULL) { + if (alloc_res == NULL && alloc_mem) { exchange(alloc_mem, get_Alloc_mem(alloc)); return curr_sp; } @@ -756,44 +782,97 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp env->call->flags.bits.try_omit_fp = 0; new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc)); - exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp); + if(alloc_mem != NULL) { + ir_node *addsp_mem; + ir_node *sync; + + addsp_mem = new_r_Proj(irg, bl, new_alloc, mode_M, pn_be_AddSP_M); - if(alloc_mem != NULL) - exchange(alloc_mem, new_r_NoMem(irg)); + // We need to sync the output mem of the AddSP with the input mem + // edge into the alloc node + ins[0] = get_Alloc_mem(alloc); + ins[1] = addsp_mem; + sync = new_r_Sync(irg, bl, 2, ins); - curr_sp = new_alloc; + exchange(alloc_mem, sync); + } + + exchange(alloc, new_alloc); + + /* fix projnum of alloca res */ + set_Proj_proj(alloc_res, pn_be_AddSP_res); + + addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp; + + /* copy the address away, since it could be used after further stack pointer modifications. */ + /* Let it point curr_sp just for the moment, I'll reroute it in a second. */ + *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp); + + /* Let all users of the Alloc() result now point to the copy. */ + edges_reroute(alloc_res, copy, irg); + + /* Rewire the copy appropriately. */ + set_irn_n(copy, be_pos_Copy_op, addr); + + curr_sp = alloc_res; } + return curr_sp; +} /* adjust_alloc */ + +/** + * Adjust a Free. + * The Free is transformed into a back end free node and connected to the stack nodes. + */ +static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp) +{ + if (get_Free_where(free) == stack_alloc) { + ir_node *bl = get_nodes_block(free); + ir_graph *irg = get_irn_irg(bl); + ir_node *addsp, *mem, *res; + + /* The stack pointer will be modified in an unknown manner. + We cannot omit it. */ + env->call->flags.bits.try_omit_fp = 0; + addsp = be_new_SubSP(env->isa->sp, irg, bl, curr_sp, get_Free_size(free)); + + mem = new_r_Proj(irg, bl, addsp, mode_M, pn_be_SubSP_M); + res = new_r_Proj(irg, bl, addsp, mode_P_data, pn_be_SubSP_res); + exchange(free, mem); + curr_sp = res; + } return curr_sp; -} +} /* adjust_free */ +/* the following function is replaced by the usage of the heights module */ +#if 0 /** * Walker for dependent_on(). * This function searches a node tgt recursively from a given node * but is restricted to the given block. * @return 1 if tgt was reachable from curr, 0 if not. 
 */
-static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
+static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
 {
 	int n, i;
 
-	if(get_irn_visited(curr) >= visited_nr)
-		return 0;
-
-	set_irn_visited(curr, visited_nr);
-	if(get_nodes_block(curr) != bl)
+	if (get_nodes_block(curr) != bl)
 		return 0;
 
-	if(curr == tgt)
+	if (curr == tgt)
 		return 1;
 
-	for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
-		if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
-			return 1;
+	/* Phi functions stop the recursion inside a basic block */
+	if (! is_Phi(curr)) {
+		for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
+			if (check_dependence(get_irn_n(curr, i), tgt, bl))
+				return 1;
+		}
 	}
 
 	return 0;
 }
+#endif /* if 0 */
 
 /**
  * Check if a node is somehow data dependent on another one.
@@ -805,12 +884,11 @@ static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned l
 static int dependent_on(ir_node *n1, ir_node *n2)
 {
 	ir_node *bl = get_nodes_block(n1);
-	ir_graph *irg = get_irn_irg(bl);
-	long vis_nr = get_irg_visited(irg) + 1;
 
 	assert(bl == get_nodes_block(n2));
-	set_irg_visited(irg, vis_nr);
-	return check_dependence(n1, n2, bl, vis_nr);
+
+	return heights_reachable_in_block(ir_heights, n1, n2);
+	//return check_dependence(n1, n2, bl);
 }
 
 static int cmp_call_dependecy(const void *c1, const void *c2)
@@ -824,17 +902,31 @@ static int cmp_call_dependecy(const void *c1, const void *c2)
 	   1 if second is "smaller" than first
 	   -1 if first is "smaller" than second
 	*/
-	return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
+	if (dependent_on(n1, n2))
+		return -1;
+
+	if (dependent_on(n2, n1))
+		return 1;
+
+	return 0;
 }
 
+/**
+ * Walker: links all Call/Alloc/Free nodes to the Block in which they are contained.
+ */
 static void link_calls_in_block_walker(ir_node *irn, void *data)
 {
-	if(is_Call(irn)) {
+	opcode code = get_irn_opcode(irn);
+
+	if (code == iro_Call ||
+		(code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
+		(code == iro_Free && get_Free_where(irn) == stack_alloc)) {
 		be_abi_irg_t *env = data;
 		ir_node *bl = get_nodes_block(irn);
 		void *save = get_irn_link(bl);
 
-		env->call->flags.bits.irg_is_leaf = 0;
+		if (code == iro_Call)
+			env->call->flags.bits.irg_is_leaf = 0;
 
 		set_irn_link(irn, save);
 		set_irn_link(bl, irn);
 	}
 }
 
 /**
- * Process all call nodes inside a basic block.
+ * Block-walker:
+ * Process all Call nodes inside a basic block.
  * Note that the link field of the block must contain a linked list of all
- * Call nodes inside the block. We first order this list according to data dependency
+ * Call nodes inside the Block. We first order this list according to data dependency
  * and then connect the calls together.
*/ static void process_calls_in_block(ir_node *bl, void *data) @@ -861,6 +954,7 @@ static void process_calls_in_block(ir_node *bl, void *data) if(n > 0) { ir_node *keep; ir_node **nodes; + ir_node *copy = NULL; int i; nodes = obstack_finish(&env->obst); @@ -871,12 +965,16 @@ static void process_calls_in_block(ir_node *bl, void *data) for(i = n - 1; i >= 0; --i) { ir_node *irn = nodes[i]; + DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn)); switch(get_irn_opcode(irn)) { case iro_Call: - curr_sp = adjust_call(env, irn, curr_sp); + curr_sp = adjust_call(env, irn, curr_sp, copy); break; case iro_Alloc: - curr_sp = adjust_alloc(env, irn, curr_sp); + curr_sp = adjust_alloc(env, irn, curr_sp, ©); + break; + case iro_Free: + curr_sp = adjust_free(env, irn, curr_sp); break; default: break; @@ -892,7 +990,7 @@ static void process_calls_in_block(ir_node *bl, void *data) } set_irn_link(bl, curr_sp); -} +} /* process_calls_in_block */ /** * Adjust all call nodes in the graph to the ABI conventions. @@ -903,15 +1001,10 @@ static void process_calls(be_abi_irg_t *env) env->call->flags.bits.irg_is_leaf = 1; irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env); - irg_block_walk_graph(irg, NULL, process_calls_in_block, env); -} -static void collect_return_walker(ir_node *irn, void *data) -{ - if(get_irn_opcode(irn) == iro_Return) { - struct obstack *obst = data; - obstack_ptr_grow(obst, irn); - } + ir_heights = heights_new(env->birg->irg); + irg_block_walk_graph(irg, NULL, process_calls_in_block, env); + heights_free(ir_heights); } #if 0 /* @@ -931,7 +1024,7 @@ static ir_node *setup_frame(be_abi_irg_t *env) int stack_nr = get_Proj_proj(stack); if(flags.try_omit_fp) { - stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand); + stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND); frame = stack; } @@ -945,7 +1038,7 @@ static ir_node *setup_frame(be_abi_irg_t *env) arch_set_irn_register(env->birg->main_env->arch_env, frame, bp); } - stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand); + stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND); } be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore); @@ -970,7 +1063,7 @@ static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct pmap_entry *ent; if(env->call->flags.bits.try_omit_fp) { - stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink); + stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK); } else { @@ -994,7 +1087,19 @@ static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct */ #endif -static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type) +/** + * Computes the stack argument layout type. + * Changes a possibly allocated value param type by moving + * entities to the stack layout type. + * + * @param env the ABI environment + * @param call the current call ABI + * @param method_type the method type + * @param param_map an array mapping method arguments to the stack layout type + * + * @return the stack argument layout type + */ +static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map) { int dir = env->call->flags.bits.left_to_right ? 
1 : -1; int inc = env->birg->main_env->arch_env->isa->stack_dir * dir; @@ -1005,29 +1110,45 @@ static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type char buf[128]; ir_type *res; int i; - - snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg))); - res = new_type_class(new_id_from_str(buf)); - - for(i = 0; i < n; ++i, curr += inc) { - type *param_type = get_method_param_type(method_type, curr); + ir_type *val_param_tp = get_method_value_param_type(method_type); + ident *id = get_entity_ident(get_irg_entity(env->birg->irg)); + ir_entity **map; + + *param_map = map = obstack_alloc(&env->obst, n * sizeof(ir_entity *)); + res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8))); + for (i = 0; i < n; ++i, curr += inc) { + ir_type *param_type = get_method_param_type(method_type, curr); be_abi_call_arg_t *arg = get_call_arg(call, 0, curr); - if(arg->on_stack) { - snprintf(buf, sizeof(buf), "param_%d", i); - arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type); + map[i] = NULL; + if (arg->on_stack) { + if (val_param_tp) { + /* the entity was already created, move it to the param type */ + arg->stack_ent = get_method_value_param_ent(method_type, i); + remove_struct_member(val_param_tp, arg->stack_ent); + set_entity_owner(arg->stack_ent, res); + add_struct_member(res, arg->stack_ent); + /* must be automatic to set a fixed layout */ + set_entity_allocation(arg->stack_ent, allocation_automatic); + } + else { + snprintf(buf, sizeof(buf), "param_%d", i); + arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type); + } ofs += arg->space_before; ofs = round_up2(ofs, arg->alignment); - set_entity_offset_bytes(arg->stack_ent, ofs); + set_entity_offset(arg->stack_ent, ofs); ofs += arg->space_after; ofs += get_type_size_bytes(param_type); + map[i] = arg->stack_ent; } } - set_type_size_bytes(res, ofs); + set_type_state(res, layout_fixed); return res; } +#if 0 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs) { int i, j, n; @@ -1069,6 +1190,7 @@ static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node obstack_free(&obst, NULL); } +#endif typedef struct { const arch_register_t *reg; @@ -1109,8 +1231,8 @@ static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map) static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req) { ir_graph *irg = env->birg->irg; + int n_regs = pmap_count(regs); int n; - int n_regs = pmap_count(regs); ir_node *irn; ir_node **in; reg_node_map_t *rm; @@ -1130,9 +1252,10 @@ static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pm obstack_free(&env->obst, in); for(n = 0; n < n_regs; ++n) { - int pos = BE_OUT_POS(n); - ir_node *proj; const arch_register_t *reg = rm[n].reg; + int flags = 0; + int pos = BE_OUT_POS(n); + ir_node *proj; proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n); be_node_set_reg_class(irn, n, reg->reg_class); @@ -1144,7 +1267,12 @@ static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pm /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. 
 */
 		if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
-			be_node_set_flags(irn, pos, arch_irn_flags_ignore);
+			flags |= arch_irn_flags_ignore;
+
+		if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
+			flags |= arch_irn_flags_modify_sp;
+
+		be_node_set_flags(irn, pos, flags);
 
 		pmap_insert(regs, (void *) reg, proj);
 	}
@@ -1157,6 +1285,15 @@ static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pm
 	return irn;
 }
 
+/**
+ * Creates a be_Return for a Return node.
+ *
+ * @param env the ABI environment
+ * @param irn the Return node or NULL if there was none
+ * @param bl the block where the be_Return should be placed
+ * @param mem the current memory
+ * @param n_res number of return results
+ */
 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res)
 {
 	be_abi_call_t *call = env->call;
 	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
@@ -1178,8 +1315,14 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, i
 	   it then. Else we use the stack from the start block and let
 	   the ssa construction fix the usage.
 	*/
-	stack = keep ? get_irn_n(keep, 0) : be_abi_reg_map_get(env->regs, isa->sp);
-	be_abi_reg_map_set(reg_map, isa->sp, stack);
+	stack = be_abi_reg_map_get(env->regs, isa->sp);
+	if (keep) {
+		ir_node *bad = new_r_Bad(env->birg->irg);
+		stack = get_irn_n(keep, 0);
+		set_nodes_block(keep, bad);
+		set_irn_n(keep, 0, bad);
+		// exchange(keep, new_r_Bad(env->birg->irg));
+	}
 
 	/* Insert results for Return into the register map. */
 	for(i = 0; i < n_res; ++i) {
@@ -1196,6 +1339,8 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, i
 		pmap_insert(reg_map, ent->key, ent->value);
 	}
 
+	be_abi_reg_map_set(reg_map, isa->sp, stack);
+
 	/* Make the Epilogue node and call the arch's epilogue maker. */
 	create_barrier(env, bl, &mem, reg_map, 1);
 	call->cb->epilogue(env->cb, bl, &mem, reg_map);
@@ -1218,7 +1363,6 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, i
 	/* clear SP entry, since it has already been grown. */
 	pmap_insert(reg_map, (void *) isa->sp, NULL);
 	for(i = 0; i < n_res; ++i) {
-		ir_node *res = get_Return_res(irn, i);
 		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
 
 		in[n] = be_abi_reg_map_get(reg_map, arg->reg);
@@ -1237,7 +1381,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, i
 	}
 
 	/* The in array for the new back end return is now ready. */
-	ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n, in);
+	ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
 
 	/* Set the register classes of the return's parameters accordingly. */
 	for(i = 0; i < n; ++i)
@@ -1251,6 +1395,150 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, i
 	return ret;
 }
 
+typedef struct lower_frame_sels_env_t {
+	be_abi_irg_t *env;
+	ir_entity *value_param_list; /**< the list of all value param entities */
+} lower_frame_sels_env_t;
+
+/**
+ * Walker: Replaces Sels of frame type and
+ * value param type entities by FrameAddress.
+ */
+static void lower_frame_sels_walker(ir_node *irn, void *data)
+{
+	lower_frame_sels_env_t *ctx = data;
+
+	if (is_Sel(irn)) {
+		ir_graph *irg = current_ir_graph;
+		ir_node *frame = get_irg_frame(irg);
+		ir_node *param_base = get_irg_value_param_base(irg);
+		ir_node *ptr = get_Sel_ptr(irn);
+
+		if (ptr == frame || ptr == param_base) {
+			be_abi_irg_t *env = ctx->env;
+			ir_entity *ent = get_Sel_entity(irn);
+			ir_node *bl = get_nodes_block(irn);
+			ir_node *nw;
+
+			nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
+			exchange(irn, nw);
+
+			/* check if it's a param Sel and if we have not seen this entity immediately before */
+			if (ptr == param_base && ctx->value_param_list != ent) {
+				set_entity_link(ent, ctx->value_param_list);
+				ctx->value_param_list = ent;
+			}
+		}
+	}
+}
+
+/**
+ * Check if a value parameter is transmitted as a register.
+ * This might happen if the address of a parameter is taken which is
+ * transmitted in registers.
+ *
+ * Note that on some architectures this case must be handled specially
+ * because the place of the backing store is determined by their ABI.
+ *
+ * In the default case we move the entity to the frame type and create
+ * a backing store into the first block.
+ */
+static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_entity *value_param_list) {
+	be_abi_call_t *call = env->call;
+	ir_graph *irg = env->birg->irg;
+	ir_entity *ent, *next_ent, *new_list;
+	ir_type *frame_tp;
+	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
+
+	new_list = NULL;
+	for (ent = value_param_list; ent; ent = next_ent) {
+		int i = get_struct_member_index(get_entity_owner(ent), ent);
+		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+
+		next_ent = get_entity_link(ent);
+		if (arg->in_reg) {
+			DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
+			set_entity_link(ent, new_list);
+			new_list = ent;
+		}
+	}
+	if (new_list) {
+		/* ok, change the graph */
+		ir_node *start_bl = get_irg_start_block(irg);
+		ir_node *first_bl = NULL;
+		ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
+		const ir_edge_t *edge;
+		optimization_state_t state;
+		int offset;
+
+		foreach_block_succ(start_bl, edge) {
+			ir_node *succ = get_edge_src_irn(edge);
+			if (start_bl != succ) {
+				first_bl = succ;
+				break;
+			}
+		}
+		assert(first_bl);
+		/* we have already removed critical edges, so the following
+		   assertion should always be true.
*/ + assert(get_Block_n_cfgpreds(first_bl) == 1); + + /* now create backing stores */ + frame = get_irg_frame(irg); + imem = get_irg_initial_mem(irg); + + save_optimization_state(&state); + set_optimize(0); + nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M); + restore_optimization_state(&state); + + /* reroute all edges to the new memory source */ + edges_reroute(imem, nmem, irg); + + store = NULL; + mem = imem; + args = get_irg_args(irg); + args_bl = get_nodes_block(args); + for (ent = new_list; ent; ent = get_entity_link(ent)) { + int i = get_struct_member_index(get_entity_owner(ent), ent); + ir_type *tp = get_entity_type(ent); + ir_mode *mode = get_type_mode(tp); + ir_node *addr; + + /* address for the backing store */ + addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent); + + if (store) + mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M); + + /* the backing store itself */ + store = new_r_Store(irg, first_bl, mem, addr, + new_r_Proj(irg, args_bl, args, mode, i)); + } + /* the new memory Proj gets the last Proj from store */ + set_Proj_pred(nmem, store); + set_Proj_proj(nmem, pn_Store_M); + + /* move all entities to the frame type */ + frame_tp = get_irg_frame_type(irg); + offset = get_type_size_bytes(frame_tp); + for (ent = new_list; ent; ent = get_entity_link(ent)) { + ir_type *tp = get_entity_type(ent); + int align = get_type_alignment_bytes(tp); + + offset += align - 1; + offset &= -align; + set_entity_owner(ent, frame_tp); + add_class_member(frame_tp, ent); + /* must be automatic to set a fixed layout */ + set_entity_allocation(ent, allocation_automatic); + set_entity_offset(ent, offset); + offset += get_type_size_bytes(tp); + } + set_type_size_bytes(frame_tp, offset); + } +} + /** * Modify the irg itself and the frame type. */ @@ -1262,14 +1550,11 @@ static void modify_irg(be_abi_irg_t *env) ir_graph *irg = env->birg->irg; ir_node *bl = get_irg_start_block(irg); ir_node *end = get_irg_end_block(irg); - ir_node *arg_tuple = get_irg_args(irg); - ir_node *no_mem = get_irg_no_mem(irg); ir_node *mem = get_irg_initial_mem(irg); - type *method_type = get_entity_type(get_irg_entity(irg)); + ir_type *method_type = get_entity_type(get_irg_entity(irg)); pset *dont_save = pset_new_ptr(8); - int n_params = get_method_n_params(method_type); - int max_arg = 0; + int n_params; int i, j, n; reg_node_map_t *rm; @@ -1278,8 +1563,11 @@ static void modify_irg(be_abi_irg_t *env) ir_node *barrier; ir_node *reg_params_bl; ir_node **args; + ir_node *arg_tuple; const ir_edge_t *edge; ir_type *arg_type, *bet_type; + lower_frame_sels_env_t ctx; + ir_entity **param_map; bitset_t *used_proj_nr; DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;) @@ -1287,24 +1575,32 @@ static void modify_irg(be_abi_irg_t *env) DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg)); /* Convert the Sel nodes in the irg to frame load/store/addr nodes. 
 */
-	irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
+	ctx.env = env;
+	ctx.value_param_list = NULL;
+	irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
 
 	env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
 	env->regs = pmap_create();
 
-	/* Find the maximum proj number of the argument tuple proj */
-	foreach_out_edge(arg_tuple, edge) {
-		ir_node *irn = get_edge_src_irn(edge);
-		int nr = get_Proj_proj(irn);
-		max_arg = MAX(max_arg, nr);
-	}
-
 	used_proj_nr = bitset_alloca(1024);
-	max_arg = MAX(max_arg + 1, n_params);
-	args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
-	memset(args, 0, max_arg * sizeof(args[0]));
+	n_params = get_method_n_params(method_type);
+	args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
+	memset(args, 0, n_params * sizeof(args[0]));
+
+	/* Check if a value parameter is transmitted as a register.
+	 * This might happen if the address of a parameter is taken which is
+	 * transmitted in registers.
+	 *
+	 * Note that on some architectures this case must be handled specially
+	 * because the place of the backing store is determined by their ABI.
+	 *
+	 * In the default case we move the entity to the frame type and create
+	 * a backing store into the first block.
+	 */
+	fix_address_of_parameter_access(env, ctx.value_param_list);
 
 	/* Fill the argument vector */
+	arg_tuple = get_irg_args(irg);
 	foreach_out_edge(arg_tuple, edge) {
 		ir_node *irn = get_edge_src_irn(edge);
 		int nr = get_Proj_proj(irn);
 		args[nr] = irn;
 		DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
 	}
 
-	arg_type = compute_arg_type(env, call, method_type);
+	arg_type = compute_arg_type(env, call, method_type, &param_map);
 	bet_type = call->cb->get_between_type(env->cb);
-	stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
+	stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
 
 	/* Count the register params and add them to the number of Projs for the RegParams node */
 	for(i = 0; i < n_params; ++i) {
@@ -1344,6 +1640,7 @@ static void modify_irg(be_abi_irg_t *env)
 	pmap_insert(env->regs, (void *) isa->bp, NULL);
 	reg_params_bl = get_irg_start_block(irg);
 	env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
+	add_irn_dep(env->reg_params, get_irg_start(irg));
 
 	/*
 	 * make proj nodes for the callee save registers.
@@ -1357,10 +1654,12 @@ static void modify_irg(be_abi_irg_t *env)
 	for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
 		arch_register_t *reg = (void *) rm[i].reg;
 		ir_node *arg_proj = rm[i].irn;
-		ir_node *proj;
 		ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
 		long nr = i;
 		int pos = BE_OUT_POS((int) nr);
+		int flags = 0;
+
+		ir_node *proj;
 
 		assert(nr >= 0);
 		bitset_set(used_proj_nr, nr);
@@ -1374,7 +1673,12 @@ static void modify_irg(be_abi_irg_t *env)
 	 * The Proj for that register shall also be ignored during register allocation.
 */
 		if(arch_register_type_is(reg, ignore))
-			be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
+			flags |= arch_irn_flags_ignore;
+
+		if(reg == sp)
+			flags |= arch_irn_flags_modify_sp;
+
+		be_node_set_flags(env->reg_params, pos, flags);
 
 		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
 	}
@@ -1386,10 +1690,10 @@ static void modify_irg(be_abi_irg_t *env)
 
 	/* do the stack allocation BEFORE the barrier, or spill code
 	   might be added before it */
 	env->init_sp = be_abi_reg_map_get(env->regs, sp);
-	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
 	be_abi_reg_map_set(env->regs, sp, env->init_sp);
 
-	barrier = create_barrier(env, bl, &mem, env->regs, 0);
+	env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
 
 	env->init_sp = be_abi_reg_map_get(env->regs, sp);
 	arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
@@ -1399,7 +1703,7 @@ static void modify_irg(be_abi_irg_t *env)
 	pset_insert_ptr(env->ignore_regs, fp_reg);
 
 	/* Now, introduce stack param nodes for all parameters passed on the stack */
-	for(i = 0; i < max_arg; ++i) {
+	for(i = 0; i < n_params; ++i) {
 		ir_node *arg_proj = args[i];
 		ir_node *repl = NULL;
 
@@ -1441,32 +1745,18 @@ static void modify_irg(be_abi_irg_t *env)
 	for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
 		ir_node *irn = get_Block_cfgpred(end, i);
 
-		if (get_irn_opcode(irn) == iro_Return) {
-			ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
-			exchange(irn, ret);
+		if (is_Return(irn)) {
+			ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
+			exchange(irn, ret);
 		}
 	}
-
-	if (n <= 0) {
-		/* we have endless loops, add a dummy return without return vals */
-		ir_node *ret = create_be_return(env, NULL, end, get_irg_no_mem(irg), n);
-		add_End_keepalive(get_irg_end(irg), ret);
-	}
+	/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then;
+	   the code is dead and will never be executed. */
 
 	del_pset(dont_save);
 	obstack_free(&env->obst, args);
 }
 
-/**
- * Walker: puts all Alloc(stack_alloc) on a obstack
- */
-static void collect_alloca_walker(ir_node *irn, void *data)
-{
-	be_abi_irg_t *env = data;
-	if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
-		obstack_ptr_grow(&env->obst, irn);
-}
-
 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
 {
 	be_abi_irg_t *env = xmalloc(sizeof(env[0]));
@@ -1475,6 +1765,9 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
 	pmap_entry *ent;
 	ir_node *dummy;
+	optimization_state_t state;
+
+	be_omit_fp = birg->main_env->options->omit_fp;
 
 	obstack_init(&env->obst);
 
@@ -1488,17 +1781,26 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
 	env->dce_survivor = new_survive_dce();
 	env->birg = birg;
 	env->stack_phis = pset_new_ptr(16);
+	/* Beware: later we replace this node by the real one; ensure it is not CSE'd
+	   to another Unknown, or the stack pointer gets used */
+	save_optimization_state(&state);
+	set_optimize(0);
 	env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
+	restore_optimization_state(&state);
 	FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
 
-	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
-
 	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
 	env->irn_ops.impl = &abi_irn_ops;
 
 	/* Lower all call nodes in the IRG.
*/ process_calls(env); + /* + Beware: init backend abi call object after processing calls, + otherwise some information might be not yet available. + */ + env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg); + /* Process the IRG */ modify_irg(env); @@ -1517,7 +1819,7 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg) arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler); env->call->cb->done(env->cb); - be_liveness(irg); + env->cb = NULL; return env; } @@ -1541,6 +1843,10 @@ void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_set(bs, reg->index); } +/* Returns the stack layout from a abi environment. */ +const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) { + return abi->frame; +} /* @@ -1552,85 +1858,82 @@ void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, */ +struct fix_stack_walker_info { + nodeset *nodes; + const arch_env_t *aenv; +}; + /** * Walker. Collect all stack modifying nodes. */ static void collect_stack_nodes_walker(ir_node *irn, void *data) { - pset *s = data; + struct fix_stack_walker_info *info = data; - if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn)) - pset_insert_ptr(s, irn); + if (is_Block(irn)) + return; + + if (arch_irn_is(info->aenv, irn, modify_sp)) { + assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T); + pset_insert_ptr(info->nodes, irn); + } } -void be_abi_fix_stack_nodes(be_abi_irg_t *env) +void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv) { - dom_front_info_t *df; - pset *stack_nodes; + pset *stack_nodes = pset_new_ptr(16); + struct fix_stack_walker_info info; - /* We need dominance frontiers for fix up */ - df = be_compute_dominance_frontiers(env->birg->irg); - stack_nodes = pset_new_ptr(16); - pset_insert_ptr(stack_nodes, env->init_sp); - irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes); - be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis); - del_pset(stack_nodes); + info.nodes = stack_nodes; + info.aenv = env->birg->main_env->arch_env; - /* Liveness could have changed due to Phi nodes. */ - be_liveness(env->birg->irg); - - /* free these dominance frontiers */ - be_free_dominance_frontiers(df); -} + be_assure_dom_front(env->birg); -/** - * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand) - * into -1 or 1, respectively. - * @param irn The node. - * @return 1, if the direction of the IncSP was along, -1 if against. - */ -static int get_dir(ir_node *irn) -{ - return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink); + irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info); + pset_insert_ptr(stack_nodes, env->init_sp); + be_ssa_constr_set_phis(env->birg->dom_front, lv, stack_nodes, env->stack_phis); + del_pset(stack_nodes); } static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias) { - const arch_env_t *aenv = env->birg->main_env->arch_env; + const arch_env_t *arch_env = env->birg->main_env->arch_env; int omit_fp = env->call->flags.bits.try_omit_fp; ir_node *irn; sched_foreach(bl, irn) { /* - If the node modifies the stack pointer by a constant offset, - record that in the bias. 
- */ - if(be_is_IncSP(irn)) { - int ofs = be_get_IncSP_offset(irn); - int dir = get_dir(irn); - - if(ofs == BE_STACK_FRAME_SIZE) { - ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg)); - be_set_IncSP_offset(irn, ofs); - } - - if(omit_fp) - bias += dir * ofs; + Check, if the node relates to an entity on the stack frame. + If so, set the true offset (including the bias) for that + node. + */ + ir_entity *ent = arch_get_frame_entity(arch_env, irn); + if(ent) { + int offset = get_stack_entity_offset(env->frame, ent, bias); + arch_set_frame_offset(arch_env, irn, offset); + DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias)); } /* - Else check, if the node relates to an entity on the stack frame. - If so, set the true offset (including the bias) for that - node. - */ - else { - entity *ent = arch_get_frame_entity(aenv, irn); - if(ent) { - int offset = get_stack_entity_offset(env->frame, ent, bias); - arch_set_frame_offset(aenv, irn, offset); - DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset)); + If the node modifies the stack pointer by a constant offset, + record that in the bias. + */ + if(arch_irn_is(arch_env, irn, modify_sp)) { + int ofs = arch_get_sp_bias(arch_env, irn); + + if(be_is_IncSP(irn)) { + if(ofs == BE_STACK_FRAME_SIZE_EXPAND) { + ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg)); + be_set_IncSP_offset(irn, ofs); + } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) { + ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg)); + be_set_IncSP_offset(irn, ofs); + } } + + if(omit_fp) + bias += ofs; } } @@ -1643,6 +1946,7 @@ static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias) struct bias_walk { be_abi_irg_t *env; /**< The ABI irg environment. */ int start_block_bias; /**< The bias at the end of the start block. */ + ir_node *start_block; /**< The start block of the current graph. */ }; /** @@ -1650,8 +1954,8 @@ struct bias_walk { */ static void stack_bias_walker(ir_node *bl, void *data) { - if(bl != get_irg_start_block(get_irn_irg(bl))) { - struct bias_walk *bw = data; + struct bias_walk *bw = data; + if (bl != bw->start_block) { process_stack_bias(bw->env, bl, bw->start_block_bias); } } @@ -1662,13 +1966,14 @@ void be_abi_fix_stack_bias(be_abi_irg_t *env) struct bias_walk bw; stack_frame_compute_initial_offset(env->frame); - // stack_frame_dump(stdout, env->frame); + // stack_layout_dump(stdout, env->frame); /* Determine the stack bias at the end of the start block. 
*/ bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0); /* fix the bias is all other blocks */ bw.env = env; + bw.start_block = get_irg_start_block(irg); irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw); } @@ -1679,6 +1984,18 @@ ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *re return pmap_get(abi->regs, (void *) reg); } +ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg) +{ + assert(arch_register_type_is(reg, ignore)); + assert(pmap_contains(abi->regs, (void *) reg)); + return pmap_get(abi->regs, (void *) reg); +} + +ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi) +{ + return abi->start_barrier; +} + /* _____ _____ _ _ _ _ _ _ |_ _| __ \| \ | | | | | | | | | @@ -1751,16 +2068,25 @@ static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn) static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn) { - return arch_irn_flags_ignore; + return arch_irn_flags_ignore | arch_irn_flags_modify_sp; } -static entity *abi_get_frame_entity(const void *_self, const ir_node *irn) +static ir_entity *abi_get_frame_entity(const void *_self, const ir_node *irn) { return NULL; } -static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias) +static void abi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent) +{ +} + +static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias) +{ +} + +static int abi_get_sp_bias(const void *self, const ir_node *irn) { + return 0; } static const arch_irn_ops_if_t abi_irn_ops = { @@ -1770,9 +2096,23 @@ static const arch_irn_ops_if_t abi_irn_ops = { abi_classify, abi_get_flags, abi_get_frame_entity, - abi_set_stack_bias + abi_set_frame_entity, + abi_set_frame_offset, + abi_get_sp_bias, + NULL, /* get_inverse */ + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ }; static const arch_irn_handler_t abi_irn_handler = { abi_get_irn_ops }; + +/** + * Returns non-zero if the ABI has omitted the frame pointer in + * the current graph. + */ +int be_abi_omit_fp(const be_abi_irg_t *abi) { + return abi->call->flags.bits.try_omit_fp; +}