/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
- int pos;
+ int pos;
const arch_register_t *reg;
- ir_entity *stack_ent;
- unsigned alignment; /**< stack alignment */
- unsigned space_before; /**< allocate space before */
- unsigned space_after; /**< allocate space after */
+ ir_entity *stack_ent; /**< the entity representing this argument on the stack */
+ ir_mode *load_mode; /**< the mode used to load this argument from the stack */
+ unsigned alignment; /**< stack alignment */
+ unsigned space_before; /**< allocate space before */
+ unsigned space_after; /**< allocate space after */
} be_abi_call_arg_t;
struct _be_abi_call_t {
- be_abi_call_flags_t flags;
+ be_abi_call_flags_t flags;
+ int pop; /**< number of bytes the callee pops off the stack on return */
const be_abi_callbacks_t *cb;
ir_type *between_type;
set *params;
struct obstack obst;
be_stack_layout_t *frame; /**< The stack frame model. */
be_irg_t *birg; /**< The back end IRG. */
- const arch_isa_t *isa; /**< The isa. */
+ const arch_env_t *arch_env; /**< The architecture environment. */
survive_dce_t *dce_survivor;
be_abi_call_t *call; /**< The ABI call information. */
ir_node *init_sp; /**< The node representing the stack pointer
at the start of the function. */
- ir_node *start_barrier; /**< The barrier of the start block */
-
ir_node *reg_params; /**< The reg params node. */
pmap *regs; /**< A map of all callee-save and ignore regs to
their Projs to the RegParams node. */
arch_register_req_t sp_req;
arch_register_req_t sp_cls_req;
- DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
+ DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
};
static heights_t *ir_heights;
-/* Flag: if set, try to omit the frame pointer if called by the backend */
+/** Flag: if set, try to omit the frame pointer in all routines. */
static int be_omit_fp = 1;
+/** Flag: if set, try to omit the frame pointer in leaf routines only. */
+static int be_omit_leaf_fp = 1;
+
/*
_ ____ ___ ____ _ _ _ _
/ \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
call->cb = cb;
}
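+/**
+ * Set the number of bytes the callee pops off the stack on return
+ * (e.g. a struct-return shadow parameter).
+ */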
+void be_abi_call_set_pop(be_abi_call_t *call, int pop)
+{
+ assert(pop >= 0);
+ call->pop = pop;
+}
/* Set register class for call address */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
}
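+/**
+ * Record that the argument at @p arg_pos is passed on the stack;
+ * @p load_mode is the mode with which the callee loads it from the stack.
+ */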
-void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
+void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
{
be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
arg->on_stack = 1;
+ arg->load_mode = load_mode;
arg->alignment = alignment;
arg->space_before = space_before;
arg->space_after = space_after;
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
{
be_abi_call_t *call = xmalloc(sizeof(call[0]));
+ memset(call, 0, sizeof(call[0]));
call->flags.val = 0;
call->params = new_set(cmp_call_arg, 16);
call->cb = NULL;
call->cls_addr = cls_addr;
- call->flags.bits.try_omit_fp = be_omit_fp;
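+ /* consider omitting the frame pointer whenever either option is set;
+ * the flag is cleared again later when calls or dynamic stack
+ * allocation force a frame pointer */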
+ call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
return call;
}
and the spills.
*/
-static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent, int bias)
+static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
+ int bias)
{
ir_type *t = get_entity_owner(ent);
int ofs = get_entity_offset(ent);
{
ir_graph *irg = env->birg->irg;
const arch_env_t *arch_env = env->birg->main_env->arch_env;
- const arch_isa_t *isa = arch_env->isa;
- ir_type *mt = get_Call_type(irn);
+ ir_type *call_tp = get_Call_type(irn);
ir_node *call_ptr = get_Call_ptr(irn);
- int n_params = get_method_n_params(mt);
+ int n_params = get_method_n_params(call_tp);
ir_node *curr_mem = get_Call_mem(irn);
ir_node *bl = get_nodes_block(irn);
pset *results = pset_new_ptr(8);
pset *caller_save = pset_new_ptr(8);
pset *states = pset_new_ptr(2);
int stack_size = 0;
- int stack_dir = arch_isa_stack_dir(isa);
- const arch_register_t *sp = arch_isa_sp(isa);
+ int stack_dir = arch_env_stack_dir(arch_env);
+ const arch_register_t *sp = arch_env_sp(arch_env);
be_abi_call_t *call = be_abi_call_new(sp->reg_class);
ir_mode *mach_mode = sp->reg_class->mode;
struct obstack *obst = &env->obst;
int no_alloc = call->flags.bits.frame_is_setup_on_call;
- int n_res = get_method_n_ress(mt);
+ int n_res = get_method_n_ress(call_tp);
+ int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
ir_node *res_proj = NULL;
int n_reg_params = 0;
int i, n;
/* Let the isa fill out the abi description for that call node. */
- arch_isa_get_call_abi(isa, mt, call);
+ arch_env_get_call_abi(arch_env, call_tp, call);
/* Insert code to put the stack arguments on the stack. */
assert(get_Call_n_params(irn) == n_params);
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
assert(arg);
if (arg->on_stack) {
- int arg_size = get_type_size_bytes(get_method_param_type(mt, i));
+ int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
stack_size += round_up2(arg->space_before, arg->alignment);
stack_size += round_up2(arg_size, arg->alignment);
}
reg_param_idxs = obstack_finish(obst);
+ /*
+ * If the stack grows downwards, the arguments are not stored
+ * sequentially, and nobody else allocated the call frame, we allocate
+ * all the stack space the parameters need at once by moving the
+ * stack pointer along the stack's direction.
+ *
+ * Note: we also have to do this for stack_size == 0, because we may have
+ * to adjust the stack alignment for the call.
+ */
+ if (stack_dir < 0 && !do_seq && !no_alloc) {
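+ /* the final argument sets the IncSP align flag: the node may be
+ * enlarged later to enforce the calling convention's stack alignment */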
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size, 1);
+ }
+
/* If there are some parameters which shall be passed on the stack. */
if (n_stack_params > 0) {
int curr_ofs = 0;
- int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
/*
* Reverse list of stack parameters if call arguments are from left to right.
}
}
- /*
- * If the stack is decreasing and we do not want to store sequentially,
- * or someone else allocated the call frame
- * we allocate as much space on the stack all parameters need, by
- * moving the stack pointer along the stack's direction.
- */
- if (stack_dir < 0 && !do_seq && !no_alloc) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
- }
-
+ curr_mem = get_Call_mem(irn);
if (! do_seq) {
- obstack_ptr_grow(obst, get_Call_mem(irn));
- curr_mem = new_NoMem();
- } else {
- curr_mem = get_Call_mem(irn);
+ obstack_ptr_grow(obst, curr_mem);
}
for (i = 0; i < n_stack_params; ++i) {
ir_node *param = get_Call_param(irn, p);
ir_node *addr = curr_sp;
ir_node *mem = NULL;
- ir_type *param_type = get_method_param_type(mt, p);
+ ir_type *param_type = get_method_param_type(call_tp, p);
int param_size = get_type_size_bytes(param_type) + arg->space_after;
/*
*/
if (do_seq) {
curr_ofs = 0;
- addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
+ addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before, 0);
add_irn_dep(curr_sp, curr_mem);
}
else {
/* Insert a store for primitive arguments. */
if (is_atomic_type(param_type)) {
ir_node *store;
- store = new_r_Store(irg, bl, curr_mem, addr, param);
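+ /* in the sequential case the stores are chained through memory,
+ * otherwise each store starts from NoMem */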
+ ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
+ store = new_r_Store(irg, bl, mem_input, addr, param);
mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
}
}
/* Collect caller save registers */
- for (i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
- int j;
- const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+ for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
+ unsigned j;
+ const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
for (j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = arch_register_for_index(cls, j);
if (arch_register_type_is(reg, caller_save)) {
n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
n_ins, in, get_Call_type(irn));
}
+ be_Call_set_pop(low_call, call->pop);
ARR_APP1(ir_node *, env->calls, low_call);
+ /* create new stack pointer */
+ curr_sp = new_r_Proj(irg, bl, low_call, get_irn_mode(curr_sp),
+ pn_be_Call_sp);
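+ /* the stack pointer result must reside in the sp register and must
+ * not be touched by the register allocator */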
+ be_set_constr_single_reg(low_call, BE_OUT_POS(pn_be_Call_sp), sp);
+ arch_set_irn_register(arch_env, curr_sp, sp);
+ be_node_set_flags(low_call, BE_OUT_POS(pn_be_Call_sp),
+ arch_irn_flags_ignore | arch_irn_flags_modify_sp);
+
for(i = 0; i < n_res; ++i) {
int pn;
ir_node *proj = res_projs[i];
pn = i + pn_be_Call_first_res;
if(proj == NULL) {
- ir_type *res_type = get_method_res_type(mt, i);
+ ir_type *res_type = get_method_res_type(call_tp, i);
ir_mode *mode = get_type_mode(res_type);
proj = new_r_Proj(irg, bl, low_call, mode, pn);
res_projs[i] = proj;
/* Make additional projs for the caller save registers
and the Keep node which keeps them alive. */
- if (pset_count(caller_save) + n_reg_results > 0) {
+ if (1 || pset_count(caller_save) + n_reg_results > 0) {
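+ /* the condition is always true for now: the stack pointer Proj is
+ * kept in any case */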
const arch_register_t *reg;
ir_node **in, *keep;
- int i, n;
+ int i;
+ int n = 0;
int curr_res_proj
= pn_be_Call_first_res + n_reg_results;
- for (reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
+ /* also keep the stack pointer */
+ ++n;
+ set_irn_link(curr_sp, (void*) sp);
+ obstack_ptr_grow(obst, curr_sp);
+
+ for (reg = pset_first(caller_save); reg; reg = pset_next(caller_save), ++n) {
ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode,
curr_res_proj);
/* remember the register in the link field; we need it later to set the register class of the Keep correctly. */
be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+ arch_set_irn_register(arch_env, proj, reg);
/* a call can produce ignore registers, in this case set the flag and register for the Proj */
if (arch_register_type_is(reg, ignore)) {
}
/* Clean up the stack. */
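+ /* the callee already pops call->pop bytes itself, so the caller only
+ * removes what is left */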
+ assert(stack_size >= call->pop);
+ stack_size -= call->pop;
+
if (stack_size > 0) {
ir_node *mem_proj = NULL;
}
if (! mem_proj) {
- mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
+ mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_be_Call_M_regular);
keep_alive(mem_proj);
}
-
- /* Clean up the stack frame if we allocated it */
- if (! no_alloc) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
- add_irn_dep(curr_sp, mem_proj);
- }
+ }
+ /* Clean up the stack frame, or revert the alignment fix, if we allocated it */
+ if (! no_alloc) {
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size, 0);
}
be_abi_call_free(call);
ir_graph *irg, ir_node *block, dbg_info *dbg)
{
if (stack_alignment > 1) {
- ir_mode *mode = get_irn_mode(size);
- tarval *tv = new_tarval_from_long(stack_alignment-1, mode);
- ir_node *mask = new_r_Const(irg, block, mode, tv);
+ ir_mode *mode;
+ tarval *tv;
+ ir_node *mask;
+ assert(is_po2(stack_alignment));
+
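+ /* adding (alignment - 1) and masking with -alignment rounds the
+ * size up to the next multiple of the alignment */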
+ mode = get_irn_mode(size);
+ tv = new_tarval_from_long(stack_alignment-1, mode);
+ mask = new_r_Const(irg, block, mode, tv);
size = new_rd_Add(dbg, irg, block, size, mask, mode);
tv = new_tarval_from_long(-(long)stack_alignment, mode);
ir_node *new_alloc, *size, *addr, *ins[2];
unsigned stack_alignment;
- if (get_Alloc_where(alloc) != stack_alloc) {
- assert(0);
- return alloc;
- }
+ assert(get_Alloc_where(alloc) == stack_alloc);
block = get_nodes_block(alloc);
irg = get_irn_irg(block);
ir_node *irn = get_edge_src_irn(edge);
assert(is_Proj(irn));
- switch(get_Proj_proj(irn)) {
+ switch (get_Proj_proj(irn)) {
case pn_Alloc_M:
alloc_mem = irn;
break;
dbg = get_irn_dbg_info(alloc);
/* we might need to multiply the size with the element size */
- if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
+ if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
mode_Iu);
ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
We cannot omit it. */
env->call->flags.bits.try_omit_fp = 0;
- /* FIXME: size must be here round up for the stack alignment, but
- this must be transmitted from the backend. */
- stack_alignment = 4;
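+ /* the arch_env stores the stack alignment as a log2 value */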
+ stack_alignment = 1 << env->arch_env->stack_alignment;
size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
- new_alloc = be_new_AddSP(env->isa->sp, irg, block, curr_sp, size);
+ new_alloc = be_new_AddSP(env->arch_env->sp, irg, block, curr_sp, size);
set_irn_dbg_info(new_alloc, dbg);
if(alloc_mem != NULL) {
unsigned stack_alignment;
dbg_info *dbg;
- if (get_Free_where(free) != stack_alloc) {
- assert(0);
- return free;
- }
+ assert(get_Free_where(free) == stack_alloc);
block = get_nodes_block(free);
irg = get_irn_irg(block);
type = get_Free_type(free);
- sp_mode = env->isa->sp->reg_class->mode;
+ sp_mode = env->arch_env->sp->reg_class->mode;
dbg = get_irn_dbg_info(free);
/* we might need to multiply the size with the element size */
- if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
+ if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
ir_node *mul = new_rd_Mul(dbg, irg, block, get_Free_size(free),
size = get_Free_size(free);
}
- /* FIXME: size must be here round up for the stack alignment, but
- this must be transmitted from the backend. */
- stack_alignment = 4;
- size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
+ stack_alignment = 1 << env->arch_env->stack_alignment;
+ size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
/* The stack pointer will be modified in an unknown manner.
We cannot omit it. */
env->call->flags.bits.try_omit_fp = 0;
- subsp = be_new_SubSP(env->isa->sp, irg, block, curr_sp, size);
+ subsp = be_new_SubSP(env->arch_env->sp, irg, block, curr_sp, size);
set_irn_dbg_info(subsp, dbg);
mem = new_r_Proj(irg, block, subsp, mode_M, pn_be_SubSP_M);
}
/**
- * Walker: links all Call/alloc/Free nodes to the Block they are contained.
+ * Walker: links all Call/Alloc/Free nodes to the Block they are contained.
+ * Clears the irg_is_leaf flag if a Call is detected.
*/
-static void link_calls_in_block_walker(ir_node *irn, void *data)
+static void link_ops_in_block_walker(ir_node *irn, void *data)
{
ir_opcode code = get_irn_opcode(irn);
if (code == iro_Call ||
- (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
- (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
+ (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
+ (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
be_abi_irg_t *env = data;
ir_node *bl = get_nodes_block(irn);
void *save = get_irn_link(bl);
/**
* Block-walker:
- * Process all Call nodes inside a basic block.
+ * Process all Call/Alloc/Free nodes inside a basic block.
* Note that the link field of the block must contain a linked list of all
* Call nodes inside the Block. We first order this list according to data dependency
* and then connect the calls together.
*/
-static void process_calls_in_block(ir_node *bl, void *data)
+static void process_ops_in_block(ir_node *bl, void *data)
{
be_abi_irg_t *env = data;
ir_node *curr_sp = env->init_sp;
ir_node *irn;
int n;
- for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
+ for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
obstack_ptr_grow(&env->obst, irn);
/* If there were call nodes in the block. */
- if(n > 0) {
+ if (n > 0) {
ir_node *keep;
ir_node **nodes;
int i;
/* order the call nodes according to data dependency */
qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);
- for(i = n - 1; i >= 0; --i) {
+ for (i = n - 1; i >= 0; --i) {
ir_node *irn = nodes[i];
DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
- switch(get_irn_opcode(irn)) {
+ switch (get_irn_opcode(irn)) {
case iro_Call:
+ if (! be_omit_fp) {
+ /* The stack pointer will be modified due to a call. */
+ env->call->flags.bits.try_omit_fp = 0;
+ }
curr_sp = adjust_call(env, irn, curr_sp);
break;
case iro_Alloc:
- curr_sp = adjust_alloc(env, irn, curr_sp);
+ if (get_Alloc_where(irn) == stack_alloc)
+ curr_sp = adjust_alloc(env, irn, curr_sp);
break;
case iro_Free:
- curr_sp = adjust_free(env, irn, curr_sp);
+ if (get_Free_where(irn) == stack_alloc)
+ curr_sp = adjust_free(env, irn, curr_sp);
break;
default:
panic("invalid call");
obstack_free(&env->obst, nodes);
- /* Keep the last stack state in the block by tying it to Keep node */
- if(curr_sp != env->init_sp) {
+ /* Keep the last stack state in the block by tying it to a Keep node;
+ * the stack pointer Proj of a Call is already kept */
+ if (curr_sp != env->init_sp &&
+ !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
nodes[0] = curr_sp;
- keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl),
- bl, 1, nodes);
+ keep = be_new_Keep(env->arch_env->sp->reg_class,
+ get_irn_irg(bl), bl, 1, nodes);
pmap_insert(env->keep_map, bl, keep);
}
}
ir_graph *irg = env->birg->irg;
env->call->flags.bits.irg_is_leaf = 1;
- irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+ irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
ir_heights = heights_new(env->birg->irg);
- irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+ irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
heights_free(ir_heights);
}
-#if 0 /*
-static ir_node *setup_frame(be_abi_irg_t *env)
-{
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
- const arch_register_t *sp = isa->sp;
- const arch_register_t *bp = isa->bp;
- be_abi_call_flags_bits_t flags = env->call->flags.bits;
- ir_graph *irg = env->birg->irg;
- ir_node *bl = get_irg_start_block(irg);
- ir_node *no_mem = get_irg_no_mem(irg);
- ir_node *old_frame = get_irg_frame(irg);
- ir_node *stack = pmap_get(env->regs, (void *) sp);
- ir_node *frame = pmap_get(env->regs, (void *) bp);
-
- int stack_nr = get_Proj_proj(stack);
-
- if(flags.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
- frame = stack;
- }
-
- else {
- frame = be_new_Copy(bp->reg_class, irg, bl, stack);
-
- be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
- if(!flags.fp_free) {
- be_set_constr_single_reg(frame, -1, bp);
- be_node_set_flags(frame, -1, arch_irn_flags_ignore);
- arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
- }
-
- stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
- }
-
- be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
- env->init_sp = stack;
- set_irg_frame(irg, frame);
- edges_reroute(old_frame, frame, irg);
-
- return frame;
-}
-
-static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
-{
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
- const arch_register_t *sp = isa->sp;
- const arch_register_t *bp = isa->bp;
- ir_graph *irg = env->birg->irg;
- ir_node *ret_mem = get_Return_mem(ret);
- ir_node *frame = get_irg_frame(irg);
- ir_node *bl = get_nodes_block(ret);
- ir_node *stack = get_irn_link(bl);
-
- pmap_entry *ent;
-
- if(env->call->flags.bits.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
- }
-
- else {
- stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
- be_set_constr_single_reg(stack, -1, sp);
- be_node_set_flags(stack, -1, arch_irn_flags_ignore);
- }
-
- pmap_foreach(env->regs, ent) {
- const arch_register_t *reg = ent->key;
- ir_node *irn = ent->value;
-
- if(reg == sp)
- obstack_ptr_grow(&env->obst, stack);
- else if(reg == bp)
- obstack_ptr_grow(&env->obst, frame);
- else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
- obstack_ptr_grow(obst, irn);
- }
-}
-*/
-#endif
-
/**
* Computes the stack argument layout type.
* Changes a possibly allocated value param type by moving
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map)
{
int dir = env->call->flags.bits.left_to_right ? 1 : -1;
- int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
+ int inc = env->birg->main_env->arch_env->stack_dir * dir;
int n = get_method_n_params(method_type);
int curr = inc > 0 ? 0 : n - 1;
int ofs = 0;
int i = 0;
reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
- pmap_foreach(reg_map, ent) {
+ foreach_pmap(reg_map, ent) {
res[i].reg = ent->key;
res[i].irn = ent->value;
i++;
* @param mem the current memory
* @param n_res number of return results
*/
-static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
- be_abi_call_t *call = env->call;
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
-
+static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
+ ir_node *mem, int n_res)
+{
+ be_abi_call_t *call = env->call;
+ const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ dbg_info *dbgi;
pmap *reg_map = pmap_create();
ir_node *keep = pmap_get(env->keep_map, bl);
int in_max;
ir_node *ret;
int i, n;
+ unsigned pop;
ir_node **in;
ir_node *stack;
const arch_register_t **regs;
it then. Else we use the stack from the start block and let
the ssa construction fix the usage.
*/
- stack = be_abi_reg_map_get(env->regs, isa->sp);
+ stack = be_abi_reg_map_get(env->regs, arch_env->sp);
if (keep) {
stack = get_irn_n(keep, 0);
be_kill_node(keep);
}
/* Add uses of the callee save registers. */
- pmap_foreach(env->regs, ent) {
+ foreach_pmap(env->regs, ent) {
const arch_register_t *reg = ent->key;
if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
pmap_insert(reg_map, ent->key, ent->value);
}
- be_abi_reg_map_set(reg_map, isa->sp, stack);
+ be_abi_reg_map_set(reg_map, arch_env->sp, stack);
/* Make the Epilogue node and call the arch's epilogue maker. */
create_barrier(env, bl, &mem, reg_map, 1);
regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
in[0] = mem;
- in[1] = be_abi_reg_map_get(reg_map, isa->sp);
+ in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
regs[0] = NULL;
- regs[1] = isa->sp;
+ regs[1] = arch_env->sp;
n = 2;
/* clear SP entry, since it has already been grown. */
- pmap_insert(reg_map, (void *) isa->sp, NULL);
+ pmap_insert(reg_map, (void *) arch_env->sp, NULL);
for(i = 0; i < n_res; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
}
/* grow the rest of the stuff. */
- pmap_foreach(reg_map, ent) {
+ foreach_pmap(reg_map, ent) {
if(ent->value) {
in[n] = ent->value;
regs[n++] = ent->key;
}
/* The in array for the new back end return is now ready. */
- ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
+ if(irn != NULL) {
+ dbgi = get_irn_dbg_info(irn);
+ } else {
+ dbgi = NULL;
+ }
+ /* we have to pop the shadow parameter in case of struct returns */
+ pop = call->pop;
+ ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
for(i = 0; i < n; ++i)
ir_node *bl = get_nodes_block(irn);
ir_node *nw;
- nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
+ nw = be_new_FrameAddr(env->arch_env->sp->reg_class, irg, bl, frame, ent);
exchange(irn, nw);
/* check if it's a param Sel and if we have not seen this entity before */
ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
const ir_edge_t *edge;
optimization_state_t state;
- int offset;
+ unsigned offset;
foreach_block_succ(start_bl, edge) {
ir_node *succ = get_edge_src_irn(edge);
ir_node *addr;
/* address for the backing store */
- addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
+ addr = be_new_FrameAddr(env->arch_env->sp->reg_class, irg, first_bl, frame, ent);
if (store)
mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
/* move all entities to the frame type */
frame_tp = get_irg_frame_type(irg);
offset = get_type_size_bytes(frame_tp);
+
+ /* we will add new entities: set the layout to undefined */
+ assert(get_type_state(frame_tp) == layout_fixed);
+ set_type_state(frame_tp, layout_undefined);
for (ent = new_list; ent; ent = get_entity_link(ent)) {
- ir_type *tp = get_entity_type(ent);
- int align = get_type_alignment_bytes(tp);
+ ir_type *tp = get_entity_type(ent);
+ unsigned align = get_type_alignment_bytes(tp);
offset += align - 1;
- offset &= -align;
+ offset &= ~(align - 1);
set_entity_owner(ent, frame_tp);
add_class_member(frame_tp, ent);
/* must be automatic to set a fixed layout */
offset += get_type_size_bytes(tp);
}
set_type_size_bytes(frame_tp, offset);
+ /* fix the layout again */
+ set_type_state(frame_tp, layout_fixed);
}
}
-#if 0
+#if 1
/**
* The start block has no jump, instead it has an initial exec Proj.
* The backend wants to handle all blocks the same way, so we replace
static void modify_irg(be_abi_irg_t *env)
{
be_abi_call_t *call = env->call;
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
- const arch_register_t *sp = arch_isa_sp(isa);
+ const arch_env_t *arch_env= env->birg->main_env->arch_env;
+ const arch_register_t *sp = arch_env_sp(arch_env);
ir_graph *irg = env->birg->irg;
ir_node *bl = get_irg_start_block(irg);
ir_node *end = get_irg_end_block(irg);
pset *dont_save = pset_new_ptr(8);
int n_params;
- int i, j, n;
+ int i, n;
+ unsigned j;
reg_node_map_t *rm;
const arch_register_t *fp_reg;
ir_node *frame_pointer;
- ir_node *barrier;
ir_node *reg_params_bl;
ir_node **args;
ir_node *arg_tuple;
arg_type = compute_arg_type(env, call, method_type, ¶m_map);
bet_type = call->cb->get_between_type(env->cb);
- stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
+ stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
/* Count the register params and add them to the number of Projs for the RegParams node */
for(i = 0; i < n_params; ++i) {
}
/* Collect all callee-save registers */
- for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
- const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+ for(i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
+ const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
for(j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = &cls->regs[j];
if(arch_register_type_is(reg, callee_save) ||
}
pmap_insert(env->regs, (void *) sp, NULL);
- pmap_insert(env->regs, (void *) isa->bp, NULL);
+ pmap_insert(env->regs, (void *) arch_env->bp, NULL);
reg_params_bl = get_irg_start_block(irg);
env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
add_irn_dep(env->reg_params, get_irg_start(irg));
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
+ env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
- env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
+ create_barrier(env, bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
if (arg->in_reg) {
repl = pmap_get(env->regs, (void *) arg->reg);
- }
-
- else if(arg->on_stack) {
+ } else if(arg->on_stack) {
ir_node *addr = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
/* For atomic parameters which are actually used, we create a Load node. */
if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
- ir_mode *mode = get_type_mode(param_type);
- ir_node *load = new_rd_Load(NULL, irg, reg_params_bl,
- new_NoMem(), addr, mode);
- repl = new_rd_Proj(NULL, irg, reg_params_bl, load,
- mode, pn_Load_res);
- }
+ ir_mode *mode = get_type_mode(param_type);
+ ir_mode *load_mode = arg->load_mode;
- /* The stack parameter is not primitive (it is a struct or array),
- we thus will create a node representing the parameter's address
- on the stack. */
- else {
+ ir_node *load = new_r_Load(irg, reg_params_bl, new_NoMem(), addr, load_mode);
+ set_irn_pinned(load, op_pin_state_floats);
+ repl = new_r_Proj(irg, reg_params_bl, load, load_mode, pn_Load_res);
+
+ if (mode != load_mode) {
+ repl = new_r_Conv(irg, reg_params_bl, repl, mode);
+ }
+ } else {
+ /* The stack parameter is not primitive (it is a struct or array),
+ * we thus will create a node representing the parameter's address
+ * on the stack. */
repl = addr;
}
}
ir_node *irn = get_Block_cfgpred(end, i);
if (is_Return(irn)) {
- ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
+ ir_node *blk = get_nodes_block(irn);
+ ir_node *mem = get_Return_mem(irn);
+ ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
exchange(irn, ret);
}
}
del_pset(dont_save);
obstack_free(&env->obst, args);
- /* this was needed for STA backend... */
-#if 0
/* handle start block here (place a jump in the block) */
i = 0;
irg_block_walk_graph(irg, fix_start_block, NULL, &i);
-#endif
}
/** Fix the state inputs of calls that still hang on unknowns */
static
void fix_call_state_inputs(be_abi_irg_t *env)
{
- const arch_isa_t *isa = env->isa;
+ const arch_env_t *arch_env = env->arch_env;
int i, n, n_states;
arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
/* Collect caller save registers */
- n = arch_isa_get_n_reg_class(isa);
+ n = arch_env_get_n_reg_class(arch_env);
for(i = 0; i < n; ++i) {
- int j;
- const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+ unsigned j;
+ const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
for(j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = arch_register_for_index(cls, j);
if(arch_register_type_is(reg, state)) {
arity = get_irn_arity(call);
- /* the statereg inputs are the last n inputs of the calls */
+ /* the state reg inputs are the last n inputs of the calls */
for(s = 0; s < n_states; ++s) {
int inp = arity - n_states + s;
const arch_register_t *reg = stateregs[s];
}
}
+/**
+ * Create a trampoline entity for the given method.
+ */
+static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
+{
+ ir_type *type = get_entity_type(method);
+ ident *old_id = get_entity_ld_ident(method);
+ ident *id = mangle3("L", old_id, "$stub");
+ ir_type *parent = be->pic_trampolines_type;
+ ir_entity *ent = new_entity(parent, old_id, type);
+ set_entity_ld_ident(ent, id);
+ set_entity_visibility(ent, visibility_local);
+ set_entity_variability(ent, variability_uninitialized);
+
+ return ent;
+}
+
+/**
+ * Returns the trampoline entity for the given method.
+ */
+static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
+{
+ ir_entity *result = pmap_get(env->ent_trampoline_map, method);
+ if (result == NULL) {
+ result = create_trampoline(env, method);
+ pmap_insert(env->ent_trampoline_map, method, result);
+ }
+
+ return result;
+}
+
+/**
+ * Returns non-zero if a given entity can be accessed using a relative address.
+ */
+static int can_address_relative(ir_entity *entity)
+{
+ return get_entity_variability(entity) == variability_initialized
+ || get_entity_visibility(entity) == visibility_local;
+}
+
+/** Patches SymConsts to work in position-independent code. */
+static void fix_pic_symconsts(ir_node *node, void *data)
+{
+ ir_graph *irg;
+ ir_node *pic_base;
+ ir_node *add;
+ ir_node *block;
+ ir_node *unknown;
+ ir_mode *mode;
+ ir_node *load;
+ ir_node *load_res;
+ be_abi_irg_t *env = data;
+ int arity, i;
+ be_main_env_t *be = env->birg->main_env;
+
+ arity = get_irn_arity(node);
+ for (i = 0; i < arity; ++i) {
+ ir_node *pred = get_irn_n(node, i);
+ ir_entity *entity;
+ if (!is_SymConst(pred))
+ continue;
+
+ entity = get_SymConst_entity(pred);
+ block = get_nodes_block(pred);
+ irg = get_irn_irg(pred);
+
+ /* calls can jump to relative addresses, so we can directly jump to
+ the (relatively) known call address or the trampoline */
+ if (is_Call(node) && i == 1) {
+ dbg_info *dbgi;
+ ir_entity *trampoline;
+ ir_node *trampoline_const;
+
+ if (can_address_relative(entity))
+ continue;
+
+ dbgi = get_irn_dbg_info(pred);
+ trampoline = get_trampoline(be, entity);
+ trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code, trampoline, NULL);
+ set_irn_n(node, i, trampoline_const);
+ continue;
+ }
+
+ /* everything else is accessed relative to EIP */
+ mode = get_irn_mode(pred);
+ unknown = new_r_Unknown(irg, mode);
+ pic_base = arch_code_generator_get_pic_base(env->birg->cg);
+ add = new_r_Add(irg, block, pic_base, pred, mode);
+
+ /* make sure the walker doesn't visit this add again */
+ mark_irn_visited(add);
+
+ /* all ok now for locally constructed stuff */
+ if (can_address_relative(entity)) {
+ set_irn_n(node, i, add);
+ continue;
+ }
+
+ /* we need an extra indirection for global data outside our current
+ module. The loads are always safe and can therefore float
+ and need no memory input */
+ load = new_r_Load(irg, block, new_NoMem(), add, mode);
+ load_res = new_r_Proj(irg, block, load, mode, pn_Load_res);
+ set_irn_pinned(load, op_pin_state_floats);
+
+ set_irn_n(node, i, load_res);
+ }
+}
+
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
be_abi_irg_t *env = xmalloc(sizeof(env[0]));
optimization_state_t state;
unsigned *limited_bitset;
- be_omit_fp = birg->main_env->options->omit_fp;
+ be_omit_fp = birg->main_env->options->omit_fp;
+ be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
obstack_init(&env->obst);
- env->isa = birg->main_env->arch_env->isa;
+ env->arch_env = birg->main_env->arch_env;
env->method_type = get_entity_type(get_irg_entity(irg));
- env->call = be_abi_call_new(env->isa->sp->reg_class);
- arch_isa_get_call_abi(env->isa, env->method_type, env->call);
+ env->call = be_abi_call_new(env->arch_env->sp->reg_class);
+ arch_env_get_call_abi(env->arch_env, env->method_type, env->call);
env->ignore_regs = pset_new_ptr_default();
env->keep_map = pmap_create();
env->birg = birg;
env->sp_req.type = arch_register_req_type_limited;
- env->sp_req.cls = arch_register_get_class(env->isa->sp);
+ env->sp_req.cls = arch_register_get_class(env->arch_env->sp);
limited_bitset = rbitset_obstack_alloc(&env->obst, env->sp_req.cls->n_regs);
- rbitset_set(limited_bitset, arch_register_get_index(env->isa->sp));
+ rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
env->sp_req.limited = limited_bitset;
env->sp_cls_req.type = arch_register_req_type_normal;
- env->sp_cls_req.cls = arch_register_get_class(env->isa->sp);
+ env->sp_cls_req.cls = arch_register_get_class(env->arch_env->sp);
/* Beware: later we replace this node by the real one, ensure it is not CSE'd
to another Unknown or the stack pointer gets used */
save_optimization_state(&state);
set_optimize(0);
- env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
+ env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
restore_optimization_state(&state);
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
env->calls = NEW_ARR_F(ir_node*, 0);
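+ /* position-independent code: route SymConsts through the PIC base
+ * (and external calls through trampolines) before lowering */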
+ if (birg->main_env->options->pic) {
+ irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
+ }
+
/* Lower all call nodes in the IRG. */
process_calls(env);
/* We don't need the keep map anymore. */
pmap_destroy(env->keep_map);
+ env->keep_map = NULL;
/* calls array is not needed anymore */
DEL_ARR_F(env->calls);
+ env->calls = NULL;
/* reroute the stack origin of the calls to the true stack origin. */
exchange(dummy, env->init_sp);
/* Make some important node pointers survive the dead node elimination. */
survive_dce_register_irn(env->dce_survivor, &env->init_sp);
- pmap_foreach(env->regs, ent) {
+ foreach_pmap(env->regs, ent) {
survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
}
be_irg_t *birg = env->birg;
be_lv_t *lv = be_get_birg_liveness(birg);
fix_stack_walker_env_t walker_env;
- arch_isa_t *isa;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
walker_env.arch_env = birg->main_env->arch_env;
- isa = walker_env.arch_env->isa;
irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
ir_node *phi = phis[i];
be_set_phi_reg_req(walker_env.arch_env, phi, &env->sp_req);
be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
- arch_set_irn_register(walker_env.arch_env, phi, env->isa->sp);
+ arch_set_irn_register(walker_env.arch_env, phi, env->arch_env->sp);
}
be_ssa_construction_destroy(&senv);
DEL_ARR_F(walker_env.sp_nodes);
}
-static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
+static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
const arch_env_t *arch_env = env->birg->main_env->arch_env;
- int omit_fp = env->call->flags.bits.try_omit_fp;
- ir_node *irn;
+ int omit_fp = env->call->flags.bits.try_omit_fp;
+ ir_node *irn;
+ int wanted_bias = real_bias;
sched_foreach(bl, irn) {
int ofs;
*/
ir_entity *ent = arch_get_frame_entity(arch_env, irn);
if(ent) {
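+ /* if a frame pointer is present, entities are addressed relative to
+ * it, so no stack-pointer bias applies */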
+ int bias = omit_fp ? real_bias : 0;
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(arch_env, irn, offset);
- DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
+ DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
+ ent, offset, bias));
}
- if(omit_fp || be_is_IncSP(irn)) {
- /*
- * If the node modifies the stack pointer by a constant offset,
- * record that in the bias.
- */
- ofs = arch_get_sp_bias(arch_env, irn);
-
- if(be_is_IncSP(irn)) {
- if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
- ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
- be_set_IncSP_offset(irn, ofs);
- } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
- ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
- be_set_IncSP_offset(irn, ofs);
+ /*
+ * If the node modifies the stack pointer by a constant offset,
+ * record that in the bias.
+ */
+ ofs = arch_get_sp_bias(arch_env, irn);
+
+ if(be_is_IncSP(irn)) {
+ /* fill in real stack frame size */
+ if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
+ ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ofs = (int) get_type_size_bytes(frame_type);
+ be_set_IncSP_offset(irn, ofs);
+ } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
+ ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ofs = - (int)get_type_size_bytes(frame_type);
+ be_set_IncSP_offset(irn, ofs);
+ } else {
+ if (be_get_IncSP_align(irn)) {
+ /* patch IncSP to produce an aligned stack pointer */
+ ir_type *between_type = env->frame->between_type;
+ int between_size = get_type_size_bytes(between_type);
+ int alignment = 1 << env->arch_env->stack_alignment;
+ int delta = (real_bias + ofs + between_size) & (alignment - 1);
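+ /* delta is the stack pointer's misalignment after this IncSP;
+ * enlarge the IncSP by the missing bytes to restore alignment */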
+ assert(ofs >= 0);
+ if (delta > 0) {
+ be_set_IncSP_offset(irn, ofs + alignment - delta);
+ real_bias += alignment - delta;
+ }
+ } else {
+ /* adjust so real_bias corresponds with wanted_bias */
+ int delta = wanted_bias - real_bias;
+ assert(delta <= 0);
+ if(delta != 0) {
+ be_set_IncSP_offset(irn, ofs + delta);
+ real_bias += delta;
+ }
}
}
-
- if(omit_fp)
- bias += ofs;
}
+
+ real_bias += ofs;
+ wanted_bias += ofs;
}
- return bias;
+ assert(real_bias == wanted_bias);
+ return real_bias;
}
/**
*/
struct bias_walk {
be_abi_irg_t *env; /**< The ABI irg environment. */
- int start_block_bias; /**< The bias at the end of the start block. */
- ir_node *start_block; /**< The start block of the current graph. */
+ int start_block_bias; /**< The bias at the end of the start block. */
+ int between_size; /**< The size of the between type. */
+ ir_node *start_block; /**< The start block of the current graph. */
};
/**
/* Determine the stack bias at the end of the start block. */
bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
+ bw.between_size = get_type_size_bytes(env->frame->between_type);
/* fix the bias in all other blocks */
bw.env = env;
return pmap_get(abi->regs, (void *) reg);
}
-ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi)
-{
- return abi->start_barrier;
-}
-
/**
* Returns non-zero if the ABI has omitted the frame pointer in
* the current graph.