static heights_t *ir_heights;
-/* Flag: if set, try to omit the frame pointer if called by the backend */
+/** Flag: if set, try to omit the frame pointer in all routines. */
static int be_omit_fp = 1;
+/** Flag: if set, try to omit the frame pointer in leaf routines only. */
+static int be_omit_leaf_fp = 1;
+
/*
_ ____ ___ ____ _ _ _ _
/ \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
call->cb = NULL;
call->cls_addr = cls_addr;
- call->flags.bits.try_omit_fp = be_omit_fp;
+ call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
return call;
}
}
/** TODO: this is not correct for cases where return values are passed
- * on the stack, but no known ABI does this currentl...
+ * on the stack, but no known ABI does this currently...
*/
n_reg_results = n_res;
/* kill the ProjT node */
if (res_proj != NULL) {
- be_kill_node(res_proj);
+ kill_node(res_proj);
}
/* Make additional projs for the caller save registers
ir_node *new_alloc, *size, *addr, *ins[2];
unsigned stack_alignment;
- if (get_Alloc_where(alloc) != stack_alloc) {
- assert(0);
- return alloc;
- }
+ assert(get_Alloc_where(alloc) == stack_alloc);
block = get_nodes_block(alloc);
irg = get_irn_irg(block);
ir_node *irn = get_edge_src_irn(edge);
assert(is_Proj(irn));
- switch(get_Proj_proj(irn)) {
+ switch (get_Proj_proj(irn)) {
case pn_Alloc_M:
alloc_mem = irn;
break;
dbg = get_irn_dbg_info(alloc);
/* we might need to multiply the size with the element size */
- if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
+ if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
mode_Iu);
ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
We cannot omit it. */
env->call->flags.bits.try_omit_fp = 0;
- stack_alignment = env->arch_env->stack_alignment;
+ stack_alignment = 1 << env->arch_env->stack_alignment;
size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
new_alloc = be_new_AddSP(env->arch_env->sp, irg, block, curr_sp, size);
set_irn_dbg_info(new_alloc, dbg);
unsigned stack_alignment;
dbg_info *dbg;
- if (get_Free_where(free) != stack_alloc) {
- assert(0);
- return free;
- }
+ assert(get_Free_where(free) == stack_alloc);
block = get_nodes_block(free);
irg = get_irn_irg(block);
dbg = get_irn_dbg_info(free);
/* we might need to multiply the size with the element size */
- if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
+ if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
ir_node *mul = new_rd_Mul(dbg, irg, block, get_Free_size(free),
size = get_Free_size(free);
}
- stack_alignment = env->arch_env->stack_alignment;
+ stack_alignment = 1 << env->arch_env->stack_alignment;
size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
/* The stack pointer will be modified in an unknown manner.
}
/**
- * Walker: links all Call/alloc/Free nodes to the Block they are contained.
+ * Walker: links all Call/Alloc/Free nodes to the Block they are contained.
+ * Clears the irg_is_leaf flag if a Call is detected.
*/
-static void link_calls_in_block_walker(ir_node *irn, void *data)
+static void link_ops_in_block_walker(ir_node *irn, void *data)
{
ir_opcode code = get_irn_opcode(irn);
if (code == iro_Call ||
- (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
- (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
+ (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
+ (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
be_abi_irg_t *env = data;
ir_node *bl = get_nodes_block(irn);
void *save = get_irn_link(bl);
/**
* Block-walker:
- * Process all Call nodes inside a basic block.
+ * Process all Call/Alloc/Free nodes inside a basic block.
* Note that the link field of the block must contain a linked list of all
* Call nodes inside the Block. We first order this list according to data dependency
* and that connect the calls together.
*/
-static void process_calls_in_block(ir_node *bl, void *data)
+static void process_ops_in_block(ir_node *bl, void *data)
{
be_abi_irg_t *env = data;
ir_node *curr_sp = env->init_sp;
ir_node *irn;
int n;
- for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
+ for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
obstack_ptr_grow(&env->obst, irn);
/* If there were call nodes in the block. */
- if(n > 0) {
+ if (n > 0) {
ir_node *keep;
ir_node **nodes;
int i;
/* order the call nodes according to data dependency */
qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);
- for(i = n - 1; i >= 0; --i) {
+ for (i = n - 1; i >= 0; --i) {
ir_node *irn = nodes[i];
DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
- switch(get_irn_opcode(irn)) {
+ switch (get_irn_opcode(irn)) {
case iro_Call:
+ if (! be_omit_fp) {
+ /* The stack pointer will be modified due to a call. */
+ env->call->flags.bits.try_omit_fp = 0;
+ }
curr_sp = adjust_call(env, irn, curr_sp);
break;
case iro_Alloc:
- curr_sp = adjust_alloc(env, irn, curr_sp);
+ if (get_Alloc_where(irn) == stack_alloc)
+ curr_sp = adjust_alloc(env, irn, curr_sp);
break;
case iro_Free:
- curr_sp = adjust_free(env, irn, curr_sp);
+ if (get_Free_where(irn) == stack_alloc)
+ curr_sp = adjust_free(env, irn, curr_sp);
break;
default:
panic("invalid call");
/* Keep the last stack state in the block by tying it to Keep node,
* the proj from calls is already kept */
- if(curr_sp != env->init_sp
- && !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
+ if (curr_sp != env->init_sp &&
+ !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
nodes[0] = curr_sp;
keep = be_new_Keep(env->arch_env->sp->reg_class,
get_irn_irg(bl), bl, 1, nodes);
ir_graph *irg = env->birg->irg;
env->call->flags.bits.irg_is_leaf = 1;
- irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+ irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
ir_heights = heights_new(env->birg->irg);
- irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+ irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
heights_free(ir_heights);
}
stack = be_abi_reg_map_get(env->regs, arch_env->sp);
if (keep) {
stack = get_irn_n(keep, 0);
- be_kill_node(keep);
+ kill_node(keep);
remove_End_keepalive(get_irg_end(env->birg->irg), keep);
}
/* value_param_base anchor is not needed anymore now */
value_param_base = get_irg_value_param_base(irg);
- be_kill_node(value_param_base);
+ kill_node(value_param_base);
set_irg_value_param_base(irg, new_r_Bad(irg));
env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
/* the arg proj is not needed anymore now and should be only used by the anchor */
assert(get_irn_n_edges(arg_tuple) == 1);
- be_kill_node(arg_tuple);
+ kill_node(arg_tuple);
set_irg_args(irg, new_rd_Bad(irg));
/* All Return nodes hang on the End node, so look for them there. */
set_irn_n(call, inp, regnode);
}
}
+
+ DEL_ARR_F(stateregs);
}
/**
optimization_state_t state;
unsigned *limited_bitset;
- be_omit_fp = birg->main_env->options->omit_fp;
+ be_omit_fp = birg->main_env->options->omit_fp;
+ be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
obstack_init(&env->obst);
bitset_set(bs, reg->index);
}
+/**
+ * Fill a raw bitset with the allocatable registers of a register class:
+ * every register whose type is not "ignore" is set, then any register
+ * contained in the ABI's per-graph ignore set (abi->ignore_regs) for this
+ * class is cleared again.
+ *
+ * @param abi         the ABI environment; provides the ignore register pset
+ * @param cls         the register class whose registers are scanned
+ * @param raw_bitset  raw bitset indexed by register index; bits are set and
+ *                    cleared in place (caller presumably zero-initializes it
+ *                    — TODO confirm at call sites)
+ */
+void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
+{
+	unsigned i;
+	arch_register_t *reg;
+
+	/* set all registers of the class that are not statically marked ignore */
+	for (i = 0; i < cls->n_regs; ++i) {
+		if (arch_register_type_is(&cls->regs[i], ignore))
+			continue;
+
+		rbitset_set(raw_bitset, i);
+	}
+
+	/* clear registers this ABI instance additionally decided to ignore;
+	 * registers of other classes in the pset are skipped */
+	for (reg = pset_first(abi->ignore_regs); reg != NULL;
+	     reg = pset_next(abi->ignore_regs)) {
+		if (reg->reg_class != cls)
+			continue;
+
+		rbitset_clear(raw_bitset, reg->index);
+	}
+}
+
/* Returns the stack layout from a abi environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
return abi->frame;
/* patch IncSP to produce an aligned stack pointer */
ir_type *between_type = env->frame->between_type;
int between_size = get_type_size_bytes(between_type);
- int alignment = env->arch_env->stack_alignment;
- int delta = (real_bias + ofs + between_size) % env->arch_env->stack_alignment;
+ int alignment = 1 << env->arch_env->stack_alignment;
+ int delta = (real_bias + ofs + between_size) & (alignment - 1);
assert(ofs >= 0);
if (delta > 0) {
be_set_IncSP_offset(irn, ofs + alignment - delta);