const arch_register_t *sp = arch_env->sp;
be_abi_call_t *call = be_abi_call_new(sp->reg_class);
ir_mode *mach_mode = sp->reg_class->mode;
- int no_alloc = call->flags.bits.frame_is_setup_on_call;
int n_res = get_method_n_ress(call_tp);
- int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
ir_node *res_proj = NULL;
int n_reg_params = 0;
* Note: we also have to do this for stack_size == 0, because we may have
* to adjust stack alignment for the call.
*/
- if (!do_seq && !no_alloc) {
- curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
- }
+ curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
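The comment above explains why this IncSP survives even when stack_size == 0: the ABI may require the stack pointer to be realigned at the call site, and the trailing 1 passed to be_new_IncSP is what distinguishes this node from the plain deallocation further down (which passes 0). A minimal sketch of the arithmetic, assuming a power-of-two alignment and a downward-growing stack; the function and its parameters are illustrative, not libFirm API:

/* Illustration only: bytes to subtract from SP so the outgoing SP is
 * aligned and still covers the argument area. sp_offset is the SP's
 * current distance below an aligned base. */
static int sp_adjustment(int stack_size, int sp_offset, int align)
{
    int target = (sp_offset + stack_size + align - 1) & -align;
    return target - sp_offset; /* may be > 0 even when stack_size == 0 */
}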
dbgi = get_irn_dbg_info(irn);
- /* If there are some parameters which shall be passed on the stack. */
+ /* Handle the parameters that are passed on the stack. */
ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
unsigned n_in = 0;
- /* push params in reverse direction because stack grows downwards */
- if (do_seq) {
- for (i = 0; i < n_stack_params >> 1; ++i) {
- int other = n_stack_params - i - 1;
- int tmp = stack_param_idx[i];
- stack_param_idx[i] = stack_param_idx[other];
- stack_param_idx[other] = tmp;
- }
- }
-
curr_mem = get_Call_mem(irn);
- if (! do_seq) {
- in[n_in++] = curr_mem;
- }
+ in[n_in++] = curr_mem;
for (i = 0; i < n_stack_params; ++i) {
int p = stack_param_idx[i];
- * the stack pointer for the next must be incremented,
- * and the memory value propagated.
- */
- if (do_seq) {
- curr_ofs = 0;
- addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
- param_size + arg->space_before, 0);
- add_irn_dep(curr_sp, curr_mem);
- } else {
- curr_ofs += arg->space_before;
- curr_ofs = round_up2(curr_ofs, arg->alignment);
-
- /* Make the expression to compute the argument's offset. */
- if (curr_ofs > 0) {
- ir_mode *constmode = mach_mode;
- if (mode_is_reference(mach_mode)) {
- constmode = mode_Is;
- }
- addr = new_r_Const_long(irg, constmode, curr_ofs);
- addr = new_r_Add(bl, curr_sp, addr, mach_mode);
- }
- }
+ curr_ofs += arg->space_before;
+ curr_ofs = round_up2(curr_ofs, arg->alignment);
+
+ /* Make the expression to compute the argument's offset. */
+ if (curr_ofs > 0) {
+ ir_mode *constmode = mach_mode;
+ if (mode_is_reference(mach_mode)) {
+ constmode = mode_Is;
+ }
+ addr = new_r_Const_long(irg, constmode, curr_ofs);
+ addr = new_r_Add(bl, curr_sp, addr, mach_mode);
+ }
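For reference, the offset bookkeeping above lays the argument area out front to back: space_before padding first, then a round-up to the argument's alignment, then the argument's own size. A worked example with hypothetical argument records; this round_up2 is the usual power-of-two round-up idiom, a plausible stand-in for libFirm's helper rather than a copy of it:

#include <stdio.h>

struct arg { int space_before; int alignment; int size; };

/* Stand-in for libFirm's round_up2: round x up to a multiple of the
 * power-of-two pot. */
static int round_up2(int x, int pot) { return (x + pot - 1) & -pot; }

int main(void)
{
    struct arg args[2] = { { 0, 4, 4 }, { 0, 8, 8 } };
    int curr_ofs = 0;
    for (int i = 0; i < 2; ++i) {
        curr_ofs += args[i].space_before;
        curr_ofs  = round_up2(curr_ofs, args[i].alignment);
        printf("arg %d at offset %d\n", i, curr_ofs); /* 0, then 8 */
        curr_ofs += args[i].size;
    }
    return 0;
}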
/* Insert a store for primitive arguments. */
if (is_atomic_type(param_type)) {
ir_node *nomem = get_irg_no_mem(irg);
- ir_node *mem_input = do_seq ? curr_mem : nomem;
+ ir_node *mem_input = nomem;
ir_node *store = new_rd_Store(dbgi, bl, mem_input, addr, param, cons_none);
mem = new_r_Proj(store, mode_M, pn_Store_M);
} else {
curr_ofs += param_size;
- if (do_seq)
- curr_mem = mem;
- else
- in[n_in++] = mem;
+ in[n_in++] = mem;
}
- /* We need the sync only, if we didn't build the stores sequentially. */
+ /* Combine the incoming call memory and all argument stores in one Sync. */
- if (! do_seq) {
- if (n_stack_params >= 1) {
- curr_mem = new_r_Sync(bl, n_in, in);
- } else {
- curr_mem = get_Call_mem(irn);
- }
+ if (n_stack_params >= 1) {
+ curr_mem = new_r_Sync(bl, n_in, in);
+ } else {
+ curr_mem = get_Call_mem(irn);
}
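With the sequential mode gone, every argument store starts from NoMem, so no ordering among the stores themselves is recorded; the Sync built above then joins their memory Projs with the call's incoming memory. A sketch of the resulting shape for two stack arguments, reusing the constructor signatures visible in this diff (the helper name and parameters are illustrative):

#include <libfirm/firm.h>

/* Sketch: memory graph for two independent argument stores, as the
 * non-sequential path above builds it. */
static ir_node *make_arg_mem(ir_graph *irg, ir_node *bl, dbg_info *dbgi,
                             ir_node *call_mem,
                             ir_node *addr0, ir_node *val0,
                             ir_node *addr1, ir_node *val1)
{
    ir_node *nomem = get_irg_no_mem(irg);
    /* Both stores take NoMem: they cannot depend on each other. */
    ir_node *st0 = new_rd_Store(dbgi, bl, nomem, addr0, val0, cons_none);
    ir_node *st1 = new_rd_Store(dbgi, bl, nomem, addr1, val1, cons_none);
    ir_node *in[3] = {
        call_mem,                            /* memory before the call */
        new_r_Proj(st0, mode_M, pn_Store_M),
        new_r_Proj(st1, mode_M, pn_Store_M),
    };
    /* The Call consumes the Sync and thus observes all three. */
    return new_r_Sync(bl, 3, in);
}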
}
}
}
- /* Clean up the stack frame or revert alignment fixes if we allocated it */
+ /* Clean up the stack frame and revert any alignment adjustment. */
- if (! no_alloc) {
- curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
- }
+ curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
be_abi_call_free(call);
/**
- * Walker: links all Call/Alloc/Free nodes to the Block they are contained.
- * Clears the irg_is_leaf flag if a Call is detected.
+ * Walker: links all Call/Alloc/Free nodes to the Block containing them.
*/
static void link_ops_in_block_walker(ir_node *irn, void *data)
{
ir_node *bl = get_nodes_block(irn);
void *save = get_irn_link(bl);
- if (code == iro_Call)
- env->call->flags.bits.irg_is_leaf = 0;
-
set_irn_link(irn, save);
set_irn_link(bl, irn);
}
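The two set_irn_link calls above thread an intrusive, newest-first list through the link fields: each linked node stores the block's previous head, and the block stores the newest node. A sketch of how such a list is traversed afterwards; the helper is illustrative, and the chain ends at NULL because firm_clear_link runs before the walker (see below):

#include <libfirm/firm.h>

/* Illustrative helper: visit the nodes that link_ops_in_block_walker
 * attached to the block bl, newest first. */
static void for_each_linked_op(ir_node *bl, void (*fn)(ir_node *op))
{
    for (ir_node *irn = (ir_node*)get_irn_link(bl); irn != NULL;
         irn = (ir_node*)get_irn_link(irn)) {
        fn(irn);
    }
}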
{
be_abi_irg_t *abi = be_get_irg_abi(irg);
- abi->call->flags.bits.irg_is_leaf = 1;
irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi);
ir_heights = heights_new(irg);