unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
- int pos;
+ int pos;
const arch_register_t *reg;
- ir_entity *stack_ent;
- unsigned alignment; /**< stack alignment */
- unsigned space_before; /**< allocate space before */
- unsigned space_after; /**< allocate space after */
+ ir_entity *stack_ent;
+ ir_mode *load_mode;
+ unsigned alignment; /**< stack alignment */
+ unsigned space_before; /**< allocate space before */
+ unsigned space_after; /**< allocate space after */
} be_abi_call_arg_t;
struct _be_abi_call_t {
- be_abi_call_flags_t flags;
+ be_abi_call_flags_t flags;
+ int pop;
const be_abi_callbacks_t *cb;
ir_type *between_type;
set *params;
call->cb = cb;
}
+/**
+ * Set the number of bytes the callee pops off the stack on return
+ * (e.g. for calling conventions with callee-cleanup such as the
+ * hidden compound-return pointer). Must be non-negative.
+ */
+void be_abi_call_set_pop(be_abi_call_t *call, int pop)
+{
+	assert(pop >= 0);
+	call->pop = pop;
+}
/* Set register class for call address */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
}
-void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
+void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
{
be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
arg->on_stack = 1;
+ arg->load_mode = load_mode;
arg->alignment = alignment;
arg->space_before = space_before;
arg->space_after = space_after;
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
{
be_abi_call_t *call = xmalloc(sizeof(call[0]));
+ memset(call, 0, sizeof(call[0]));
call->flags.val = 0;
call->params = new_set(cmp_call_arg, 16);
n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
n_ins, in, get_Call_type(irn));
}
+ be_Call_set_pop(low_call, call->pop);
ARR_APP1(ir_node *, env->calls, low_call);
/* create new stack pointer */
}
/* Clean up the stack. */
+ assert(stack_size >= call->pop);
+ stack_size -= call->pop;
+
if (stack_size > 0) {
ir_node *mem_proj = NULL;
/* Clean up the stack frame if we allocated it */
if (! no_alloc) {
- /* the callee pops the shadow parameter */
- if(get_method_calling_convention(mt) & cc_compound_ret) {
- unsigned size = get_mode_size_bytes(mode_P_data);
- stack_size -= size;
- be_Call_set_pop(low_call, size);
- }
-
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
- //add_irn_dep(curr_sp, mem_proj);
}
}
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
ir_node *mem, int n_res)
{
- ir_graph *irg = env->birg->irg;
- ir_entity *entity = get_irg_entity(irg);
- ir_type *method_type = get_entity_type(entity);
be_abi_call_t *call = env->call;
const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
dbg_info *dbgi;
dbgi = NULL;
}
/* we have to pop the shadow parameter in in case of struct returns */
- pop = 0;
- if(get_method_calling_convention(method_type) & cc_compound_ret) {
- pop = get_mode_size_bytes(mode_P_data);
- }
+ pop = call->pop;
ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
const ir_edge_t *edge;
optimization_state_t state;
- int offset;
+ unsigned offset;
foreach_block_succ(start_bl, edge) {
ir_node *succ = get_edge_src_irn(edge);
frame_tp = get_irg_frame_type(irg);
offset = get_type_size_bytes(frame_tp);
for (ent = new_list; ent; ent = get_entity_link(ent)) {
- ir_type *tp = get_entity_type(ent);
- int align = get_type_alignment_bytes(tp);
+ ir_type *tp = get_entity_type(ent);
+ unsigned align = get_type_alignment_bytes(tp);
offset += align - 1;
- offset &= -align;
+ offset &= ~(align - 1);
set_entity_owner(ent, frame_tp);
add_class_member(frame_tp, ent);
/* must be automatic to set a fixed layout */
if (arg->in_reg) {
repl = pmap_get(env->regs, (void *) arg->reg);
- }
-
- else if(arg->on_stack) {
+ } else if(arg->on_stack) {
ir_node *addr = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
/* For atomic parameters which are actually used, we create a Load node. */
if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
- ir_mode *mode = get_type_mode(param_type);
- ir_node *load = new_rd_Load(NULL, irg, reg_params_bl,
- new_NoMem(), addr, mode);
+ ir_mode *mode = get_type_mode(param_type);
+ ir_mode *load_mode = arg->load_mode;
+
+ ir_node *load = new_r_Load(irg, reg_params_bl, new_NoMem(), addr, load_mode);
set_irn_pinned(load, op_pin_state_floats);
- repl = new_rd_Proj(NULL, irg, reg_params_bl, load,
- mode, pn_Load_res);
- }
+ repl = new_r_Proj(irg, reg_params_bl, load, load_mode, pn_Load_res);
- /* The stack parameter is not primitive (it is a struct or array),
- we thus will create a node representing the parameter's address
- on the stack. */
- else {
+ if (mode != load_mode) {
+ repl = new_r_Conv(irg, reg_params_bl, repl, mode);
+ }
+ } else {
+ /* The stack parameter is not primitive (it is a struct or array),
+ * we thus will create a node representing the parameter's address
+ * on the stack. */
repl = addr;
}
}
if(be_is_IncSP(irn)) {
if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
- ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+ ofs = (int)get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
} else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
- ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+ ofs = - (int)get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
}
}