ir_node *init_sp; /**< The node representing the stack pointer
at the start of the function. */
- ir_node *start_barrier; /**< The barrier of the start block */
-
ir_node *reg_params; /**< The reg params node. */
pmap *regs; /**< A map of all callee-save and ignore regs to
their Projs to the RegParams node. */
* @param curr_sp The stack pointer node to use.
* @return The stack pointer after the call.
*/
-static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
+static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
ir_graph *irg = env->birg->irg;
const arch_env_t *arch_env = env->birg->main_env->arch_env;
ir_mode *mach_mode = sp->reg_class->mode;
struct obstack *obst = &env->obst;
int no_alloc = call->flags.bits.frame_is_setup_on_call;
+ int n_res = get_method_n_ress(mt);
ir_node *res_proj = NULL;
- int curr_res_proj = pn_Call_max;
int n_reg_params = 0;
int n_stack_params = 0;
int n_ins;
ir_node *low_call;
ir_node **in;
ir_node **res_projs;
+ int n_reg_results = 0;
const arch_register_t *reg;
const ir_edge_t *edge;
int *reg_param_idxs;
*/
if (stack_dir < 0 && !do_seq && !no_alloc) {
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
- if (alloca_copy) {
- add_irn_dep(curr_sp, alloca_copy);
- alloca_copy = NULL;
- }
}
+ curr_mem = get_Call_mem(irn);
if (! do_seq) {
- obstack_ptr_grow(obst, get_Call_mem(irn));
- curr_mem = new_NoMem();
- } else {
- curr_mem = get_Call_mem(irn);
+ obstack_ptr_grow(obst, curr_mem);
}
for (i = 0; i < n_stack_params; ++i) {
if (do_seq) {
curr_ofs = 0;
addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
- if (alloca_copy) {
- add_irn_dep(curr_sp, alloca_copy);
- alloca_copy = NULL;
- }
add_irn_dep(curr_sp, curr_mem);
}
else {
/* Insert a store for primitive arguments. */
if (is_atomic_type(param_type)) {
ir_node *store;
- store = new_r_Store(irg, bl, curr_mem, addr, param);
+ ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
+ store = new_r_Store(irg, bl, mem_input, addr, param);
mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
}
/* search the greatest result proj number */
- /* TODO: what if the result is NOT used? Currently there is
- * no way to detect this later, especially there is no way to
- * see this in the proj numbers.
- * While this is ok for the register allocator, it is bad for
- * backends which need to change the be_Call further (x87 simulator
- * for instance. However for this particular case the call_type is
- * sufficient.).
- */
+ res_projs = alloca(n_res * sizeof(res_projs[0]));
+ memset(res_projs, 0, n_res * sizeof(res_projs[0]));
+
foreach_out_edge(irn, edge) {
const ir_edge_t *res_edge;
- ir_node *irn = get_edge_src_irn(edge);
+ ir_node *irn = get_edge_src_irn(edge);
- if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
- res_proj = irn;
- foreach_out_edge(irn, res_edge) {
- int proj;
- be_abi_call_arg_t *arg;
- ir_node *res = get_edge_src_irn(res_edge);
-
- assert(is_Proj(res));
-
- proj = get_Proj_proj(res);
- arg = get_call_arg(call, 1, proj);
-
- /*
- shift the proj number to the right, since we will drop the
- unspeakable Proj_T from the Call. Therefore, all real argument
- Proj numbers must be increased by pn_be_Call_first_res
- */
- proj += pn_be_Call_first_res;
- set_Proj_proj(res, proj);
- obstack_ptr_grow(obst, res);
-
- if (proj > curr_res_proj)
- curr_res_proj = proj;
- if (arg->in_reg) {
- pset_remove_ptr(caller_save, arg->reg);
- //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
- }
- }
+ if(!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
+ continue;
+
+ foreach_out_edge(irn, res_edge) {
+ int proj;
+ ir_node *res = get_edge_src_irn(res_edge);
+
+ assert(is_Proj(res));
+
+ proj = get_Proj_proj(res);
+ assert(proj < n_res);
+ assert(res_projs[proj] == NULL);
+ res_projs[proj] = res;
}
+ res_proj = irn;
+ break;
}
- curr_res_proj++;
- obstack_ptr_grow(obst, NULL);
- res_projs = obstack_finish(obst);
+	/* TODO: this is not correct for cases where return values are passed
+	 * on the stack, but no known ABI does this currently...
+	 */
+ n_reg_results = n_res;
/* make the back end call node and set its register requirements. */
for (i = 0; i < n_reg_params; ++i) {
/* direct call */
low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
curr_sp, curr_sp,
- curr_res_proj + pset_count(caller_save), n_ins,
- in, get_Call_type(irn));
+ n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
+ n_ins, in, get_Call_type(irn));
be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
} else {
/* indirect call */
low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
curr_sp, call_ptr,
- curr_res_proj + pset_count(caller_save),
+ n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
n_ins, in, get_Call_type(irn));
}
ARR_APP1(ir_node *, env->calls, low_call);
+ for(i = 0; i < n_res; ++i) {
+ int pn;
+ ir_node *proj = res_projs[i];
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+
+ /* returns values on stack not supported yet */
+ assert(arg->in_reg);
+
+ /*
+ shift the proj number to the right, since we will drop the
+ unspeakable Proj_T from the Call. Therefore, all real argument
+ Proj numbers must be increased by pn_be_Call_first_res
+ */
+ pn = i + pn_be_Call_first_res;
+
+ if(proj == NULL) {
+ ir_type *res_type = get_method_res_type(mt, i);
+ ir_mode *mode = get_type_mode(res_type);
+ proj = new_r_Proj(irg, bl, low_call, mode, pn);
+ res_projs[i] = proj;
+ } else {
+ set_Proj_pred(proj, low_call);
+ set_Proj_proj(proj, pn);
+ }
+
+ if (arg->in_reg) {
+ pset_remove_ptr(caller_save, arg->reg);
+ }
+ }
+
/*
Set the register class of the call address to
the backend provided class (default: stack pointer class)
}
/* Set the register constraints of the results. */
- for (i = 0; res_projs[i]; ++i) {
- int pn = get_Proj_proj(res_projs[i]);
-
- /* Correct Proj number since it has been adjusted! (see above) */
- const be_abi_call_arg_t *arg = get_call_arg(call, 1, pn - pn_Call_max);
-
- /* Matze: we need the information about the real mode for later
- * transforms (signed/unsigend compares, stores...), so leave the fixup
- * for the backend transform phase... */
-#if 0
- /* correct mode */
- const arch_register_class_t *cls = arch_register_get_class(arg->reg);
- ir_mode *mode = arch_register_class_mode(cls);
- set_irn_mode(irn, mode);
-#endif
+ for (i = 0; i < n_res; ++i) {
+ ir_node *proj = res_projs[i];
+ const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ int pn = get_Proj_proj(proj);
assert(arg->in_reg);
be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
- arch_set_irn_register(arch_env, res_projs[i], arg->reg);
+ arch_set_irn_register(arch_env, proj, arg->reg);
}
obstack_free(obst, in);
exchange(irn, low_call);
- /* redirect the result projs to the lowered call instead of the Proj_T */
- for (i = 0; res_projs[i]; ++i)
- set_Proj_pred(res_projs[i], low_call);
-
- /* set the now unnecessary projT to bad */
+ /* kill the ProjT node */
if (res_proj != NULL) {
be_kill_node(res_proj);
}
/* Make additional projs for the caller save registers
and the Keep node which keeps them alive. */
- if (pset_count(caller_save) > 0) {
+ if (pset_count(caller_save) + n_reg_results > 0) {
const arch_register_t *reg;
ir_node **in, *keep;
int i, n;
+ int curr_res_proj
+ = pn_be_Call_first_res + n_reg_results;
for (reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
- ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
+ ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode,
+ curr_res_proj);
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
+ arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
/* a call can produce ignore registers, in this case set the flag and register for the Proj */
if (arch_register_type_is(reg, ignore)) {
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
- be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj), arch_irn_flags_ignore);
+ be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj),
+ arch_irn_flags_ignore);
}
- set_irn_link(proj, (void *) reg);
+ set_irn_link(proj, (void*) reg);
obstack_ptr_grow(obst, proj);
curr_res_proj++;
}
+ for(i = 0; i < n_reg_results; ++i) {
+ ir_node *proj = res_projs[i];
+ const arch_register_t *reg = arch_get_irn_register(arch_env, proj);
+ set_irn_link(proj, (void*) reg);
+ obstack_ptr_grow(obst, proj);
+ }
+ n += n_reg_results;
+
/* create the Keep for the caller save registers */
in = (ir_node **) obstack_finish(obst);
keep = be_new_Keep(NULL, irg, bl, n, in);
if (! no_alloc) {
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
add_irn_dep(curr_sp, mem_proj);
- if (alloca_copy) {
- add_irn_dep(curr_sp, alloca_copy);
- alloca_copy = NULL;
- }
}
}
* Adjust an alloca.
* The alloca is transformed into a back end alloca node and connected to the stack nodes.
*/
-static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
{
ir_node *block;
ir_graph *irg;
dbg_info *dbg;
const ir_edge_t *edge;
- ir_node *new_alloc, *size, *addr, *copy, *ins[2];
+ ir_node *new_alloc, *size, *addr, *ins[2];
unsigned stack_alignment;
if (get_Alloc_where(alloc) != stack_alloc) {
/* we might need to multiply the size with the element size */
if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
- tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
+ tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
+ mode_Iu);
ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
- ir_node *mul = new_rd_Mul(dbg, irg, block, get_Alloc_size(alloc),
- cnst, mode_Iu);
+ ir_node *mul = new_rd_Mul(dbg, irg, block, get_Alloc_size(alloc),
+ cnst, mode_Iu);
size = mul;
} else {
size = get_Alloc_size(alloc);
/* fix projnum of alloca res */
set_Proj_proj(alloc_res, pn_be_AddSP_res);
- addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
-
- /* copy the address away, since it could be used after further stack pointer modifications. */
- /* Let it point curr_sp just for the moment, I'll reroute it in a second. */
- *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, block, curr_sp);
- set_irn_mode(copy, mode_P);
-
- /* Let all users of the Alloc() result now point to the copy. */
- edges_reroute(alloc_res, copy, irg);
-
- /* Rewire the copy appropriately. */
- set_irn_n(copy, be_pos_Copy_op, addr);
-
- curr_sp = alloc_res;
+ addr = alloc_res;
+ curr_sp = new_r_Proj(irg, block, new_alloc, get_irn_mode(curr_sp),
+ pn_be_AddSP_sp);
return curr_sp;
} /* adjust_alloc */
set_irn_dbg_info(subsp, dbg);
mem = new_r_Proj(irg, block, subsp, mode_M, pn_be_SubSP_M);
- res = new_r_Proj(irg, block, subsp, sp_mode, pn_be_SubSP_res);
+ res = new_r_Proj(irg, block, subsp, sp_mode, pn_be_SubSP_sp);
/* we need to sync the memory */
in[0] = get_Free_mem(free);
if(n > 0) {
ir_node *keep;
ir_node **nodes;
- ir_node *copy = NULL;
int i;
nodes = obstack_finish(&env->obst);
DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
switch(get_irn_opcode(irn)) {
case iro_Call:
- curr_sp = adjust_call(env, irn, curr_sp, copy);
+ curr_sp = adjust_call(env, irn, curr_sp);
break;
case iro_Alloc:
-			curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
+ curr_sp = adjust_alloc(env, irn, curr_sp);
break;
case iro_Free:
curr_sp = adjust_free(env, irn, curr_sp);
heights_free(ir_heights);
}
-#if 0 /*
-static ir_node *setup_frame(be_abi_irg_t *env)
-{
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
- const arch_register_t *sp = isa->sp;
- const arch_register_t *bp = isa->bp;
- be_abi_call_flags_bits_t flags = env->call->flags.bits;
- ir_graph *irg = env->birg->irg;
- ir_node *bl = get_irg_start_block(irg);
- ir_node *no_mem = get_irg_no_mem(irg);
- ir_node *old_frame = get_irg_frame(irg);
- ir_node *stack = pmap_get(env->regs, (void *) sp);
- ir_node *frame = pmap_get(env->regs, (void *) bp);
-
- int stack_nr = get_Proj_proj(stack);
-
- if(flags.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
- frame = stack;
- }
-
- else {
- frame = be_new_Copy(bp->reg_class, irg, bl, stack);
-
- be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
- if(!flags.fp_free) {
- be_set_constr_single_reg(frame, -1, bp);
- be_node_set_flags(frame, -1, arch_irn_flags_ignore);
- arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
- }
-
- stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
- }
-
- be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
- env->init_sp = stack;
- set_irg_frame(irg, frame);
- edges_reroute(old_frame, frame, irg);
-
- return frame;
-}
-
-static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
-{
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
- const arch_register_t *sp = isa->sp;
- const arch_register_t *bp = isa->bp;
- ir_graph *irg = env->birg->irg;
- ir_node *ret_mem = get_Return_mem(ret);
- ir_node *frame = get_irg_frame(irg);
- ir_node *bl = get_nodes_block(ret);
- ir_node *stack = get_irn_link(bl);
-
- pmap_entry *ent;
-
- if(env->call->flags.bits.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
- }
-
- else {
- stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
- be_set_constr_single_reg(stack, -1, sp);
- be_node_set_flags(stack, -1, arch_irn_flags_ignore);
- }
-
- pmap_foreach(env->regs, ent) {
- const arch_register_t *reg = ent->key;
- ir_node *irn = ent->value;
-
- if(reg == sp)
- obstack_ptr_grow(&env->obst, stack);
- else if(reg == bp)
- obstack_ptr_grow(&env->obst, frame);
- else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
- obstack_ptr_grow(obst, irn);
- }
-}
-*/
-#endif
-
/**
* Computes the stack argument layout type.
* Changes a possibly allocated value param type by moving
}
}
+#if 1
/**
* The start block has no jump, instead it has an initial exec Proj.
* The backend wants to handle all blocks the same way, so we replace
}
}
}
+#endif
/**
* Modify the irg itself and the frame type.
pset *dont_save = pset_new_ptr(8);
int n_params;
- int i, j, n, temp;
+ int i, j, n;
reg_node_map_t *rm;
const arch_register_t *fp_reg;
ir_node *frame_pointer;
- ir_node *barrier;
ir_node *reg_params_bl;
ir_node **args;
ir_node *arg_tuple;
env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
- env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
+ create_barrier(env, bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
}
else if(arg->on_stack) {
- /* For atomic parameters which are actually used, we create a StackParam node. */
+ ir_node *addr = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
+
+ /* For atomic parameters which are actually used, we create a Load node. */
if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
ir_mode *mode = get_type_mode(param_type);
- const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
- repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
+ ir_node *load = new_rd_Load(NULL, irg, reg_params_bl,
+ new_NoMem(), addr, mode);
+ repl = new_rd_Proj(NULL, irg, reg_params_bl, load,
+ mode, pn_Load_res);
}
/* The stack parameter is not primitive (it is a struct or array),
we thus will create a node representing the parameter's address
on the stack. */
else {
- repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
+ repl = addr;
}
}
which may be wrong. Add Conv's then. */
mode = get_irn_mode(args[i]);
if (mode != get_irn_mode(repl)) {
- repl = new_r_Conv(irg, get_nodes_block(repl), repl, mode);
+ repl = new_r_Conv(irg, get_irn_n(repl, -1), repl, mode);
}
exchange(args[i], repl);
}
obstack_free(&env->obst, args);
/* handle start block here (place a jump in the block) */
- temp = 0;
- irg_block_walk_graph(irg, fix_start_block, NULL, &temp);
+ i = 0;
+ irg_block_walk_graph(irg, fix_start_block, NULL, &i);
}
/** Fix the state inputs of calls that still hang on unknowns */
return pmap_get(abi->regs, (void *) reg);
}
-ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi)
-{
- return abi->start_barrier;
-}
-
/**
* Returns non-zero if the ABI has omitted the frame pointer in
* the current graph.