#include "config.h"
#include "obst.h"
-#include "offset.h"
#include "irgopt.h"
unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
+ unsigned callee : 1; /**< 1: someone called us. 0: We call another function */
int pos;
const arch_register_t *reg;
};
/**
- * The ABI information for the current birg.
+ * The ABI information for the current graph.
 */
struct _be_abi_irg_t {
- be_irg_t *birg; /**< The back end IRG. */
- ir_graph *irg;
- const arch_env_t *arch_env;
survive_dce_t *dce_survivor; /**< NOTE(review): presumably keeps nodes alive across dead node elimination (freed in be_abi_free) -- confirm */
be_abi_call_t *call; /**< The ABI call information. */
- ir_type *method_type; /**< The type of the method of the IRG. */
ir_node *init_sp; /**< The node representing the stack pointer
at the start of the function. */
ir_node **calls; /**< flexible array containing all be_Call nodes */
arch_register_req_t *sp_req; /**< Register requirement limited to the stack pointer register; built in be_abi_introduce. */
};
static heights_t *ir_heights;
{
const be_abi_call_arg_t *p = a, *q = b;
(void) n;
- return !(p->is_res == q->is_res && p->pos == q->pos);
+ return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
}
/**
* @param call the abi call
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
+ * @param callee true to query the callee's view of the argument, false for the caller's view
*/
-static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
+static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
{
be_abi_call_arg_t arg;
unsigned hash;
memset(&arg, 0, sizeof(arg));
arg.is_res = is_res;
arg.pos = pos;
+ arg.callee = callee;
hash = is_res * 128 + pos;
/**
 * Record an ABI call argument in the call's parameter set.
- *
- * @param call the abi call
- * @param is_res true for call results, false for call arguments
- * @param pos position of the argument
 */
-static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
+static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
{
- be_abi_call_arg_t arg;
- unsigned hash;
-
- memset(&arg, 0, sizeof(arg));
- arg.is_res = is_res;
- arg.pos = pos;
-
- hash = is_res * 128 + pos;
-
- return set_insert(call->params, &arg, sizeof(arg), hash);
/* One entry is inserted per requested context (callee and/or caller).
 * Both entries share the same hash, but the set's compare function also
 * keys on arg->callee, so the two views coexist in the set. */
+ unsigned hash = arg->is_res * 128 + arg->pos;
+ if (context & ABI_CONTEXT_CALLEE) {
+ arg->callee = 1;
+ set_insert(call->params, arg, sizeof(*arg), hash);
+ }
+ if (context & ABI_CONTEXT_CALLER) {
+ arg->callee = 0;
+ set_insert(call->params, arg, sizeof(*arg), hash);
+ }
}
/* Set the flags for a call. */
}
/* Record a call parameter that is passed on the stack; the entry is stored
 * via remember_call_arg for the callee and/or caller view selected by
 * 'context'. alignment must be > 0 (asserted below). */
-void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
+void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
+ ir_mode *load_mode, unsigned alignment,
+ unsigned space_before, unsigned space_after,
+ be_abi_context_t context)
{
- be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
- arg->on_stack = 1;
- arg->load_mode = load_mode;
- arg->alignment = alignment;
- arg->space_before = space_before;
- arg->space_after = space_after;
+ be_abi_call_arg_t arg;
+ memset(&arg, 0, sizeof(arg));
assert(alignment > 0 && "Alignment must be greater than 0");
+ arg.on_stack = 1;
+ arg.load_mode = load_mode;
+ arg.alignment = alignment;
+ arg.space_before = space_before;
+ arg.space_after = space_after;
+ arg.is_res = 0;
+ arg.pos = arg_pos;
+
+ remember_call_arg(&arg, call, context);
}
/* Record a call parameter (is_res = 0) that is passed in register 'reg',
 * for the callee and/or caller view selected by 'context'. */
-void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
+void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
- be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
- arg->in_reg = 1;
- arg->reg = reg;
+ be_abi_call_arg_t arg;
+ memset(&arg, 0, sizeof(arg));
+
+ arg.in_reg = 1;
+ arg.reg = reg;
+ arg.is_res = 0;
+ arg.pos = arg_pos;
+
+ remember_call_arg(&arg, call, context);
}
/* Record a call result (is_res = 1) that is returned in register 'reg',
 * for the callee and/or caller view selected by 'context'. */
-void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
+void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
- be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
- arg->in_reg = 1;
- arg->reg = reg;
+ be_abi_call_arg_t arg;
+ memset(&arg, 0, sizeof(arg));
+
+ arg.in_reg = 1;
+ arg.reg = reg;
+ arg.is_res = 1;
+ arg.pos = arg_pos;
+
+ remember_call_arg(&arg, call, context);
}
/* Get the flags of a ABI call object. */
if (stack_dir > 0) {
frame->order[0] = args;
frame->order[2] = locals;
- }
- else {
+ } else {
/* typical decreasing stack: locals have the
* lowest addresses, arguments the highest */
frame->order[0] = locals;
return frame;
}
-/**
- * Returns non-zero if the call argument at given position
- * is transfered on the stack.
- */
-static inline int is_on_stack(be_abi_call_t *call, int pos)
-{
- be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
- return arg && !arg->in_reg;
-}
-
/*
____ _ _
/ ___|__ _| | |___
*/
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
- ir_graph *irg = env->birg->irg;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ ir_graph *irg = get_irn_irg(irn);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
ir_type *call_tp = get_Call_type(irn);
ir_node *call_ptr = get_Call_ptr(irn);
int n_params = get_method_n_params(call_tp);
const arch_register_t *sp = arch_env->sp;
be_abi_call_t *call = be_abi_call_new(sp->reg_class);
ir_mode *mach_mode = sp->reg_class->mode;
- struct obstack *obst = be_get_birg_obst(irg);
int no_alloc = call->flags.bits.frame_is_setup_on_call;
int n_res = get_method_n_ress(call_tp);
int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
/* Insert code to put the stack arguments on the stack. */
assert(get_Call_n_params(irn) == n_params);
- assert(obstack_object_size(obst) == 0);
stack_param_idx = ALLOCAN(int, n_params);
for (i = 0; i < n_params; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
assert(arg);
if (arg->on_stack) {
int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
/* Collect all arguments which are passed in registers. */
reg_param_idxs = ALLOCAN(int, n_params);
for (i = 0; i < n_params; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
if (arg && arg->in_reg) {
reg_param_idxs[n_reg_params++] = i;
}
for (i = 0; i < n_stack_params; ++i) {
int p = stack_param_idx[i];
- be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
ir_node *param = get_Call_param(irn, p);
ir_node *addr = curr_sp;
ir_node *mem = NULL;
*/
n_reg_results = n_res;
- assert(obstack_object_size(obst) == 0);
n_ins = 0;
in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
for (i = 0; i < n_res; ++i) {
int pn;
ir_node *proj = res_projs[i];
- be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
/* returns values on stack not supported yet */
assert(arg->in_reg);
/* Set the register classes and constraints of the Call parameters. */
for (i = 0; i < n_reg_params; ++i) {
int index = reg_param_idxs[i];
- be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
assert(arg->reg != NULL);
be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
/* Set the register constraints of the results. */
for (i = 0; i < n_res; ++i) {
ir_node *proj = res_projs[i];
- const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
int pn = get_Proj_proj(proj);
assert(arg->in_reg);
*/
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
{
- ir_node *block;
- ir_graph *irg;
- ir_node *alloc_mem;
- ir_node *alloc_res;
- ir_type *type;
- dbg_info *dbg;
+ ir_node *block = get_nodes_block(alloc);
+ ir_graph *irg = get_Block_irg(block);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ir_node *alloc_mem = NULL;
+ ir_node *alloc_res = NULL;
+ ir_type *type = get_Alloc_type(alloc);
+ dbg_info *dbg;
const ir_edge_t *edge;
ir_node *new_alloc;
ir_node *count;
ir_node *size;
- ir_node *addr;
ir_node *ins[2];
unsigned stack_alignment;
+ /* all non-stack Alloc nodes should already be lowered before the backend */
assert(get_Alloc_where(alloc) == stack_alloc);
- block = get_nodes_block(alloc);
- irg = get_Block_irg(block);
- alloc_mem = NULL;
- alloc_res = NULL;
- type = get_Alloc_type(alloc);
-
foreach_out_edge(alloc, edge) {
ir_node *irn = get_edge_src_irn(edge);
We cannot omit it. */
env->call->flags.bits.try_omit_fp = 0;
- stack_alignment = 1 << env->arch_env->stack_alignment;
+ stack_alignment = 1 << arch_env->stack_alignment;
size = adjust_alloc_size(stack_alignment, size, block, dbg);
- new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
+ new_alloc = be_new_AddSP(arch_env->sp, block, curr_sp, size);
set_irn_dbg_info(new_alloc, dbg);
if (alloc_mem != NULL) {
/* fix projnum of alloca res */
set_Proj_proj(alloc_res, pn_be_AddSP_res);
- addr = alloc_res;
curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
return curr_sp;
*/
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
{
- ir_node *block;
- ir_graph *irg;
- ir_node *subsp, *mem, *res, *size, *sync;
- ir_type *type;
+ ir_node *block = get_nodes_block(free);
+ ir_graph *irg = get_irn_irg(free);
+ ir_type *type = get_Free_type(free);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ir_mode *sp_mode = arch_env->sp->reg_class->mode;
+ dbg_info *dbg = get_irn_dbg_info(free);
+ ir_node *subsp, *mem, *res, *size, *sync;
ir_node *in[2];
- ir_mode *sp_mode;
unsigned stack_alignment;
- dbg_info *dbg;
+ /* all non-stack-alloc Free nodes should already be lowered before the
+ * backend phase */
assert(get_Free_where(free) == stack_alloc);
- block = get_nodes_block(free);
- irg = get_irn_irg(block);
- type = get_Free_type(free);
- sp_mode = env->arch_env->sp->reg_class->mode;
- dbg = get_irn_dbg_info(free);
-
/* we might need to multiply the size with the element size */
if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
size = get_Free_size(free);
}
- stack_alignment = 1 << env->arch_env->stack_alignment;
+ stack_alignment = 1 << arch_env->stack_alignment;
size = adjust_alloc_size(stack_alignment, size, block, dbg);
/* The stack pointer will be modified in an unknown manner.
We cannot omit it. */
env->call->flags.bits.try_omit_fp = 0;
- subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
+ subsp = be_new_SubSP(arch_env->sp, block, curr_sp, size);
set_irn_dbg_info(subsp, dbg);
mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
break;
default:
panic("invalid call");
- break;
}
}
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 *
 * @param irg  the graph whose calls are processed
 */
-static void process_calls(be_abi_irg_t *env)
+static void process_calls(ir_graph *irg)
{
- ir_graph *irg = env->birg->irg;
+ be_abi_irg_t *abi = be_get_irg_abi(irg);
- env->call->flags.bits.irg_is_leaf = 1;
- irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
/* assume a leaf graph until the walker finds a call */
+ abi->call->flags.bits.irg_is_leaf = 1;
+ irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi);
/* ir_heights is a file-scope static; it is only valid during this
 * block walk and freed immediately afterwards */
- ir_heights = heights_new(env->birg->irg);
- irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
+ ir_heights = heights_new(irg);
+ irg_block_walk_graph(irg, NULL, process_ops_in_block, abi);
heights_free(ir_heights);
}
*
* @return the stack argument layout type
*/
-static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
+static ir_type *compute_arg_type(be_abi_irg_t *env, ir_graph *irg,
+ be_abi_call_t *call,
ir_type *method_type, ir_type *val_param_tp,
ir_entity ***param_map)
{
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
int dir = env->call->flags.bits.left_to_right ? 1 : -1;
- int inc = env->birg->main_env->arch_env->stack_dir * dir;
+ int inc = arch_env->stack_dir * dir;
int n = get_method_n_params(method_type);
int curr = inc > 0 ? 0 : n - 1;
- struct obstack *obst = be_get_birg_obst(env->irg);
+ struct obstack *obst = be_get_be_obst(irg);
int ofs = 0;
char buf[128];
ir_type *res;
int i;
- ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
+ ident *id = get_entity_ident(get_irg_entity(irg));
ir_entity **map;
*param_map = map = OALLOCN(obst, ir_entity*, n);
res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
for (i = 0; i < n; ++i, curr += inc) {
ir_type *param_type = get_method_param_type(method_type, curr);
- be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);
map[i] = NULL;
if (arg->on_stack) {
ir_node *mem, int n_res)
{
be_abi_call_t *call = env->call;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ ir_graph *irg = get_Block_irg(bl);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
dbg_info *dbgi;
pmap *reg_map = pmap_create();
ir_node *keep = pmap_get(env->keep_map, bl);
if (keep) {
stack = get_irn_n(keep, 0);
kill_node(keep);
- remove_End_keepalive(get_irg_end(env->birg->irg), keep);
+ remove_End_keepalive(get_irg_end(irg), keep);
}
/* Insert results for Return into the register map. */
for (i = 0; i < n_res; ++i) {
ir_node *res = get_Return_res(irn, i);
- be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
assert(arg->in_reg && "return value must be passed in register");
pmap_insert(reg_map, (void *) arg->reg, res);
}
/* clear SP entry, since it has already been grown. */
pmap_insert(reg_map, (void *) arch_env->sp, NULL);
for (i = 0; i < n_res; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
in[n] = be_abi_reg_map_get(reg_map, arg->reg);
regs[n++] = arg->reg;
}
/* we have to pop the shadow parameter in in case of struct returns */
pop = call->pop;
- ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
+ ret = be_new_Return(dbgi, irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
for (i = 0; i < n; ++i) {
* In the default case we move the entity to the frame type and create
* a backing store into the first block.
*/
-static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
+static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_graph *irg,
+ ent_pos_pair *value_param_list)
{
- be_abi_call_t *call = env->call;
- ir_graph *irg = env->birg->irg;
+ be_abi_call_t *call = env->call;
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
ent_pos_pair *entry, *new_list;
ir_type *frame_tp;
int i, n = ARR_LEN(value_param_list);
new_list = NULL;
for (i = 0; i < n; ++i) {
int pos = value_param_list[i].pos;
- be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);
if (arg->in_reg) {
DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
if (new_list != NULL) {
/* ok, change the graph */
ir_node *start_bl = get_irg_start_block(irg);
- ir_node *first_bl = NULL;
- ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
- const ir_edge_t *edge;
+ ir_node *first_bl = get_first_block_succ(start_bl);
+ ir_node *frame, *imem, *nmem, *store, *mem, *args;
optimization_state_t state;
unsigned offset;
- foreach_block_succ(start_bl, edge) {
- first_bl = get_edge_src_irn(edge);
- break;
- }
assert(first_bl && first_bl != start_bl);
/* we had already removed critical edges, so the following
assertion should be always true. */
store = NULL;
mem = imem;
args = get_irg_args(irg);
- args_bl = get_nodes_block(args);
for (entry = new_list; entry != NULL; entry = entry->next) {
int i = entry->pos;
ir_type *tp = get_entity_type(entry->ent);
ir_node *addr;
/* address for the backing store */
- addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
+ addr = be_new_FrameAddr(arch_env->sp->reg_class, first_bl, frame, entry->ent);
if (store)
mem = new_r_Proj(store, mode_M, pn_Store_M);
for (entry = new_list; entry != NULL; entry = entry->next) {
ir_entity *ent = entry->ent;
- /* If the entity is still on the argument type, move it to the frame type.
- This happens if the value_param type was build due to compound
- params. */
+ /* If the entity is still on the argument type, move it to the
+ * frame type.
+ * This happens if the value_param type was built due to compound
+ * params. */
if (get_entity_owner(ent) != frame_tp) {
ir_type *tp = get_entity_type(ent);
unsigned align = get_type_alignment_bytes(tp);
offset += align - 1;
offset &= ~(align - 1);
set_entity_owner(ent, frame_tp);
- add_class_member(frame_tp, ent);
/* must be automatic to set a fixed layout */
set_entity_offset(ent, offset);
offset += get_type_size_bytes(tp);
/**
* Modify the irg itself and the frame type.
*/
-static void modify_irg(be_abi_irg_t *env)
+static void modify_irg(ir_graph *irg)
{
- be_abi_call_t *call = env->call;
- const arch_env_t *arch_env= env->birg->main_env->arch_env;
- const arch_register_t *sp = arch_env->sp;
- ir_graph *irg = env->birg->irg;
+ be_abi_irg_t *env = be_get_irg_abi(irg);
+ be_abi_call_t *call = env->call;
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ const arch_register_t *sp = arch_env->sp;
+ ir_type *method_type = get_entity_type(get_irg_entity(irg));
+ struct obstack *obst = be_get_be_obst(irg);
+ be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
ir_node *end;
ir_node *old_mem;
ir_node *new_mem_proj;
ir_node *mem;
- ir_type *method_type = get_entity_type(get_irg_entity(irg));
- struct obstack *obst = be_get_birg_obst(irg);
int n_params;
int i, n;
}
}
- arg_type = compute_arg_type(env, call, method_type, tp, ¶m_map);
+ arg_type = compute_arg_type(env, irg, call, method_type, tp, ¶m_map);
/* Convert the Sel nodes in the irg to frame addr nodes: */
ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
ctx.frame = get_irg_frame(irg);
- ctx.sp_class = env->arch_env->sp->reg_class;
- ctx.link_class = env->arch_env->link_class;
+ ctx.sp_class = arch_env->sp->reg_class;
+ ctx.link_class = arch_env->link_class;
ctx.frame_tp = get_irg_frame_type(irg);
/* layout the stackframe now */
* In the default case we move the entity to the frame type and create
* a backing store into the first block.
*/
- fix_address_of_parameter_access(env, ctx.value_param_list);
+ fix_address_of_parameter_access(env, irg, ctx.value_param_list);
DEL_ARR_F(ctx.value_param_list);
irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
}
bet_type = call->cb->get_between_type(env->cb);
- stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
+ stack_frame_init(stack_layout, arg_type, bet_type,
+ get_irg_frame_type(irg), arch_env->stack_dir, param_map);
+ stack_layout->sp_relative = call->flags.bits.try_omit_fp;
/* Count the register params and add them to the number of Projs for the RegParams node */
for (i = 0; i < n_params; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
if (arg->in_reg && args[i]) {
assert(arg->reg != sp && "cannot use stack pointer as parameter register");
assert(i == get_Proj_proj(args[i]));
set_irg_initial_mem(irg, mem);
/* Generate the Prologue */
- fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
+ fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &stack_layout->initial_bias);
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
ir_mode *mode;
nr = MIN(nr, n_params);
- arg = get_call_arg(call, 0, nr);
+ arg = get_call_arg(call, 0, nr, 1);
param_type = get_method_param_type(method_type, nr);
if (arg->in_reg) {
}
/** Fix the state inputs of calls that still hang on unknowns */
-static void fix_call_state_inputs(be_abi_irg_t *env)
+static void fix_call_state_inputs(ir_graph *irg)
{
- const arch_env_t *arch_env = env->arch_env;
+ be_abi_irg_t *env = be_get_irg_abi(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
int i, n, n_states;
arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
- ir_graph *irg;
ir_node *pic_base;
ir_node *add;
ir_node *block;
ir_mode *mode;
ir_node *load;
ir_node *load_res;
- be_abi_irg_t *env = data;
+ ir_graph *irg = get_irn_irg(node);
int arity, i;
- be_main_env_t *be = env->birg->main_env;
+ be_main_env_t *be = be_get_irg_main_env(irg);
+ (void) data;
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
entity = get_SymConst_entity(pred);
block = get_nodes_block(pred);
- irg = get_irn_irg(pred);
/* calls can jump to relative addresses, so we can directly jump to
the (relatively) known call address or the trampoline */
/* everything else is accessed relative to EIP */
mode = get_irn_mode(pred);
- pic_base = arch_code_generator_get_pic_base(env->birg->cg);
+ pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(irg));
/* all ok now for locally constructed stuff */
if (can_address_relative(entity)) {
dbgi = get_irn_dbg_info(pred);
pic_symbol = get_pic_symbol(be, entity);
pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
- pic_symbol, NULL);
+ pic_symbol, NULL);
add = new_r_Add(block, pic_base, pic_symconst, mode);
mark_irn_visited(add);
}
}
-be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
+be_abi_irg_t *be_abi_introduce(ir_graph *irg)
{
- be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
- ir_node *old_frame = get_irg_frame(birg->irg);
- ir_graph *irg = birg->irg;
- struct obstack *obst = be_get_birg_obst(irg);
+ be_abi_irg_t *env = XMALLOCZ(be_abi_irg_t);
+ ir_node *old_frame = get_irg_frame(irg);
+ struct obstack *obst = be_get_be_obst(irg);
+ be_options_t *options = be_get_irg_options(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ir_entity *entity = get_irg_entity(irg);
+ ir_type *method_type = get_entity_type(entity);
pmap_entry *ent;
ir_node *dummy;
unsigned *limited_bitset;
arch_register_req_t *sp_req;
- be_omit_fp = birg->main_env->options->omit_fp;
- be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
+ be_omit_fp = options->omit_fp;
+ be_omit_leaf_fp = options->omit_leaf_fp;
obstack_init(obst);
- env->arch_env = birg->main_env->arch_env;
- env->method_type = get_entity_type(get_irg_entity(irg));
- env->call = be_abi_call_new(env->arch_env->sp->reg_class);
- arch_env_get_call_abi(env->arch_env, env->method_type, env->call);
-
env->ignore_regs = pset_new_ptr_default();
env->keep_map = pmap_create();
env->dce_survivor = new_survive_dce();
- env->birg = birg;
- env->irg = irg;
sp_req = OALLOCZ(obst, arch_register_req_t);
env->sp_req = sp_req;
sp_req->type = arch_register_req_type_limited
| arch_register_req_type_produces_sp;
- sp_req->cls = arch_register_get_class(env->arch_env->sp);
+ sp_req->cls = arch_register_get_class(arch_env->sp);
limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
- rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
+ rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
sp_req->limited = limited_bitset;
- if (env->arch_env->sp->type & arch_register_type_ignore) {
+ if (arch_env->sp->type & arch_register_type_ignore) {
sp_req->type |= arch_register_req_type_ignore;
}
- env->init_sp = dummy = new_r_Dummy(irg, env->arch_env->sp->reg_class->mode);
+ /* break here if backend provides a custom API.
+ * Note: we shouldn't have to setup any be_abi_irg_t* stuff at all,
+ * but need more cleanup to make this work
+ */
+ be_set_irg_abi(irg, env);
+ if (arch_env->custom_abi)
+ return env;
+
+ env->call = be_abi_call_new(arch_env->sp->reg_class);
+ arch_env_get_call_abi(arch_env, method_type, env->call);
- env->calls = NEW_ARR_F(ir_node*, 0);
+ env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
+ env->calls = NEW_ARR_F(ir_node*, 0);
- if (birg->main_env->options->pic) {
+ if (options->pic) {
irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
}
/* Lower all call nodes in the IRG. */
- process_calls(env);
+ process_calls(irg);
/*
Beware: init backend abi call object after processing calls,
otherwise some information might not yet be available.
*/
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+ env->cb = env->call->cb->init(env->call, irg);
/* Process the IRG */
- modify_irg(env);
+ modify_irg(irg);
/* fix call inputs for state registers */
- fix_call_state_inputs(env);
+ fix_call_state_inputs(irg);
/* We don't need the keep map anymore. */
pmap_destroy(env->keep_map);
return env;
}
/* Free the ABI environment attached to irg and clear the attachment.
 * The NULL checks are needed because for backends with a custom ABI
 * be_abi_introduce returns early (after XMALLOCZ) without setting up
 * call/ignore_regs/regs. */
-void be_abi_free(be_abi_irg_t *env)
+void be_abi_free(ir_graph *irg)
{
- be_abi_call_free(env->call);
+ be_abi_irg_t *env = be_get_irg_abi(irg);
+
+ if (env->call != NULL)
+ be_abi_call_free(env->call);
free_survive_dce(env->dce_survivor);
- del_pset(env->ignore_regs);
- pmap_destroy(env->regs);
+ if (env->ignore_regs != NULL)
+ del_pset(env->ignore_regs);
+ if (env->regs != NULL)
+ pmap_destroy(env->regs);
free(env);
+
+ be_set_irg_abi(irg, NULL);
}
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
}
}
-/* Returns the stack layout from a abi environment. */
-const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
-{
- return &abi->frame;
-}
-
/*
-
_____ _ ____ _ _
| ___(_)_ __ / ___|| |_ __ _ ___| | __
| |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
if (arch_irn_get_n_outs(insn) == 0)
return;
+ if (get_irn_mode(node) == mode_T)
+ return;
req = arch_get_register_req_out(node);
if (! (req->type & arch_register_req_type_produces_sp))
ARR_APP1(ir_node*, env->sp_nodes, node);
}
-void be_abi_fix_stack_nodes(be_abi_irg_t *env)
+void be_abi_fix_stack_nodes(ir_graph *irg)
{
+ be_abi_irg_t *abi = be_get_irg_abi(irg);
+ be_lv_t *lv = be_get_irg_liveness(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
be_ssa_construction_env_t senv;
int i, len;
ir_node **phis;
- be_irg_t *birg = env->birg;
- be_lv_t *lv = be_get_birg_liveness(birg);
fix_stack_walker_env_t walker_env;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
- irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
+ irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);
/* nothing to be done if we didn't find any node, in fact we mustn't
* continue, as for endless loops incsp might have had no users and is bad
return;
}
- be_ssa_construction_init(&senv, birg);
+ be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
ARR_LEN(walker_env.sp_nodes));
be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
len = ARR_LEN(phis);
for (i = 0; i < len; ++i) {
ir_node *phi = phis[i];
- be_set_phi_reg_req(phi, env->sp_req);
- arch_set_irn_register(phi, env->arch_env->sp);
+ be_set_phi_reg_req(phi, abi->sp_req);
+ arch_set_irn_register(phi, arch_env->sp);
}
be_ssa_construction_destroy(&senv);
/**
* Fix all stack accessing operations in the block bl.
*
- * @param env the abi environment
* @param bl the block to process
* @param real_bias the bias value
*
* @return the bias at the end of this block
*/
-static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
+static int process_stack_bias(ir_node *bl, int real_bias)
{
- int omit_fp = env->call->flags.bits.try_omit_fp;
- ir_node *irn;
- int wanted_bias = real_bias;
+ int wanted_bias = real_bias;
+ ir_graph *irg = get_Block_irg(bl);
+ be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
+ bool sp_relative = layout->sp_relative;
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ir_node *irn;
sched_foreach(bl, irn) {
int ofs;
*/
ir_entity *ent = arch_get_frame_entity(irn);
if (ent != NULL) {
- int bias = omit_fp ? real_bias : 0;
- int offset = get_stack_entity_offset(&env->frame, ent, bias);
+ int bias = sp_relative ? real_bias : 0;
+ int offset = get_stack_entity_offset(layout, ent, bias);
arch_set_frame_offset(irn, offset);
DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
ent, offset, bias));
if (be_is_IncSP(irn)) {
/* fill in real stack frame size */
if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
- ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ir_type *frame_type = get_irg_frame_type(irg);
ofs = (int) get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
- ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ir_type *frame_type = get_irg_frame_type(irg);
ofs = - (int)get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else {
if (be_get_IncSP_align(irn)) {
/* patch IncSP to produce an aligned stack pointer */
- ir_type *between_type = env->frame.between_type;
+ ir_type *between_type = layout->between_type;
int between_size = get_type_size_bytes(between_type);
- int alignment = 1 << env->arch_env->stack_alignment;
+ int alignment = 1 << arch_env->stack_alignment;
int delta = (real_bias + ofs + between_size) & (alignment - 1);
assert(ofs >= 0);
if (delta > 0) {
* A helper struct for the bias walker.
*/
struct bias_walk {
- be_abi_irg_t *env; /**< The ABI irg environment. */
int start_block_bias; /**< The bias at the end of the start block. */
int between_size;
ir_node *start_block; /**< The start block of the current graph. */
{
struct bias_walk *bw = data;
if (bl != bw->start_block) {
- process_stack_bias(bw->env, bl, bw->start_block_bias);
+ process_stack_bias(bl, bw->start_block_bias);
}
}
*/
static void lower_outer_frame_sels(ir_node *sel, void *ctx)
{
- be_abi_irg_t *env = ctx;
- ir_node *ptr;
- ir_entity *ent;
- ir_type *owner;
+ ir_node *ptr;
+ ir_entity *ent;
+ ir_type *owner;
+ be_stack_layout_t *layout;
+ ir_graph *irg;
+ (void) ctx;
if (! is_Sel(sel))
return;
- ent = get_Sel_entity(sel);
- owner = get_entity_owner(ent);
- ptr = get_Sel_ptr(sel);
+ ent = get_Sel_entity(sel);
+ owner = get_entity_owner(ent);
+ ptr = get_Sel_ptr(sel);
+ irg = get_irn_irg(sel);
+ layout = be_get_irg_stack_layout(irg);
- if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
+ if (owner == layout->frame_type || owner == layout->arg_type) {
/* found access to outer frame or arguments */
- int offset = get_stack_entity_offset(&env->frame, ent, 0);
+ int offset = get_stack_entity_offset(layout, ent, 0);
if (offset != 0) {
ir_node *bl = get_nodes_block(sel);
}
}
-void be_abi_fix_stack_bias(be_abi_irg_t *env)
+void be_abi_fix_stack_bias(ir_graph *irg)
{
- ir_graph *irg = env->birg->irg;
+ be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
ir_type *frame_tp;
int i;
struct bias_walk bw;
- stack_frame_compute_initial_offset(&env->frame);
- // stack_layout_dump(stdout, frame);
+ stack_frame_compute_initial_offset(stack_layout);
+ // stack_layout_dump(stdout, stack_layout);
/* Determine the stack bias at the end of the start block. */
- bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
- bw.between_size = get_type_size_bytes(env->frame.between_type);
+ bw.start_block_bias = process_stack_bias(get_irg_start_block(irg),
+ stack_layout->initial_bias);
+ bw.between_size = get_type_size_bytes(stack_layout->between_type);
/* fix the bias is all other blocks */
- bw.env = env;
bw.start_block = get_irg_start_block(irg);
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
ir_graph *irg = get_entity_irg(ent);
if (irg != NULL) {
- irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
+ irg_walk_graph(irg, NULL, lower_outer_frame_sels, NULL);
}
}
}
return pmap_get(abi->regs, (void *) reg);
}
-/**
- * Returns non-zero if the ABI has omitted the frame pointer in
- * the current graph.
- */
-int be_abi_omit_fp(const be_abi_irg_t *abi)
-{
- return abi->call->flags.bits.try_omit_fp;
-}
-
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
/* Module constructor: registers the "firm.be.abi" debug mask. */
void be_init_abi(void)
{
FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);