#include "config.h"
#include "obst.h"
-#include "offset.h"
#include "irgopt.h"
#include "irprintf_t.h"
#include "irgopt.h"
#include "irbitset.h"
+#include "iropt_t.h"
#include "height.h"
#include "pdeq.h"
#include "irtools.h"
unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
+ unsigned callee : 1; /**< 1: someone called us (we are the callee). 0: we call another function (we are the caller). */
int pos;
const arch_register_t *reg;
};
/**
- * The ABI information for the current birg.
+ * The ABI information for the current graph.
*/
struct _be_abi_irg_t {
- be_irg_t *birg; /**< The back end IRG. */
ir_graph *irg;
const arch_env_t *arch_env;
survive_dce_t *dce_survivor;
{
const be_abi_call_arg_t *p = a, *q = b;
(void) n;
- return !(p->is_res == q->is_res && p->pos == q->pos);
+ return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
}
/**
* @param call the abi call
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
+ * @param callee context type - non-zero if we are the callee, 0 if we are the caller
*/
-static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
+static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
{
be_abi_call_arg_t arg;
unsigned hash;
memset(&arg, 0, sizeof(arg));
arg.is_res = is_res;
arg.pos = pos;
+ arg.callee = callee;
hash = is_res * 128 + pos;
/**
* Set an ABI call object argument.
- *
- * @param call the abi call
- * @param is_res true for call results, false for call arguments
- * @param pos position of the argument
*/
-static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
+static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
{
- be_abi_call_arg_t arg;
- unsigned hash;
-
- memset(&arg, 0, sizeof(arg));
- arg.is_res = is_res;
- arg.pos = pos;
-
- hash = is_res * 128 + pos;
-
- return set_insert(call->params, &arg, sizeof(arg), hash);
+ unsigned hash = arg->is_res * 128 + arg->pos;
+ if (context & ABI_CONTEXT_CALLEE) {
+ arg->callee = 1;
+ set_insert(call->params, arg, sizeof(*arg), hash);
+ }
+ if (context & ABI_CONTEXT_CALLER) {
+ arg->callee = 0;
+ set_insert(call->params, arg, sizeof(*arg), hash);
+ }
}
/* Set the flags for a call. */
}
-void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
+void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
+ ir_mode *load_mode, unsigned alignment,
+ unsigned space_before, unsigned space_after,
+ be_abi_context_t context)
{
- be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
- arg->on_stack = 1;
- arg->load_mode = load_mode;
- arg->alignment = alignment;
- arg->space_before = space_before;
- arg->space_after = space_after;
+ be_abi_call_arg_t arg;
+ memset(&arg, 0, sizeof(arg));
assert(alignment > 0 && "Alignment must be greater than 0");
+ arg.on_stack = 1;
+ arg.load_mode = load_mode;
+ arg.alignment = alignment;
+ arg.space_before = space_before;
+ arg.space_after = space_after;
+ arg.is_res = 0;
+ arg.pos = arg_pos;
+
+ remember_call_arg(&arg, call, context);
}
-void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
+void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
- be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
- arg->in_reg = 1;
- arg->reg = reg;
+ be_abi_call_arg_t arg;
+ memset(&arg, 0, sizeof(arg));
+
+ arg.in_reg = 1;
+ arg.reg = reg;
+ arg.is_res = 0;
+ arg.pos = arg_pos;
+
+ remember_call_arg(&arg, call, context);
}
-void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
+void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
- be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
- arg->in_reg = 1;
- arg->reg = reg;
+ be_abi_call_arg_t arg;
+ memset(&arg, 0, sizeof(arg));
+
+ arg.in_reg = 1;
+ arg.reg = reg;
+ arg.is_res = 1;
+ arg.pos = arg_pos;
+
+ remember_call_arg(&arg, call, context);
}
/* Get the flags of a ABI call object. */
return frame;
}
-/**
- * Returns non-zero if the call argument at given position
- * is transfered on the stack.
- */
-static inline int is_on_stack(be_abi_call_t *call, int pos)
-{
- be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
- return arg && !arg->in_reg;
-}
-
/*
____ _ _
/ ___|__ _| | |___
*/
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
- ir_graph *irg = env->birg->irg;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ ir_graph *irg = env->irg;
+ const arch_env_t *arch_env = env->arch_env;
ir_type *call_tp = get_Call_type(irn);
ir_node *call_ptr = get_Call_ptr(irn);
int n_params = get_method_n_params(call_tp);
const arch_register_t *sp = arch_env->sp;
be_abi_call_t *call = be_abi_call_new(sp->reg_class);
ir_mode *mach_mode = sp->reg_class->mode;
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
int no_alloc = call->flags.bits.frame_is_setup_on_call;
int n_res = get_method_n_ress(call_tp);
int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
assert(obstack_object_size(obst) == 0);
stack_param_idx = ALLOCAN(int, n_params);
for (i = 0; i < n_params; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
assert(arg);
if (arg->on_stack) {
int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
/* Collect all arguments which are passed in registers. */
reg_param_idxs = ALLOCAN(int, n_params);
for (i = 0; i < n_params; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
if (arg && arg->in_reg) {
reg_param_idxs[n_reg_params++] = i;
}
for (i = 0; i < n_stack_params; ++i) {
int p = stack_param_idx[i];
- be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
ir_node *param = get_Call_param(irn, p);
ir_node *addr = curr_sp;
ir_node *mem = NULL;
ir_node *store;
ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
- mem = new_r_Proj(bl, store, mode_M, pn_Store_M);
+ mem = new_r_Proj(store, mode_M, pn_Store_M);
} else {
/* Make a mem copy for compound arguments. */
ir_node *copy;
assert(mode_is_reference(get_irn_mode(param)));
copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
- mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);
+ mem = new_r_Proj(copy, mode_M, pn_CopyB_M_regular);
}
curr_ofs += param_size;
ARR_APP1(ir_node *, env->calls, low_call);
/* create new stack pointer */
- curr_sp = new_r_Proj(bl, low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
+ curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
arch_register_req_type_ignore | arch_register_req_type_produces_sp);
arch_set_irn_register(curr_sp, sp);
for (i = 0; i < n_res; ++i) {
int pn;
ir_node *proj = res_projs[i];
- be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
/* returns values on stack not supported yet */
assert(arg->in_reg);
if (proj == NULL) {
ir_type *res_type = get_method_res_type(call_tp, i);
ir_mode *mode = get_type_mode(res_type);
- proj = new_r_Proj(bl, low_call, mode, pn);
+ proj = new_r_Proj(low_call, mode, pn);
res_projs[i] = proj;
} else {
set_Proj_pred(proj, low_call);
/* Set the register classes and constraints of the Call parameters. */
for (i = 0; i < n_reg_params; ++i) {
int index = reg_param_idxs[i];
- be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
assert(arg->reg != NULL);
be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
/* Set the register constraints of the results. */
for (i = 0; i < n_res; ++i) {
ir_node *proj = res_projs[i];
- const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
int pn = get_Proj_proj(proj);
assert(arg->in_reg);
in[n++] = curr_sp;
foreach_pset_new(&destroyed_regs, reg, iter) {
- ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj);
+ ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
}
if (! mem_proj) {
- mem_proj = new_r_Proj(bl, low_call, mode_M, pn_be_Call_M_regular);
+ mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
keep_alive(mem_proj);
}
}
dbg_info *dbg;
const ir_edge_t *edge;
- ir_node *new_alloc, *size, *addr, *ins[2];
+ ir_node *new_alloc;
+ ir_node *count;
+ ir_node *size;
+ ir_node *ins[2];
unsigned stack_alignment;
assert(get_Alloc_where(alloc) == stack_alloc);
return curr_sp;
}
- dbg = get_irn_dbg_info(alloc);
- size = get_Alloc_size(alloc);
+ dbg = get_irn_dbg_info(alloc);
+ count = get_Alloc_count(alloc);
- /* we might need to multiply the size with the element size */
+ /* we might need to multiply the count by the element size */
if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
- ir_mode *mode = get_irn_mode(size);
+ ir_mode *mode = get_irn_mode(count);
tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
mode);
ir_node *cnst = new_rd_Const(dbg, irg, tv);
- size = new_rd_Mul(dbg, block, size, cnst, mode);
+ size = new_rd_Mul(dbg, block, count, cnst, mode);
+ } else {
+ size = count;
}
/* The stack pointer will be modified in an unknown manner.
ir_node *addsp_mem;
ir_node *sync;
- addsp_mem = new_r_Proj(block, new_alloc, mode_M, pn_be_AddSP_M);
+ addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);
/* We need to sync the output mem of the AddSP with the input mem
edge into the alloc node. */
/* fix projnum of alloca res */
set_Proj_proj(alloc_res, pn_be_AddSP_res);
- addr = alloc_res;
- curr_sp = new_r_Proj(block, new_alloc, get_irn_mode(curr_sp),
- pn_be_AddSP_sp);
+ curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
return curr_sp;
}
subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
set_irn_dbg_info(subsp, dbg);
- mem = new_r_Proj(block, subsp, mode_M, pn_be_SubSP_M);
- res = new_r_Proj(block, subsp, sp_mode, pn_be_SubSP_sp);
+ mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
+ res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);
/* we need to sync the memory */
in[0] = get_Free_mem(free);
break;
default:
panic("invalid call");
- break;
}
}
*/
static void process_calls(be_abi_irg_t *env)
{
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
env->call->flags.bits.irg_is_leaf = 1;
irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
- ir_heights = heights_new(env->birg->irg);
+ ir_heights = heights_new(env->irg);
irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
heights_free(ir_heights);
}
ir_entity ***param_map)
{
int dir = env->call->flags.bits.left_to_right ? 1 : -1;
- int inc = env->birg->main_env->arch_env->stack_dir * dir;
+ int inc = env->arch_env->stack_dir * dir;
int n = get_method_n_params(method_type);
int curr = inc > 0 ? 0 : n - 1;
- struct obstack *obst = be_get_birg_obst(env->irg);
+ struct obstack *obst = be_get_be_obst(env->irg);
int ofs = 0;
char buf[128];
ir_type *res;
int i;
- ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
+ ident *id = get_entity_ident(get_irg_entity(env->irg));
ir_entity **map;
*param_map = map = OALLOCN(obst, ir_entity*, n);
res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
for (i = 0; i < n; ++i, curr += inc) {
ir_type *param_type = get_method_param_type(method_type, curr);
- be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);
map[i] = NULL;
if (arg->on_stack) {
arg->stack_ent = copy_entity_own(val_ent, res);
set_entity_link(val_ent, arg->stack_ent);
set_entity_link(arg->stack_ent, NULL);
- /* must be automatic to set a fixed layout */
- set_entity_allocation(arg->stack_ent, allocation_automatic);
} else {
/* create a new entity */
snprintf(buf, sizeof(buf), "param_%d", i);
add_type |= arch_register_req_type_produces_sp;
}
- proj = new_r_Proj(bl, irn, get_irn_mode(pred), n);
+ proj = new_r_Proj(irn, get_irn_mode(pred), n);
be_node_set_reg_class_in(irn, n, reg->reg_class);
if (in_req)
be_set_constr_single_reg_in(irn, n, reg, 0);
}
if (mem) {
- *mem = new_r_Proj(bl, irn, mode_M, n);
+ *mem = new_r_Proj(irn, mode_M, n);
}
return irn;
ir_node *mem, int n_res)
{
be_abi_call_t *call = env->call;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ const arch_env_t *arch_env = env->arch_env;
dbg_info *dbgi;
pmap *reg_map = pmap_create();
ir_node *keep = pmap_get(env->keep_map, bl);
if (keep) {
stack = get_irn_n(keep, 0);
kill_node(keep);
- remove_End_keepalive(get_irg_end(env->birg->irg), keep);
+ remove_End_keepalive(get_irg_end(env->irg), keep);
}
/* Insert results for Return into the register map. */
for (i = 0; i < n_res; ++i) {
ir_node *res = get_Return_res(irn, i);
- be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
assert(arg->in_reg && "return value must be passed in register");
pmap_insert(reg_map, (void *) arg->reg, res);
}
/* clear SP entry, since it has already been grown. */
pmap_insert(reg_map, (void *) arch_env->sp, NULL);
for (i = 0; i < n_res; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
in[n] = be_abi_reg_map_get(reg_map, arg->reg);
regs[n++] = arg->reg;
}
/* we have to pop the shadow parameter in in case of struct returns */
pop = call->pop;
- ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
+ ret = be_new_Return(dbgi, env->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
for (i = 0; i < n; ++i) {
argument_ent = copy_entity_own(ent, frame_tp);
/* must be automatic to set a fixed layout */
- set_entity_allocation(argument_ent, allocation_automatic);
set_entity_offset(argument_ent, offset);
offset += get_type_size_bytes(tp);
static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
{
be_abi_call_t *call = env->call;
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
ent_pos_pair *entry, *new_list;
ir_type *frame_tp;
int i, n = ARR_LEN(value_param_list);
new_list = NULL;
for (i = 0; i < n; ++i) {
int pos = value_param_list[i].pos;
- be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);
if (arg->in_reg) {
DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
if (new_list != NULL) {
/* ok, change the graph */
ir_node *start_bl = get_irg_start_block(irg);
- ir_node *first_bl = NULL;
- ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
- const ir_edge_t *edge;
+ ir_node *first_bl = get_first_block_succ(start_bl);
+ ir_node *frame, *imem, *nmem, *store, *mem, *args;
optimization_state_t state;
unsigned offset;
- foreach_block_succ(start_bl, edge) {
- first_bl = get_edge_src_irn(edge);
- break;
- }
assert(first_bl && first_bl != start_bl);
/* we had already removed critical edges, so the following
assertion should be always true. */
save_optimization_state(&state);
set_optimize(0);
- nmem = new_r_Proj(start_bl, get_irg_start(irg), mode_M, pn_Start_M);
+ nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
restore_optimization_state(&state);
/* reroute all edges to the new memory source */
store = NULL;
mem = imem;
args = get_irg_args(irg);
- args_bl = get_nodes_block(args);
for (entry = new_list; entry != NULL; entry = entry->next) {
int i = entry->pos;
ir_type *tp = get_entity_type(entry->ent);
addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
if (store)
- mem = new_r_Proj(first_bl, store, mode_M, pn_Store_M);
+ mem = new_r_Proj(store, mode_M, pn_Store_M);
/* the backing store itself */
store = new_r_Store(first_bl, mem, addr,
- new_r_Proj(args_bl, args, mode, i), 0);
+ new_r_Proj(args, mode, i), 0);
}
/* the new memory Proj gets the last Proj from store */
set_Proj_pred(nmem, store);
for (entry = new_list; entry != NULL; entry = entry->next) {
ir_entity *ent = entry->ent;
- /* If the entity is still on the argument type, move it to the frame type.
- This happens if the value_param type was build due to compound
- params. */
+ /* If the entity is still on the argument type, move it to the
+ * frame type.
+ * This happens if the value_param type was built due to compound
+ * params. */
if (get_entity_owner(ent) != frame_tp) {
ir_type *tp = get_entity_type(ent);
unsigned align = get_type_alignment_bytes(tp);
offset += align - 1;
offset &= ~(align - 1);
set_entity_owner(ent, frame_tp);
- add_class_member(frame_tp, ent);
/* must be automatic to set a fixed layout */
- set_entity_allocation(ent, allocation_automatic);
set_entity_offset(ent, offset);
offset += get_type_size_bytes(tp);
}
/**
* Update the entity of Sels to the outer value parameters.
*/
-static void update_outer_frame_sels(ir_node *irn, void *env) {
+static void update_outer_frame_sels(ir_node *irn, void *env)
+{
lower_frame_sels_env_t *ctx = env;
ir_node *ptr;
ir_entity *ent;
if (! is_method_entity(ent))
continue;
- if (get_entity_peculiarity(ent) == peculiarity_description)
+
+ irg = get_entity_irg(ent);
+ if (irg == NULL)
continue;
/*
*/
ctx->static_link_pos = 0;
- irg = get_entity_irg(ent);
irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
}
}
static void modify_irg(be_abi_irg_t *env)
{
be_abi_call_t *call = env->call;
- const arch_env_t *arch_env= env->birg->main_env->arch_env;
+ const arch_env_t *arch_env= env->arch_env;
const arch_register_t *sp = arch_env->sp;
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
ir_node *end;
ir_node *old_mem;
ir_node *new_mem_proj;
ir_node *mem;
ir_type *method_type = get_entity_type(get_irg_entity(irg));
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
int n_params;
int i, n;
/* Count the register params and add them to the number of Projs for the RegParams node */
for (i = 0; i < n_params; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
if (arg->in_reg && args[i]) {
assert(arg->reg != sp && "cannot use stack pointer as parameter register");
assert(i == get_Proj_proj(args[i]));
add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
assert(nr >= 0);
- proj = new_r_Proj(start_bl, env->start, mode, nr + 1);
+ proj = new_r_Proj(env->start, mode, nr + 1);
pmap_insert(env->regs, (void *) reg, proj);
be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
arch_set_irn_register(proj, reg);
/* create a new initial memory proj */
assert(is_Proj(old_mem));
arch_set_out_register_req(env->start, 0, arch_no_register_req);
- new_mem_proj = new_r_Proj(start_bl, env->start, mode_M, 0);
+ new_mem_proj = new_r_Proj(env->start, mode_M, 0);
mem = new_mem_proj;
set_irg_initial_mem(irg, mem);
ir_mode *mode;
nr = MIN(nr, n_params);
- arg = get_call_arg(call, 0, nr);
+ arg = get_call_arg(call, 0, nr, 1);
param_type = get_method_param_type(method_type, nr);
if (arg->in_reg) {
ir_mode *load_mode = arg->load_mode;
ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
- repl = new_r_Proj(start_bl, load, load_mode, pn_Load_res);
+ repl = new_r_Proj(load, load_mode, pn_Load_res);
if (mode != load_mode) {
repl = new_r_Conv(start_bl, repl, mode);
}
/** Fix the state inputs of calls that still hang on unknowns */
-static
-void fix_call_state_inputs(be_abi_irg_t *env)
+static void fix_call_state_inputs(be_abi_irg_t *env)
{
const arch_env_t *arch_env = env->arch_env;
int i, n, n_states;
{
ir_type *type = get_entity_type(method);
ident *old_id = get_entity_ld_ident(method);
- ident *id = id_mangle3("L", old_id, "$stub");
+ ident *id = id_mangle3("", old_id, "$stub");
ir_type *parent = be->pic_trampolines_type;
ir_entity *ent = new_entity(parent, old_id, type);
set_entity_ld_ident(ent, id);
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_uninitialized);
+ set_entity_visibility(ent, ir_visibility_private);
return ent;
}
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
ident *old_id = get_entity_ld_ident(entity);
- ident *id = id_mangle3("L", old_id, "$non_lazy_ptr");
+ ident *id = id_mangle3("", old_id, "$non_lazy_ptr");
ir_type *e_type = get_entity_type(entity);
ir_type *type = new_type_pointer(e_type);
ir_type *parent = be->pic_symbols_type;
ir_entity *ent = new_entity(parent, old_id, type);
set_entity_ld_ident(ent, id);
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_uninitialized);
+ set_entity_visibility(ent, ir_visibility_private);
return ent;
}
*/
static int can_address_relative(ir_entity *entity)
{
- return get_entity_visibility(entity) != visibility_external_allocated;
+ return get_entity_visibility(entity) != ir_visibility_external
+ && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}
/** patches SymConsts to work in position independent code */
ir_node *pic_base;
ir_node *add;
ir_node *block;
- ir_node *unknown;
ir_mode *mode;
ir_node *load;
ir_node *load_res;
be_abi_irg_t *env = data;
int arity, i;
- be_main_env_t *be = env->birg->main_env;
+ be_main_env_t *be = be_birg_from_irg(env->irg)->main_env;
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
/* everything else is accessed relative to EIP */
mode = get_irn_mode(pred);
- unknown = new_r_Unknown(irg, mode);
- pic_base = arch_code_generator_get_pic_base(env->birg->cg);
+ pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(env->irg));
/* all ok now for locally constructed stuff */
if (can_address_relative(entity)) {
module. The loads are always safe and can therefore float
and need no memory input */
load = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
- load_res = new_r_Proj(block, load, mode, pn_Load_res);
+ load_res = new_r_Proj(load, mode, pn_Load_res);
set_irn_n(node, i, load_res);
}
}
-be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
+be_abi_irg_t *be_abi_introduce(ir_graph *irg)
{
- be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
- ir_node *old_frame = get_irg_frame(birg->irg);
- ir_graph *irg = birg->irg;
- struct obstack *obst = be_get_birg_obst(irg);
+ be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
+ ir_node *old_frame = get_irg_frame(irg);
+ struct obstack *obst = be_get_be_obst(irg);
+ be_options_t *options = be_get_irg_options(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
pmap_entry *ent;
ir_node *dummy;
- optimization_state_t state;
unsigned *limited_bitset;
arch_register_req_t *sp_req;
- be_omit_fp = birg->main_env->options->omit_fp;
- be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
+ be_omit_fp = options->omit_fp;
+ be_omit_leaf_fp = options->omit_leaf_fp;
obstack_init(obst);
- env->arch_env = birg->main_env->arch_env;
+ env->arch_env = arch_env;
env->method_type = get_entity_type(get_irg_entity(irg));
- env->call = be_abi_call_new(env->arch_env->sp->reg_class);
- arch_env_get_call_abi(env->arch_env, env->method_type, env->call);
+ env->call = be_abi_call_new(arch_env->sp->reg_class);
+ arch_env_get_call_abi(arch_env, env->method_type, env->call);
env->ignore_regs = pset_new_ptr_default();
env->keep_map = pmap_create();
env->dce_survivor = new_survive_dce();
- env->birg = birg;
env->irg = irg;
sp_req = OALLOCZ(obst, arch_register_req_t);
sp_req->type = arch_register_req_type_limited
| arch_register_req_type_produces_sp;
- sp_req->cls = arch_register_get_class(env->arch_env->sp);
+ sp_req->cls = arch_register_get_class(arch_env->sp);
limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
- rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
+ rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
sp_req->limited = limited_bitset;
- if (env->arch_env->sp->type & arch_register_type_ignore) {
+ if (arch_env->sp->type & arch_register_type_ignore) {
sp_req->type |= arch_register_req_type_ignore;
}
- /* Beware: later we replace this node by the real one, ensure it is not CSE'd
- to another Unknown or the stack pointer gets used */
- save_optimization_state(&state);
- set_optimize(0);
- env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
- restore_optimization_state(&state);
+ env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
env->calls = NEW_ARR_F(ir_node*, 0);
- if (birg->main_env->options->pic) {
+ if (options->pic) {
irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
}
Beware: init backend abi call object after processing calls,
otherwise some information might be not yet available.
*/
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+ env->cb = env->call->cb->init(env->call, arch_env, irg);
/* Process the IRG */
modify_irg(env);
if (arch_irn_get_n_outs(insn) == 0)
return;
+ if (get_irn_mode(node) == mode_T)
+ return;
req = arch_get_register_req_out(node);
if (! (req->type & arch_register_req_type_produces_sp))
be_ssa_construction_env_t senv;
int i, len;
ir_node **phis;
- be_irg_t *birg = env->birg;
- be_lv_t *lv = be_get_birg_liveness(birg);
+ ir_graph *irg = env->irg;
+ be_lv_t *lv = be_get_irg_liveness(irg);
fix_stack_walker_env_t walker_env;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
- irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
+ irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);
/* nothing to be done if we didn't find any node, in fact we mustn't
* continue, as for endless loops incsp might have had no users and is bad
return;
}
- be_ssa_construction_init(&senv, birg);
+ be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
ARR_LEN(walker_env.sp_nodes));
be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
if (be_is_IncSP(irn)) {
/* fill in real stack frame size */
if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
- ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ir_type *frame_type = get_irg_frame_type(env->irg);
ofs = (int) get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
- ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ir_type *frame_type = get_irg_frame_type(env->irg);
ofs = - (int)get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else {
* Walker: finally lower all Sels of outer frame or parameter
* entities.
*/
-static void lower_outer_frame_sels(ir_node *sel, void *ctx) {
+static void lower_outer_frame_sels(ir_node *sel, void *ctx)
+{
be_abi_irg_t *env = ctx;
ir_node *ptr;
ir_entity *ent;
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
ir_type *frame_tp;
int i;
struct bias_walk bw;
frame_tp = get_irg_frame_type(irg);
for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
ir_entity *ent = get_class_member(frame_tp, i);
+ ir_graph *irg = get_entity_irg(ent);
- if (is_method_entity(ent) && get_entity_peculiarity(ent) != peculiarity_description) {
- ir_graph *irg = get_entity_irg(ent);
-
+ if (irg != NULL) {
irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
}
}
return abi->call->flags.bits.try_omit_fp;
}
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
void be_init_abi(void)
{
FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);