#include "besched.h"
#include "beirg.h"
#include "bessaconstr.h"
+#include "bemodule.h"
+
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef struct _be_abi_call_arg_t {
unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
* The ABI information for the current birg.
*/
struct _be_abi_irg_t {
- struct obstack obst;
be_irg_t *birg; /**< The back end IRG. */
+ ir_graph *irg;
const arch_env_t *arch_env;
survive_dce_t *dce_survivor;
ir_node *init_sp; /**< The node representing the stack pointer
at the start of the function. */
- ir_node *reg_params; /**< The reg params node. */
+ ir_node *start; /**< The be_Start params node. */
pmap *regs; /**< A map of all callee-save and ignore regs to
their Projs to the RegParams node. */
arch_register_req_t *sp_req;
be_stack_layout_t frame; /**< The stack frame model. */
-
- DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
};
static heights_t *ir_heights;
ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
ir_entity *ent = search_ent_with_offset(base, 0);
- frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;
+ if (ent == NULL) {
+ frame->initial_offset
+ = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
+ } else {
+ frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
+ }
return frame->initial_offset;
}
return frame;
}
-#if 0
-/** Dumps the stack layout to file. */
-static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
-{
- int i, j, n;
-
- ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
- for (j = 0; j < N_FRAME_TYPES; ++j) {
- ir_type *t = frame->order[j];
-
- ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
- for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
- ir_entity *ent = get_compound_member(t, i);
- ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
- }
- }
-}
-#endif
-
/**
* Returns non-zero if the call argument at given position
* is transfered on the stack.
const arch_register_t *sp = arch_env->sp;
be_abi_call_t *call = be_abi_call_new(sp->reg_class);
ir_mode *mach_mode = sp->reg_class->mode;
- struct obstack *obst = &env->obst;
+ struct obstack *obst = be_get_birg_obst(irg);
int no_alloc = call->flags.bits.frame_is_setup_on_call;
int n_res = get_method_n_ress(call_tp);
int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
/* Insert code to put the stack arguments on the stack. */
assert(get_Call_n_params(irn) == n_params);
+ assert(obstack_object_size(obst) == 0);
+ stack_param_idx = ALLOCAN(int, n_params);
for (i = 0; i < n_params; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
assert(arg);
stack_size += round_up2(arg->space_before, arg->alignment);
stack_size += round_up2(arg_size, arg->alignment);
stack_size += round_up2(arg->space_after, arg->alignment);
- obstack_int_grow(obst, i);
- ++n_stack_params;
+
+ stack_param_idx[n_stack_params++] = i;
}
}
- stack_param_idx = obstack_finish(obst);
/* Collect all arguments which are passed in registers. */
+ reg_param_idxs = ALLOCAN(int, n_params);
for (i = 0; i < n_params; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
if (arg && arg->in_reg) {
- obstack_int_grow(obst, i);
- ++n_reg_params;
+ reg_param_idxs[n_reg_params++] = i;
}
}
- reg_param_idxs = obstack_finish(obst);
/*
* If the stack is decreasing and we do not want to store sequentially,
dbgi = get_irn_dbg_info(irn);
/* If there are some parameters which shall be passed on the stack. */
if (n_stack_params > 0) {
- int curr_ofs = 0;
+ int curr_ofs = 0;
+ ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
+ unsigned n_in = 0;
/*
* Reverse list of stack parameters if call arguments are from left to right.
curr_mem = get_Call_mem(irn);
if (! do_seq) {
- obstack_ptr_grow(obst, curr_mem);
+ in[n_in++] = curr_mem;
}
for (i = 0; i < n_stack_params; ++i) {
*/
if (do_seq) {
curr_ofs = 0;
- addr = curr_sp = be_new_IncSP(sp, bl, curr_sp, param_size + arg->space_before, 0);
+ addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
+ param_size + arg->space_before, 0);
add_irn_dep(curr_sp, curr_mem);
- }
- else {
+ } else {
curr_ofs += arg->space_before;
curr_ofs = round_up2(curr_ofs, arg->alignment);
ir_node *store;
ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
- mem = new_r_Proj(bl, store, mode_M, pn_Store_M);
- }
-
- /* Make a mem copy for compound arguments. */
- else {
+ mem = new_r_Proj(bl, store, mode_M, pn_Store_M);
+ } else {
+ /* Make a mem copy for compound arguments. */
ir_node *copy;
assert(mode_is_reference(get_irn_mode(param)));
if (do_seq)
curr_mem = mem;
else
- obstack_ptr_grow(obst, mem);
+ in[n_in++] = mem;
}
- in = (ir_node **) obstack_finish(obst);
-
/* We need the sync only, if we didn't build the stores sequentially. */
if (! do_seq) {
if (n_stack_params >= 1) {
- curr_mem = new_r_Sync(bl, n_stack_params + 1, in);
+ curr_mem = new_r_Sync(bl, n_in, in);
} else {
curr_mem = get_Call_mem(irn);
}
}
- obstack_free(obst, in);
}
/* check for the return_twice property */
*/
n_reg_results = n_res;
+ assert(obstack_object_size(obst) == 0);
+ n_ins = 0;
+ in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
+
/* make the back end call node and set its register requirements. */
for (i = 0; i < n_reg_params; ++i) {
- obstack_ptr_grow(obst, get_Call_param(irn, reg_param_idxs[i]));
+ in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
}
/* add state registers ins */
ir_fprintf(stderr, "Adding %+F\n", regnode);
#endif
ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
- obstack_ptr_grow(obst, regnode);
+ in[n_ins++] = regnode;
}
- n_ins = n_reg_params + pset_new_size(&states);
-
- in = obstack_finish(obst);
+ assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
/* ins collected, build the call */
if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
*/
be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
- DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
+ DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
/* Set the register classes and constraints of the Call parameters. */
for (i = 0; i < n_reg_params; ++i) {
be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
arch_set_irn_register(proj, arg->reg);
}
- obstack_free(obst, in);
exchange(irn, low_call);
/* kill the ProjT node */
int n = 0;
int curr_res_proj = pn_be_Call_first_res + n_reg_results;
pset_new_iterator_t iter;
+ int n_ins;
+
+ n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
+ in = ALLOCAN(ir_node *, n_ins);
/* also keep the stack pointer */
- ++n;
set_irn_link(curr_sp, (void*) sp);
- obstack_ptr_grow(obst, curr_sp);
+ in[n++] = curr_sp;
foreach_pset_new(&destroyed_regs, reg, iter) {
ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj);
arch_set_irn_register(proj, reg);
set_irn_link(proj, (void*) reg);
- obstack_ptr_grow(obst, proj);
+ in[n++] = proj;
++curr_res_proj;
- ++n;
}
for (i = 0; i < n_reg_results; ++i) {
ir_node *proj = res_projs[i];
const arch_register_t *reg = arch_get_irn_register(proj);
set_irn_link(proj, (void*) reg);
- obstack_ptr_grow(obst, proj);
+ in[n++] = proj;
}
- n += n_reg_results;
+ assert(n <= n_ins);
/* create the Keep for the caller save registers */
- in = (ir_node **) obstack_finish(obst);
- keep = be_new_Keep(NULL, bl, n, in);
+ keep = be_new_Keep(bl, n, in);
for (i = 0; i < n; ++i) {
const arch_register_t *reg = get_irn_link(in[i]);
be_node_set_reg_class_in(keep, i, reg->reg_class);
}
- obstack_free(obst, in);
}
/* Clean up the stack. */
}
be_abi_call_free(call);
- obstack_free(obst, stack_param_idx);
pset_new_destroy(&states);
pset_new_destroy(&destroyed_regs);
pn_be_AddSP_sp);
return curr_sp;
-} /* adjust_alloc */
+}
/**
* Adjust a Free.
curr_sp = res;
return curr_sp;
-} /* adjust_free */
-
-/* the following function is replaced by the usage of the heights module */
-#if 0
-/**
- * Walker for dependent_on().
- * This function searches a node tgt recursively from a given node
- * but is restricted to the given block.
- * @return 1 if tgt was reachable from curr, 0 if not.
- */
-static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
-{
- int n, i;
-
- if (get_nodes_block(curr) != bl)
- return 0;
-
- if (curr == tgt)
- return 1;
-
- /* Phi functions stop the recursion inside a basic block */
- if (! is_Phi(curr)) {
- for (i = 0, n = get_irn_arity(curr); i < n; ++i) {
- if (check_dependence(get_irn_n(curr, i), tgt, bl))
- return 1;
- }
- }
-
- return 0;
}
-#endif /* if 0 */
/**
* Check if a node is somehow data dependent on another one.
*/
static void process_ops_in_block(ir_node *bl, void *data)
{
- be_abi_irg_t *env = data;
- ir_node *curr_sp = env->init_sp;
- ir_node *irn;
- int n;
+ be_abi_irg_t *env = data;
+ ir_node *curr_sp = env->init_sp;
+ ir_node *irn;
+ ir_node **nodes;
+ int n;
+ int n_nodes;
+
+ n_nodes = 0;
+ for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {
+ ++n_nodes;
+ }
- for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
- obstack_ptr_grow(&env->obst, irn);
+ nodes = ALLOCAN(ir_node*, n_nodes);
+ for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {
+ nodes[n] = irn;
+ }
/* If there were call nodes in the block. */
if (n > 0) {
ir_node *keep;
- ir_node **nodes;
int i;
- nodes = obstack_finish(&env->obst);
-
/* order the call nodes according to data dependency */
- qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);
+ qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
- for (i = n - 1; i >= 0; --i) {
+ for (i = n_nodes - 1; i >= 0; --i) {
ir_node *irn = nodes[i];
- DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
+ DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
switch (get_irn_opcode(irn)) {
case iro_Call:
if (! be_omit_fp) {
}
}
- obstack_free(&env->obst, nodes);
-
/* Keep the last stack state in the block by tying it to Keep node,
* the proj from calls is already kept */
if (curr_sp != env->init_sp &&
!(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
nodes[0] = curr_sp;
- keep = be_new_Keep(env->arch_env->sp->reg_class, bl, 1, nodes);
+ keep = be_new_Keep(bl, 1, nodes);
pmap_insert(env->keep_map, bl, keep);
}
}
set_irn_link(bl, curr_sp);
-} /* process_ops_in_block */
+}
/**
* Adjust all call nodes in the graph to the ABI conventions.
int inc = env->birg->main_env->arch_env->stack_dir * dir;
int n = get_method_n_params(method_type);
int curr = inc > 0 ? 0 : n - 1;
+ struct obstack *obst = be_get_birg_obst(env->irg);
int ofs = 0;
char buf[128];
ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
ir_entity **map;
- *param_map = map = OALLOCN(&env->obst, ir_entity*, n);
+ *param_map = map = OALLOCN(obst, ir_entity*, n);
res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
for (i = 0; i < n; ++i, curr += inc) {
ir_type *param_type = get_method_param_type(method_type, curr);
arg->stack_ent = copy_entity_own(val_ent, res);
set_entity_link(val_ent, arg->stack_ent);
set_entity_link(arg->stack_ent, NULL);
- /* must be automatic to set a fixed layout */
- set_entity_allocation(arg->stack_ent, allocation_automatic);
} else {
/* create a new entity */
snprintf(buf, sizeof(buf), "param_%d", i);
return p->reg->reg_class - q->reg->reg_class;
}
-static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
+static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
{
pmap_entry *ent;
int n = pmap_count(reg_map);
int i = 0;
- reg_node_map_t *res = OALLOCN(obst, reg_node_map_t, n);
foreach_pmap(reg_map, ent) {
res[i].reg = ent->key;
}
qsort(res, n, sizeof(res[0]), cmp_regs);
- return res;
}
/**
* Creates a barrier.
*/
-static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
+static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
+ int in_req)
{
- int n_regs = pmap_count(regs);
- int n;
- ir_node *irn;
- ir_node **in;
+ int n_regs = pmap_count(regs);
+ int n;
+ ir_node *irn;
+ ir_node **in;
reg_node_map_t *rm;
- rm = reg_map_to_arr(&env->obst, regs);
-
- for (n = 0; n < n_regs; ++n)
- obstack_ptr_grow(&env->obst, rm[n].irn);
+ in = ALLOCAN(ir_node*, n_regs+1);
+ rm = ALLOCAN(reg_node_map_t, n_regs);
+ reg_map_to_arr(rm, regs);
+ for (n = 0; n < n_regs; ++n) {
+ in[n] = rm[n].irn;
+ }
if (mem) {
- obstack_ptr_grow(&env->obst, *mem);
- n++;
+ in[n++] = *mem;
}
- in = (ir_node **) obstack_finish(&env->obst);
irn = be_new_Barrier(bl, n, in);
- obstack_free(&env->obst, in);
for (n = 0; n < n_regs; ++n) {
- ir_node *pred = rm[n].irn;
- const arch_register_t *reg = rm[n].reg;
- arch_register_type_t add_type = 0;
- ir_node *proj;
+ ir_node *pred = rm[n].irn;
+ const arch_register_t *reg = rm[n].reg;
+ arch_register_type_t add_type = 0;
+ ir_node *proj;
+ const backend_info_t *info;
/* stupid workaround for now... as not all nodes report register
* requirements. */
- if (!is_Phi(pred)) {
+ info = be_get_info(skip_Proj(pred));
+ if (info != NULL && info->out_infos != NULL) {
const arch_register_req_t *ireq = arch_get_register_req_out(pred);
if (ireq->type & arch_register_req_type_ignore)
add_type |= arch_register_req_type_ignore;
*mem = new_r_Proj(bl, irn, mode_M, n);
}
- obstack_free(&env->obst, rm);
return irn;
}
be_abi_reg_map_set(reg_map, arch_env->sp, stack);
/* Make the Epilogue node and call the arch's epilogue maker. */
- create_barrier(env, bl, &mem, reg_map, 1);
+ create_barrier(bl, &mem, reg_map, 1);
call->cb->epilogue(env->cb, bl, &mem, reg_map);
/*
*/
in_max = pmap_count(reg_map) + n_res + 2;
- in = OALLOCN(&env->obst, ir_node*, in_max);
- regs = OALLOCN(&env->obst, arch_register_t const*, in_max);
+ in = ALLOCAN(ir_node*, in_max);
+ regs = ALLOCAN(arch_register_t const*, in_max);
in[0] = mem;
in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
}
/* Free the space of the Epilog's in array and the register <-> proj map. */
- obstack_free(&env->obst, in);
pmap_destroy(reg_map);
return ret;
argument_ent = copy_entity_own(ent, frame_tp);
/* must be automatic to set a fixed layout */
- set_entity_allocation(argument_ent, allocation_automatic);
set_entity_offset(argument_ent, offset);
offset += get_type_size_bytes(tp);
ent_pos_pair *entry, *new_list;
ir_type *frame_tp;
int i, n = ARR_LEN(value_param_list);
- DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
new_list = NULL;
for (i = 0; i < n; ++i) {
set_entity_owner(ent, frame_tp);
add_class_member(frame_tp, ent);
/* must be automatic to set a fixed layout */
- set_entity_allocation(ent, allocation_automatic);
set_entity_offset(ent, offset);
offset += get_type_size_bytes(tp);
}
continue;
if (block != start_block) {
ir_node *jmp = new_r_Jmp(start_block);
-
set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
+ set_irg_initial_exec(irg, jmp);
return;
}
}
if (! is_method_entity(ent))
continue;
- if (get_entity_peculiarity(ent) == peculiarity_description)
+
+ irg = get_entity_irg(ent);
+ if (irg == NULL)
continue;
/*
*/
ctx->static_link_pos = 0;
- irg = get_entity_irg(ent);
irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
}
}
const arch_env_t *arch_env= env->birg->main_env->arch_env;
const arch_register_t *sp = arch_env->sp;
ir_graph *irg = env->birg->irg;
- ir_node *start_bl;
ir_node *end;
ir_node *old_mem;
ir_node *new_mem_proj;
ir_node *mem;
ir_type *method_type = get_entity_type(get_irg_entity(irg));
+ struct obstack *obst = be_get_birg_obst(irg);
int n_params;
int i, n;
reg_node_map_t *rm;
const arch_register_t *fp_reg;
ir_node *frame_pointer;
- ir_node *reg_params_bl;
+ ir_node *start_bl;
ir_node **args;
ir_node *arg_tuple;
const ir_edge_t *edge;
lower_frame_sels_env_t ctx;
ir_entity **param_map;
- DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
-
DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
/* Must fetch memory here, otherwise the start Barrier gets the wrong
ctx.link_class = env->arch_env->link_class;
ctx.frame_tp = get_irg_frame_type(irg);
+ /* layout the stackframe now */
+ if (get_type_state(ctx.frame_tp) == layout_undefined) {
+ default_layout_compound_type(ctx.frame_tp);
+ }
+
/* we will possible add new entities to the frame: set the layout to undefined */
assert(get_type_state(ctx.frame_tp) == layout_fixed);
set_type_state(ctx.frame_tp, layout_undefined);
env->regs = pmap_create();
n_params = get_method_n_params(method_type);
- args = OALLOCNZ(&env->obst, ir_node*, n_params);
+ args = OALLOCNZ(obst, ir_node*, n_params);
/*
* for inner function we must now fix access to outer frame entities.
}
}
+ /* handle start block here (place a jump in the block) */
+ fix_start_block(irg);
+
pmap_insert(env->regs, (void *) sp, NULL);
pmap_insert(env->regs, (void *) arch_env->bp, NULL);
- reg_params_bl = get_irg_start_block(irg);
- env->reg_params = be_new_RegParams(reg_params_bl, pmap_count(env->regs));
- add_irn_dep(env->reg_params, get_irg_start(irg));
+ start_bl = get_irg_start_block(irg);
+ env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
/*
* make proj nodes for the callee save registers.
* the old Proj from start for that argument.
*/
- rm = reg_map_to_arr(&env->obst, env->regs);
+ rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
+ reg_map_to_arr(rm, env->regs);
for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
arch_register_t *reg = (void *) rm[i].reg;
ir_mode *mode = reg->reg_class->mode;
add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
assert(nr >= 0);
- proj = new_r_Proj(reg_params_bl, env->reg_params, mode, nr);
+ proj = new_r_Proj(start_bl, env->start, mode, nr + 1);
pmap_insert(env->regs, (void *) reg, proj);
- be_set_constr_single_reg_out(env->reg_params, nr, reg, add_type);
+ be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
arch_set_irn_register(proj, reg);
DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
}
- obstack_free(&env->obst, rm);
/* create a new initial memory proj */
assert(is_Proj(old_mem));
- new_mem_proj = new_r_Proj(get_nodes_block(old_mem),
- new_r_Unknown(irg, mode_T), mode_M,
- get_Proj_proj(old_mem));
+ arch_set_out_register_req(env->start, 0, arch_no_register_req);
+ new_mem_proj = new_r_Proj(start_bl, env->start, mode_M, 0);
mem = new_mem_proj;
+ set_irg_initial_mem(irg, mem);
/* Generate the Prologue */
fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- start_bl = get_irg_start_block(irg);
env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
- create_barrier(env, start_bl, &mem, env->regs, 0);
+ create_barrier(start_bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
arch_set_irn_register(env->init_sp, sp);
pset_insert_ptr(env->ignore_regs, fp_reg);
/* rewire old mem users to new mem */
- set_Proj_pred(new_mem_proj, get_Proj_pred(old_mem));
exchange(old_mem, mem);
+ /* keep the mem (for functions with an endless loop = no return) */
+ keep_alive(mem);
+
set_irg_initial_mem(irg, mem);
/* Now, introduce stack param nodes for all parameters passed on the stack */
if (arg->in_reg) {
repl = pmap_get(env->regs, (void *) arg->reg);
} else if (arg->on_stack) {
- ir_node *addr = be_new_FrameAddr(sp->reg_class, reg_params_bl, frame_pointer, arg->stack_ent);
+ ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
/* For atomic parameters which are actually used, we create a Load node. */
if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
ir_mode *mode = get_type_mode(param_type);
ir_mode *load_mode = arg->load_mode;
- ir_node *load = new_r_Load(reg_params_bl, new_NoMem(), addr, load_mode, cons_floats);
- repl = new_r_Proj(reg_params_bl, load, load_mode, pn_Load_res);
+ ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
+ repl = new_r_Proj(start_bl, load, load_mode, pn_Load_res);
if (mode != load_mode) {
- repl = new_r_Conv(reg_params_bl, repl, mode);
+ repl = new_r_Conv(start_bl, repl, mode);
}
} else {
/* The stack parameter is not primitive (it is a struct or array),
exchange(irn, ret);
}
}
+
/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
the code is dead and will never be executed. */
-
- obstack_free(&env->obst, args);
-
- /* handle start block here (place a jump in the block) */
- fix_start_block(irg);
}
/** Fix the state inputs of calls that still hang on unknowns */
ir_type *parent = be->pic_trampolines_type;
ir_entity *ent = new_entity(parent, old_id, type);
set_entity_ld_ident(ent, id);
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_uninitialized);
+ set_entity_visibility(ent, ir_visibility_local);
return ent;
}
ident *old_id = get_entity_ld_ident(entity);
ident *id = id_mangle3("L", old_id, "$non_lazy_ptr");
ir_type *e_type = get_entity_type(entity);
- ir_type *type = new_type_pointer(id, e_type, mode_P_data);
+ ir_type *type = new_type_pointer(e_type);
ir_type *parent = be->pic_symbols_type;
ir_entity *ent = new_entity(parent, old_id, type);
set_entity_ld_ident(ent, id);
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_uninitialized);
+ set_entity_visibility(ent, ir_visibility_local);
return ent;
}
*/
static int can_address_relative(ir_entity *entity)
{
- return get_entity_visibility(entity) != visibility_external_allocated;
+ return get_entity_visibility(entity) != ir_visibility_external;
}
/** patches SymConsts to work in position independent code */
ir_node *pic_base;
ir_node *add;
ir_node *block;
- ir_node *unknown;
ir_mode *mode;
ir_node *load;
ir_node *load_res;
/* everything else is accessed relative to EIP */
mode = get_irn_mode(pred);
- unknown = new_r_Unknown(irg, mode);
pic_base = arch_code_generator_get_pic_base(env->birg->cg);
/* all ok now for locally constructed stuff */
be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
ir_node *old_frame = get_irg_frame(birg->irg);
ir_graph *irg = birg->irg;
+ struct obstack *obst = be_get_birg_obst(irg);
pmap_entry *ent;
ir_node *dummy;
- optimization_state_t state;
unsigned *limited_bitset;
arch_register_req_t *sp_req;
be_omit_fp = birg->main_env->options->omit_fp;
be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
- obstack_init(&env->obst);
+ obstack_init(obst);
env->arch_env = birg->main_env->arch_env;
env->method_type = get_entity_type(get_irg_entity(irg));
env->keep_map = pmap_create();
env->dce_survivor = new_survive_dce();
env->birg = birg;
+ env->irg = irg;
- sp_req = OALLOCZ(&env->obst, arch_register_req_t);
+ sp_req = OALLOCZ(obst, arch_register_req_t);
env->sp_req = sp_req;
sp_req->type = arch_register_req_type_limited
| arch_register_req_type_produces_sp;
sp_req->cls = arch_register_get_class(env->arch_env->sp);
- limited_bitset = rbitset_obstack_alloc(&env->obst, sp_req->cls->n_regs);
+ limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
sp_req->limited = limited_bitset;
if (env->arch_env->sp->type & arch_register_type_ignore) {
sp_req->type |= arch_register_req_type_ignore;
}
- /* Beware: later we replace this node by the real one, ensure it is not CSE'd
- to another Unknown or the stack pointer gets used */
- save_optimization_state(&state);
- set_optimize(0);
- env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
- restore_optimization_state(&state);
-
- FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
+ env->init_sp = dummy = new_r_Dummy(irg, env->arch_env->sp->reg_class->mode);
env->calls = NEW_ARR_F(ir_node*, 0);
free_survive_dce(env->dce_survivor);
del_pset(env->ignore_regs);
pmap_destroy(env->regs);
- obstack_free(&env->obst, NULL);
free(env);
}
*/
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
+ ir_node *insn = node;
fix_stack_walker_env_t *env = data;
const arch_register_req_t *req;
- if (get_irn_mode(node) == mode_T)
+ if (is_Proj(node)) {
+ insn = get_Proj_pred(node);
+ }
+
+ if (arch_irn_get_n_outs(insn) == 0)
return;
req = arch_get_register_req_out(node);
int bias = omit_fp ? real_bias : 0;
int offset = get_stack_entity_offset(&env->frame, ent, bias);
arch_set_frame_offset(irn, offset);
- DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
+ DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
ent, offset, bias));
}
frame_tp = get_irg_frame_type(irg);
for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
ir_entity *ent = get_class_member(frame_tp, i);
+ ir_graph *irg = get_entity_irg(ent);
- if (is_method_entity(ent) && get_entity_peculiarity(ent) != peculiarity_description) {
- ir_graph *irg = get_entity_irg(ent);
-
+ if (irg != NULL) {
irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
}
}
{
return abi->call->flags.bits.try_omit_fp;
}
+
+/** Module constructor: registers the file-static "firm.be.abi" debug handle
+ *  (replaces the per-be_abi_irg_t dbg field removed by this change). */
+void be_init_abi(void)
+{
+	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
+}
+
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);