#include "TEMPLATE_emitter.h"
#include "gen_TEMPLATE_emitter.h"
+#include "gen_TEMPLATE_regalloc_if.h"
#include "TEMPLATE_nodes_attr.h"
#include "TEMPLATE_new_nodes.h"
be_emit_tarval(attr->value);
}
+static void emit_register(const arch_register_t *reg)
+{
+ be_emit_string(arch_register_get_name(reg));
+}
+
/**
 * Emit the name of the register assigned to input @p pos of @p node.
 */
void TEMPLATE_emit_source_register(const ir_node *node, int pos)
{
	emit_register(get_in_reg(node, pos));
}
/**
 * Emit the name of the register assigned to output @p pos of @p node.
 */
void TEMPLATE_emit_dest_register(const ir_node *node, int pos)
{
	emit_register(get_out_reg(node, pos));
}
/**
be_emit_finish_line_gas(node);
}
+static void emit_be_Start(const ir_node *node)
+{
+ ir_graph *irg = get_irn_irg(node);
+ ir_type *frame_type = get_irg_frame_type(irg);
+ unsigned size = get_type_size_bytes(frame_type);
+
+ /* emit function prolog */
+
+ /* allocate stackframe */
+ if (size > 0) {
+ be_emit_cstring("\tsub ");
+ emit_register(&TEMPLATE_registers[REG_SP]);
+ be_emit_irprintf(", %u, ", size);
+ emit_register(&TEMPLATE_registers[REG_SP]);
+ be_emit_finish_line_gas(node);
+ }
+}
+
static void emit_be_Return(const ir_node *node)
{
+ ir_graph *irg = get_irn_irg(node);
+ ir_type *frame_type = get_irg_frame_type(irg);
+ unsigned size = get_type_size_bytes(frame_type);
+
+ /* emit function epilog here */
+
+ /* deallocate stackframe */
+ if (size > 0) {
+ be_emit_cstring("\tadd ");
+ emit_register(&TEMPLATE_registers[REG_SP]);
+ be_emit_irprintf(", %u, ", size);
+ emit_register(&TEMPLATE_registers[REG_SP]);
+ be_emit_finish_line_gas(node);
+ }
+
+ /* return */
be_emit_cstring("\tret");
be_emit_finish_line_gas(node);
}
/* custom emitters not provided by the spec */
set_emitter(op_TEMPLATE_Jmp, emit_TEMPLATE_Jmp);
- set_emitter(op_be_Return, emit_be_Return);
set_emitter(op_be_IncSP, emit_be_IncSP);
+ set_emitter(op_be_Return, emit_be_Return);
+ set_emitter(op_be_Start, emit_be_Start);
/* no need to emit anything for the following nodes */
set_emitter(op_Phi, emit_nothing);
set_emitter(op_be_Keep, emit_nothing);
- set_emitter(op_be_Start, emit_nothing);
}
typedef void (*emit_func_ptr) (const ir_node *);
return &TEMPLATE_reg_classes[CLASS_TEMPLATE_gp];
}
-
-
-typedef struct {
- be_abi_call_flags_bits_t flags;
- ir_graph *irg;
-} TEMPLATE_abi_env_t;
-
-static void *TEMPLATE_abi_init(const be_abi_call_t *call, ir_graph *irg)
-{
- TEMPLATE_abi_env_t *env = XMALLOC(TEMPLATE_abi_env_t);
- be_abi_call_flags_t fl = be_abi_call_get_flags(call);
- env->flags = fl.bits;
- env->irg = irg;
- return env;
-}
-
/**
* Get the between type for that call.
 * @param irg  The graph of the function.
 * @return The between type for that call.
*/
-static ir_type *TEMPLATE_get_between_type(void *self)
+static ir_type *TEMPLATE_get_between_type(ir_graph *irg)
{
static ir_type *between_type = NULL;
static ir_entity *old_bp_ent = NULL;
- (void) self;
+ (void) irg;
if (!between_type) {
ir_entity *ret_addr_ent;
return between_type;
}
-/**
- * Build the prolog, return the BASE POINTER register
- */
-static const arch_register_t *TEMPLATE_abi_prologue(void *self, ir_node **mem,
- pmap *reg_map, int *stack_bias)
-{
- TEMPLATE_abi_env_t *env = (TEMPLATE_abi_env_t*)self;
- const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
- (void) reg_map;
- (void) mem;
- (void) stack_bias;
-
- if (env->flags.try_omit_fp)
- return arch_env->sp;
- return arch_env->bp;
-}
-
-/* Build the epilog */
-static void TEMPLATE_abi_epilogue(void *self, ir_node *bl, ir_node **mem,
- pmap *reg_map)
-{
- (void) self;
- (void) bl;
- (void) mem;
- (void) reg_map;
-}
-
static const be_abi_callbacks_t TEMPLATE_abi_callbacks = {
- TEMPLATE_abi_init,
- free,
TEMPLATE_get_between_type,
- TEMPLATE_abi_prologue,
- TEMPLATE_abi_epilogue,
};
/**
ir_graph *irg;
} amd64_abi_env_t;
-static void *amd64_abi_init(const be_abi_call_t *call, ir_graph *irg)
-{
- amd64_abi_env_t *env = XMALLOC(amd64_abi_env_t);
- be_abi_call_flags_t fl = be_abi_call_get_flags(call);
- env->flags = fl.bits;
- env->irg = irg;
- return env;
-}
-
/**
* Get the between type for that call.
 * @param irg  The graph of the function.
 * @return The between type for that call.
*/
-static ir_type *amd64_get_between_type(void *self)
+static ir_type *amd64_get_between_type(ir_graph *irg)
{
static ir_type *between_type = NULL;
static ir_entity *old_bp_ent = NULL;
- (void) self;
+ (void) irg;
if(!between_type) {
ir_entity *ret_addr_ent;
return between_type;
}
-/**
- * Build the prolog, return the BASE POINTER register
- */
-static const arch_register_t *amd64_abi_prologue(void *self, ir_node **mem,
- pmap *reg_map, int *stack_bias)
-{
- amd64_abi_env_t *env = (amd64_abi_env_t*)self;
- const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
- (void) mem;
- (void) stack_bias;
- (void) aenv;
- (void) reg_map;
-
- if (!env->flags.try_omit_fp) {
- /* FIXME: maybe later here should be some code to generate
- * the usual abi prologue */
- return aenv->bp;
- }
-
- return aenv->sp;
-}
-
-/* Build the epilog */
-static void amd64_abi_epilogue(void *self, ir_node *bl, ir_node **mem,
- pmap *reg_map)
-{
- amd64_abi_env_t *env = (amd64_abi_env_t*)self;
- const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
- ir_node *curr_sp = be_abi_reg_map_get(reg_map, aenv->sp);
- ir_node *curr_bp = be_abi_reg_map_get(reg_map, aenv->bp);
- (void) bl;
- (void) mem;
-
- if (env->flags.try_omit_fp) {
- curr_sp = be_new_IncSP(aenv->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
- }
-
- be_abi_reg_map_set(reg_map, aenv->sp, curr_sp);
- be_abi_reg_map_set(reg_map, aenv->bp, curr_bp);
-}
-
static const be_abi_callbacks_t amd64_abi_callbacks = {
- amd64_abi_init,
- free,
amd64_get_between_type,
- amd64_abi_prologue,
- amd64_abi_epilogue,
};
static const arch_register_t *gpreg_param_reg_std[] = {
return reg;
}
+static void arm_emit_register(const arch_register_t *reg)
+{
+ be_emit_string(arch_register_get_name(reg));
+}
+
/**
 * Emit the name of the register assigned to input @p pos of @p node.
 */
void arm_emit_source_register(const ir_node *node, int pos)
{
	arm_emit_register(get_in_reg(node, pos));
}
/**
 * Emit the name of the register assigned to output @p pos of @p node.
 */
void arm_emit_dest_register(const ir_node *node, int pos)
{
	arm_emit_register(get_out_reg(node, pos));
}
void arm_emit_offset(const ir_node *node)
be_emit_cstring(", ");
arm_emit_source_register(irn, 0);
be_emit_irprintf(", #0x%X", offs);
+ be_emit_finish_line_gas(irn);
} else {
/* omitted IncSP(0) */
return;
}
- be_emit_finish_line_gas(irn);
}
static void emit_be_Copy(const ir_node *irn)
assert(sp_change == 0);
}
+static void emit_be_Start(const ir_node *node)
+{
+ ir_graph *irg = get_irn_irg(node);
+ ir_type *frame_type = get_irg_frame_type(irg);
+ unsigned size = get_type_size_bytes(frame_type);
+
+ /* allocate stackframe */
+ if (size > 0) {
+ be_emit_cstring("\tsub ");
+ arm_emit_register(&arm_registers[REG_SP]);
+ be_emit_cstring(", ");
+ arm_emit_register(&arm_registers[REG_SP]);
+ be_emit_irprintf(", #0x%X", size);
+ be_emit_finish_line_gas(node);
+ }
+}
+
static void emit_be_Return(const ir_node *node)
{
+ ir_graph *irg = get_irn_irg(node);
+ ir_type *frame_type = get_irg_frame_type(irg);
+ unsigned size = get_type_size_bytes(frame_type);
+
+ /* deallocate stackframe */
+ if (size > 0) {
+ be_emit_cstring("\tadd ");
+ arm_emit_register(&arm_registers[REG_SP]);
+ be_emit_cstring(", ");
+ arm_emit_register(&arm_registers[REG_SP]);
+ be_emit_irprintf(", #0x%X", size);
+ be_emit_finish_line_gas(node);
+ }
+
be_emit_cstring("\tmov pc, lr");
be_emit_finish_line_gas(node);
}
set_emitter(op_be_MemPerm, emit_be_MemPerm);
set_emitter(op_be_Perm, emit_be_Perm);
set_emitter(op_be_Return, emit_be_Return);
+ set_emitter(op_be_Start, emit_be_Start);
/* no need to emit anything for the following nodes */
set_emitter(op_Phi, emit_nothing);
set_emitter(op_be_Keep, emit_nothing);
- set_emitter(op_be_Start, emit_nothing);
}
/**
ir_node *new_block = be_transform_node(block);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *start;
- ir_node *incsp;
- ir_node *sp;
size_t i;
/* stackpointer is important at function prolog */
}
start = be_prolog_create_start(abihelper, dbgi, new_block);
- sp = be_prolog_get_reg_value(abihelper, sp_reg);
- incsp = be_new_IncSP(sp_reg, new_block, sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
- be_prolog_set_reg_value(abihelper, sp_reg, incsp);
-
return start;
}
ir_node *sp_proj = get_stack_pointer_for(node);
int n_res = get_Return_n_ress(node);
ir_node *bereturn;
- ir_node *incsp;
int i;
be_epilog_begin(abihelper);
}
/* epilog code: an incsp */
- sp_proj = be_epilog_get_reg_value(abihelper, sp_reg);
- incsp = be_new_IncSP(sp_reg, new_block, sp_proj,
- BE_STACK_FRAME_SIZE_SHRINK, 0);
- be_epilog_set_reg_value(abihelper, sp_reg, incsp);
-
bereturn = be_epilog_create_return(abihelper, dbgi, new_block);
-
return bereturn;
}
implementation in beflags */
arch_irn_flags_simple_jump = 1U << 3, /**< a simple jump instruction */
arch_irn_flags_not_scheduled = 1U << 4, /**< node must not be scheduled*/
- /** mark node as belonging to the prolog. No spill instructions must appear
- * in a schedule before a prolog node */
- arch_irn_flags_prolog = 1U << 5,
- /** mark node as belonging to the epilog. No spill instructions must appear
- * after an epilog node */
- arch_irn_flags_epilog = 1U << 6,
- arch_irn_flags_backend = 1U << 7, /**< begin of custom backend
+ arch_irn_flags_backend = 1U << 5, /**< begin of custom backend
flags */
} arch_irn_flags_t;
ENUM_BITSET(arch_irn_flags_t)
int start_block_bias; /**< The stack bias at the end of the start block. */
- void *cb; /**< ABI Callback self pointer. */
-
pmap *keep_map; /**< mapping blocks to keep nodes. */
ir_node **calls; /**< flexible array containing all be_Call nodes */
be_abi_reg_map_set(reg_map, arch_env->sp, stack);
- /* Make the Epilogue node and call the arch's epilogue maker. */
- call->cb->epilogue(env->cb, bl, &mem, reg_map);
-
/*
Maximum size of the in array for Return nodes is
return args + callee save/ignore registers + memory + stack pointer
/* we have to pop the shadow parameter in in case of struct returns */
pop = call->pop;
ret = be_new_Return(dbgi, irg, bl, n_res, pop, n, in);
- arch_irn_add_flags(ret, arch_irn_flags_epilog);
/* Set the register classes of the return's parameter accordingly. */
for (i = 0; i < n; ++i) {
}
}
- bet_type = call->cb->get_between_type(env->cb);
+ stack_layout->sp_relative = call->flags.bits.try_omit_fp;
+ bet_type = call->cb->get_between_type(irg);
stack_frame_init(stack_layout, arg_type, bet_type,
get_irg_frame_type(irg), param_map);
- stack_layout->sp_relative = call->flags.bits.try_omit_fp;
/* Count the register params and add them to the number of Projs for the RegParams node */
for (i = 0; i < n_params; ++i) {
}
}
+ fp_reg = call->flags.bits.try_omit_fp ? arch_env->sp : arch_env->bp;
+ rbitset_clear(birg->allocatable_regs, fp_reg->global_index);
+
/* handle start block here (place a jump in the block) */
fix_start_block(irg);
pmap_insert(env->regs, (void *) arch_env->bp, NULL);
start_bl = get_irg_start_block(irg);
env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
- arch_irn_add_flags(env->start, arch_irn_flags_prolog);
set_irg_start(irg, env->start);
/*
* make proj nodes for the callee save registers.
* memorize them, since Return nodes get those as inputs.
*
- * Note, that if a register corresponds to an argument, the regs map contains
- * the old Proj from start for that argument.
+ * Note, that if a register corresponds to an argument, the regs map
+ * contains the old Proj from start for that argument.
*/
-
rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
reg_map_to_arr(rm, env->regs);
for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
ir_node *proj;
if (reg == sp)
- add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
+ add_type |= arch_register_req_type_produces_sp;
+ if (!rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
+ add_type |= arch_register_req_type_ignore;
+ }
assert(nr >= 0);
proj = new_r_Proj(env->start, mode, nr + 1);
mem = new_mem_proj;
set_irg_initial_mem(irg, mem);
- /* Generate the Prologue */
- fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &stack_layout->initial_bias);
-
- env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
- arch_irn_add_flags(env->init_sp, arch_irn_flags_prolog);
- be_abi_reg_map_set(env->regs, sp, env->init_sp);
-
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- arch_set_irn_register(env->init_sp, sp);
+ /* set new frame_pointer */
frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
set_irg_frame(irg, frame_pointer);
- rbitset_clear(birg->allocatable_regs, fp_reg->global_index);
/* rewire old mem users to new mem */
exchange(old_mem, mem);
ir_type *method_type = get_entity_type(entity);
be_irg_t *birg = be_birg_from_irg(irg);
struct obstack *obst = &birg->obst;
+ ir_node *dummy = new_r_Dummy(irg,
+ arch_env->sp->reg_class->mode);
unsigned r;
- ir_node *dummy;
-
/* determine allocatable registers */
assert(birg->allocatable_regs == NULL);
birg->allocatable_regs = rbitset_obstack_alloc(obst, arch_env->n_registers);
env->call = be_abi_call_new(arch_env->sp->reg_class);
arch_env_get_call_abi(arch_env, method_type, env->call);
- env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
+ env->init_sp = dummy;
env->calls = NEW_ARR_F(ir_node*, 0);
if (options->pic) {
/* Lower all call nodes in the IRG. */
process_calls(irg);
- /*
- Beware: init backend abi call object after processing calls,
- otherwise some information might be not yet available.
- */
- env->cb = env->call->cb->init(env->call, irg);
-
/* Process the IRG */
modify_irg(irg);
exchange(dummy, env->init_sp);
exchange(old_frame, get_irg_frame(irg));
- env->call->cb->done(env->cb);
- env->cb = NULL;
return env;
}
ir_node *be_abi_get_ignore_irn(ir_graph *irg, const arch_register_t *reg)
{
const be_abi_irg_t *abi = be_get_irg_abi(irg);
- assert(reg->type & arch_register_type_ignore);
assert(pmap_contains(abi->regs, (void *) reg));
return (ir_node*)pmap_get(abi->regs, (void *) reg);
}
};
struct be_abi_callbacks_t {
- /**
- * Initialize the callback object.
- * @param call The call object.
- * @param irg The graph with the method.
- * @return Some pointer. This pointer is passed to all other callback functions as self object.
- */
- void *(*init)(const be_abi_call_t *call, ir_graph *irg);
-
- /**
- * Destroy the callback object.
- * @param self The callback object.
- */
- void (*done)(void *self);
-
/**
* Get the between type for that call.
* @param self The callback object.
* @return The between type of for that call.
*/
- ir_type *(*get_between_type)(void *self);
-
- /**
- * Generate the prologue.
- * @param self The callback object.
- * @param mem A pointer to the mem node. Update this if you define new memory.
- * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
- * @param stack_bias Points to the current stack bias, can be modified if needed.
- * @return The register which shall be used as a stack frame base.
- *
- * All nodes which define registers in @p reg_map must keep @p reg_map current.
- */
- const arch_register_t *(*prologue)(void *self, ir_node **mem, pmap *reg_map, int *stack_bias);
-
- /**
- * Generate the epilogue.
- * @param self The callback object.
- * @param mem Memory one can attach to.
- * @param reg_map A mapping mapping all callee_save/ignore/return registers to their defining nodes.
- *
- * All nodes which define registers in @p reg_map must keep @p reg_map current.
- * Also, the @p mem variable must be updated, if memory producing nodes are inserted.
- */
- void (*epilogue)(void *self, ir_node *bl, ir_node **mem, pmap *reg_map);
+ ir_type *(*get_between_type)(ir_graph *irg);
};
/**
ir_node *start = be_new_Start(dbgi, block, n_start_outs);
int o;
- arch_irn_add_flags(start, arch_irn_flags_prolog);
-
assert(env->prolog.value_map == NULL);
env->prolog.value_map = NEW_ARR_F(ir_node*, n_start_outs);
ret = be_new_Return(dbgi, get_irn_irg(block), block, n_res, pop,
n_return_in, in);
- arch_irn_add_flags(ret, arch_irn_flags_epilog);
for (i = 0; i < n_return_in; ++i) {
const reg_flag_t *regflag = &env->epilog.regs[i];
const arch_register_t *reg = regflag->reg;
}
}
op = skip_Proj_const(node);
- if (arch_irn_get_flags(op) & arch_irn_flags_prolog)
- arch_irn_add_flags(last_keep, arch_irn_flags_prolog);
- if (arch_irn_get_flags(op) & arch_irn_flags_epilog)
- arch_irn_add_flags(last_keep, arch_irn_flags_epilog);
return last_keep;
}
const ir_edge_t *edge;
ir_mode *mode = get_irn_mode(node);
ir_node *last_keep;
+ ir_node **existing_projs;
(void) data;
if (mode != mode_T) {
if (!has_real_user(node)) {
return;
rbitset_alloca(found_projs, n_outs);
+ existing_projs = ALLOCANZ(ir_node*, n_outs);
foreach_out_edge(node, edge) {
ir_node *succ = get_edge_src_irn(edge);
ir_mode *mode = get_irn_mode(succ);
continue;
if (mode == mode_M || mode == mode_X)
continue;
+ pn = get_Proj_proj(succ);
+ existing_projs[pn] = succ;
if (!has_real_user(succ))
continue;
- pn = get_Proj_proj(succ);
assert(pn < n_outs);
rbitset_set(found_projs, pn);
}
continue;
}
- value = new_r_Proj(node, arch_register_class_mode(cls), i);
+ value = existing_projs[i];
+ if (value == NULL)
+ value = new_r_Proj(node, arch_register_class_mode(cls), i);
last_keep = add_to_keep(last_keep, cls, value);
}
}
if (flags & arch_irn_flags_not_scheduled) {
fprintf(F, " not_scheduled");
}
- if (flags & arch_irn_flags_prolog) {
- fprintf(F, " prolog");
- }
- if (flags & arch_irn_flags_epilog) {
- fprintf(F, " epilog");
- }
}
fprintf(F, " (%d)\n", flags);
}
#include "lc_opts.h"
#include "lc_opts_enum.h"
-/* we have prolog, "normal" and epilog */
-#define N_PRIORITY_CLASSES 3
-
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
/**
/** scheduling info per node, copied from the global scheduler object */
unsigned *scheduled;
/** the set of candidates */
- ir_nodeset_t cands[N_PRIORITY_CLASSES];
+ ir_nodeset_t cands;
ir_node *block; /**< the current block */
sched_env_t *sched_env; /**< the scheduler environment */
const list_sched_selector_t *selector;
void *selector_block_env;
} block_sched_env_t;
-/**
- * map prolog/normal/epilog into 3 priority levels
- */
-static unsigned get_priority(const ir_node *node)
-{
- arch_irn_flags_t flags = arch_irn_get_flags(node);
- if (flags & arch_irn_flags_prolog) {
- assert(! (flags & arch_irn_flags_epilog));
- return 0;
- } else if (flags & arch_irn_flags_epilog) {
- return 2;
- }
- return 1;
-}
-
/**
* Returns non-zero if the node is already scheduled
*/
/* Keeps must be scheduled immediately */
add_to_sched(env, irn);
} else {
- unsigned priority = get_priority(irn);
- ir_nodeset_insert(&env->cands[priority], irn);
+ ir_nodeset_insert(&env->cands, irn);
/* Notify selector about the ready node. */
if (env->selector->node_ready)
*/
static void add_to_sched(block_sched_env_t *env, ir_node *irn)
{
- unsigned priority = get_priority(irn);
-
assert(! (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled));
sched_add_before(env->block, irn);
DB((dbg, LEVEL_2, "\tschedule %+F\n", irn));
/* Remove the node from the ready set */
- ir_nodeset_remove(&env->cands[priority], irn);
+ ir_nodeset_remove(&env->cands, irn);
selected(env, irn);
}
block_sched_env_t be;
const ir_edge_t *edge;
- unsigned p;
+ ir_nodeset_t *cands = &be.cands;
/* Initialize the block's list head that will hold the schedule. */
sched_init_block(block);
be.block = block;
be.selector = selector;
be.sched_env = env;
- for (p = 0; p < N_PRIORITY_CLASSES; ++p) {
- ir_nodeset_init_size(&be.cands[p], get_irn_n_edges(block));
- }
+ ir_nodeset_init_size(cands, get_irn_n_edges(block));
DB((dbg, LEVEL_1, "scheduling %+F\n", block));
}
/* Iterate over all remaining nodes */
- for (p = 0; p < N_PRIORITY_CLASSES; ++p) {
- ir_nodeset_t *p_cands = &be.cands[p];
- while (ir_nodeset_size(p_cands) > 0) {
- ir_node *irn = be.selector->select(be.selector_block_env, p_cands);
- DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
-
- /* remove the scheduled node from the ready list. */
- ir_nodeset_remove(p_cands, irn);
- /* Add the node to the schedule. */
- add_to_sched(&be, irn);
- }
+ while (ir_nodeset_size(cands) > 0) {
+ ir_node *irn = be.selector->select(be.selector_block_env, cands);
+ DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
+
+ /* remove the scheduled node from the ready list. */
+ ir_nodeset_remove(cands, irn);
+ /* Add the node to the schedule. */
+ add_to_sched(&be, irn);
}
if (selector->finish_block)
selector->finish_block(be.selector_block_env);
-
- for (p = 0; p < N_PRIORITY_CLASSES; ++p) {
- /** all cand lists should be empty. Otherwise there was some invalid
- * dependencies between priority classes (ie. priority 0 value depending
- * on a priority 1 value) */
- assert(ir_nodeset_size(&be.cands[p]) == 0);
- ir_nodeset_init_size(&be.cands[p], get_irn_n_edges(block));
- }
}
/* List schedule a graph. */
}
if (be_is_IncSP(irn)) {
const be_incsp_attr_t *attr = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn);
- if (attr->offset == BE_STACK_FRAME_SIZE_EXPAND) {
- fprintf(f, " [Setup Stackframe] ");
- } else if (attr->offset == BE_STACK_FRAME_SIZE_SHRINK) {
- fprintf(f, " [Destroy Stackframe] ");
- } else {
- fprintf(f, " [%d] ", attr->offset);
- }
+ fprintf(f, " [%d] ", attr->offset);
}
break;
case dump_node_info_txt:
case beo_IncSP: {
const be_incsp_attr_t *a = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn);
fprintf(f, "align: %d\n", a->align);
- if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
- fprintf(f, "offset: FRAME_SIZE\n");
- else if (a->offset == BE_STACK_FRAME_SIZE_SHRINK)
- fprintf(f, "offset: -FRAME SIZE\n");
- else
- fprintf(f, "offset: %d\n", a->offset);
+ fprintf(f, "offset: %d\n", a->offset);
break;
}
case beo_Call: {
extern ir_op *op_be_Start;
extern ir_op *op_be_FrameAddr;
-/**
- * A "symbolic constant" for the size of the stack frame to use with IncSP nodes.
- * It gets back-patched to the real size as soon it is known.
- */
-#define BE_STACK_FRAME_SIZE_EXPAND INT_MAX
-#define BE_STACK_FRAME_SIZE_SHRINK INT_MIN
-
/**
* Determines if irn is a be_node.
*/
pred_offs = be_get_IncSP_offset(pred);
curr_offs = be_get_IncSP_offset(node);
-
- if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
- if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
- return node;
- }
- offs = 0;
- } else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
- if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
- return node;
- }
- offs = 0;
- } else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND ||
- curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
- return node;
- } else {
- offs = curr_offs + pred_offs;
- }
+ offs = curr_offs + pred_offs;
/* add node offset to pred and remove our IncSP */
be_set_IncSP_offset(pred, offs);
assert(!is_Proj(before) && !be_is_Keep(before));
- /* adjust before point to not be in the epilog */
- while (true) {
- ir_node *before_prev = sched_prev(before);
- if (! (arch_irn_get_flags(before_prev) & arch_irn_flags_epilog))
- break;
- before = sched_prev(before);
- }
-
/* put reload into list */
rel = OALLOC(&env->obst, reloader_t);
rel->next = info->reloaders;
node = skip_Proj(node);
while (true) {
ir_node *next = sched_next(node);
- if (!is_Phi(next) && !be_is_Keep(next) && !be_is_CopyKeep(next)
- && !(arch_irn_get_flags(next) & arch_irn_flags_prolog))
+ if (!is_Phi(next) && !be_is_Keep(next) && !be_is_CopyKeep(next))
break;
node = next;
}
if (be_is_IncSP(irn)) {
ofs = be_get_IncSP_offset(irn);
/* fill in real stack frame size */
- if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
- ir_type *frame_type = get_irg_frame_type(irg);
- ofs = (int) get_type_size_bytes(frame_type);
- be_set_IncSP_offset(irn, ofs);
- } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
- ir_type *frame_type = get_irg_frame_type(irg);
- ofs = - (int)get_type_size_bytes(frame_type);
- be_set_IncSP_offset(irn, ofs);
+ if (be_get_IncSP_align(irn)) {
+ /* patch IncSP to produce an aligned stack pointer */
+ ir_type *between_type = layout->between_type;
+ int between_size = get_type_size_bytes(between_type);
+ int alignment = 1 << arch_env->stack_alignment;
+ int delta = (real_bias + ofs + between_size) & (alignment - 1);
+ assert(ofs >= 0);
+ if (delta > 0) {
+ be_set_IncSP_offset(irn, ofs + alignment - delta);
+ real_bias += alignment - delta;
+ }
} else {
- if (be_get_IncSP_align(irn)) {
- /* patch IncSP to produce an aligned stack pointer */
- ir_type *between_type = layout->between_type;
- int between_size = get_type_size_bytes(between_type);
- int alignment = 1 << arch_env->stack_alignment;
- int delta = (real_bias + ofs + between_size) & (alignment - 1);
- assert(ofs >= 0);
- if (delta > 0) {
- be_set_IncSP_offset(irn, ofs + alignment - delta);
- real_bias += alignment - delta;
- }
- } else {
- /* adjust so real_bias corresponds with wanted_bias */
- int delta = wanted_bias - real_bias;
- assert(delta <= 0);
- if (delta != 0) {
- be_set_IncSP_offset(irn, ofs + delta);
- real_bias += delta;
- }
+ /* adjust so real_bias corresponds with wanted_bias */
+ int delta = wanted_bias - real_bias;
+ assert(delta <= 0);
+ if (delta != 0) {
+ be_set_IncSP_offset(irn, ofs + delta);
+ real_bias += delta;
}
}
real_bias += ofs;
do {
after = next;
next = sched_next(after);
- } while (is_Proj(next) || is_Phi(next) || be_is_Keep(next)
- || (arch_irn_get_flags(next) & arch_irn_flags_prolog));
+ } while (is_Proj(next) || is_Phi(next) || be_is_Keep(next));
} else {
after = state;
}
return block_info;
}
-static ir_node *get_reload_point(ir_node *before)
-{
- while (true) {
- ir_node *prev = sched_prev(before);
- if (! (arch_irn_get_flags(prev) & arch_irn_flags_epilog))
- break;
- before = prev;
- }
- return before;
-}
-
/**
* For the given block @p block, decide for each values
* whether it is used from a register or is reloaded
}
/* create a reload to match state if necessary */
if (need_val != NULL && need_val != current_state) {
- ir_node *before = get_reload_point(node);
+ ir_node *before = node;
DBG((dbg, LEVEL_3, "\t... reloading %+F\n", need_val));
create_reload(env, need_val, before, current_state);
current_state = need_val;
ir_mode *ia32_mode_fpcw = NULL;
/** The current omit-fp state */
-static unsigned ia32_curr_fp_ommitted = 0;
static ir_type *omit_fp_between_type = NULL;
static ir_type *between_type = NULL;
static ir_entity *old_bp_ent = NULL;
if (is_ia32_Pop(node) || is_ia32_PopMem(node))
return -4;
- if (is_ia32_Leave(node) || (be_is_Copy(node)
- && arch_get_irn_register(node) == &ia32_registers[REG_ESP])) {
+ if (is_ia32_Leave(node) || is_ia32_CopyEbpEsp(node)) {
return SP_BIAS_RESET;
}
return 0;
}
-/**
- * Generate the routine prologue.
- *
- * @param self The callback object.
- * @param mem A pointer to the mem node. Update this if you define new memory.
- * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
- * @param stack_bias Points to the current stack bias, can be modified if needed.
- *
- * @return The register which shall be used as a stack frame base.
- *
- * All nodes which define registers in @p reg_map must keep @p reg_map current.
- */
-static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
-{
- ia32_abi_env_t *env = (ia32_abi_env_t*)self;
- ir_graph *irg = env->irg;
- const arch_env_t *arch_env = be_get_irg_arch_env(irg);
-
- ia32_curr_fp_ommitted = env->flags.try_omit_fp;
- if (! env->flags.try_omit_fp) {
- ir_node *bl = get_irg_start_block(env->irg);
- ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
- ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
- ir_node *noreg = ia32_new_NoReg_gp(irg);
- ir_node *push;
-
- /* mark bp register as ignore */
- be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
- get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);
-
- /* push ebp */
- push = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
- arch_irn_add_flags(push, arch_irn_flags_prolog);
- curr_sp = new_r_Proj(push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
- *mem = new_r_Proj(push, mode_M, pn_ia32_Push_M);
- set_irn_pinned(push, op_pin_state_pinned);
-
- /* the push must have SP out register */
- arch_set_irn_register(curr_sp, arch_env->sp);
-
- /* this modifies the stack bias, because we pushed 32bit */
- *stack_bias -= 4;
-
- /* move esp to ebp */
- curr_bp = be_new_Copy(arch_env->bp->reg_class, bl, curr_sp);
- arch_irn_add_flags(curr_bp, arch_irn_flags_prolog);
- be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
- arch_register_req_type_ignore);
- set_irn_pinned(curr_bp, op_pin_state_pinned);
-
- /* beware: the copy must be done before any other sp use */
- curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
- arch_irn_add_flags(curr_sp, arch_irn_flags_prolog);
- be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
- arch_register_req_type_produces_sp);
-
- be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
- be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
-
- return arch_env->bp;
- }
-
- return arch_env->sp;
-}
-
-/**
- * Generate the routine epilogue.
- * @param self The callback object.
- * @param bl The block for the epilog
- * @param mem A pointer to the mem node. Update this if you define new memory.
- * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
- * @return The register which shall be used as a stack frame base.
- *
- * All nodes which define registers in @p reg_map must keep @p reg_map current.
- */
-static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
-{
- ia32_abi_env_t *env = (ia32_abi_env_t*)self;
- const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
- ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
- ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
-
- if (env->flags.try_omit_fp) {
- /* simply remove the stack frame here */
- curr_sp = be_new_IncSP(arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
- arch_irn_add_flags(curr_sp, arch_irn_flags_epilog);
- set_irn_pinned(curr_sp, op_pin_state_pinned);
- } else {
- ir_mode *mode_bp = arch_env->bp->reg_class->mode;
-
- if (ia32_cg_config.use_leave) {
- ir_node *leave;
-
- /* leave */
- leave = new_bd_ia32_Leave(NULL, bl, curr_bp);
- curr_bp = new_r_Proj(leave, mode_bp, pn_ia32_Leave_frame);
- curr_sp = new_r_Proj(leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
- arch_irn_add_flags(leave, arch_irn_flags_epilog);
- set_irn_pinned(leave, op_pin_state_pinned);
- } else {
- ir_node *pop;
-
- /* copy ebp to esp */
- curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], bl, curr_bp);
- arch_set_irn_register(curr_sp, arch_env->sp);
- be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
- arch_register_req_type_ignore);
- arch_irn_add_flags(curr_sp, arch_irn_flags_epilog);
- set_irn_pinned(curr_sp, op_pin_state_pinned);
-
- /* pop ebp */
- pop = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
- curr_bp = new_r_Proj(pop, mode_bp, pn_ia32_Pop_res);
- curr_sp = new_r_Proj(pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
- arch_irn_add_flags(pop, arch_irn_flags_epilog);
- set_irn_pinned(pop, op_pin_state_pinned);
-
- *mem = new_r_Proj(pop, mode_M, pn_ia32_Pop_M);
- }
- arch_set_irn_register(curr_sp, arch_env->sp);
- arch_set_irn_register(curr_bp, arch_env->bp);
- }
-
- be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
- be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
-}
-
-/**
- * Initialize the callback object.
- * @param call The call object.
- * @param irg The graph with the method.
- * @return Some pointer. This pointer is passed to all other callback functions as self object.
- */
-static void *ia32_abi_init(const be_abi_call_t *call, ir_graph *irg)
-{
- ia32_abi_env_t *env = XMALLOC(ia32_abi_env_t);
- be_abi_call_flags_t fl = be_abi_call_get_flags(call);
- env->flags = fl.bits;
- env->irg = irg;
- return env;
-}
-
-/**
- * Destroy the callback object.
- * @param self The callback object.
- */
-static void ia32_abi_done(void *self)
-{
- free(self);
-}
-
/**
* Build the between type and entities if not already build.
*/
* it will contain the return address and space to store the old base pointer.
* @return The Firm type modeling the ABI between type.
*/
-static ir_type *ia32_abi_get_between_type(void *self)
+static ir_type *ia32_abi_get_between_type(ir_graph *irg)
{
- ia32_abi_env_t *env = (ia32_abi_env_t*)self;
-
+ const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ia32_build_between_type();
- return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
+ return layout->sp_relative ? omit_fp_between_type : between_type;
}
/**
* Return the stack entity that contains the return address.
*/
-ir_entity *ia32_get_return_address_entity(void)
+ir_entity *ia32_get_return_address_entity(ir_graph *irg)
{
+ const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ia32_build_between_type();
- return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
+ return layout->sp_relative ? omit_fp_ret_addr_ent : ret_addr_ent;
}
/**
* Return the stack entity that contains the frame address.
*/
-ir_entity *ia32_get_frame_address_entity(void)
+ir_entity *ia32_get_frame_address_entity(ir_graph *irg)
{
+ const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ia32_build_between_type();
- return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
+ return layout->sp_relative ? NULL : old_bp_ent;
}
/**
switch (get_ia32_irn_opcode(irn)) {
case iro_ia32_Add:
-#if 0
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* invers == add with negated const */
inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
inverse->costs += 2;
}
-#endif
break;
case iro_ia32_Sub:
-#if 0
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* invers == add with this const */
}
inverse->costs += 1;
}
-#endif
break;
case iro_ia32_Xor:
-#if 0
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
inverse->costs += 1;
}
-#endif
break;
case iro_ia32_Not: {
inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
}
static const be_abi_callbacks_t ia32_abi_callbacks = {
- ia32_abi_init,
- ia32_abi_done,
+ /* only the between-type callback remains: prolog/epilog construction
+  * (and the init/done state they needed) moved into
+  * introduce_prolog_epilog() in the backend itself */
ia32_abi_get_between_type,
- ia32_abi_prologue,
- ia32_abi_epilogue
};
/* register allocator interface */
be_node_needs_frame_entity(env, node, mode, align);
}
+/**
+ * Determine the index of the Return node input that is assigned the
+ * ebp register.
+ *
+ * @param ret  the be_Return node to scan
+ * @return     index of the ebp input; panics if no input carries ebp
+ */
+static int determine_ebp_input(ir_node *ret)
+{
+ const arch_register_t *bp = &ia32_registers[REG_EBP];
+ int arity = get_irn_arity(ret);
+ int i;
+
+ for (i = 0; i < arity; ++i) {
+ ir_node *input = get_irn_n(ret, i);
+ if (arch_get_irn_register(input) == bp)
+ return i;
+ }
+ panic("no ebp input found at %+F", ret);
+}
+
+/**
+ * Build the function epilog in front of a be_Return node.
+ *
+ * With a frame pointer (!layout->sp_relative) the stack is restored
+ * from ebp — either with a single Leave or with a mov ebp->esp
+ * followed by a pop of ebp — and the Return's ebp/mem inputs are
+ * rewired accordingly.  Without a frame pointer the stackframe is
+ * released again with an IncSP.  In both cases the Return's sp input
+ * is replaced by the new stack pointer value.
+ */
+static void introduce_epilog(ir_node *ret)
+{
+ const arch_register_t *sp = &ia32_registers[REG_ESP];
+ const arch_register_t *bp = &ia32_registers[REG_EBP];
+ ir_graph *irg = get_irn_irg(ret);
+ ir_type *frame_type = get_irg_frame_type(irg);
+ unsigned frame_size = get_type_size_bytes(frame_type);
+ be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
+ ir_node *block = get_nodes_block(ret);
+ ir_node *first_sp = get_irn_n(ret, n_be_Return_sp);
+ ir_node *curr_sp = first_sp;
+ ir_mode *mode_gp = mode_Iu;
+
+ /* frame pointer in use: restore esp (and ebp) from ebp */
+ if (!layout->sp_relative) {
+ int n_ebp = determine_ebp_input(ret);
+ ir_node *curr_bp = get_irn_n(ret, n_ebp);
+ if (ia32_cg_config.use_leave) {
+ ir_node *leave = new_bd_ia32_Leave(NULL, block, curr_bp);
+ curr_bp = new_r_Proj(leave, mode_gp, pn_ia32_Leave_frame);
+ curr_sp = new_r_Proj(leave, mode_gp, pn_ia32_Leave_stack);
+ arch_set_irn_register(curr_bp, bp);
+ arch_set_irn_register(curr_sp, sp);
+ sched_add_before(ret, leave);
+ } else {
+ ir_node *pop;
+ ir_node *curr_mem = get_irn_n(ret, n_be_Return_mem);
+ /* copy ebp to esp */
+ curr_sp = new_bd_ia32_CopyEbpEsp(NULL, block, curr_bp);
+ arch_set_irn_register(curr_sp, sp);
+ sched_add_before(ret, curr_sp);
+
+ /* pop ebp */
+ pop = new_bd_ia32_PopEbp(NULL, block, curr_mem, curr_sp);
+ curr_bp = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_res);
+ curr_sp = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_stack);
+ curr_mem = new_r_Proj(pop, mode_M, pn_ia32_Pop_M);
+ arch_set_irn_register(curr_bp, bp);
+ arch_set_irn_register(curr_sp, sp);
+ sched_add_before(ret, pop);
+
+ set_irn_n(ret, n_be_Return_mem, curr_mem);
+ }
+ set_irn_n(ret, n_ebp, curr_bp);
+ } else {
+ /* no frame pointer: undo the IncSP issued by the prolog */
+ ir_node *incsp = be_new_IncSP(sp, block, curr_sp, -(int)frame_size, 0);
+ sched_add_before(ret, incsp);
+ curr_sp = incsp;
+ }
+ set_irn_n(ret, n_be_Return_sp, curr_sp);
+
+ /* keep verifier happy... */
+ if (get_irn_n_edges(first_sp) == 0 && is_Proj(first_sp)) {
+ kill_node(first_sp);
+ }
+}
+
+/**
+ * Put the prolog code at the beginning of the graph and epilog code
+ * before each return node.
+ */
+static void introduce_prolog_epilog(ir_graph *irg)
+{
+ const arch_register_t *sp = &ia32_registers[REG_ESP];
+ const arch_register_t *bp = &ia32_registers[REG_EBP];
+ ir_node *start = get_irg_start(irg);
+ ir_node *block = get_nodes_block(start);
+ ir_type *frame_type = get_irg_frame_type(irg);
+ unsigned frame_size = get_type_size_bytes(frame_type);
+ be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
+ ir_node *initial_sp = be_abi_get_ignore_irn(irg, sp);
+ ir_node *curr_sp = initial_sp;
+ ir_mode *mode_gp = mode_Iu;
+
+ if (!layout->sp_relative) {
+ /* NOTE(review): -4 presumably accounts for the ebp word pushed
+  * below — confirm against the stack-bias computation */
+ layout->initial_bias = -4;
+
+ /* push ebp */
+ ir_node *mem = get_irg_initial_mem(irg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
+ ir_node *initial_bp = be_abi_get_ignore_irn(irg, bp);
+ ir_node *curr_bp = initial_bp;
+ ir_node *push
+ = new_bd_ia32_Push(NULL, block, noreg, noreg, mem, curr_bp, curr_sp);
+ curr_sp = new_r_Proj(push, mode_gp, pn_ia32_Push_stack);
+ mem = new_r_Proj(push, mode_M, pn_ia32_Push_M);
+ arch_set_irn_register(curr_sp, sp);
+ sched_add_after(start, push);
+
+ /* move esp to ebp */
+ curr_bp = be_new_Copy(bp->reg_class, block, curr_sp);
+ sched_add_after(push, curr_bp);
+ be_set_constr_single_reg_out(curr_bp, 0, bp, arch_register_req_type_ignore);
+ curr_sp = be_new_CopyKeep_single(sp->reg_class, block, curr_sp, curr_bp, mode_gp);
+ sched_add_after(curr_bp, curr_sp);
+ be_set_constr_single_reg_out(curr_sp, 0, sp, arch_register_req_type_produces_sp);
+ /* reroute all users to the new ebp, then restore the push's own
+  * operand to the original value */
+ edges_reroute(initial_bp, curr_bp);
+ set_irn_n(push, n_ia32_Push_val, initial_bp);
+
+ /* allocate the stackframe */
+ ir_node *incsp = be_new_IncSP(sp, block, curr_sp, frame_size, 0);
+ edges_reroute(initial_sp, incsp);
+ set_irn_n(push, n_ia32_Push_stack, initial_sp);
+ sched_add_after(curr_sp, incsp);
+ } else {
+ /* no frame pointer: just allocate the stackframe */
+ ir_node *incsp = be_new_IncSP(sp, block, curr_sp, frame_size, 0);
+ edges_reroute(initial_sp, incsp);
+ /* edges_reroute also rerouted incsp's own input; restore it */
+ be_set_IncSP_pred(incsp, curr_sp);
+ sched_add_after(start, incsp);
+ }
+
+ /* introduce epilog for every return node */
+ {
+ ir_node *end_block = get_irg_end_block(irg);
+ int arity = get_irn_arity(end_block);
+ int i;
+
+ for (i = 0; i < arity; ++i) {
+ ir_node *ret = get_irn_n(end_block, i);
+ assert(be_is_Return(ret));
+ introduce_epilog(ret);
+ }
+ }
+}
+
/**
* We transform Spill and Reload here. This needs to be done before
* stack biasing otherwise we would miss the corrected offset for these nodes.
be_free_frame_entity_coalescer(fec_env);
irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, NULL);
+
+ introduce_prolog_epilog(irg);
}
/**
/**
* Return the stack entity that contains the return address.
*/
-ir_entity *ia32_get_return_address_entity(void);
+ir_entity *ia32_get_return_address_entity(ir_graph *irg);
/**
* Return the stack entity that contains the frame address.
*/
-ir_entity *ia32_get_frame_address_entity(void);
+ir_entity *ia32_get_frame_address_entity(ir_graph *irg);
#endif
ir_node *flags_proj;
ir_mode *flags_mode;
ir_node *schedpoint;
+ ir_node *op = left;
const ir_edge_t *edge;
if (get_nodes_block(left) != block)
return;
- if (is_Proj(left)) {
- pn = get_Proj_proj(left);
- left = get_Proj_pred(left);
+ if (is_Proj(op)) {
+ pn = get_Proj_proj(op);
+ op = get_Proj_pred(op);
}
/* walk schedule up and abort when we find left or some other node
schedpoint = node;
for (;;) {
schedpoint = sched_prev(schedpoint);
- if (schedpoint == left)
+ if (schedpoint == op)
break;
if (arch_irn_is(schedpoint, modify_flags))
return;
}
}
- switch (produces_test_flag(left, pn)) {
+ switch (produces_test_flag(op, pn)) {
case produces_flag_zero:
break;
return;
}
- if (get_irn_mode(left) != mode_T) {
- set_irn_mode(left, mode_T);
+ if (get_irn_mode(op) != mode_T) {
+ set_irn_mode(op, mode_T);
/* If there are other users, reroute them to result proj */
- if (get_irn_n_edges(left) != 2) {
- ir_node *res = new_r_Proj(left, mode_Iu, pn_ia32_res);
+ if (get_irn_n_edges(op) != 2) {
+ ir_node *res = new_r_Proj(op, mode_Iu, pn_ia32_res);
- edges_reroute(left, res);
+ edges_reroute(op, res);
/* Reattach the result proj to left */
- set_Proj_pred(res, left);
+ set_Proj_pred(res, op);
}
+ } else {
+ if (get_irn_n_edges(left) == 2)
+ kill_node(left);
}
flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
- flags_proj = new_r_Proj(left, flags_mode, pn_ia32_flags);
+ flags_proj = new_r_Proj(op, flags_mode, pn_ia32_flags);
arch_set_irn_register(flags_proj, &ia32_registers[REG_EFLAGS]);
assert(get_irn_mode(node) != mode_T);
units => [ "GP" ],
},
+# mov %ebp -> %esp: restores the stack pointer from the frame pointer;
+# emitted by the epilog code (introduce_epilog) when no Leave is used
+CopyEbpEsp => {
+ state => "exc_pinned",
+ reg_req => { in => [ "ebp" ], out => [ "esp:I|S" ] },
+ ins => [ "ebp" ],
+ outs => [ "esp" ],
+ emit => '. movl %S0, %D0',
+ latency => 1,
+ units => [ "GP" ],
+ mode => $mode_gp,
+},
+
PopMem => {
state => "exc_pinned",
reg_req => { in => [ "gp", "gp", "none", "esp" ], out => [ "none", "none", "none", "esp:I|S" ] },
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_tarval *tv = get_Const_tarval(param);
+ ir_graph *irg = get_irn_irg(node);
unsigned long value = get_tarval_long(tv);
ir_node *block = be_transform_node(get_nodes_block(node));
set_ia32_am_offs_int(load, 0);
set_ia32_use_frame(load);
- set_ia32_frame_ent(load, ia32_get_return_address_entity());
+ set_ia32_frame_ent(load, ia32_get_return_address_entity(irg));
if (get_irn_pinned(node) == op_pin_state_floats) {
assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_tarval *tv = get_Const_tarval(param);
+ ir_graph *irg = get_irn_irg(node);
unsigned long value = get_tarval_long(tv);
ir_node *block = be_transform_node(get_nodes_block(node));
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, mode_Iu);
- ent = ia32_get_frame_address_entity();
+ ent = ia32_get_frame_address_entity(irg);
if (ent != NULL) {
set_ia32_am_offs_int(load, 0);
set_ia32_use_frame(load);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *mem;
ir_node *start;
- ir_node *sp;
+ ir_node *sp;
size_t i;
/* stackpointer is important at function prolog */
}
start = be_prolog_create_start(abihelper, dbgi, new_block);
+#if 0
mem = be_prolog_get_memory(abihelper);
sp = be_prolog_get_reg_value(abihelper, sp_reg);
arch_irn_add_flags(sp, arch_irn_flags_prolog);
be_prolog_set_reg_value(abihelper, sp_reg, sp);
be_prolog_set_memory(abihelper, mem);
+#endif
return start;
}
}
}
+#if 0
/* we need a restore instruction */
if (!cconv->omit_fp) {
ir_node *fp = be_prolog_get_reg_value(abihelper, fp_reg);
arch_irn_add_flags(sp, arch_irn_flags_epilog);
be_epilog_set_reg_value(abihelper, sp_reg, sp);
}
+#endif
bereturn = be_epilog_create_return(abihelper, dbgi, new_block);
- arch_irn_add_flags(bereturn, arch_irn_flags_epilog);
-
return bereturn;
}
int first;
(void) env;
- first = -1;
+ first = is_Block(irn) ? 0 : -1;
for (i = get_irn_arity(irn) - 1; i >= first; --i) {
ir_node *op = get_irn_n(irn, i);
bitset_t *bs = (bitset_t*)get_irn_link(op);