* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
-static void TEMPLATE_set_stack_bias(const void *self, ir_node *irn, int bias) {
+static void TEMPLATE_set_frame_offset(const void *self, ir_node *irn, int offset) {
/* TODO: correct offset if irn accesses the stack */
}
+static int TEMPLATE_get_sp_bias(const void *self, const ir_node *irn) {
+ return 0;
+}
+
/* fill register allocator interface */
static const arch_irn_ops_if_t TEMPLATE_irn_ops_if = {
TEMPLATE_get_flags,
TEMPLATE_get_frame_entity,
TEMPLATE_set_frame_entity,
- TEMPLATE_set_stack_bias
+ TEMPLATE_set_frame_offset,
+ TEMPLATE_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
entity *ent = be_get_frame_entity(n);
offset = get_entity_offset_bytes(ent);
} else if (irn_op == op_be_IncSP) {
- int offs = be_get_IncSP_offset(n);
- be_stack_dir_t dir = be_get_IncSP_direction(n);
- offset = (dir == be_stack_dir_expand) ? -offs : offs;
+ offset = - be_get_IncSP_offset(n);
} else {
return "node_offset_to_str will fuer diesen Knotentyp noch implementiert werden";
}
/** Emit an IncSP node */
static void emit_be_IncSP(const ir_node *irn, arm_emit_env_t *emit_env) {
FILE *F = emit_env->out;
- unsigned offs = be_get_IncSP_offset(irn);
- if (offs) {
+ int offs = be_get_IncSP_offset(irn);
+
+ if (offs != 0) {
char cmd_buf[SNPRINTF_BUF_LEN], cmnt_buf[SNPRINTF_BUF_LEN];
lc_esnprintf(arm_get_arg_env(), cmd_buf, SNPRINTF_BUF_LEN, "add %1D, %1S, #%O", irn, irn, irn );
lc_esnprintf(arm_get_arg_env(), cmnt_buf, SNPRINTF_BUF_LEN, "/* IncSP(%O) */", irn);
* access must be done relative the the fist IncSP ...
*/
static int get_sp_expand_offset(ir_node *inc_sp) {
- unsigned offset = be_get_IncSP_offset(inc_sp);
- be_stack_dir_t dir = be_get_IncSP_direction(inc_sp);
+ int offset = be_get_IncSP_offset(inc_sp);
- if (offset == BE_STACK_FRAME_SIZE)
+ if (offset == BE_STACK_FRAME_SIZE_EXPAND)
return 0;
- return dir == be_stack_dir_expand ? (int)offset : -(int)offset;
+
+ return offset;
}
static ir_node *gen_StackParam(ir_node *irn, arm_code_gen_t *cg) {
return NULL;
}
-static void arm_set_frame_entity(const void *self, const ir_node *irn, entity *ent) {
+static void arm_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
/* TODO: set the entity assigned to the frame */
}
/* TODO: correct offset if irn accesses the stack */
}
+static int arm_get_sp_bias(const void *self, const ir_node *irn) {
+ return 0;
+}
+
/* fill register allocator interface */
static const arch_irn_ops_if_t arm_irn_ops_if = {
arm_get_frame_entity,
arm_set_frame_entity,
arm_set_stack_bias,
+ arm_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
// TODO: Activate Omit fp in epilogue
if(env->flags.try_omit_fp) {
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE_SHRINK);
curr_lr = be_new_CopyKeep_single(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr, curr_sp, get_irn_mode(curr_lr));
be_node_set_reg_class(curr_lr, 1, &arm_reg_classes[CLASS_arm_gp]);
* moving the stack pointer along the stack's direction.
*/
if(stack_dir < 0 && !do_seq && !no_alloc) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size);
}
assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
if (do_seq) {
curr_ofs = 0;
addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
- param_size + arg->space_before, be_stack_dir_expand);
+ param_size + arg->space_before);
}
else {
curr_ofs += arg->space_before;
/* Clean up the stack frame if we allocated it */
if(!no_alloc)
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, -stack_size);
}
be_abi_call_free(call);
int stack_nr = get_Proj_proj(stack);
if(flags.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
frame = stack;
}
arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
}
- stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
}
be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
pmap_entry *ent;
if(env->call->flags.bits.try_omit_fp) {
-		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+		/* BE_STACK_FRAME_SIZE_SHRINK is INT_MIN: it must be passed as-is (negating it
+		 * is signed overflow, i.e. undefined behaviour) and the fix-bias phase
+		 * recognizes the shrink sentinel directly. */
+		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE_SHRINK);
}
else {
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
barrier = create_barrier(env, bl, &mem, env->regs, 0);
static void collect_stack_nodes_walker(ir_node *irn, void *data)
{
struct fix_stack_walker_info *info = data;
- ir_mode *mode;
if (is_Block(irn))
return;
- mode = get_irn_mode(irn);
-
- if (arch_irn_is(info->aenv, irn, modify_sp) && mode != mode_T && mode != mode_M)
+ if (arch_irn_is(info->aenv, irn, modify_sp)) {
+ assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T);
pset_insert_ptr(info->nodes, irn);
+ }
}
void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
be_free_dominance_frontiers(df);
}
-/**
- * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
- * into -1 or 1, respectively.
- * @param irn The node.
- * @return 1, if the direction of the IncSP was along, -1 if against.
- */
-static int get_dir(ir_node *irn)
-{
- return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
-}
-
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
{
- const arch_env_t *aenv = env->birg->main_env->arch_env;
+ const arch_env_t *arch_env = env->birg->main_env->arch_env;
int omit_fp = env->call->flags.bits.try_omit_fp;
ir_node *irn;
sched_foreach(bl, irn) {
/*
- If the node modifies the stack pointer by a constant offset,
- record that in the bias.
- */
- if(be_is_IncSP(irn)) {
- int ofs = be_get_IncSP_offset(irn);
- int dir = get_dir(irn);
-
- if(ofs == BE_STACK_FRAME_SIZE) {
- ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
- be_set_IncSP_offset(irn, ofs);
- }
-
- if(omit_fp)
- bias += dir * ofs;
+ Check, if the node relates to an entity on the stack frame.
+ If so, set the true offset (including the bias) for that
+ node.
+ */
+ entity *ent = arch_get_frame_entity(arch_env, irn);
+ if(ent) {
+ int offset = get_stack_entity_offset(env->frame, ent, bias);
+ arch_set_frame_offset(arch_env, irn, offset);
+ DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
}
/*
- Else check, if the node relates to an entity on the stack frame.
- If so, set the true offset (including the bias) for that
- node.
- */
- else {
- entity *ent = arch_get_frame_entity(aenv, irn);
- if(ent) {
- int offset = get_stack_entity_offset(env->frame, ent, bias);
- arch_set_frame_offset(aenv, irn, offset);
- DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
+ If the node modifies the stack pointer by a constant offset,
+ record that in the bias.
+ */
+ if(arch_irn_is(arch_env, irn, modify_sp)) {
+ int ofs = arch_get_sp_bias(arch_env, irn);
+
+ if(be_is_IncSP(irn)) {
+ if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
+ ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+ be_set_IncSP_offset(irn, ofs);
+ } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
+ ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+ be_set_IncSP_offset(irn, ofs);
+ }
}
+
+ if(omit_fp)
+ bias += ofs;
}
}
{
}
-static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
+static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias)
{
}
+static int abi_get_sp_bias(const void *self, const ir_node *irn)
+{
+ return 0;
+}
+
static const arch_irn_ops_if_t abi_irn_ops = {
abi_get_irn_reg_req,
abi_set_irn_reg,
abi_get_flags,
abi_get_frame_entity,
abi_set_frame_entity,
- abi_set_stack_bias,
+ abi_set_frame_offset,
+ abi_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
ops->impl->set_frame_entity(ops, irn, ent);
}
+int arch_get_sp_bias(const arch_env_t *env, ir_node *irn)
+{
+ const arch_irn_ops_t *ops = get_irn_ops(env, irn);
+ return ops->impl->get_sp_bias(ops, irn);
+}
+
arch_inverse_t *arch_get_inverse(const arch_env_t *env, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obstack)
{
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
*/
void (*set_frame_offset)(const void *self, ir_node *irn, int offset);
+ /**
+ * Returns the delta of the stackpointer for nodes that increment or
+ * decrement the stackpointer with a constant value. (push, pop
+ * nodes on most architectures).
+ * A positive value stands for an expanding stack area, a negative value for
+ * a shrinking one.
+ *
+ * @param self The this pointer
+ * @param irn The node
+ * @return 0 if the stackpointer is not modified with a constant
+ * value, otherwise the increment/decrement value
+ */
+ int (*get_sp_bias)(const void *self, const ir_node *irn);
+
/**
* Returns an inverse operation which yields the i-th argument
* of the given node as result.
extern entity *arch_get_frame_entity(const arch_env_t *env, ir_node *irn);
extern void arch_set_frame_entity(const arch_env_t *env, ir_node *irn, entity *ent);
+extern int arch_get_sp_bias(const arch_env_t *env, ir_node *irn);
extern int arch_get_op_estimated_cost(const arch_env_t *env, const ir_node *irn);
extern arch_inverse_t *arch_get_inverse(const arch_env_t *env, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obstack);
if(pset_find_ptr(phi_blocks, curr_bl)) {
ir_node *phi = get_irn_link(curr_bl);
- if(!phi) {
+ if(phi == NULL) {
int i, n_preds = get_irn_arity(curr_bl);
ir_graph *irg = get_irn_irg(curr_bl);
- ir_node **ins = xmalloc(n_preds * sizeof(ins[0]));
+ ir_node **ins = alloca(n_preds * sizeof(ins[0]));
for(i = 0; i < n_preds; ++i)
ins[i] = new_r_Bad(irg);
set_irn_link(curr_bl, phi);
sched_add_after(curr_bl, phi);
- free(ins);
for(i = 0; i < n_preds; ++i) {
ir_node *arg = search_def(phi, i, copies, copy_blocks, phis, phi_blocks, mode);
set_irn_n(phi, i, arg);
}
- if(phis)
+ if(phis != NULL)
pset_insert_ptr(phis, phi);
}
obstack_free(&obst, NULL);
}
+#if 0
/**
* Remove phis which are not necessary.
* During place_phi_functions() phi functions are put on the dominance
*/
static void remove_odd_phis(pset *copies, pset *unused_copies)
{
- ir_node *irn;
+ ir_node *irn;
- for(irn = pset_first(copies); irn; irn = pset_next(copies)) {
- if(is_Phi(irn)) {
- int i, n;
- int illegal = 0;
+ for(irn = pset_first(copies); irn; irn = pset_next(copies)) {
+ if(is_Phi(irn)) {
+ int i, n;
+ int illegal = 0;
- assert(sched_is_scheduled(irn) && "phi must be scheduled");
- for(i = 0, n = get_irn_arity(irn); i < n && !illegal; ++i)
- illegal = get_irn_n(irn, i) == NULL;
+ assert(sched_is_scheduled(irn) && "phi must be scheduled");
+ for(i = 0, n = get_irn_arity(irn); i < n && !illegal; ++i)
+ illegal = get_irn_n(irn, i) == NULL;
- if(illegal)
- sched_remove(irn);
- }
- }
+ if(illegal)
+ sched_remove(irn);
+ }
+ }
- for(irn = pset_first(unused_copies); irn; irn = pset_next(unused_copies)) {
+ for(irn = pset_first(unused_copies); irn; irn = pset_next(unused_copies)) {
sched_remove(irn);
}
}
+#endif
void be_ssa_constr_phis_ignore(dom_front_info_t *info, be_lv_t *lv, int n, ir_node *nodes[], pset *phis, pset *ignore_uses)
{
/* fix stack offsets */
BE_TIMER_PUSH(t_abi);
- be_abi_fix_stack_bias(birg.abi);
+ //be_abi_fix_stack_bias(birg.abi);
BE_TIMER_POP(t_abi);
BE_TIMER_PUSH(t_finish);
arch_code_generator_finish(birg.cg);
BE_TIMER_POP(t_finish);
+ /* fix stack offsets */
+ BE_TIMER_PUSH(t_abi);
+ be_abi_fix_stack_nodes(birg.abi, NULL);
+ be_remove_dead_nodes_from_schedule(birg.irg);
+ be_abi_fix_stack_bias(birg.abi);
+ BE_TIMER_POP(t_abi);
+
dump(DUMP_FINAL, irg, "-finish", dump_ir_block_graph_sched);
/* check schedule and register allocation */
/** The be_Stack attribute type. */
typedef struct {
be_node_attr_t node_attr;
- int offset; /**< The offset by which the stack shall be increased/decreased. */
- be_stack_dir_t dir; /**< The direction in which the stack shall be modified (expand or shrink). */
+ int offset; /**< The offset by which the stack shall be expanded/shrinked. */
} be_stack_attr_t;
/** The be_Frame attribute type. */
return a->num_ret_vals;
}
-ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, unsigned offset, be_stack_dir_t dir)
+ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset)
{
be_stack_attr_t *a;
ir_node *irn;
in[1] = mem;
irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, 2, in);
a = init_node_attr(irn, 1);
- a->dir = dir;
a->offset = offset;
be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
return get_irn_n(irn, 1);
}
-void be_set_IncSP_offset(ir_node *irn, unsigned offset)
+void be_set_IncSP_offset(ir_node *irn, int offset)
{
be_stack_attr_t *a = get_irn_attr(irn);
assert(be_is_IncSP(irn));
a->offset = offset;
}
-unsigned be_get_IncSP_offset(const ir_node *irn)
+int be_get_IncSP_offset(const ir_node *irn)
{
be_stack_attr_t *a = get_irn_attr(irn);
assert(be_is_IncSP(irn));
return a->offset;
}
-void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir)
-{
- be_stack_attr_t *a = get_irn_attr(irn);
- assert(be_is_IncSP(irn));
- a->dir = dir;
-}
-
-be_stack_dir_t be_get_IncSP_direction(const ir_node *irn)
-{
- be_stack_attr_t *a = get_irn_attr(irn);
- assert(be_is_IncSP(irn));
- return a->dir;
-}
-
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
{
ir_node *bl = get_nodes_block(irn);
}
}
+static int be_node_get_sp_bias(const void *self, const ir_node *irn)
+{
+ int result = 0;
+
+ if(be_is_IncSP(irn)) {
+ result = be_get_IncSP_offset(irn);
+ }
+
+ return result;
+}
+
/*
___ ____ _ _ _ _ _ _
|_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
be_node_get_frame_entity,
be_node_set_frame_entity,
be_node_set_frame_offset,
+ be_node_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
{
}
+static int phi_get_sp_bias(const void* self, const ir_node *irn)
+{
+ return 0;
+}
+
static const arch_irn_ops_if_t phi_irn_ops = {
phi_get_irn_reg_req,
phi_set_irn_reg,
phi_get_frame_entity,
phi_set_frame_entity,
phi_set_frame_offset,
+ phi_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
case beo_IncSP:
{
be_stack_attr_t *a = (be_stack_attr_t *) at;
-	if (a->offset == BE_STACK_FRAME_SIZE)
+	if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
fprintf(f, "offset: FRAME_SIZE\n");
+	else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
+	fprintf(f, "offset: -FRAME_SIZE\n");
else
-	fprintf(f, "offset: %u\n", a->offset);
+	/* offset is an int now: use %d (passing an int through %u is a
+	 * format/argument mismatch) */
+	fprintf(f, "offset: %d\n", a->offset);
-	fprintf(f, "direction: %s\n", a->dir == be_stack_dir_expand ? "expand" : "shrink");
}
break;
case beo_Call:
#include "firm_config.h"
+#include <limits.h>
+
#include "irmode.h"
#include "irnode.h"
#include "entity_t.h"
beo_Last
} be_opcode_t;
-/** Expresses the direction of the stack pointer increment of IncSP nodes. */
-typedef enum {
- be_stack_dir_expand = 0,
- be_stack_dir_shrink = 1
-} be_stack_dir_t;
-
/** Not used yet. */
typedef enum {
be_frame_flag_spill = 1,
* A "symbolic constant" for the size of the stack frame to use with IncSP nodes.
* It gets back-patched to the real size as soon it is known.
*/
-#define BE_STACK_FRAME_SIZE ((unsigned) -1)
+#define BE_STACK_FRAME_SIZE_EXPAND INT_MAX
+#define BE_STACK_FRAME_SIZE_SHRINK INT_MIN
/**
* Determines if irn is a be_node.
* @param irg The graph to insert the node to.
* @param bl The block to insert the node into.
* @param old_sp The node defining the former stack pointer.
- * @param amount The mount of bytes the stack pointer shall be increased/decreased.
+ * @param amount The mount of bytes the stack shall be expanded/shrinked (see set_IncSP_offset)
* @param dir The direction in which the stack pointer shall be modified:
* Along the stack's growing direction or against.
* @return A new stack pointer increment/decrement node.
* @note This node sets a register constraint to the @p sp register on its output.
*/
-ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, unsigned amount, be_stack_dir_t dir);
+ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset);
/** Returns the previous node that computes the stack pointer. */
ir_node *be_get_IncSP_pred(ir_node *incsp);
/** Returns the memory input of the IncSP. */
ir_node *be_get_IncSP_mem(ir_node *irn);
-/** Sets a new offset to a IncSP node. */
-void be_set_IncSP_offset(ir_node *irn, unsigned offset);
+/**
+ * Sets a new offset to a IncSP node.
+ * A positive offset means expanding the stack, a negative offset shrinking
+ * an offset is == BE_STACK_FRAME_SIZE will be replaced by the real size of the
+ * stackframe in the fix_stack_offsets phase.
+ */
+void be_set_IncSP_offset(ir_node *irn, int offset);
/** Gets the offset from a IncSP node. */
-unsigned be_get_IncSP_offset(const ir_node *irn);
-
-/** Sets a new direction to a IncSP node. */
-void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir);
-
-/** Gets the direction from a IncSP node. */
-be_stack_dir_t be_get_IncSP_direction(const ir_node *irn);
+int be_get_IncSP_offset(const ir_node *irn);
/** Gets the call entity or NULL if this is no static call. */
entity *be_Call_get_entity(const ir_node *call);
rel->reloader = before;
rel->next = info->reloaders;
info->reloaders = rel;
- be_liveness_add_missing(env->chordal_env->lv);
}
void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
// reloads are placed now, but we might reuse the spill environment for further spilling decisions
del_set(env->spills);
env->spills = new_set(cmp_spillinfo, 1024);
+
+ be_remove_dead_nodes_from_schedule(env->chordal_env->irg);
+ //be_liveness_add_missing(env->chordal_env->lv);
+ be_liveness_recompute(env->chordal_env->lv);
}
/* Insert spill/reload nodes into the graph and fix usages */
be_insert_spills_reloads(env.senv);
- be_remove_dead_nodes_from_schedule(chordal_env->irg);
- be_liveness_recompute(chordal_env->lv);
-
/* clean up */
if(spill_env == NULL)
be_delete_spill_env(env.senv);
del_set(env.block_attr_set);
/* fix the remaining places with too high register pressure with beladies algorithm */
-
- /* we have to remove dead nodes from schedule to not confuse liveness calculation */
- be_remove_dead_nodes_from_schedule(env.irg);
- be_liveness_recompute(chordal_env->lv);
-
be_spill_belady_spill_env(chordal_env, env.senv);
be_delete_spill_env(env.senv);
}
struct dump_env {
- FILE *f;
- arch_env_t *env;
+ FILE *f;
+ arch_env_t *env;
};
static void dump_allocated_block(ir_node *block, void *data)
void dump_allocated_irg(arch_env_t *arch_env, ir_graph *irg, char *suffix)
{
char buf[1024];
- struct dump_env env;
+ struct dump_env env;
- env.env = arch_env;
+ env.env = arch_env;
ir_snprintf(buf, sizeof(buf), "%F-alloc%s.vcg", irg, suffix);
int cfchange_found = 0;
// TODO ask arch about delay branches
int delay_branches = 0;
- pset *uses = pset_new_ptr_default();
/*
* Tests for the following things:
}
// 3. Check for uses
- if(pset_find_ptr(uses, node)) {
- ir_fprintf(stderr, "Verify Warning: Value %+F used before it was defined in block %+F (%s)\n",
- node, block, get_irg_dump_name(env->irg));
- env->problem_found = 1;
- }
if(!is_Phi(node)) {
+ int nodetime = sched_get_time_step(node);
for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
- pset_insert_ptr(uses, get_irn_n(node, i));
+ ir_node *arg = get_irn_n(node, i);
+ if(get_nodes_block(arg) != block
+ || !sched_is_scheduled(arg))
+ continue;
+
+ if(sched_get_time_step(arg) >= nodetime) {
+ ir_fprintf(stderr, "Verify Warning: Value %+F used by %+F before it was defined in block %+F (%s)\n",
+ arg, node, block, get_irg_dump_name(env->irg));
+ env->problem_found = 1;
+ }
}
}
+
+ // 4. check for dead nodes
+ if(get_irn_n_edges(node) == 0) {
+ ir_fprintf(stderr, "Verify warning: Node %+F is dead but scheduled in block %+F (%s)\n",
+ node, block, get_irg_dump_name(env->irg));
+ env->problem_found = 1;
+ }
}
- del_pset(uses);
/* check that all delay branches are filled (at least with NOPs) */
if (cfchange_found && delay_branches != 0) {
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
+
+ if(is_Proj(irn)) {
+ ir_node *pred = get_Proj_pred(irn);
+ if(is_ia32_Push(pred) && get_Proj_proj(irn) == 0) {
+ return arch_irn_flags_modify_sp;
+ }
+ if(is_ia32_Pop(pred) && get_Proj_proj(irn) == 1) {
+ return arch_irn_flags_modify_sp;
+ }
+ }
+
irn = my_skip_proj(irn);
if (is_ia32_irn(irn))
return get_ia32_flags(irn);
set_ia32_frame_ent(irn, ent);
}
-static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
+static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
char buf[64];
const ia32_irn_ops_t *ops = self;
if (get_ia32_frame_ent(irn)) {
ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
+ /* Pop nodes modify the stack pointer before reading the destination
+ * address, so fix this here
+ */
+ if(is_ia32_Pop(irn)) {
+ bias -= 4;
+ }
+
DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
+
snprintf(buf, sizeof(buf), "%d", bias);
if (get_ia32_op_type(irn) == ia32_Normal) {
}
}
+static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
+ if(is_Proj(irn)) {
+ int proj = get_Proj_proj(irn);
+ ir_node *pred = get_Proj_pred(irn);
+
+ if(is_ia32_Push(pred) && proj == 0)
+ return 4;
+ else if(is_ia32_Pop(pred) && proj == 1)
+ return -4;
+ }
+
+ return 0;
+}
+
typedef struct {
be_abi_call_flags_bits_t flags;
const arch_isa_t *isa;
if (env->flags.try_omit_fp) {
/* simply remove the stack frame here */
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE_SHRINK);
}
else {
const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
ia32_get_flags,
ia32_get_frame_entity,
ia32_set_frame_entity,
- ia32_set_stack_bias,
+ ia32_set_frame_offset,
+ ia32_get_sp_bias,
ia32_get_inverse,
ia32_get_op_estimated_cost,
ia32_possible_memory_operand,
return pop;
}
-static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, ir_node *schedpoint, const ir_node *oldsp) {
+static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint, const ir_node *oldsp) {
ir_mode *spmode = get_irn_mode(oldsp);
const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp);
ir_node *sp;
- sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, 0);
+ sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos);
arch_set_irn_register(env->cg->arch_env, sp, spreg);
sched_add_before(schedpoint, sp);
assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
push = create_push(env, node, sp, mem, ent, NULL);
- sp = create_spproj(env, push, node, sp);
+ sp = create_spproj(env, push, 0, node, sp);
if(entbits == 64) {
// add another push after the first one
push = create_push(env, node, sp, mem, ent, "4");
- sp = create_spproj(env, push, node, sp);
+ sp = create_spproj(env, push, 0, node, sp);
}
set_irn_n(node, i, new_Bad());
pop = create_pop(env, node, sp, ent, NULL);
if(entbits == 64) {
// add another pop after the first one
- sp = create_spproj(env, pop, node, sp);
+ sp = create_spproj(env, pop, 1, node, sp);
pop = create_pop(env, node, sp, ent, "4");
}
-	if(i != 0) {
-		sp = create_spproj(env, pop, node, sp);
-	}
+	/* the sp proj is needed unconditionally now; the old i != 0 guard is gone */
+	sp = create_spproj(env, pop, 1, node, sp);
pops[i] = pop;
}
*/
static void emit_be_IncSP(const ir_node *irn, ia32_emit_env_t *emit_env) {
FILE *F = emit_env->out;
- unsigned offs = be_get_IncSP_offset(irn);
- be_stack_dir_t dir = be_get_IncSP_direction(irn);
+ int offs = be_get_IncSP_offset(irn);
char cmd_buf[SNPRINTF_BUF_LEN], cmnt_buf[SNPRINTF_BUF_LEN];
if (offs) {
-		if (dir == be_stack_dir_expand)
+		if (offs > 0)
-			lc_esnprintf(ia32_get_arg_env(), cmd_buf, SNPRINTF_BUF_LEN, "sub %1S, %u", irn, offs);
+			/* offs is signed now: %d, not %u (mismatched conversion is UB) */
+			lc_esnprintf(ia32_get_arg_env(), cmd_buf, SNPRINTF_BUF_LEN, "sub %1S, %d", irn, offs);
	else
-		lc_esnprintf(ia32_get_arg_env(), cmd_buf, SNPRINTF_BUF_LEN, "add %1S, %u", irn, offs);
+		lc_esnprintf(ia32_get_arg_env(), cmd_buf, SNPRINTF_BUF_LEN, "add %1S, %d", irn, -offs);
	lc_esnprintf(ia32_get_arg_env(), cmnt_buf, SNPRINTF_BUF_LEN, "/* %+F (IncSP) */", irn);
}
else {
return;
/* do not create push if IncSp doesn't expand stack or expand size is different from register size */
- if (be_get_IncSP_direction(sp) != be_stack_dir_expand ||
- be_get_IncSP_offset(sp) != (unsigned) get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode))
+ if (be_get_IncSP_offset(sp) != get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode))
return;
/* do not create push, if there is a path (inside the block) from the push value to IncSP */
if (be_is_IncSP(prev) && real_uses == 1) {
/* first IncSP has only one IncSP user, kill the first one */
- unsigned prev_offs = be_get_IncSP_offset(prev);
- be_stack_dir_t prev_dir = be_get_IncSP_direction(prev);
- unsigned curr_offs = be_get_IncSP_offset(irn);
- be_stack_dir_t curr_dir = be_get_IncSP_direction(irn);
+ int prev_offs = be_get_IncSP_offset(prev);
+ int curr_offs = be_get_IncSP_offset(irn);
- int new_ofs = prev_offs * (prev_dir == be_stack_dir_expand ? -1 : +1) +
- curr_offs * (curr_dir == be_stack_dir_expand ? -1 : +1);
-
- if (new_ofs < 0) {
- new_ofs = -new_ofs;
- curr_dir = be_stack_dir_expand;
- }
- else
- curr_dir = be_stack_dir_shrink;
- be_set_IncSP_offset(prev, 0);
- be_set_IncSP_offset(irn, (unsigned)new_ofs);
- be_set_IncSP_direction(irn, curr_dir);
+ be_set_IncSP_offset(prev, prev_offs + curr_offs);
/* Omit the optimized IncSP */
be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));
},
"Push" => {
+ # We don't set class modify_stack here (but we will do this on proj 0)
"comment" => "push a gp register on the stack",
"reg_req" => { "in" => [ "esp", "gp", "none" ], "out" => [ "esp" ] },
"emit" => '
},
"Pop" => {
+ # We don't set class modify stack here (but we will do this on proj 1)
"comment" => "pop a gp register from the stack",
"reg_req" => { "in" => [ "esp", "none" ], "out" => [ "gp", "esp" ] },
"emit" => '
*
********************************************/
+#if 0
/**
* Decides in which block the transformed StackParam should be placed.
* If the StackParam has more than one user, the dominator block of
* the users will be returned. In case of only one user, this is either
* the user block or, in case of a Phi, the predecessor block of the Phi.
*/
- static ir_node *get_block_transformed_stack_param(ir_node *irn) {
- ir_node *dom_bl = NULL;
+static ir_node *get_block_transformed_stack_param(ir_node *irn) {
+ ir_node *dom_bl = NULL;
- if (get_irn_n_edges(irn) == 1) {
- ir_node *src = get_edge_src_irn(get_irn_out_edge_first(irn));
+ if (get_irn_n_edges(irn) == 1) {
+ ir_node *src = get_edge_src_irn(get_irn_out_edge_first(irn));
- if (! is_Phi(src)) {
- dom_bl = get_nodes_block(src);
- }
- else {
- /* Determine on which in position of the Phi the irn is */
- /* and get the corresponding cfg predecessor block. */
+ if (! is_Phi(src)) {
+ dom_bl = get_nodes_block(src);
+ }
+ else {
+ /* Determine on which in position of the Phi the irn is */
+ /* and get the corresponding cfg predecessor block. */
- int i = get_irn_pred_pos(src, irn);
- assert(i >= 0 && "kaputt");
- dom_bl = get_Block_cfgpred_block(get_nodes_block(src), i);
- }
- }
- else {
- dom_bl = node_users_smallest_common_dominator(irn, 1);
- }
+ int i = get_irn_pred_pos(src, irn);
+ assert(i >= 0 && "kaputt");
+ dom_bl = get_Block_cfgpred_block(get_nodes_block(src), i);
+ }
+ }
+ else {
+ dom_bl = node_users_smallest_common_dominator(irn, 1);
+ }
- assert(dom_bl && "dominator block not found");
+ assert(dom_bl && "dominator block not found");
- return dom_bl;
- }
+ return dom_bl;
+}
+#endif
static ir_node *gen_be_StackParam(ia32_transform_env_t *env) {
ir_node *new_op = NULL;
ir_mode *mode = env->mode;
/* choose the block where to place the load */
- env->block = get_block_transformed_stack_param(node);
+ //env->block = get_block_transformed_stack_param(node);
if (mode_is_float(mode)) {
FP_USED(env->cg);
attr->stack_entity_offset = offset;
}
+static int mips_get_sp_bias(const void *self, const ir_node *irn) {
+ return 0;
+}
+
/* fill register allocator interface */
static const arch_irn_ops_if_t mips_irn_ops_if = {
mips_get_frame_entity,
mips_set_frame_entity,
mips_set_frame_offset,
+ mips_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
int initial_frame_size = env->debug ? 24 : 4;
int fp_save_offset = env->debug ? 16 : 0;
- // restore sp
- //sp = be_new_IncSP(&mips_gp_regs[REG_SP], irg, block, sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
-
// copy fp to sp
sp = new_rd_mips_move(dbg, irg, block, fp, mode_Iu);
mips_set_irn_reg(NULL, sp, &mips_gp_regs[REG_SP]);
FILE *F = env->out;
int offset = be_get_IncSP_offset(node);
- if(offset == 0)
+ if(offset == 0) {
+ fprintf(F, "\t\t\t\t # omitted IncSP with 0\n");
return;
-
- if(be_get_IncSP_direction(node) != be_stack_dir_expand)
- offset = -offset;
+ }
fprintf(F, "\taddi $sp, $sp, %d\n", -offset);
}
set_ppc32_offset(irn, bias);
}
+static int ppc32_get_sp_bias(const void *self, const ir_node *irn) {
+ return 0;
+}
+
typedef struct
{
const be_abi_call_t *call;
ppc32_get_frame_entity,
ppc32_set_frame_entity,
ppc32_set_stack_bias,
+ ppc32_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
static void emit_be_IncSP(const ir_node *irn, ppc32_emit_env_t *emit_env) {
FILE *F = emit_env->out;
- unsigned offs = be_get_IncSP_offset(irn);
- be_stack_dir_t dir = be_get_IncSP_direction(irn);
+ int offs = be_get_IncSP_offset(irn);
- fprintf(F, "\t\t\t\t\t/* ignored IncSP with %c%i */\n", dir==be_stack_dir_expand ? '-' : ' ', offs);
+ fprintf(F, "\t\t\t\t\t/* ignored IncSP with %d */\n", -offs);
// if (offs) {
// assert(offs<=0x7fff);
-// lc_efprintf(ppc32_get_arg_env(), F, "\taddi %1S, %1S,%s%u\t\t\t/* %+F (IncSP) */\n", irn, irn,
-// (dir == be_stack_dir_expand) ? " -" : " ", offs, irn);
+// lc_efprintf(ppc32_get_arg_env(), F, "\taddi %1S, %1S, %d\t\t\t/* %+F (IncSP) */\n", irn, irn,
+// -offs, irn);
// }
// else {
// fprintf(F, "\t\t\t\t\t/* omitted IncSP with 0 */\n");