int pos;
const arch_register_t *reg;
+ entity *stack_ent;
} be_abi_call_arg_t;
struct _be_abi_call_t {
be_abi_call_flags_t flags;
- unsigned arg_gap;
+ type *between_type;
set *params;
};
+typedef struct _be_stack_frame_t {
+ type *arg_type;
+ type *between_type;
+ type *frame_type;
+
+ type *order[3]; /**< arg, between and frame types ordered. */
+
+ int initial_offset;
+ int stack_dir;
+} be_stack_frame_t;
+
+struct _be_stack_slot_t {
+ struct _be_stack_frame_t *frame;
+ entity *ent;
+};
+
struct _be_abi_irg_t {
struct obstack obst;
be_irg_t *birg;
unsigned omit_fp : 1;
unsigned dedicated_fp : 1;
unsigned left_to_right : 1;
+ unsigned save_old_fp : 1;
+
+ ir_node *store_bp_mem;
+ be_stack_frame_t *frame;
firm_dbg_module_t *dbg; /**< The debugging module. */
};
return get_or_set_call_arg(call, is_res, pos, 0);
}
-void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, unsigned arg_gap)
+void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, ir_type *between_type)
{
- call->flags = flags;
- call->arg_gap = arg_gap;
+ call->flags = flags;
+ call->between_type = between_type;
}
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
{
be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
+ arg->in_reg = 1;
arg->reg = reg;
}
void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
{
be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
+ arg->in_reg = 1;
arg->reg = reg;
}
free(call);
}
+/**
+ * Compute the byte offset of a stack frame entity relative to the
+ * frame pointer.
+ * @param frame The stack frame layout (arg, between and local types).
+ * @param ent   The entity whose offset is sought.
+ * @param bias  The current stack pointer bias to add in.
+ * @return The corrected offset of the entity.
+ */
+static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
+{
+	/* The frame component containing the entity is its owner type,
+	 * not the entity's own (value) type: order[] holds the three
+	 * class types the entities were created in. */
+	type *t = get_entity_owner(ent);
+	int ofs = get_entity_offset_bytes(ent);
+
+	int i, index;
+
+	/* Find the type the entity is contained in. */
+	for(index = 0; index < 3; ++index) {
+		if(frame->order[index] == t)
+			break;
+	}
+
+	/* Add the size of all the types below the one of the entity to the entity's offset */
+	for(i = 0; i < index; ++i)
+		ofs += get_type_size_bytes(frame->order[i]);
+
+	/* correct the offset by the initial position of the frame pointer */
+	ofs -= frame->initial_offset;
+
+	/* correct the offset with the current bias. */
+	ofs += bias;
+
+	return ofs;
+}
+
+/**
+ * Determine and record the initial offset of the frame pointer: the
+ * raw offset of the given entity, computed with the stored initial
+ * offset reset to 0 and no bias applied.
+ * @param frame The stack frame layout.
+ * @param ent   The entity the frame pointer points at.
+ * @return The initial offset (also stored in frame->initial_offset).
+ */
+static int stack_frame_compute_initial_offset(be_stack_frame_t *frame, entity *ent)
+{
+	frame->initial_offset = 0;
+	frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
+	return frame->initial_offset;
+}
+
+/**
+ * Initialize a stack frame layout description.
+ * order[] sorts the three component types by stack growth direction:
+ * the between type always sits in the middle; for an upwards growing
+ * stack (stack_dir > 0) the args come first and the locals last,
+ * otherwise the locals come first.
+ * @param frame     The frame structure to fill in.
+ * @param args      Type describing the stack argument area.
+ * @param between   Type sitting between args and locals (return address etc.).
+ * @param locals    The graph's frame type holding the local variables.
+ * @param stack_dir Stack growth direction (+1 upwards, -1 downwards).
+ * @return The initialized frame (same pointer as passed in).
+ */
+static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
+{
+	frame->arg_type = args;
+	frame->between_type = between;
+	frame->frame_type = locals;
+	frame->initial_offset = 0;
+	frame->stack_dir = stack_dir;
+	frame->order[1] = between;
+
+	if(stack_dir > 0) {
+		frame->order[0] = args;
+		frame->order[2] = locals;
+	}
+
+	else {
+		frame->order[0] = locals;
+		frame->order[2] = args;
+	}
+
+	return frame;
+}
+
+/**
+ * If the node is a Sel addressing the frame of its own graph, return
+ * the selected entity, else NULL.
+ * @param irn The node to inspect.
+ */
+static INLINE entity *get_sel_ent(ir_node *irn)
+{
+	if(get_irn_opcode(irn) == iro_Sel
+		&& get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
+
+		return get_Sel_entity(irn);
+	}
+
+	return NULL;
+}
+
+/**
+ * Walker: lower frame accesses to back-end nodes.
+ * - A Load through a frame Sel becomes a be FrameLoad.
+ * - A Store through a frame Sel becomes a be FrameStore.
+ * - A bare frame Sel becomes a be FrameAddr.
+ * The replacement nodes reference the frame entity directly; concrete
+ * offsets are computed later from the frame layout.
+ * @param irn  The currently visited node.
+ * @param data The be_abi_irg_t environment.
+ */
+static void lower_frame_sels_walker(ir_node *irn, void *data)
+{
+	const arch_register_class_t *cls;
+	be_abi_irg_t *env = data;
+	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+	ir_graph *irg = get_irn_irg(irn);
+	ir_node *frame = get_irg_frame(irg);
+	ir_node *nw = NULL;
+	opcode opc = get_irn_opcode(irn);
+
+	if(opc == iro_Load) {
+		ir_node *bl = get_nodes_block(irn);
+		ir_node *sel = get_Load_ptr(irn);
+		entity *ent = get_sel_ent(sel);
+		cls = arch_isa_get_reg_class_for_mode(isa, get_Load_mode(irn));
+		/* FrameLoad is mode_T like a Load, so existing pn_Load_* Projs stay valid */
+		if(ent != NULL)
+			nw = be_new_FrameLoad(isa->sp->reg_class, cls, irg, bl, get_Load_mem(irn), frame, ent);
+	}
+
+	else if(opc == iro_Store) {
+		ir_node *bl = get_nodes_block(irn);
+		ir_node *val = get_Store_value(irn);
+		ir_node *sel = get_Store_ptr(irn);
+		entity *ent = get_sel_ent(sel);
+		cls = arch_isa_get_reg_class_for_mode(isa, get_irn_mode(val));
+		if(ent != NULL)
+			nw = be_new_FrameStore(isa->sp->reg_class, cls, irg, bl, get_Store_mem(irn), frame, val, ent);
+	}
+
+	else {
+		entity *ent = get_sel_ent(irn);
+		if(ent != NULL) {
+			ir_node *bl = get_nodes_block(irn);
+			nw = be_new_FrameAddr(isa->sp->reg_class, irg, bl, frame, ent);
+		}
+	}
+
+	if(nw != NULL)
+		exchange(irn, nw);
+}
+
static INLINE int is_on_stack(be_abi_call_t *call, int pos)
{
be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
/* Let the isa fill out the abi description for that call node. */
arch_isa_get_call_abi(isa, mt, call);
- assert(get_method_variadicity(mt) == variadicity_non_variadic);
+ // assert(get_method_variadicity(mt) == variadicity_non_variadic);
/* Insert code to put the stack arguments on the stack. */
/* TODO: Vargargs */
- for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
+ assert(get_Call_n_params(irn) == n_params);
+ for(i = 0; i < n_params; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
- if(arg && !arg->in_reg) {
+ assert(arg);
+ if(!arg->in_reg) {
stack_size += get_type_size_bytes(get_method_param_type(mt, i));
obstack_int_grow(obst, i);
n_pos++;
}
curr_res_proj++;
+ /* make the back end call node and set its register requirements. */
+ for(i = 0; i < n_low_args; ++i)
+ obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
+
+ in = obstack_finish(obst);
+ low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
+ obstack_free(obst, in);
+ exchange(irn, low_call);
+
/* Make additional projs for the caller save registers
and the Keep node which keeps them alive. */
if(pset_count(caller_save) > 0) {
ir_node **in;
if(!res_proj)
- res_proj = new_r_Proj(irg, bl, irn, mode_T, pn_Call_T_result);
+ res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
for(reg = pset_first(caller_save); reg; reg = pset_next(caller_save))
obstack_ptr_grow(obst, new_r_Proj(irg, bl, res_proj, reg->reg_class->mode, curr_res_proj++));
/* Get the result ProjT */
if(!res_proj)
- res_proj = new_r_Proj(irg, bl, irn, mode_T, pn_Call_T_result);
+ res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
/* Make a Proj for the stack pointer. */
sp_proj = new_r_Proj(irg, bl, res_proj, sp->reg_class->mode, curr_res_proj++);
pset_insert_ptr(env->stack_ops, last_inc_sp);
}
- /* at last make the backend call node and set its register requirements. */
- for(i = 0; i < n_low_args; ++i)
- obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
-
- in = obstack_finish(obst);
- low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
- obstack_free(obst, in);
-
- exchange(irn, low_call);
-
be_abi_call_free(call);
obstack_free(obst, pos);
del_pset(results);
if(store_old_fp) {
ir_node *irn;
- irn = new_r_Store(irg, bl, get_irg_initial_mem(irg), stack, frame);
- irn = new_r_Proj(irg, bl, irn, mode_M, pn_Store_M);
- stack = be_new_IncSP(sp, irg, bl, stack, irn, get_mode_size_bytes(bp->reg_class->mode), be_stack_dir_along);
+ irn = new_r_Store(irg, bl, get_irg_initial_mem(irg), stack, frame);
+ env->store_bp_mem = new_r_Proj(irg, bl, irn, mode_M, pn_Store_M);
+ stack = be_new_IncSP(sp, irg, bl, stack, env->store_bp_mem,
+ get_mode_size_bytes(bp->reg_class->mode), be_stack_dir_along);
}
frame = be_new_Copy(bp->reg_class, irg, bl, stack);
else {
stack = be_new_Copy(sp->reg_class, irg, bl, frame);
+ be_set_constr_single_reg(stack, -1, sp);
+ be_node_set_flags(stack, -1, arch_irn_flags_ignore);
if(store_old_fp) {
ir_mode *mode = sp->reg_class->mode;
ir_node *irn;
stack = be_new_IncSP(sp, irg, bl, stack, no_mem, get_mode_size_bytes(mode), be_stack_dir_against);
- irn = new_r_Load(irg, bl, no_mem, stack, mode);
+ irn = new_r_Load(irg, bl, env->store_bp_mem, stack, mode);
irn = new_r_Proj(irg, bl, irn, mode, pn_Load_res);
frame = be_new_Copy(bp->reg_class, irg, bl, irn);
}
-
- if(env->dedicated_fp) {
- be_set_constr_single_reg(frame, -1, bp);
- }
-
}
pmap_foreach(env->regs, ent) {
}
}
+/**
+ * Build an anonymous class type describing the stack argument area of
+ * the current function: one entity per parameter passed on the stack,
+ * laid out in call order or reversed, depending on the stack direction
+ * and the left-to-right flag. The entities are recorded in the call
+ * arg descriptors (arg->stack_ent) for later use.
+ * @param env         The ABI environment of the current graph.
+ * @param call        The ABI call description filled in by the ISA.
+ * @param method_type The method type of the current function.
+ * @return The new argument type; its size is the summed size of all
+ *         stack parameters.
+ */
+static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
+{
+	/* walk the parameters forwards or backwards depending on layout */
+	int inc = env->birg->main_env->arch_env->isa->stack_dir * (env->left_to_right ? 1 : -1);
+	int n = get_method_n_params(method_type);
+	int curr = inc > 0 ? 0 : n - 1;
+	int ofs = 0;
+
+	char buf[128];
+	ir_type *res;
+	int i;
+
+	snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
+	res = new_type_class(new_id_from_str(buf));
+
+	for(i = 0; i < n; ++i, curr += inc) {
+		type *param_type = get_method_param_type(method_type, curr);
+		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
+
+		if(!arg->in_reg) {
+			snprintf(buf, sizeof(buf), "param_%d", i);
+			arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
+			/* NOTE(review): new_entity() already attaches the entity to res;
+			 * verify add_class_member() does not register it a second time. */
+			add_class_member(res, arg->stack_ent);
+			set_entity_offset_bytes(arg->stack_ent, ofs);
+			ofs += get_type_size_bytes(param_type);
+		}
+	}
+
+	set_type_size_bytes(res, ofs);
+	return res;
+}
+
+/**
+ * Lazily build (and cache) a primitive type for storing the base
+ * pointer, moded and sized after the base pointer's register class.
+ * @param bp The base pointer register.
+ */
+static type *get_bp_type(const arch_register_t *bp)
+{
+	static type *bp_type = NULL;
+	if(!bp_type) {
+		bp_type = new_type_primitive(new_id_from_str("bp_type"), bp->reg_class->mode);
+		set_type_size_bytes(bp_type, get_mode_size_bytes(bp->reg_class->mode));
+	}
+	return bp_type;
+}
+
/**
* Modify the irg itself and the frame type.
*/
ir_node *no_mem = get_irg_no_mem(irg);
type *method_type = get_entity_type(get_irg_entity(irg));
int n_params = get_method_n_params(method_type);
-
int max_arg = 0;
int reg_params_nr = 0;
int arg_offset = 0;
ir_node *reg_params, *reg_params_bl;
ir_node **args, **args_repl;
const ir_edge_t *edge;
+ ir_type *arg_type;
pmap_entry *ent;
- env->regs = pmap_create();
-
DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
+ /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
+ irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
+
+ env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
+ env->regs = pmap_create();
+
/* Find the maximum proj number of the argument tuple proj */
foreach_out_edge(arg_tuple, edge) {
ir_node *irn = get_edge_src_irn(edge);
/* Get the ABI constraints from the ISA */
arch_isa_get_call_abi(isa, method_type, call);
+ arg_type = compute_arg_type(env, call, method_type);
+ stack_frame_init(env->frame, arg_type, call->between_type, get_irg_frame_type(irg), isa->stack_dir);
+
/* Count the register params and add them to the number of Projs for the RegParams node */
for(i = 0; i < n_params; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
/* Insert the code to set up the stack frame */
frame_pointer = setup_frame(env);
-#if 0
- proj_sp = pmap_get(regs, (void *) sp);
- proj_bp = pmap_get(regs, (void *) bp);
- assert(proj_sp != NULL && "There must be a Proj for the stack pointer");
- assert(proj_sp != NULL && "There must be a Proj for the base pointer");
-
- /* Set the Proj for the stack pointer to ignore. */
- be_node_set_flags(reg_params, -(get_Proj_proj(proj_sp) + 1), arch_irn_flags_ignore);
-
- /*
- * If a frame pointer is needed and the frame pointer is in a dedicated register,
- * also exclude that from register allocation by setting the corresponding
- * Proj to ignore.
- */
- if(!env->omit_fp && env->dedicated_fp)
- be_node_set_flags(reg_params, -(get_Proj_proj(proj_bp) + 1), arch_irn_flags_ignore);
-
-
- if(env->omit_fp) {
- /* This is the stack pointer add/sub which allocates the frame. remind it for later fix up. */
- env->init_sp = be_new_IncSP(sp, irg, reg_params_bl, proj_sp, no_mem, 0, be_stack_dir_along);
- frame_pointer = env->init_sp;
- }
-
- else {
- env->init_sp = proj_sp;
- frame_pointer = be_new_Copy(sp->reg_class, irg, reg_params_bl, proj_sp);
- }
-
- /* Set the new frame pointer. */
- exchange(get_irg_frame(irg), frame_pointer);
- set_irg_frame(irg, frame_pointer);
-#endif
-
- /* compute the start offset for the stack parameters. */
- {
- int arg_offset = 0;
- int arg_size = 0;
- int inc_dir = isa->stack_dir * (env->left_to_right ? 1 : -1);
-
- for(i = 0; i < n_params; ++i) {
- be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
- if(!arg->in_reg)
- arg_size += get_type_size_bytes(get_method_param_type(method_type, i));
- }
-
- arg_offset = -isa->stack_dir * call->arg_gap + env->left_to_right * arg_size;
-
- /* Now, introduce stack param nodes for all parameters passed on the stack */
- for(i = 0; i < max_arg; ++i) {
- ir_node *arg_proj = args[i];
- if(arg_proj != NULL) {
- be_abi_call_arg_t *arg;
- ir_type *param_type;
- int nr = get_Proj_proj(arg_proj);
+ /* Now, introduce stack param nodes for all parameters passed on the stack */
+ for(i = 0; i < max_arg; ++i) {
+ ir_node *arg_proj = args[i];
+ if(arg_proj != NULL) {
+ be_abi_call_arg_t *arg;
+ ir_type *param_type;
+ int nr = get_Proj_proj(arg_proj);
+
+ nr = MIN(nr, n_params);
+ arg = get_call_arg(call, 0, nr);
+ param_type = get_method_param_type(method_type, nr);
+
+ if(arg->in_reg) {
+ args_repl[i] = new_r_Proj(irg, reg_params_bl, reg_params, get_irn_mode(arg_proj), reg_params_nr);
+ be_set_constr_single_reg(reg_params, -(reg_params_nr + 1), arg->reg);
+ reg_params_nr++;
+ }
- nr = MIN(nr, n_params);
- arg = get_call_arg(call, 0, nr);
- param_type = get_method_param_type(method_type, nr);
+ /* when the (stack) parameter is primitive, we insert a StackParam
+ node representing the load of that parameter */
+ else {
- if(arg->in_reg) {
- args_repl[i] = new_r_Proj(irg, reg_params_bl, reg_params, get_irn_mode(arg_proj), reg_params_nr);
- be_set_constr_single_reg(reg_params, -(reg_params_nr + 1), arg->reg);
- reg_params_nr++;
+ /* For atomic parameters which are actually used, we create a StackParam node. */
+ if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
+ ir_mode *mode = get_type_mode(param_type);
+ const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
+ args_repl[i] = be_new_StackParam(cls, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
}
- /* when the (stack) parameter is primitive, we insert a StackParam
- node representing the load of that parameter */
+ /* The stack parameter is not primitive (it is a struct or array),
+ we thus will create a node representing the parameter's address
+ on the stack. */
else {
- int size = get_type_size_bytes(param_type) * isa->stack_dir;
-
- if(inc_dir < 0)
- arg_offset -= size;
-
- /* For atomic parameters which are actually used, we create a StackParam node. */
- if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
- ir_mode *mode = get_type_mode(param_type);
- const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
- args_repl[i] = be_new_StackParam(cls, irg, reg_params_bl, mode, frame_pointer, arg_offset);
- }
-
- /* The stack parameter is not primitive (it is a struct or array),
- we thus will create a node representing the parameter's address
- on the stack. */
- else {
- assert(0 && "struct parameters are not supported");
- }
-
- if(inc_dir > 0)
- arg_offset += size;
+ args_repl[i] = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
}
}
}
env->omit_fp = (env->call->flags & BE_ABI_TRY_OMIT_FRAME_POINTER) != 0;
env->dedicated_fp = (env->call->flags & BE_ABI_FRAME_POINTER_DEDICATED) != 0;
env->left_to_right = (env->call->flags & BE_ABI_LEFT_TO_RIGHT) != 0;
+ env->save_old_fp = (env->call->flags & BE_ABI_SAVE_OLD_FRAME_POINTER) != 0;
env->birg = birg;
env->stack_ops = pset_new_ptr(32);
env->dbg = firm_dbg_register("firm.be.abi");
bias += dir * ofs;
}
- else
+ else {
arch_set_stack_bias(aenv, irn, bias);
+ }
}
return bias;
#ifndef _BEABI_H
#define _BEABI_H
+#include "type.h"
+
#include "be.h"
#include "bearch.h"
#include "beabi_t.h"
BE_ABI_LEFT_TO_RIGHT = 1, /**< Arguments are from left to right. */
BE_ABI_USE_PUSH = 2, /**< Use sequential stores for arguments. */
BE_ABI_TRY_OMIT_FRAME_POINTER = 4, /**< Try to omit the frame pointer. */
- BE_ABI_FRAME_POINTER_DEDICATED = 8 /**< If the function wants a frame pointer,
+ BE_ABI_FRAME_POINTER_DEDICATED = 8, /**< If the function wants a frame pointer,
use the one of the architecture, else
an arbitrary register is used. */
+ BE_ABI_SAVE_OLD_FRAME_POINTER = 16 /**< Always save the old frame pointer on the stack. */
} be_abi_call_flags_t;
-void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, unsigned arg_gap);
+void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, ir_type *add_frame);
void be_abi_call_param_stack(be_abi_call_t *call, int pos);
void be_abi_call_param_reg(be_abi_call_t *call, int pos, const arch_register_t *reg);
void be_abi_call_res_reg(be_abi_call_t *call, int pos, const arch_register_t *reg);
ops->impl->set_stack_bias(ops, irn, bias);
}
+/**
+ * Query the stack frame entity a node references, by delegating to the
+ * node's back-end irn ops.
+ * @return The frame entity or NULL if the node references none.
+ */
+entity *arch_get_frame_entity(const arch_env_t *env, ir_node *irn)
+{
+	const arch_irn_ops_t *ops = get_irn_ops(env, irn);
+	return ops->impl->get_frame_entity(ops, irn);
+}
+
int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn, int pos, bitset_t *bs)
{
*/
arch_irn_flags_t (*get_flags)(const void *self, const ir_node *irn);
+ /**
+ * Get the entity on the stack frame this node depends on.
+ * @param self The this pointer.
+ * @param irn The node in question.
+ * @return The entity on the stack frame or NULL, if the node does not has a stack frame entity.
+ */
+ entity *(*get_frame_entity)(const void *self, const ir_node *irn);
+
/**
* Set a bias for the stack pointer.
* If the node in question uses the stack pointer for indexing, it must
ra->allocate(&birg);
dump(DUMP_RA, irg, "-ra", dump_ir_block_graph_sched);
- be_abi_fix_stack_bias(birg.abi);
+ /* This is not ready yet: */
+ /* be_abi_fix_stack_bias(birg.abi); */
arch_code_generator_done(birg.cg);
dump(DUMP_FINAL, irg, "-end", dump_ir_block_graph_sched);
be_reg_data_t *reg_data;
} be_node_attr_t;
-typedef struct {
- be_node_attr_t node_attr;
- ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */
- entity *ent; /**< The entity in the stack frame the spill writes to. */
-} be_spill_attr_t;
-
typedef struct {
be_node_attr_t node_attr;
int offset; /**< The offset by which the stack shall be increased/decreased. */
be_stack_dir_t dir; /**< The direction in which the stack shall be modified (along or in the other direction). */
} be_stack_attr_t;
+typedef struct {
+ be_node_attr_t node_attr;
+ entity *ent;
+ int offset;
+} be_frame_attr_t;
+
+typedef struct {
+ be_frame_attr_t frame_attr;
+ ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */
+} be_spill_attr_t;
+
static ir_op *op_Spill;
static ir_op *op_Reload;
static ir_op *op_Perm;
static ir_op *op_AddSP;
static ir_op *op_RegParams;
static ir_op *op_StackParam;
-static ir_op *op_NoReg;
+static ir_op *op_FrameAddr;
+static ir_op *op_FrameLoad;
+static ir_op *op_FrameStore;
static int beo_base = -1;
op_Perm = new_ir_op(beo_base + beo_Perm, "Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_Copy = new_ir_op(beo_base + beo_Copy, "Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_Keep = new_ir_op(beo_base + beo_Keep, "Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_NoReg = new_ir_op(beo_base + beo_NoReg, "NoReg", op_pin_state_floats, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_Call = new_ir_op(beo_base + beo_Call, "Call", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_Return = new_ir_op(beo_base + beo_Return, "Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_AddSP = new_ir_op(beo_base + beo_AddSP, "AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
op_IncSP = new_ir_op(beo_base + beo_IncSP, "IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
op_RegParams = new_ir_op(beo_base + beo_RegParams, "RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_StackParam = new_ir_op(beo_base + beo_StackParam, "StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
+ op_StackParam = new_ir_op(beo_base + beo_StackParam, "StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "FrameAddr", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_FrameStore = new_ir_op(beo_base + beo_FrameStore, "FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
set_op_tag(op_Spill, &be_node_tag);
set_op_tag(op_Reload, &be_node_tag);
set_op_tag(op_Perm, &be_node_tag);
set_op_tag(op_Copy, &be_node_tag);
set_op_tag(op_Keep, &be_node_tag);
- set_op_tag(op_NoReg, &be_node_tag);
set_op_tag(op_Call, &be_node_tag);
set_op_tag(op_Return, &be_node_tag);
set_op_tag(op_AddSP, &be_node_tag);
set_op_tag(op_IncSP, &be_node_tag);
set_op_tag(op_RegParams, &be_node_tag);
set_op_tag(op_StackParam, &be_node_tag);
+ set_op_tag(op_FrameLoad, &be_node_tag);
+ set_op_tag(op_FrameStore, &be_node_tag);
+ set_op_tag(op_FrameAddr, &be_node_tag);
}
static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_graph *irg, int max_reg_data)
in[0] = to_spill;
res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
a = init_node_attr(res, cls, irg, 0);
- a->ent = NULL;
+ a->frame_attr.ent = NULL;
+ a->frame_attr.offset = 0;
a->spill_ctx = ctx;
return res;
}
return irn;
}
-ir_node *be_new_NoReg(const arch_register_t *reg, ir_graph *irg, ir_node *bl)
+/**
+ * Make a StackParam node: represents the value of a parameter passed
+ * on the stack, addressed through the frame pointer and a frame entity.
+ * @param cls           Register class of the parameter's value.
+ * @param mode          Mode of the parameter.
+ * @param frame_pointer The frame pointer node.
+ * @param ent           Entity of the parameter in the argument frame type.
+ */
+ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, entity *ent)
{
-	be_node_attr_t *a;
+	be_frame_attr_t *a;
	ir_node *irn;
	ir_node *in[1];
-	irn = new_ir_node(NULL, irg, bl, op_NoReg, reg->reg_class->mode, 0, in);
-	a = init_node_attr(irn, reg->reg_class, irg, 1);
-	be_node_set_flags(irn, -1, arch_irn_flags_ignore);
-	be_set_constr_single_reg(irn, -1, reg);
-	be_node_set_irn_reg(NULL, irn, reg);
+	in[0] = frame_pointer;
+	irn = new_ir_node(NULL, irg, bl, op_StackParam, mode, 1, in);
+	a = init_node_attr(irn, cls, irg, 1);
+	a->ent = ent;
+	/* initialise the frame offset, like the other frame-entity nodes do */
+	a->offset = 0;
	return irn;
}
-ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, unsigned offset)
+ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
{
- be_stack_attr_t *a;
ir_node *irn;
ir_node *in[1];
- in[0] = frame_pointer;
- irn = new_ir_node(NULL, irg, bl, op_StackParam, mode, 1, in);
- a = init_node_attr(irn, cls, irg, 1);
- a->offset = offset;
+ irn = new_ir_node(NULL, irg, bl, op_RegParams, mode_T, 0, in);
+ init_node_attr(irn, NULL, irg, n_outs);
return irn;
}
-ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
+/**
+ * Create a FrameLoad node: a load of a value from a stack frame entity,
+ * addressed relative to the frame pointer. The node is mode_T like a
+ * Load; use the pn_Load_* projection numbers on it.
+ * @param cls_frame Register class of the frame pointer input.
+ * @param cls_data  Register class of the loaded value.
+ * @param mem       The memory operand.
+ * @param frame     The frame pointer node.
+ * @param ent       The frame entity to load from.
+ */
+ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
+	ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, entity *ent)
+{
+	be_frame_attr_t *a;
+	ir_node *irn;
+	ir_node *in[2];
+
+	in[0] = mem;
+	in[1] = frame;
+	irn = new_ir_node(NULL, irg, bl, op_FrameLoad, mode_T, 2, in);
+	a = init_node_attr(irn, NULL, irg, 3);
+	a->ent = ent;
+	a->offset = 0;
+	/* frame input gets the frame class, the result proj the data class */
+	be_node_set_reg_class(irn, 1, cls_frame);
+	be_node_set_reg_class(irn, -(pn_Load_res + 1), cls_data);
+	return irn;
+}
+
+/**
+ * Create a FrameStore node: a store of a value to a stack frame entity,
+ * addressed relative to the frame pointer. The node is mode_T like a
+ * Store.
+ * @param cls_frame Register class of the frame pointer input.
+ * @param cls_data  Register class of the stored value.
+ * @param mem       The memory operand.
+ * @param frame     The frame pointer node.
+ * @param data      The value to store.
+ * @param ent       The frame entity to store to.
+ */
+ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
+	ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, entity *ent)
+{
+	be_frame_attr_t *a;
+	ir_node *irn;
+	ir_node *in[3];
+
+	in[0] = mem;
+	in[1] = frame;
+	in[2] = data;
+	irn = new_ir_node(NULL, irg, bl, op_FrameStore, mode_T, 3, in);
+	a = init_node_attr(irn, NULL, irg, 3);
+	a->ent = ent;
+	a->offset = 0;
+	be_node_set_reg_class(irn, 1, cls_frame);
+	be_node_set_reg_class(irn, 2, cls_data);
+	return irn;
+}
+
+/**
+ * Create a FrameAddr node: computes the address of a stack frame
+ * entity relative to the frame pointer. Input and result both use the
+ * frame pointer's register class.
+ * @param cls_frame Register class of the frame pointer.
+ * @param frame     The frame pointer node.
+ * @param ent       The frame entity whose address is computed.
+ */
+ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, entity *ent)
{
+	be_frame_attr_t *a;
	ir_node *irn;
	ir_node *in[1];
-	irn = new_ir_node(NULL, irg, bl, op_RegParams, mode_T, 0, in);
-	init_node_attr(irn, NULL, irg, n_outs);
+	in[0] = frame;
+	irn = new_ir_node(NULL, irg, bl, op_FrameAddr, get_irn_mode(frame), 1, in);
+	a = init_node_attr(irn, NULL, irg, 1);
+	a->ent = ent;
+	a->offset = 0;
+	be_node_set_reg_class(irn, 0, cls_frame);
+	be_node_set_reg_class(irn, -1, cls_frame);
	return irn;
}
int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
-int be_is_NoReg (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_NoReg ; }
+int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
+int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
+int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
+
+/**
+ * Check whether a back-end node carries a be_frame_attr_t, i.e. may
+ * reference an entity on the stack frame.
+ * @return 1 for StackParam, Spill, FrameStore, FrameLoad and
+ *         FrameAddr nodes, 0 otherwise.
+ */
+int be_has_frame_entity(const ir_node *irn)
+{
+	switch(be_get_irn_opcode(irn)) {
+	case beo_StackParam:
+	case beo_Spill:
+	case beo_FrameStore:
+	case beo_FrameLoad:
+	case beo_FrameAddr:
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Get the frame entity a back-end node references. Reloads are handled
+ * specially: they report the entity of their corresponding Spill.
+ * @return The entity or NULL if the node references none.
+ */
+entity *be_get_frame_entity(ir_node *irn)
+{
+	if(be_has_frame_entity(irn)) {
+		be_frame_attr_t *a = get_irn_attr(irn);
+		return a->ent;
+	}
+
+	else if(be_get_irn_opcode(irn) == beo_Reload)
+		return be_get_spill_entity(irn);
+
+	return NULL;
+}
static void be_limited(void *data, bitset_t *bs)
{
{
be_spill_attr_t *a = get_irn_attr(irn);
assert(be_is_Spill(irn));
- a->ent = ent;
+ a->frame_attr.ent = ent;
}
static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
case beo_Spill:
{
be_spill_attr_t *a = get_irn_attr(irn);
- return a->ent;
+ return a->frame_attr.ent;
}
default:
assert(0 && "Must give spill/reload node");
return a->reg_data[out_pos].req.flags;
}
+/** Delegate the arch get_frame_entity op to the generic be node query. */
+static entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
+{
+	return be_get_frame_entity(irn);
+}
+
+/* be nodes address the frame via entities, so there is no bias to fix
+ * up here. Declared void (not void *): an empty non-void function has
+ * no return value, and the sibling firm_set_stack_bias op is void. */
+static void be_node_set_stack_bias(const void *self, ir_node *irn, int bias)
+{
+}
+
+
static const arch_irn_ops_if_t be_node_irn_ops_if = {
be_node_get_irn_reg_req,
be_node_set_irn_reg,
be_node_get_irn_reg,
be_node_classify,
be_node_get_flags,
+ be_node_get_frame_entity,
+ be_node_set_stack_bias
};
static const arch_irn_ops_t be_node_irn_ops = {
case dump_node_info_txt:
dump_node_reqs(f, irn);
+ if(be_has_frame_entity(irn)) {
+ be_frame_attr_t *a = (be_frame_attr_t *) at;
+ if (a->ent) {
+ unsigned ofs = get_entity_offset_bytes(a->ent);
+ ir_fprintf(f, "frame entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
+ }
+ }
+
switch(be_get_irn_opcode(irn)) {
case beo_Spill:
{
be_spill_attr_t *a = (be_spill_attr_t *) at;
-
ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
- if (a->ent) {
- unsigned ofs = get_entity_offset_bytes(a->ent);
- ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
- }
- else {
- ir_fprintf(f, "spill entity: n/a\n");
- }
}
break;
beo_IncSP,
beo_RegParams,
beo_StackParam,
+ beo_FrameLoad,
+ beo_FrameStore,
+ beo_FrameAddr,
beo_Last
} be_opcode_t;
be_stack_dir_against = 1
} be_stack_dir_t;
+typedef enum {
+ be_frame_flag_spill = 1,
+ be_frame_flag_local = 2,
+ be_frame_flag_arg = 4
+} be_frame_flag_t;
+
#define BE_STACK_FRAME_SIZE ((unsigned) -1)
void be_node_init(void);
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
+ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
+ ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, entity *ent);
+ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
+ ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, entity *ent);
+ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, entity *ent);
+
ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *operand);
/**
ir_node *be_new_Call(ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[]);
ir_node *be_new_Return(ir_graph *irg, ir_node *bl, int n, ir_node *in[]);
-ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, unsigned offset);
+ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, entity *ent);
ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_out);
ir_node *be_new_NoReg(const arch_register_t *reg, ir_graph *irg, ir_node *bl);
int be_is_NoReg(const ir_node *irn);
+/**
+ * Get the entity on the stack frame the given node uses.
+ * @param irn The node.
+ * @return The entity on the stack frame used by the node or NULL,
+ * if the node does not access the stack frame or is no back-end node.
+ *
+ */
+entity *be_get_frame_entity(ir_node *irn);
+
void be_set_Spill_entity(ir_node *irn, entity *ent);
entity *be_get_spill_entity(ir_node *irn);
static void firm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
{
+ static ir_type *between_type = NULL;
const arch_register_class_t *cls = ®_classes[CLS_DATAB];
int i, n;
+ if(!between_type) {
+ between_type = new_type_class(new_id_from_str("firm_be_between"));
+ set_type_size_bytes(between_type, 0);
+ }
+
+
for(i = 0, n = get_method_n_params(method_type); i < n; ++i) {
ir_type *t = get_method_param_type(method_type, i);
if(is_Primitive_type(t))
be_abi_call_res_reg(abi, i, &cls->regs[i]);
}
- be_abi_call_set_flags(abi, BE_ABI_NONE, 0);
+ be_abi_call_set_flags(abi, BE_ABI_NONE, between_type);
}
return res;
}
-static void firm_set_stack_bias(const void *self, ir_node *irn, int bias) {
+/* The dummy firm "ISA" does no stack-biased addressing; nothing to do. */
+static void firm_set_stack_bias(const void *self, ir_node *irn, int bias)
+{
+}
+
+/* Plain firm nodes never reference stack frame entities. */
+static entity *firm_get_frame_entity(const void *self, const ir_node *irn)
+{
+	return NULL;
+}
static const arch_irn_ops_if_t firm_irn_ops_if = {
firm_get_irn_reg,
firm_classify,
firm_get_flags,
+ firm_get_frame_entity,
firm_set_stack_bias
};
}
}
+/**
+ * ia32 implementation of the get_frame_entity irn op.
+ * Not implemented yet: no ia32 node reports a frame entity.
+ */
+static entity *ia32_get_frame_entity(const void *self, const ir_node *irn)
+{
+	/* TODO: Implement */
+	return NULL;
+}
+
static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
if (get_ia32_use_frame(irn)) {
/* TODO: correct offset */
ia32_get_irn_reg,
ia32_classify,
ia32_get_flags,
+ ia32_get_frame_entity,
ia32_set_stack_bias
};
return &ia32_reg_classes[CLASS_ia32_gp];
}
+/**
+ * Produces the type which sits between the stack args and the locals on the stack.
+ * It will contain the return address and space to store the old base pointer.
+ * The type is built once and cached.
+ * @return The Firm type modelling the ABI between type.
+ */
+static ir_type *get_between_type(void)
+{
+	static ir_type *between_type = NULL;
+	static entity *old_bp_ent = NULL;
+
+	if(!between_type) {
+		entity *ret_addr_ent;
+		ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P);
+		ir_type *old_bp_type = new_type_primitive(new_id_from_str("bp"), mode_P);
+
+		between_type = new_type_class(new_id_from_str("ia32_between_type"));
+		old_bp_ent = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
+		/* give the return address entity its own name (was duplicated "old_bp") */
+		ret_addr_ent = new_entity(between_type, new_id_from_str("return_addr"), ret_addr_type);
+
+		/* layout: old base pointer first, return address above it */
+		set_entity_offset_bytes(old_bp_ent, 0);
+		set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
+		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
+	}
+
+	return between_type;
+}
+
/**
* Get the ABI restrictions for procedure calls.
* @param self The this pointer.
* @param abi The abi object to be modified
*/
void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
+ ir_type *between_type;
ir_type *tp;
ir_mode *mode;
unsigned cc = get_method_calling_convention(method_type);
ir_mode **modes;
const arch_register_t *reg;
+ /* get the between type and the frame pointer save entity */
+ between_type = get_between_type();
+
/* set stack parameter passing style */
- be_abi_call_set_flags(abi, BE_ABI_FRAME_POINTER_DEDICATED, 4);
+ be_abi_call_set_flags(abi, BE_ABI_NONE, between_type);
/* collect the mode for each type */
modes = alloca(n * sizeof(modes[0]));
tp = get_method_res_type(method_type, 0);
mode = get_type_mode(tp);
- if (mode_is_float(mode)) {
- be_abi_call_res_reg(abi, 1, &ia32_fp_regs[REG_XMM0]);
- }
- else {
- be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EAX]);
- }
+ be_abi_call_res_reg(abi, 0, &ia32_fp_regs[mode_is_float(mode) ? REG_XMM0 : REG_EAX]);
}
}
return 0;
}
else if (is_Proj(pred)) {
- first = get_Proj_pred(pred);
-
- if (is_ia32_Call(first))
- return 0;
-
- assert(0 && "unsupported proj-pos translation Proj(Proj)");
- return -1;
+ return nr;
}
else if (get_irn_opcode(pred) == iro_Start) {
return nr;
+#include <stdio.h>
+
void dequant_h263_inter_c( short * data,
const short * coeff,
const unsigned int quant)