static arch_irn_class_t amd64_classify(const ir_node *irn)
{
(void) irn;
- return 0;
+ return arch_irn_class_none;
}
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
if (is_amd64_FrameAddr(node)) {
- const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
} else if (is_amd64_Store(node)) {
- const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
} else if (is_amd64_Load(node)) {
- const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
}
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
if (is_amd64_FrameAddr(irn)) {
- amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
attr->fp_offset += offset;
} else if (is_amd64_Store(irn)) {
- amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
attr->fp_offset += offset;
} else if (is_amd64_Load(irn)) {
- amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
attr->fp_offset += offset;
}
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irg_frame(irg);
- ir_node *mem = new_NoMem();
+ ir_node *mem = new_r_NoMem(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
//ir_mode *mode = get_irn_mode(val);
ir_entity *entity = be_get_frame_entity(node);
}
}
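+
+/**
+ * be_assign_entities callback: attaches the chosen frame entity to a
+ * node. Only Reload nodes are collected below, hence the assert.
+ */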
+static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
+{
+ assert(be_is_Reload(node));
+ be_node_set_frame_entity(node, entity);
+}
+
+/**
+ * Collects nodes that need frame entities assigned.
+ */
+static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
+{
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ be_fec_env_t *env = (be_fec_env_t*)data;
+ const ir_mode *mode = get_irn_mode(node);
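+ /* a spill slot needs the natural alignment of the spilled mode */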
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ }
+}
+
static void amd64_after_ra(ir_graph *irg)
{
- be_coalesce_spillslots(irg);
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
+
+ /* create and coalesce frame entities: spill values whose live ranges
+ * do not overlap can share a single stack slot */
+ irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env, amd64_set_frame_entity);
+ be_free_frame_entity_coalescer(fec_env);
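+
+ /* with entities assigned, the walker below can turn be_Spill/be_Reload
+ * into concrete amd64 memory operations */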
irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
}
return res;
}
-const arch_isa_if_t amd64_isa_if;
+extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
{
&amd64_isa_if, /* isa interface implementation */
- &amd64_gp_regs[REG_RSP], /* stack pointer register */
- &amd64_gp_regs[REG_RBP], /* base pointer register */
+ N_AMD64_REGISTERS, /* number of registers */
+ amd64_registers, /* register array */
+ N_AMD64_CLASSES, /* number of register classes */
+ amd64_reg_classes, /* register class array */
+ &amd64_registers[REG_RSP], /* stack pointer register */
+ &amd64_registers[REG_RBP], /* base pointer register */
&amd64_reg_classes[CLASS_amd64_gp], /* link pointer register class */
-1, /* stack direction */
3, /* power of two stack alignment for calls, 2^3 == 8 */
*/
static arch_env_t *amd64_init(FILE *outfile)
{
- static int run_once = 0;
- amd64_isa_t *isa;
-
- if(run_once)
- return NULL;
- run_once = 1;
-
- isa = XMALLOC(amd64_isa_t);
+ amd64_isa_t *isa = XMALLOC(amd64_isa_t);
memcpy(isa, &amd64_isa_template, sizeof(*isa));
be_emit_init(outfile);
*/
static void amd64_done(void *self)
{
- amd64_isa_t *isa = self;
+ amd64_isa_t *isa = (amd64_isa_t*)self;
/* emit now all global declarations */
be_gas_emit_decls(isa->base.main_env);
}
-static unsigned amd64_get_n_reg_class(void)
-{
- return N_CLASSES;
-}
-
-static const arch_register_class_t *amd64_get_reg_class(unsigned i)
-{
- assert(i < N_CLASSES);
- return &amd64_reg_classes[i];
-}
-
-
-
/**
* Get the register class which shall be used to store a value of a given mode.
* @param self The this pointer.
static const arch_register_t *amd64_abi_prologue(void *self, ir_node **mem,
pmap *reg_map, int *stack_bias)
{
- amd64_abi_env_t *env = self;
+ amd64_abi_env_t *env = (amd64_abi_env_t*)self;
const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
(void) mem;
(void) stack_bias;
static void amd64_abi_epilogue(void *self, ir_node *bl, ir_node **mem,
pmap *reg_map)
{
- amd64_abi_env_t *env = self;
+ amd64_abi_env_t *env = (amd64_abi_env_t*)self;
const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
ir_node *curr_sp = be_abi_reg_map_get(reg_map, aenv->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, aenv->bp);
};
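+/* integer argument registers in System V AMD64 ABI order */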
static const arch_register_t *gpreg_param_reg_std[] = {
- &amd64_gp_regs[REG_RDI],
- &amd64_gp_regs[REG_RSI],
- &amd64_gp_regs[REG_RDX],
- &amd64_gp_regs[REG_RCX],
- &amd64_gp_regs[REG_R8],
- &amd64_gp_regs[REG_R9],
+ &amd64_registers[REG_RDI],
+ &amd64_registers[REG_RSI],
+ &amd64_registers[REG_RDX],
+ &amd64_registers[REG_RCX],
+ &amd64_registers[REG_R8],
+ &amd64_registers[REG_R9],
};
static const arch_register_t *amd64_get_RegParam_reg(int n)
/* FIXME: No floating point yet */
/* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_gp_regs[REG_R0], ABI_CONTEXT_BOTH) */;
+ mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH) */;
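+ /* the integer return value is passed in RAX (System V AMD64 ABI) */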
be_abi_call_res_reg(abi, 0,
- &amd64_gp_regs[REG_RAX], ABI_CONTEXT_BOTH);
+ &amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
}
}
static void amd64_lower_for_target(void)
{
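+ /* settings for lowering compound (struct/union) parameters and
+ * return values */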
+ lower_params_t params = {
+ 4, /* def_ptr_alignment */
+ LF_COMPOUND_RETURN | LF_RETURN_HIDDEN, /* flags */
+ ADD_HIDDEN_ALWAYS_IN_FRONT, /* hidden_params */
+ NULL, /* find pointer type */
+ NULL, /* ret_compound_in_regs */
+ };
+
+ /* lower compound param handling */
+ lower_calls_with_compounds(&params);
+}
+
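+/**
+ * If-conversion callback: the backend has no Mux lowering yet, so
+ * reject every candidate.
+ */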
+static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
+ ir_node *mux_true)
+{
+ (void) sel;
+ (void) mux_false;
+ (void) mux_true;
+ return false;
}
/**
0, /* no inline assembly */
1, /* support Rotl nodes */
0, /* little endian */
- amd64_lower_for_target, /* lowering callback */
NULL, /* will be set later */
- NULL, /* parameter for if conversion */
+ amd64_is_mux_allowed, /* allowed Mux nodes for if conversion */
NULL, /* float arithmetic mode */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
const arch_isa_if_t amd64_isa_if = {
amd64_init,
+ amd64_lower_for_target,
amd64_done,
NULL, /* handle intrinsics */
- amd64_get_n_reg_class,
- amd64_get_reg_class,
amd64_get_reg_class_for_mode,
amd64_get_call_abi,
amd64_get_reg_class_alignment,