*/
#include "config.h"
-#include "pseudo_irg.h"
#include "irgwalk.h"
#include "irprog.h"
#include "irprintf.h"
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"
+#include "../bespillslots.h"
#include "bearch_amd64_t.h"
static arch_irn_class_t amd64_classify(const ir_node *irn)
{
(void) irn;
- return 0;
+ return arch_irn_class_none;
}
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
if (is_amd64_FrameAddr(node)) {
- const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
+ return attr->entity;
+
+ } else if (is_amd64_Store(node)) {
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
+ return attr->entity;
+
+ } else if (is_amd64_Load(node)) {
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
}
return NULL;
}
-static void amd64_set_frame_entity(ir_node *node, ir_entity *ent)
-{
- (void) node;
- (void) ent;
- /* TODO: set the ir_entity assigned to the frame */
-}
-
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
if (is_amd64_FrameAddr(irn)) {
- amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
+ attr->fp_offset += offset;
+
+ } else if (is_amd64_Store(irn)) {
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
+ attr->fp_offset += offset;
+
+ } else if (is_amd64_Load(irn)) {
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
attr->fp_offset += offset;
+
}
}
/* fill register allocator interface */
static const arch_irn_ops_t amd64_irn_ops = {
- get_amd64_in_req,
amd64_classify,
amd64_get_frame_entity,
- amd64_set_frame_entity,
amd64_set_frame_offset,
amd64_get_sp_bias,
NULL, /* get_inverse */
* Transforms the standard firm graph into
 * an amd64 firm graph
*/
-static void amd64_prepare_graph(void *self)
+static void amd64_prepare_graph(ir_graph *irg)
{
- amd64_code_gen_t *cg = self;
-
- amd64_transform_graph (cg);
+ amd64_irg_data_t *irg_data = amd64_get_irg_data(irg);
+ amd64_transform_graph(irg);
- if (cg->dump)
- dump_ir_graph(cg->irg, "transformed");
+ if (irg_data->dump)
+ dump_ir_graph(irg, "transformed");
}
-
/**
 * Called immediately before the emit phase.
*/
-static void amd64_finish_irg(void *self)
+static void amd64_finish_irg(ir_graph *irg)
{
- amd64_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
+ (void) irg;
+}
- dump_ir_graph(irg, "amd64-finished");
+static void amd64_before_ra(ir_graph *irg)
+{
+ be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
-static void amd64_before_ra(void *self)
+
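+/**
+ * Lowers a be_Reload node to an amd64 Load from the node's frame
+ * entity; a Proj carrying the reload's register replaces the node.
+ */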
+static void transform_Reload(ir_node *node)
{
- amd64_code_gen_t *cg = self;
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *ptr = get_irg_frame(irg);
+ ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
+ ir_mode *mode = get_irn_mode(node);
+ ir_entity *entity = be_get_frame_entity(node);
+ const arch_register_t *reg;
+ ir_node *proj;
+ ir_node *load;
- be_sched_fix_flags(cg->birg, &amd64_reg_classes[CLASS_amd64_flags], 0);
+ ir_node *sched_point = sched_prev(node);
+
+ load = new_bd_amd64_Load(dbgi, block, ptr, mem, entity);
+ sched_add_after(sched_point, load);
+ sched_remove(node);
+
+ proj = new_rd_Proj(dbgi, load, mode, pn_amd64_Load_res);
+
+ reg = arch_get_irn_register(node);
+ arch_set_irn_register(proj, reg);
+
+ exchange(node, proj);
}
-static void amd64_after_ra(void *self)
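+/**
+ * Lowers a be_Spill node to an amd64 Store that writes the spilled
+ * value to the node's frame entity.
+ */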
+static void transform_Spill(ir_node *node)
{
- (void) self;
- /* Some stuff you need to do immediatly after register allocation */
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *ptr = get_irg_frame(irg);
+ ir_node *mem = new_r_NoMem(irg);
+ ir_node *val = get_irn_n(node, be_pos_Spill_val);
+ //ir_mode *mode = get_irn_mode(val);
+ ir_entity *entity = be_get_frame_entity(node);
+ ir_node *sched_point;
+ ir_node *store;
+
+ sched_point = sched_prev(node);
+ store = new_bd_amd64_Store(dbgi, block, ptr, val, mem, entity);
+
+ sched_remove(node);
+ sched_add_after(sched_point, store);
+
+ exchange(node, store);
}
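+/**
+ * Block walker: lowers all be_Spill/be_Reload nodes of a block to
+ * amd64 Store/Load instructions. The schedule is walked back to front;
+ * the predecessor is fetched before a node is exchanged, so iteration
+ * stays valid.
+ */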
+static void amd64_after_ra_walker(ir_node *block, void *data)
+{
+ ir_node *node, *prev;
+ (void) data;
+
+ for (node = sched_last(block); !sched_is_begin(node); node = prev) {
+ prev = sched_prev(node);
+ if (be_is_Reload(node)) {
+ transform_Reload(node);
+ } else if (be_is_Spill(node)) {
+ transform_Spill(node);
+ }
+ }
+}
+
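+/**
+ * Frame entity coalescer callback: assigns the selected entity to a
+ * Reload node.
+ */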
+static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
+{
+ assert(be_is_Reload(node));
+ be_node_set_frame_entity(node, entity);
+}
/**
- * Emits the code, closes the output file and frees
- * the code generator interface.
+ * Collects nodes that need frame entities assigned.
*/
-static void amd64_emit_and_done(void *self)
+static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
- amd64_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
- amd64_gen_routine(cg, irg);
-
- /* de-allocate code generator */
- free(cg);
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ be_fec_env_t *env = (be_fec_env_t*)data;
+ const ir_mode *mode = get_irn_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ }
}
-static void *amd64_cg_init(be_irg_t *birg);
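+/**
+ * Called after register allocation: assigns frame entities to the
+ * Reload nodes, then lowers be_Spill/be_Reload to Store/Load.
+ */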
+static void amd64_after_ra(ir_graph *irg)
+{
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
-static const arch_code_generator_if_t amd64_code_gen_if = {
- amd64_cg_init,
- NULL, /* get_pic_base hook */
- NULL, /* before abi introduce hook */
- amd64_prepare_graph,
- NULL, /* spill hook */
- amd64_before_ra, /* before register allocation hook */
- amd64_after_ra, /* after register allocation hook */
- amd64_finish_irg,
- amd64_emit_and_done
-};
+ /* create and coalesce frame entities */
+ irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env, amd64_set_frame_entity);
+ be_free_frame_entity_coalescer(fec_env);
+
+ irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
+}
/**
 * Initializes the per-graph backend data.
*/
-static void *amd64_cg_init(be_irg_t *birg)
+static void amd64_init_graph(ir_graph *irg)
{
- const arch_env_t *arch_env = be_get_birg_arch_env(birg);
- amd64_isa_t *isa = (amd64_isa_t *) arch_env;
- amd64_code_gen_t *cg = XMALLOC(amd64_code_gen_t);
+ struct obstack *obst = be_get_be_obst(irg);
+ amd64_irg_data_t *irg_data = OALLOCZ(obst, amd64_irg_data_t);
+ irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
- cg->impl = &amd64_code_gen_if;
- cg->irg = be_get_birg_irg(birg);
- cg->isa = isa;
- cg->birg = birg;
- cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
-
- return (arch_code_generator_t *)cg;
+ be_birg_from_irg(irg)->isa_link = irg_data;
}
/**
* Used to create per-graph unique pseudo nodes.
*/
-static inline ir_node *create_const(amd64_code_gen_t *cg, ir_node **place,
+static inline ir_node *create_const(ir_graph *irg, ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
if (*place != NULL)
return *place;
- block = get_irg_start_block(cg->irg);
+ block = get_irg_start_block(irg);
res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
return res;
}
-const arch_isa_if_t amd64_isa_if;
+extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
{
&amd64_isa_if, /* isa interface implementation */
- &amd64_gp_regs[REG_RSP], /* stack pointer register */
- &amd64_gp_regs[REG_RBP], /* base pointer register */
+ N_AMD64_REGISTERS,
+ amd64_registers,
+ N_AMD64_CLASSES,
+ amd64_reg_classes,
+ &amd64_registers[REG_RSP], /* stack pointer register */
+ &amd64_registers[REG_RBP], /* base pointer register */
&amd64_reg_classes[CLASS_amd64_gp], /* link pointer register class */
-1, /* stack direction */
- 2, /* power of two stack alignment for calls, 2^2 == 4 */
+		3,                       /* power of two stack alignment for calls, 2^3 == 8 */
NULL, /* main environment */
7, /* costs for a spill instruction */
5, /* costs for a reload instruction */
+ false, /* no custom abi handling */
},
};
*/
static arch_env_t *amd64_init(FILE *outfile)
{
- static int run_once = 0;
- amd64_isa_t *isa;
-
- if(run_once)
- return NULL;
- run_once = 1;
-
- isa = XMALLOC(amd64_isa_t);
+ amd64_isa_t *isa = XMALLOC(amd64_isa_t);
memcpy(isa, &amd64_isa_template, sizeof(*isa));
be_emit_init(outfile);
amd64_register_init();
amd64_create_opcodes(&amd64_irn_ops);
- return &isa->arch_env;
+ return &isa->base;
}
*/
static void amd64_done(void *self)
{
- amd64_isa_t *isa = self;
+ amd64_isa_t *isa = (amd64_isa_t*)self;
 	/* now emit all global declarations */
- be_gas_emit_decls(isa->arch_env.main_env);
+ be_gas_emit_decls(isa->base.main_env);
be_emit_exit();
free(self);
}
-static unsigned amd64_get_n_reg_class(void)
-{
- return N_CLASSES;
-}
-
-static const arch_register_class_t *amd64_get_reg_class(unsigned i)
-{
- assert(i < N_CLASSES);
- return &amd64_reg_classes[i];
-}
-
-
-
/**
* Get the register class which shall be used to store a value of a given mode.
* @param self The this pointer.
typedef struct {
be_abi_call_flags_bits_t flags;
- const arch_env_t *arch_env;
ir_graph *irg;
} amd64_abi_env_t;
-static void *amd64_abi_init(const be_abi_call_t *call, const arch_env_t *arch_env, ir_graph *irg)
+static void *amd64_abi_init(const be_abi_call_t *call, ir_graph *irg)
{
amd64_abi_env_t *env = XMALLOC(amd64_abi_env_t);
be_abi_call_flags_t fl = be_abi_call_get_flags(call);
env->flags = fl.bits;
env->irg = irg;
- env->arch_env = arch_env;
return env;
}
static const arch_register_t *amd64_abi_prologue(void *self, ir_node **mem,
pmap *reg_map, int *stack_bias)
{
- amd64_abi_env_t *env = self;
- const arch_env_t *aenv = env->arch_env;
+ amd64_abi_env_t *env = (amd64_abi_env_t*)self;
+ const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
(void) mem;
(void) stack_bias;
(void) aenv;
if (!env->flags.try_omit_fp) {
/* FIXME: maybe later here should be some code to generate
* the usual abi prologue */
- return env->arch_env->bp;
+ return aenv->bp;
}
- return env->arch_env->sp;
+ return aenv->sp;
}
/* Build the epilog */
static void amd64_abi_epilogue(void *self, ir_node *bl, ir_node **mem,
pmap *reg_map)
{
- amd64_abi_env_t *env = self;
- const arch_env_t *aenv = env->arch_env;
+ amd64_abi_env_t *env = (amd64_abi_env_t*)self;
+ const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
ir_node *curr_sp = be_abi_reg_map_get(reg_map, aenv->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, aenv->bp);
(void) bl;
amd64_abi_epilogue,
};
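+/* The six integer parameter registers of the x86-64 System V calling
+ * convention, in argument order. */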
+static const arch_register_t *gpreg_param_reg_std[] = {
+ &amd64_registers[REG_RDI],
+ &amd64_registers[REG_RSI],
+ &amd64_registers[REG_RDX],
+ &amd64_registers[REG_RCX],
+ &amd64_registers[REG_R8],
+ &amd64_registers[REG_R9],
+};
+
+static const arch_register_t *amd64_get_RegParam_reg(int n)
+{
+	assert(n >= 0 && n < 6 && "register param out of range");
+ return gpreg_param_reg_std[n];
+}
+
/**
* Get the ABI restrictions for procedure calls.
* @param self The this pointer.
/* set abi flags for calls */
call_flags.bits.left_to_right = 0;
- call_flags.bits.store_args_sequential = 1;
+ call_flags.bits.store_args_sequential = 0;
call_flags.bits.try_omit_fp = 1;
call_flags.bits.fp_free = 0;
call_flags.bits.call_has_imm = 1;
mode = get_type_mode(tp);
//d// printf ("MODE %p %p XX %d\n", mode, mode_Iu, i);
- if (!no_reg && (i == 0 || i == 1) && mode == mode_Iu) {
+		if (!no_reg && i < 6 && mode_is_data(mode)) {
//d// printf("TEST%d\n", i);
- be_abi_call_param_reg(abi, i,
- i == 0 ? &amd64_gp_regs[REG_RDI]
- : &amd64_gp_regs[REG_RSI],
+			be_abi_call_param_reg(abi, i, amd64_get_RegParam_reg(i),
ABI_CONTEXT_BOTH);
/* default: all parameters on stack */
} else {
no_reg = 1;
- be_abi_call_param_stack(abi, i, mode, 4, 0, 0, ABI_CONTEXT_BOTH);
+ be_abi_call_param_stack(abi, i, mode, 8, 0, 0, ABI_CONTEXT_BOTH);
}
}
/* FIXME: No floating point yet */
/* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_gp_regs[REG_R0], ABI_CONTEXT_BOTH) */;
+ mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH) */;
be_abi_call_res_reg(abi, 0,
- &amd64_gp_regs[REG_RAX], ABI_CONTEXT_BOTH);
+ &amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
}
}
-static int amd64_to_appear_in_schedule(void *block_env, const ir_node *irn)
-{
- (void) block_env;
-
- if(!is_amd64_irn(irn))
- return -1;
-
- return 1;
-}
-
/**
- * Initializes the code generator interface.
+ * Returns the necessary byte alignment for storing a register of a given class.
*/
-static const arch_code_generator_if_t *amd64_get_code_generator_if(
- void *self)
+static int amd64_get_reg_class_alignment(const arch_register_class_t *cls)
{
- (void) self;
- return &amd64_code_gen_if;
+ ir_mode *mode = arch_register_class_mode(cls);
+ return get_mode_size_bytes(mode);
}
-list_sched_selector_t amd64_sched_selector;
-
-/**
- * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
- */
-static const list_sched_selector_t *amd64_get_list_sched_selector(
- const void *self, list_sched_selector_t *selector)
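+/**
+ * Performs target specific lowerings; for now only calls with compound
+ * parameters/returns are lowered.
+ */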
+static void amd64_lower_for_target(void)
{
- (void) self;
- (void) selector;
-
- amd64_sched_selector = trivial_selector;
- amd64_sched_selector.to_appear_in_schedule = amd64_to_appear_in_schedule;
- return &amd64_sched_selector;
-}
+ lower_params_t params = {
+ 4, /* def_ptr_alignment */
+ LF_COMPOUND_RETURN | LF_RETURN_HIDDEN, /* flags */
+ ADD_HIDDEN_ALWAYS_IN_FRONT, /* hidden_params */
+ NULL, /* find pointer type */
+ NULL, /* ret_compound_in_regs */
+ };
-static const ilp_sched_selector_t *amd64_get_ilp_sched_selector(
- const void *self)
-{
- (void) self;
- return NULL;
+ /* lower compound param handling */
+	lower_calls_with_compounds(&params);
}
-/**
- * Returns the necessary byte alignment for storing a register of given class.
- */
-static int amd64_get_reg_class_alignment(const arch_register_class_t *cls)
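+/**
+ * Callback for if-conversion: the backend cannot handle Mux nodes yet,
+ * so always refuse.
+ */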
+static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
+ ir_node *mux_true)
{
- ir_mode *mode = arch_register_class_mode(cls);
- return get_mode_size_bytes(mode);
+ (void) sel;
+ (void) mux_false;
+ (void) mux_true;
+ return false;
}
/**
*/
static const backend_params *amd64_get_backend_params(void) {
static backend_params p = {
- 0, /* no dword lowering */
0, /* no inline assembly */
+ 1, /* support Rotl nodes */
+ 0, /* little endian */
NULL, /* will be set later */
- NULL, /* no creator function */
- NULL, /* context for create_intrinsic_fkt */
- NULL, /* parameter for if conversion */
+ amd64_is_mux_allowed, /* parameter for if conversion */
NULL, /* float arithmetic mode */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
- 4 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
+ 8 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
};
return &p;
}
-static const be_execution_unit_t ***amd64_get_allowed_execution_units(
- const ir_node *irn)
-{
- (void) irn;
- /* TODO */
- assert(0);
- return NULL;
-}
-
-static const be_machine_t *amd64_get_machine(const void *self)
-{
- (void) self;
- /* TODO */
- assert(0);
- return NULL;
-}
-
static ir_graph **amd64_get_backend_irg_list(const void *self,
ir_graph ***irgs)
{
const arch_isa_if_t amd64_isa_if = {
amd64_init,
+ amd64_lower_for_target,
amd64_done,
NULL, /* handle intrinsics */
- amd64_get_n_reg_class,
- amd64_get_reg_class,
amd64_get_reg_class_for_mode,
amd64_get_call_abi,
- amd64_get_code_generator_if,
- amd64_get_list_sched_selector,
- amd64_get_ilp_sched_selector,
amd64_get_reg_class_alignment,
amd64_get_backend_params,
- amd64_get_allowed_execution_units,
- amd64_get_machine,
amd64_get_backend_irg_list,
NULL, /* mark remat */
amd64_parse_asm_constraint,
- amd64_is_valid_clobber
+ amd64_is_valid_clobber,
+
+ amd64_init_graph,
+ NULL, /* get_pic_base */
+ NULL, /* before_abi */
+ amd64_prepare_graph,
+ amd64_before_ra,
+ amd64_after_ra,
+ amd64_finish_irg,
+ amd64_gen_routine,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64);