/**
* @file
* @brief The main amd64 backend driver file.
- * @version $Id: bearch_amd64.c 26909 2010-01-05 15:56:54Z matze $
*/
#include "config.h"
-#include "pseudo_irg.h"
#include "irgwalk.h"
#include "irprog.h"
#include "irprintf.h"
#include "ircons.h"
#include "irgmod.h"
+#include "irdump.h"
+#include "lower_calls.h"
#include "bitset.h"
#include "debug.h"
-
-#include "be.h"
-#include "../bearch.h"
-#include "../benode.h"
-#include "../belower.h"
-#include "../besched.h"
-#include "../beabi.h"
-#include "../bemodule.h"
-#include "../begnuas.h"
-#include "../belistsched.h"
+#include "error.h"
+
+#include "be_t.h"
+#include "bearch.h"
+#include "beirg.h"
+#include "benode.h"
+#include "belower.h"
+#include "besched.h"
+#include "beabi.h"
+#include "bemodule.h"
+#include "begnuas.h"
+#include "belistsched.h"
+#include "beflags.h"
+#include "bespillslots.h"
+#include "bespillutil.h"
+#include "bestack.h"
#include "bearch_amd64_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static arch_irn_class_t amd64_classify(const ir_node *irn)
-{
- (void) irn;
- return 0;
-}
-
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
+ if (is_amd64_FrameAddr(node) || is_amd64_Store(node) || is_amd64_Load(node)) {
+ /* FrameAddr, Store and Load all keep their frame entity in the SymConst attribute */
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
+ return attr->entity;
+ }
+
(void) node;
/* TODO: return the ir_entity assigned to the frame */
return NULL;
}
-static void amd64_set_frame_entity(ir_node *node, ir_entity *ent)
-{
- (void) node;
- (void) ent;
- /* TODO: set the ir_entity assigned to the frame */
-}
-
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
- (void) irn;
- (void) offset;
- /* TODO: correct offset if irn accesses the stack */
+ if (is_amd64_FrameAddr(irn) || is_amd64_Store(irn) || is_amd64_Load(irn)) {
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
+ attr->fp_offset += offset;
+ }
}
static int amd64_get_sp_bias(const ir_node *irn)
/* fill register allocator interface */
static const arch_irn_ops_t amd64_irn_ops = {
- get_amd64_in_req,
- amd64_classify,
amd64_get_frame_entity,
- amd64_set_frame_entity,
amd64_set_frame_offset,
amd64_get_sp_bias,
- NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
NULL, /* perform_memory_operand */
* Transforms the standard firm graph into
* an amd64 firm graph
*/
-static void amd64_prepare_graph(void *self)
+static void amd64_prepare_graph(ir_graph *irg)
{
- amd64_code_gen_t *cg = self;
+ amd64_transform_graph(irg);
- amd64_transform_graph (cg);
+ if (be_options.dump_flags & DUMP_BE)
+ dump_ir_graph(irg, "transformed");
+}
- if (cg->dump)
- be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
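+/**
+ * Called before register allocation: lets the generic backend fix up
+ * uses of the flags register in the schedule.
+ */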
+static void amd64_before_ra(ir_graph *irg)
+{
+ be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
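+/**
+ * Replaces a generic be_Reload node by an amd64 Load from the assigned
+ * frame entity, keeping the scheduling position and the target register.
+ */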
+static void transform_Reload(ir_node *node)
+{
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *ptr = get_irg_frame(irg);
+ ir_node *mem = get_irn_n(node, n_be_Reload_mem);
+ ir_mode *mode = get_irn_mode(node);
+ ir_entity *entity = be_get_frame_entity(node);
+ const arch_register_t *reg;
+ ir_node *proj;
+ ir_node *load;
+ ir_node *sched_point = sched_prev(node);
-/**
- * Called immediatly before emit phase.
- */
-static void amd64_finish_irg(void *self)
-{
- amd64_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
+ load = new_bd_amd64_Load(dbgi, block, ptr, mem, entity);
+ sched_add_after(sched_point, load);
+ sched_remove(node);
- dump_ir_block_graph_sched(irg, "-amd64-finished");
-}
+ proj = new_rd_Proj(dbgi, load, mode, pn_amd64_Load_res);
+ reg = arch_get_irn_register(node);
+ arch_set_irn_register(proj, reg);
-static void amd64_before_ra(void *self)
-{
- (void) self;
- /* Some stuff you need to do after scheduling but before register allocation */
+ exchange(node, proj);
}
-static void amd64_after_ra(void *self)
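+/**
+ * Replaces a generic be_Spill node by an amd64 Store of the spilled
+ * value to the assigned frame entity.
+ */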
+static void transform_Spill(ir_node *node)
{
- (void) self;
- /* Some stuff you need to do immediatly after register allocation */
-}
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *ptr = get_irg_frame(irg);
+ ir_node *mem = get_irg_no_mem(irg);
+ ir_node *val = get_irn_n(node, n_be_Spill_val);
+ //ir_mode *mode = get_irn_mode(val);
+ ir_entity *entity = be_get_frame_entity(node);
+ ir_node *sched_point;
+ ir_node *store;
+ sched_point = sched_prev(node);
+ store = new_bd_amd64_Store(dbgi, block, ptr, val, mem, entity);
+ sched_remove(node);
+ sched_add_after(sched_point, store);
-/**
- * Emits the code, closes the output file and frees
- * the code generator interface.
- */
-static void amd64_emit_and_done(void *self)
+ exchange(node, store);
+}
+
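+/**
+ * Block walker: materializes all remaining be_Spill/be_Reload nodes as
+ * real amd64 Store/Load instructions after register allocation.
+ */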
+static void amd64_after_ra_walker(ir_node *block, void *data)
{
- amd64_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
+ ir_node *node, *prev;
+ (void) data;
- amd64_gen_routine(cg, irg);
+ for (node = sched_last(block); !sched_is_begin(node); node = prev) {
+ prev = sched_prev(node);
- /* de-allocate code generator */
- free(cg);
+ if (be_is_Reload(node)) {
+ transform_Reload(node);
+ } else if (be_is_Spill(node)) {
+ transform_Spill(node);
+ }
+ }
}
-static void *amd64_cg_init(be_irg_t *birg);
-
-static const arch_code_generator_if_t amd64_code_gen_if = {
- amd64_cg_init,
- NULL, /* get_pic_base hook */
- NULL, /* before abi introduce hook */
- amd64_prepare_graph,
- NULL, /* spill hook */
- amd64_before_ra, /* before register allocation hook */
- amd64_after_ra, /* after register allocation hook */
- amd64_finish_irg,
- amd64_emit_and_done
-};
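+/**
+ * Callback for the frame entity coalescer: assigns the chosen entity to
+ * a Spill or Reload node.
+ */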
+static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
+{
+ assert(be_is_Reload(node) || be_is_Spill(node));
+ be_node_set_frame_entity(node, entity);
+}
/**
- * Initializes the code generator.
+ * Collects nodes that need frame entities assigned.
*/
-static void *amd64_cg_init(be_irg_t *birg)
+static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
- const arch_env_t *arch_env = be_get_birg_arch_env(birg);
- amd64_isa_t *isa = (amd64_isa_t *) arch_env;
- amd64_code_gen_t *cg = XMALLOC(amd64_code_gen_t);
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ be_fec_env_t *env = (be_fec_env_t*)data;
+ const ir_mode *mode = get_irn_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ }
+}
- cg->impl = &amd64_code_gen_if;
- cg->irg = be_get_birg_irg(birg);
- cg->isa = isa;
- cg->birg = birg;
- cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
+/**
+ * Called immediately before the emit phase.
+ */
+static void amd64_finish_irg(ir_graph *irg)
+{
+ be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
+ bool at_begin = stack_layout->sp_relative;
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
- return (arch_code_generator_t *)cg;
-}
+ /* create and coalesce frame entities */
+ irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env, amd64_set_frame_entity, at_begin);
+ be_free_frame_entity_coalescer(fec_env);
+ irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
+ /* fix stack entity offsets */
+ be_abi_fix_stack_nodes(irg);
+ be_abi_fix_stack_bias(irg);
+}
-const arch_isa_if_t amd64_isa_if;
+extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
{
&amd64_isa_if, /* isa interface implementation */
- &amd64_gp_regs[REG_RSP], /* stack pointer register */
- &amd64_gp_regs[REG_RBP], /* base pointer register */
- &amd64_reg_classes[CLASS_amd64_gp], /* link pointer register class */
- -1, /* stack direction */
- 2, /* power of two stack alignment for calls, 2^2 == 4 */
- NULL, /* main environment */
- 7, /* costs for a spill instruction */
- 5, /* costs for a reload instruction */
+ N_AMD64_REGISTERS,
+ amd64_registers,
+ N_AMD64_CLASSES,
+ amd64_reg_classes,
+ &amd64_registers[REG_RSP], /* stack pointer register */
+ &amd64_registers[REG_RBP], /* base pointer register */
+ 3, /* power of two stack alignment for calls, 2^3 == 8 */
+ 7, /* costs for a spill instruction */
+ 5, /* costs for a reload instruction */
+ false, /* no custom abi handling */
},
};
-/**
- * Initializes the backend ISA
- */
-static arch_env_t *amd64_init(FILE *outfile)
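+/**
+ * One-time initialization of the backend: sets up the register and
+ * opcode descriptions.
+ */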
+static void amd64_init(void)
{
- static int run_once = 0;
- amd64_isa_t *isa;
-
- if(run_once)
- return NULL;
- run_once = 1;
-
- isa = XMALLOC(amd64_isa_t);
- memcpy(isa, &amd64_isa_template, sizeof(*isa));
-
- be_emit_init(outfile);
-
amd64_register_init();
amd64_create_opcodes(&amd64_irn_ops);
-
- return &isa->arch_env;
}
-
-
-/**
- * Closes the output file and frees the ISA structure.
- */
-static void amd64_done(void *self)
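+/**
+ * One-time teardown of the backend: frees the opcode descriptions again.
+ */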
+static void amd64_finish(void)
{
- amd64_isa_t *isa = self;
-
- /* emit now all global declarations */
- be_gas_emit_decls(isa->arch_env.main_env);
-
- be_emit_exit();
- free(self);
+ amd64_free_opcodes();
}
-
-static unsigned amd64_get_n_reg_class(void)
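+/**
+ * Starts a code generation run: allocates a fresh ISA structure from the
+ * static template.
+ */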
+static arch_env_t *amd64_begin_codegeneration(void)
{
- return N_CLASSES;
-}
+ amd64_isa_t *isa = XMALLOC(amd64_isa_t);
+ *isa = amd64_isa_template;
-static const arch_register_class_t *amd64_get_reg_class(unsigned i)
-{
- assert(i < N_CLASSES);
- return &amd64_reg_classes[i];
+ return &isa->base;
}
-
-
/**
- * Get the register class which shall be used to store a value of a given mode.
- * @param self The this pointer.
- * @param mode The mode in question.
- * @return A register class which can hold values of the given mode.
+ * Frees the ISA structure.
*/
-static const arch_register_class_t *amd64_get_reg_class_for_mode(const ir_mode *mode)
+static void amd64_end_codegeneration(void *self)
{
- if (mode_is_float(mode))
- return &amd64_reg_classes[CLASS_amd64_fp];
- else
- return &amd64_reg_classes[CLASS_amd64_gp];
-}
-
-
-
-typedef struct {
- be_abi_call_flags_bits_t flags;
- const arch_env_t *arch_env;
- ir_graph *irg;
-} amd64_abi_env_t;
-
-static void *amd64_abi_init(const be_abi_call_t *call, const arch_env_t *arch_env, ir_graph *irg)
-{
- amd64_abi_env_t *env = XMALLOC(amd64_abi_env_t);
- be_abi_call_flags_t fl = be_abi_call_get_flags(call);
- env->flags = fl.bits;
- env->irg = irg;
- env->arch_env = arch_env;
- return env;
+ free(self);
}
/**
- * @param self The callback object.
+ * @param irg The graph for which the between type is constructed.
* @return The between type for that call.
*/
-static ir_type *amd64_get_between_type(void *self)
+static ir_type *amd64_get_between_type(ir_graph *irg)
{
static ir_type *between_type = NULL;
static ir_entity *old_bp_ent = NULL;
- (void) self;
+ (void) irg;
if(!between_type) {
ir_entity *ret_addr_ent;
return between_type;
}
-/**
- * Build the prolog, return the BASE POINTER register
- */
-static const arch_register_t *amd64_abi_prologue(void *self, ir_node **mem,
- pmap *reg_map, int *stack_bias)
-{
- amd64_abi_env_t *env = self;
- const arch_env_t *aenv = env->arch_env;
- (void) mem;
- (void) stack_bias;
- (void) aenv;
- (void) reg_map;
-
- if (!env->flags.try_omit_fp) {
- /* FIXME: maybe later here should be some code to generate
- * the usual abi prologue */
- return env->arch_env->bp;
- }
+static const be_abi_callbacks_t amd64_abi_callbacks = {
+ amd64_get_between_type,
+};
- return env->arch_env->sp;
-}
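+/* integer parameter registers in SysV AMD64 order */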
+static const arch_register_t *gpreg_param_reg_std[] = {
+ &amd64_registers[REG_RDI],
+ &amd64_registers[REG_RSI],
+ &amd64_registers[REG_RDX],
+ &amd64_registers[REG_RCX],
+ &amd64_registers[REG_R8],
+ &amd64_registers[REG_R9],
+};
-/* Build the epilog */
-static void amd64_abi_epilogue(void *self, ir_node *bl, ir_node **mem,
- pmap *reg_map)
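+/**
+ * Returns the register in which the n-th (0-based) integer parameter is
+ * passed, following the SysV AMD64 calling convention.
+ */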
+static const arch_register_t *amd64_get_RegParam_reg(int n)
{
- amd64_abi_env_t *env = self;
- const arch_env_t *aenv = env->arch_env;
- ir_node *curr_sp = be_abi_reg_map_get(reg_map, aenv->sp);
- ir_node *curr_bp = be_abi_reg_map_get(reg_map, aenv->bp);
- (void) bl;
- (void) mem;
-
- if (env->flags.try_omit_fp) {
- curr_sp = be_new_IncSP(aenv->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
- }
-
- be_abi_reg_map_set(reg_map, aenv->sp, curr_sp);
- be_abi_reg_map_set(reg_map, aenv->bp, curr_bp);
+ assert(n >= 0 && n < 6 && "register param index out of range");
+ return gpreg_param_reg_std[n];
}
-static const be_abi_callbacks_t amd64_abi_callbacks = {
- amd64_abi_init,
- free,
- amd64_get_between_type,
- amd64_abi_prologue,
- amd64_abi_epilogue,
-};
-
/**
* Get the ABI restrictions for procedure calls.
- * @param self The this pointer.
* @param method_type The type of the method (procedure) in question.
* @param abi The abi object to be modified
*/
-static void amd64_get_call_abi(const void *self, ir_type *method_type,
- be_abi_call_t *abi)
+static void amd64_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
{
ir_type *tp;
ir_mode *mode;
int i, n = get_method_n_params(method_type);
- be_abi_call_flags_t call_flags;
int no_reg = 0;
- (void) self;
-
/* set abi flags for calls */
- call_flags.bits.left_to_right = 0;
- call_flags.bits.store_args_sequential = 1;
- call_flags.bits.try_omit_fp = 1;
- call_flags.bits.fp_free = 0;
- call_flags.bits.call_has_imm = 1;
-
- /* set stack parameter passing style */
+ be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
+ call_flags.call_has_imm = true;
be_abi_call_set_flags(abi, call_flags, &amd64_abi_callbacks);
for (i = 0; i < n; i++) {
tp = get_method_param_type(method_type, i);
mode = get_type_mode(tp);
- printf ("MODE %p %p XX %d\n", mode, mode_Iu, i);
+ //d// printf ("MODE %p %p XX %d\n", mode, mode_Iu, i);
- if (!no_reg && (i == 0 || i == 1) && mode == mode_Iu) {
- printf("TEST%d\n", i);
- be_abi_call_param_reg(abi, i,
- i == 0 ? &amd64_gp_regs[REG_RDI]
- : &amd64_gp_regs[REG_RSI],
+ if (!no_reg && i < 6 && mode_is_data(mode)) {
+ //d// printf("TEST%d\n", i);
+ be_abi_call_param_reg(abi, i, amd64_get_RegParam_reg(i),
ABI_CONTEXT_BOTH);
/* default: all parameters on stack */
} else {
no_reg = 1;
- be_abi_call_param_stack(abi, i, mode, 4, 0, 0, ABI_CONTEXT_BOTH);
+ be_abi_call_param_stack(abi, i, mode, 8, 0, 0, ABI_CONTEXT_BOTH);
}
}
tp = get_method_res_type(method_type, 0);
mode = get_type_mode(tp);
- /* FIXME: No floating point yet */
- /* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_gp_regs[REG_R0], ABI_CONTEXT_BOTH) */;
+ if (mode_is_float(mode))
+ panic("float not supported yet");
be_abi_call_res_reg(abi, 0,
- &amd64_gp_regs[REG_RAX], ABI_CONTEXT_BOTH);
+ &amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
}
}
-static int amd64_to_appear_in_schedule(void *block_env, const ir_node *irn)
-{
- (void) block_env;
-
- if(!is_amd64_irn(irn))
- return -1;
-
- return 1;
-}
-
-/**
- * Initializes the code generator interface.
- */
-static const arch_code_generator_if_t *amd64_get_code_generator_if(
- void *self)
-{
- (void) self;
- return &amd64_code_gen_if;
-}
-
-list_sched_selector_t amd64_sched_selector;
-
-/**
- * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
- */
-static const list_sched_selector_t *amd64_get_list_sched_selector(
- const void *self, list_sched_selector_t *selector)
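+/**
+ * Lowers constructs that code selection cannot handle yet: compound call
+ * parameters/returns and CopyB nodes.
+ */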
+static void amd64_lower_for_target(void)
{
- (void) self;
- (void) selector;
+ size_t i, n_irgs = get_irp_n_irgs();
- amd64_sched_selector = trivial_selector;
- amd64_sched_selector.to_appear_in_schedule = amd64_to_appear_in_schedule;
- return &amd64_sched_selector;
-}
+ /* lower compound param handling */
+ lower_calls_with_compounds(LF_RETURN_HIDDEN);
-static const ilp_sched_selector_t *amd64_get_ilp_sched_selector(
- const void *self)
-{
- (void) self;
- return NULL;
+ for (i = 0; i < n_irgs; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+ /* Turn all small CopyBs into loads/stores, and turn all bigger
+ * CopyBs into memcpy calls, because we cannot handle CopyB nodes
+ * during code generation yet.
+ * TODO: Adapt this once custom CopyB handling is implemented. */
+ lower_CopyB(irg, 64, 65, true);
+ }
}
-/**
- * Returns the necessary byte alignment for storing a register of given class.
- */
-static int amd64_get_reg_class_alignment(const arch_register_class_t *cls)
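+/**
+ * Rejects all Mux nodes, since the backend cannot generate code for them
+ * yet; this effectively disables if-conversion.
+ */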
+static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
+ ir_node *mux_true)
{
- ir_mode *mode = arch_register_class_mode(cls);
- return get_mode_size_bytes(mode);
+ (void) sel;
+ (void) mux_false;
+ (void) mux_true;
+ return false;
}
/**
* Returns the libFirm configuration parameters for this backend.
*/
static const backend_params *amd64_get_backend_params(void) {
static backend_params p = {
- 0, /* no dword lowering */
0, /* no inline assembly */
+ 1, /* support Rotl nodes */
+ 0, /* little endian */
+ 1, /* modulo shift is efficient */
+ 0, /* non-modulo shift is not efficient */
NULL, /* will be set later */
- NULL, /* no creator function */
- NULL, /* context for create_intrinsic_fkt */
- NULL, /* parameter for if conversion */
+ amd64_is_mux_allowed, /* if-conversion callback */
+ 64, /* machine size */
NULL, /* float arithmetic mode */
+ NULL, /* long long type */
+ NULL, /* unsigned long long type */
+ NULL, /* long double type (not supported yet) */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
- 4 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
+ 8 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
};
return &p;
}
-static const be_execution_unit_t ***amd64_get_allowed_execution_units(
- const ir_node *irn)
-{
- (void) irn;
- /* TODO */
- assert(0);
- return NULL;
-}
-
-static const be_machine_t *amd64_get_machine(const void *self)
-{
- (void) self;
- /* TODO */
- assert(0);
- return NULL;
-}
-
-static ir_graph **amd64_get_backend_irg_list(const void *self,
- ir_graph ***irgs)
-{
- (void) self;
- (void) irgs;
- return NULL;
-}
-
static asm_constraint_flags_t amd64_parse_asm_constraint(const char **c)
{
(void) c;
return 0;
}
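+/**
+ * Tells whether @p reg is saved across calls by the callee (callee != 0)
+ * or by the caller (callee == 0), per the SysV AMD64 calling convention.
+ */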
+static int amd64_register_saved_by(const arch_register_t *reg, int callee)
+{
+ if (callee) {
+ /* check for callee saved */
+ if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
+ switch (reg->index) {
+ case REG_GP_RBX:
+ case REG_GP_RBP:
+ case REG_GP_R12:
+ case REG_GP_R13:
+ case REG_GP_R14:
+ case REG_GP_R15:
+ return 1;
+ default:
+ return 0;
+ }
+ }
+ } else {
+ /* check for caller saved */
+ if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
+ switch (reg->index) {
+ case REG_GP_RAX:
+ case REG_GP_RCX:
+ case REG_GP_RDX:
+ case REG_GP_RSI:
+ case REG_GP_RDI:
+ case REG_GP_R8:
+ case REG_GP_R9:
+ case REG_GP_R10:
+ case REG_GP_R11:
+ return 1;
+ default:
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
const arch_isa_if_t amd64_isa_if = {
amd64_init,
- amd64_done,
- NULL, /* handle intrinsics */
- amd64_get_n_reg_class,
- amd64_get_reg_class,
- amd64_get_reg_class_for_mode,
- amd64_get_call_abi,
- amd64_get_code_generator_if,
- amd64_get_list_sched_selector,
- amd64_get_ilp_sched_selector,
- amd64_get_reg_class_alignment,
+ amd64_finish,
amd64_get_backend_params,
- amd64_get_allowed_execution_units,
- amd64_get_machine,
- amd64_get_backend_irg_list,
- NULL, /* mark remat */
+ amd64_lower_for_target,
amd64_parse_asm_constraint,
- amd64_is_valid_clobber
+ amd64_is_valid_clobber,
+
+ amd64_begin_codegeneration,
+ amd64_end_codegeneration,
+ NULL,
+ amd64_get_call_abi,
+ NULL, /* mark remat */
+ NULL, /* get_pic_base */
+ be_new_spill,
+ be_new_reload,
+ amd64_register_saved_by,
+
+ NULL, /* handle intrinsics */
+ NULL, /* before_abi */
+ amd64_prepare_graph,
+ amd64_before_ra,
+ amd64_finish_irg,
+ amd64_gen_routine,
};
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
void be_init_arch_amd64(void)
{
be_register_isa_if("amd64", &amd64_isa_if);