/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
- *
* This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
*/
/**
 * @file
 * @brief   The main amd64 backend driver file.
 */
#include "bitset.h"
#include "debug.h"
+#include "error.h"
-#include "be.h"
+#include "be_t.h"
#include "bearch.h"
+#include "beirg.h"
#include "benode.h"
#include "belower.h"
#include "besched.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static arch_irn_class_t amd64_classify(const ir_node *irn)
-{
- (void) irn;
- return arch_irn_class_none;
-}
-
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
if (is_amd64_FrameAddr(node)) {
/* fill register allocator interface */
static const arch_irn_ops_t amd64_irn_ops = {
- amd64_classify,
amd64_get_frame_entity,
amd64_set_frame_offset,
amd64_get_sp_bias,
- NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
NULL, /* perform_memory_operand */
/**
 * Transforms the standard Firm graph into an amd64 Firm graph.
 */
static void amd64_prepare_graph(ir_graph *irg)
{
- amd64_irg_data_t *irg_data = amd64_get_irg_data(irg);
amd64_transform_graph(irg);
- if (irg_data->dump)
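+ /* dumping of the transformed graph is gated by the global backend options */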
+ if (be_options.dump_flags & DUMP_BE)
dump_ir_graph(irg, "transformed");
}
ir_node *proj;
ir_node *load;
- ir_node *sched_point = sched_prev(node);
-
load = new_bd_amd64_Load(dbgi, block, ptr, mem, entity);
- sched_add_after(sched_point, load);
- sched_remove(node);
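+ /* sched_replace() puts the new load at the old node's position in the
+  * block schedule and removes the old node from it in one step */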
+ sched_replace(node, load);
proj = new_rd_Proj(dbgi, load, mode, pn_amd64_Load_res);
ir_node *val = get_irn_n(node, n_be_Spill_val);
//ir_mode *mode = get_irn_mode(val);
ir_entity *entity = be_get_frame_entity(node);
- ir_node *sched_point;
ir_node *store;
- sched_point = sched_prev(node);
store = new_bd_amd64_Store(dbgi, block, ptr, val, mem, entity);
-
- sched_remove(node);
- sched_add_after(sched_point, store);
+ sched_replace(node, store);
exchange(node, store);
}
static void amd64_after_ra_walker(ir_node *block, void *data)
{
- ir_node *node, *prev;
(void) data;
- for (node = sched_last(block); !sched_is_begin(node); node = prev) {
- prev = sched_prev(node);
-
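+ /* sched_foreach_reverse_safe() walks the schedule back to front and
+  * remembers the predecessor first, so the current node may be removed
+  * or exchanged during the walk */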
+ sched_foreach_reverse_safe(block, node) {
if (be_is_Reload(node)) {
transform_Reload(node);
} else if (be_is_Spill(node)) {
static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
{
- assert(be_is_Reload(node));
+ assert(be_is_Reload(node) || be_is_Spill(node));
be_node_set_frame_entity(node, entity);
}
be_abi_fix_stack_bias(irg);
}
-/**
- * Initializes the code generator.
- */
-static void amd64_init_graph(ir_graph *irg)
-{
- struct obstack *obst = be_get_be_obst(irg);
- amd64_irg_data_t *irg_data = OALLOCZ(obst, amd64_irg_data_t);
- irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
-
- be_birg_from_irg(irg)->isa_link = irg_data;
-}
-
-
-typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
-
-/**
- * Used to create per-graph unique pseudo nodes.
- */
-static inline ir_node *create_const(ir_graph *irg, ir_node **place,
- create_const_node_func func,
- const arch_register_t* reg)
-{
- ir_node *block, *res;
-
- if (*place != NULL)
- return *place;
-
- block = get_irg_start_block(irg);
- res = func(NULL, block);
- arch_set_irn_register(res, reg);
- *place = res;
-
- return res;
-}
-
extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
{
amd64_registers,
N_AMD64_CLASSES,
amd64_reg_classes,
- &amd64_registers[REG_RSP], /* stack pointer register */
- &amd64_registers[REG_RBP], /* base pointer register */
- &amd64_reg_classes[CLASS_amd64_gp], /* link pointer register class */
- 3, /* power of two stack alignment for calls, 2^2 == 4 */
- NULL, /* main environment */
- 7, /* costs for a spill instruction */
- 5, /* costs for a reload instruction */
- false, /* no custom abi handling */
+ &amd64_registers[REG_RSP], /* stack pointer register */
+ &amd64_registers[REG_RBP], /* base pointer register */
+ 3, /* power of two stack alignment for calls, 2^3 == 8 */
+ 7, /* costs for a spill instruction */
+ 5, /* costs for a reload instruction */
+ false, /* no custom abi handling */
},
};
-/**
- * Initializes the backend ISA
- */
-static arch_env_t *amd64_init(const be_main_env_t *env)
+static void amd64_init(void)
{
- amd64_isa_t *isa = XMALLOC(amd64_isa_t);
- *isa = amd64_isa_template;
-
amd64_register_init();
amd64_create_opcodes(&amd64_irn_ops);
-
- be_emit_init(env->file_handle);
- be_gas_begin_compilation_unit(env);
-
- return &isa->base;
}
-
-
-/**
- * Closes the output file and frees the ISA structure.
- */
-static void amd64_done(void *self)
+static void amd64_finish(void)
{
- amd64_isa_t *isa = (amd64_isa_t*)self;
- /* emit now all global declarations */
- be_gas_end_compilation_unit(isa->base.main_env);
- be_emit_exit();
- free(self);
+ amd64_free_opcodes();
+}
+static arch_env_t *amd64_begin_codegeneration(void)
+{
+ amd64_isa_t *isa = XMALLOC(amd64_isa_t);
+ *isa = amd64_isa_template;
+ return &isa->base;
}
-
/**
- * Get the register class which shall be used to store a value of a given mode.
- * @param self The this pointer.
- * @param mode The mode in question.
- * @return A register class which can hold values of the given mode.
+ * Frees the ISA structure.
*/
-static const arch_register_class_t *amd64_get_reg_class_for_mode(const ir_mode *mode)
+static void amd64_end_codegeneration(void *self)
{
- assert(!mode_is_float(mode));
- return &amd64_reg_classes[CLASS_amd64_gp];
+ free(self);
}
-
-
-typedef struct {
- be_abi_call_flags_bits_t flags;
- ir_graph *irg;
-} amd64_abi_env_t;
-
/**
 * Fills the abi object with the calling convention for the given method type.
- * @param self The callback object.
* @param method_type The type of the method (procedure) in question.
* @param abi The abi object to be modified
*/
-static void amd64_get_call_abi(const void *self, ir_type *method_type,
- be_abi_call_t *abi)
+static void amd64_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
{
ir_type *tp;
ir_mode *mode;
int i, n = get_method_n_params(method_type);
- be_abi_call_flags_t call_flags;
int no_reg = 0;
- (void) self;
-
/* set abi flags for calls */
- call_flags.bits.store_args_sequential = 0;
- call_flags.bits.try_omit_fp = 1;
- call_flags.bits.fp_free = 0;
- call_flags.bits.call_has_imm = 1;
-
- /* set stack parameter passing style */
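+ /* keep the default call flags and additionally allow immediate (direct)
+  * call targets */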
+ be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
+ call_flags.call_has_imm = true;
be_abi_call_set_flags(abi, call_flags, &amd64_abi_callbacks);
for (i = 0; i < n; i++) {
tp = get_method_res_type(method_type, 0);
mode = get_type_mode(tp);
- /* FIXME: No floating point yet */
- /* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH); */
+ if (mode_is_float(mode))
+ panic("float not supported yet");
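+ /* integer return values live in rax, following the System V AMD64 ABI */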
be_abi_call_res_reg(abi, 0,
&amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
}
}
-/**
- * Returns the necessary byte alignment for storing a register of given class.
- */
-static int amd64_get_reg_class_alignment(const arch_register_class_t *cls)
-{
- ir_mode *mode = arch_register_class_mode(cls);
- return get_mode_size_bytes(mode);
-}
-
static void amd64_lower_for_target(void)
{
size_t i, n_irgs = get_irp_n_irgs();
return &p;
}
-static ir_graph **amd64_get_backend_irg_list(const void *self,
- ir_graph ***irgs)
-{
- (void) self;
- (void) irgs;
- return NULL;
-}
-
static asm_constraint_flags_t amd64_parse_asm_constraint(const char **c)
{
(void) c;
static int amd64_register_saved_by(const arch_register_t *reg, int callee)
{
- if (callee) {
- /* check for callee saved */
- if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
- switch (reg->index) {
- case REG_GP_RBX:
- case REG_GP_RBP:
- case REG_GP_R12:
- case REG_GP_R13:
- case REG_GP_R14:
- case REG_GP_R15:
- return 1;
- default:
- return 0;
- }
- }
- } else {
- /* check for caller saved */
- if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
- switch (reg->index) {
- case REG_GP_RAX:
- case REG_GP_RCX:
- case REG_GP_RDX:
- case REG_GP_RSI:
- case REG_GP_RDI:
- case REG_GP_R8:
- case REG_GP_R9:
- case REG_GP_R10:
- case REG_GP_R11:
- return 1;
- default:
- return 0;
- }
- }
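+ /* System V AMD64 ABI: rbx, rbp and r12-r15 are callee-saved; the
+  * remaining allocatable general purpose registers are caller-saved */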
+ switch (reg->global_index) {
+ case REG_RBX:
+ case REG_RBP:
+ case REG_R12:
+ case REG_R13:
+ case REG_R14:
+ case REG_R15:
+ return callee;
+
+ case REG_RAX:
+ case REG_RCX:
+ case REG_RDX:
+ case REG_RSI:
+ case REG_RDI:
+ case REG_R8:
+ case REG_R9:
+ case REG_R10:
+ case REG_R11:
+ return !callee;
+
+ default:
+ return 0;
}
- return 0;
}
const arch_isa_if_t amd64_isa_if = {
amd64_init,
- amd64_lower_for_target,
- amd64_done,
- NULL, /* handle intrinsics */
- amd64_get_reg_class_for_mode,
- amd64_get_call_abi,
- amd64_get_reg_class_alignment,
+ amd64_finish,
amd64_get_backend_params,
- amd64_get_backend_irg_list,
- NULL, /* mark remat */
+ amd64_lower_for_target,
amd64_parse_asm_constraint,
amd64_is_valid_clobber,
- amd64_init_graph,
+ amd64_begin_codegeneration,
+ amd64_end_codegeneration,
+ NULL, /* init_graph */
+ amd64_get_call_abi,
+ NULL, /* mark remat */
NULL, /* get_pic_base */
+ be_new_spill,
+ be_new_reload,
+ amd64_register_saved_by,
+
+ NULL, /* handle intrinsics */
NULL, /* before_abi */
amd64_prepare_graph,
amd64_before_ra,
amd64_finish_irg,
amd64_gen_routine,
- amd64_register_saved_by,
- be_new_spill,
- be_new_reload
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)