/**
* @file
* @brief The main amd64 backend driver file.
- * @version $Id: bearch_amd64.c 26909 2010-01-05 15:56:54Z matze $
*/
#include "config.h"
#include "ircons.h"
#include "irgmod.h"
#include "irdump.h"
+#include "lower_calls.h"
#include "bitset.h"
#include "debug.h"
-
-#include "be.h"
-#include "../bearch.h"
-#include "../benode.h"
-#include "../belower.h"
-#include "../besched.h"
-#include "../beabi.h"
-#include "../bemodule.h"
-#include "../begnuas.h"
-#include "../belistsched.h"
-#include "../beflags.h"
-#include "../bespillslots.h"
+#include "error.h"
+
+#include "be_t.h"
+#include "bearch.h"
+#include "beirg.h"
+#include "benode.h"
+#include "belower.h"
+#include "besched.h"
+#include "beabi.h"
+#include "bemodule.h"
+#include "begnuas.h"
+#include "belistsched.h"
+#include "beflags.h"
+#include "bespillslots.h"
+#include "bespillutil.h"
+#include "bestack.h"
#include "bearch_amd64_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static arch_irn_class_t amd64_classify(const ir_node *irn)
-{
- (void) irn;
- return 0;
-}
-
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
if (is_amd64_FrameAddr(node)) {
- const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
} else if (is_amd64_Store(node)) {
- const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
} else if (is_amd64_Load(node)) {
- const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
}
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
if (is_amd64_FrameAddr(irn)) {
- amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
attr->fp_offset += offset;
} else if (is_amd64_Store(irn)) {
- amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
attr->fp_offset += offset;
} else if (is_amd64_Load(irn)) {
- amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
+ amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
attr->fp_offset += offset;
}
/* fill register allocator interface */
static const arch_irn_ops_t amd64_irn_ops = {
- amd64_classify,
amd64_get_frame_entity,
amd64_set_frame_offset,
amd64_get_sp_bias,
- NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
NULL, /* perform_memory_operand */
*/
static void amd64_prepare_graph(ir_graph *irg)
{
- amd64_irg_data_t *irg_data = amd64_get_irg_data(irg);
amd64_transform_graph(irg);
- if (irg_data->dump)
+ if (be_options.dump_flags & DUMP_BE)
dump_ir_graph(irg, "transformed");
}
-
-/**
- * Called immediatly before emit phase.
- */
-static void amd64_finish_irg(ir_graph *irg)
-{
- (void) irg;
-}
-
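+/**
+ * The flags register must not be copied by the register allocator; fix its
+ * uses before register allocation. (Sketch of the generic mechanism:
+ * be_sched_fix_flags duplicates a flag-producing node where its result
+ * would otherwise be live across another flag-modifying instruction.)
+ */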
static void amd64_before_ra(ir_graph *irg)
{
be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
-
static void transform_Reload(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irg_frame(irg);
- ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
+ ir_node *mem = get_irn_n(node, n_be_Reload_mem);
ir_mode *mode = get_irn_mode(node);
ir_entity *entity = be_get_frame_entity(node);
const arch_register_t *reg;
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irg_frame(irg);
- ir_node *mem = new_r_NoMem(irg);
- ir_node *val = get_irn_n(node, be_pos_Spill_val);
+ ir_node *mem = get_irg_no_mem(irg);
+ ir_node *val = get_irn_n(node, n_be_Spill_val);
//ir_mode *mode = get_irn_mode(val);
ir_entity *entity = be_get_frame_entity(node);
ir_node *sched_point;
}
}
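+/* Intended effect of amd64_after_ra_walker (sketch): every generic
+ * be_Reload is replaced by an amd64 Load from its frame entity, and every
+ * be_Spill by an amd64 Store to it, so only amd64 nodes reach the emitter. */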
-static void amd64_after_ra(ir_graph *irg)
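+/** Callback for be_assign_entities; only generic spill/reload nodes are
+ * expected here, hence the assert. */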
+static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
{
- be_coalesce_spillslots(irg);
-
- irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
+ assert(be_is_Reload(node) || be_is_Spill(node));
+ be_node_set_frame_entity(node, entity);
}
/**
- * Initializes the code generator.
+ * Collects nodes that need frame entities assigned.
*/
-static void amd64_init_graph(ir_graph *irg)
+static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
- struct obstack *obst = be_get_be_obst(irg);
- amd64_irg_data_t *irg_data = OALLOCZ(obst, amd64_irg_data_t);
- irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
-
- be_birg_from_irg(irg)->isa_link = irg_data;
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ be_fec_env_t *env = (be_fec_env_t*)data;
+ const ir_mode *mode = get_irn_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ }
}
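+/* Example (sketch): two reloads of 8-byte gp values whose live ranges do
+ * not overlap may receive the same frame entity from the coalescer, so
+ * their spill slots collapse into one. */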
-
-typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
-
/**
- * Used to create per-graph unique pseudo nodes.
+ * Called immediately before emit phase.
*/
-static inline ir_node *create_const(ir_graph *irg, ir_node **place,
- create_const_node_func func,
- const arch_register_t* reg)
+static void amd64_finish_irg(ir_graph *irg)
{
- ir_node *block, *res;
+ be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
+ bool at_begin = stack_layout->sp_relative;
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
- if (*place != NULL)
- return *place;
+ /* create and coalesce frame entities */
+ irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env, amd64_set_frame_entity, at_begin);
+ be_free_frame_entity_coalescer(fec_env);
- block = get_irg_start_block(irg);
- res = func(NULL, block);
- arch_set_irn_register(res, reg);
- *place = res;
+ irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
- return res;
+ /* fix stack entity offsets */
+ be_abi_fix_stack_nodes(irg);
+ be_abi_fix_stack_bias(irg);
}
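+/* The pass order in amd64_finish_irg matters: frame entities must be
+ * assigned before the walker rewrites spills/reloads into Load/Store
+ * nodes, and the stack bias fixup runs last so all frame offsets are
+ * final. */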
-const arch_isa_if_t amd64_isa_if;
+extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
{
&amd64_isa_if, /* isa interface implementation */
amd64_registers,
N_AMD64_CLASSES,
amd64_reg_classes,
- &amd64_registers[REG_RSP], /* stack pointer register */
- &amd64_registers[REG_RBP], /* base pointer register */
- &amd64_reg_classes[CLASS_amd64_gp], /* link pointer register class */
- -1, /* stack direction */
- 3, /* power of two stack alignment for calls, 2^2 == 4 */
- NULL, /* main environment */
- 7, /* costs for a spill instruction */
- 5, /* costs for a reload instruction */
- false, /* no custom abi handling */
+ &amd64_registers[REG_RSP], /* stack pointer register */
+ &amd64_registers[REG_RBP], /* base pointer register */
+ 3, /* power of two stack alignment for calls, 2^3 == 8 */
+ 7, /* costs for a spill instruction */
+ 5, /* costs for a reload instruction */
+ false, /* no custom abi handling */
},
};
-/**
- * Initializes the backend ISA
- */
-static arch_env_t *amd64_init(FILE *outfile)
+static void amd64_init(void)
{
- static int run_once = 0;
- amd64_isa_t *isa;
-
- if(run_once)
- return NULL;
- run_once = 1;
-
- isa = XMALLOC(amd64_isa_t);
- memcpy(isa, &amd64_isa_template, sizeof(*isa));
-
- be_emit_init(outfile);
-
amd64_register_init();
amd64_create_opcodes(&amd64_irn_ops);
-
- return &isa->base;
}
-
-
-/**
- * Closes the output file and frees the ISA structure.
- */
-static void amd64_done(void *self)
+static void amd64_finish(void)
{
- amd64_isa_t *isa = self;
+ amd64_free_opcodes();
+}
- /* emit now all global declarations */
- be_gas_emit_decls(isa->base.main_env);
+static arch_env_t *amd64_begin_codegeneration(void)
+{
+ amd64_isa_t *isa = XMALLOC(amd64_isa_t);
+ *isa = amd64_isa_template;
- be_emit_exit();
- free(self);
+ return &isa->base;
}
-
/**
- * Get the register class which shall be used to store a value of a given mode.
- * @param self The this pointer.
- * @param mode The mode in question.
- * @return A register class which can hold values of the given mode.
+ * Closes the output file and frees the ISA structure.
*/
-static const arch_register_class_t *amd64_get_reg_class_for_mode(const ir_mode *mode)
+static void amd64_end_codegeneration(void *self)
{
- assert(!mode_is_float(mode));
- return &amd64_reg_classes[CLASS_amd64_gp];
-}
-
-
-
-typedef struct {
- be_abi_call_flags_bits_t flags;
- ir_graph *irg;
-} amd64_abi_env_t;
-
-static void *amd64_abi_init(const be_abi_call_t *call, ir_graph *irg)
-{
- amd64_abi_env_t *env = XMALLOC(amd64_abi_env_t);
- be_abi_call_flags_t fl = be_abi_call_get_flags(call);
- env->flags = fl.bits;
- env->irg = irg;
- return env;
+ free(self);
}
/**
- * @param self The callback object.
- * @return The between type of for that call.
+ * @param irg The graph (unused).
+ * @return The between type for that call.
*/
-static ir_type *amd64_get_between_type(void *self)
+static ir_type *amd64_get_between_type(ir_graph *irg)
{
static ir_type *between_type = NULL;
static ir_entity *old_bp_ent = NULL;
- (void) self;
+ (void) irg;
if(!between_type) {
ir_entity *ret_addr_ent;
return between_type;
}
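+/* Frame layout sketch around the between type (stack grows downwards):
+ *
+ *   ... caller's frame ...
+ *   return address    <- ret_addr_ent
+ *   old base pointer  <- old_bp_ent
+ *   ... callee's locals ...
+ */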
-/**
- * Build the prolog, return the BASE POINTER register
- */
-static const arch_register_t *amd64_abi_prologue(void *self, ir_node **mem,
- pmap *reg_map, int *stack_bias)
-{
- amd64_abi_env_t *env = self;
- const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
- (void) mem;
- (void) stack_bias;
- (void) aenv;
- (void) reg_map;
-
- if (!env->flags.try_omit_fp) {
- /* FIXME: maybe later here should be some code to generate
- * the usual abi prologue */
- return aenv->bp;
- }
-
- return aenv->sp;
-}
-
-/* Build the epilog */
-static void amd64_abi_epilogue(void *self, ir_node *bl, ir_node **mem,
- pmap *reg_map)
-{
- amd64_abi_env_t *env = self;
- const arch_env_t *aenv = be_get_irg_arch_env(env->irg);
- ir_node *curr_sp = be_abi_reg_map_get(reg_map, aenv->sp);
- ir_node *curr_bp = be_abi_reg_map_get(reg_map, aenv->bp);
- (void) bl;
- (void) mem;
-
- if (env->flags.try_omit_fp) {
- curr_sp = be_new_IncSP(aenv->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
- }
-
- be_abi_reg_map_set(reg_map, aenv->sp, curr_sp);
- be_abi_reg_map_set(reg_map, aenv->bp, curr_bp);
-}
-
static const be_abi_callbacks_t amd64_abi_callbacks = {
- amd64_abi_init,
- free,
amd64_get_between_type,
- amd64_abi_prologue,
- amd64_abi_epilogue,
};
static const arch_register_t *gpreg_param_reg_std[] = {
* @param method_type The type of the method (procedure) in question.
* @param abi The abi object to be modified
*/
-static void amd64_get_call_abi(const void *self, ir_type *method_type,
- be_abi_call_t *abi)
+static void amd64_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
{
ir_type *tp;
ir_mode *mode;
int i, n = get_method_n_params(method_type);
- be_abi_call_flags_t call_flags;
int no_reg = 0;
- (void) self;
-
/* set abi flags for calls */
- call_flags.bits.left_to_right = 0;
- call_flags.bits.store_args_sequential = 0;
- call_flags.bits.try_omit_fp = 1;
- call_flags.bits.fp_free = 0;
- call_flags.bits.call_has_imm = 1;
-
- /* set stack parameter passing style */
+ be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
+ call_flags.call_has_imm = true;
be_abi_call_set_flags(abi, call_flags, &amd64_abi_callbacks);
for (i = 0; i < n; i++) {
tp = get_method_res_type(method_type, 0);
mode = get_type_mode(tp);
- /* FIXME: No floating point yet */
- /* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH) */;
+ if (mode_is_float(mode))
+ panic("float not supported yet");
be_abi_call_res_reg(abi, 0,
&amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
}
}
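+/* Example: a function returning a gp-sized integer gets result 0 fixed to
+ * %rax by the be_abi_call_res_reg call above; floating point results bail
+ * out with a panic for now. */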
-/**
- * Returns the necessary byte alignment for storing a register of given class.
- */
-static int amd64_get_reg_class_alignment(const arch_register_class_t *cls)
-{
- ir_mode *mode = arch_register_class_mode(cls);
- return get_mode_size_bytes(mode);
-}
-
static void amd64_lower_for_target(void)
{
+ size_t i, n_irgs = get_irp_n_irgs();
+
+ /* lower compound param handling */
+ lower_calls_with_compounds(LF_RETURN_HIDDEN);
+
+ for (i = 0; i < n_irgs; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+ /* Turn all small CopyBs into loads/stores, and turn all bigger
+ * CopyBs into memcpy calls, because we cannot handle CopyB nodes
+ * during code generation yet.
+ * TODO: Adapt this once custom CopyB handling is implemented. */
+ lower_CopyB(irg, 64, 65, true);
+ }
}
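+/* With the thresholds above, a CopyB of at most 64 bytes is expanded into
+ * individual load/store pairs and one of 65 bytes or more becomes a memcpy
+ * call, so no CopyB node survives into code generation. */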
static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
0, /* no inline assembly */
1, /* support Rotl nodes */
0, /* little endian */
- amd64_lower_for_target, /* lowering callback */
+ 1, /* modulo shift is efficient */
+ 0, /* non-modulo shift is not efficient */
NULL, /* will be set later */
amd64_is_mux_allowed, /* parameter for if conversion */
+ 64, /* machine size */
NULL, /* float arithmetic mode */
+ NULL, /* long long type */
+ NULL, /* unsigned long long type */
+ NULL, /* long double type (not supported yet) */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
return &p;
}
-static ir_graph **amd64_get_backend_irg_list(const void *self,
- ir_graph ***irgs)
-{
- (void) self;
- (void) irgs;
- return NULL;
-}
-
static asm_constraint_flags_t amd64_parse_asm_constraint(const char **c)
{
(void) c;
return 0;
}
+static int amd64_register_saved_by(const arch_register_t *reg, int callee)
+{
+ if (callee) {
+ /* check for callee saved */
+ if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
+ switch (reg->index) {
+ case REG_GP_RBX:
+ case REG_GP_RBP:
+ case REG_GP_R12:
+ case REG_GP_R13:
+ case REG_GP_R14:
+ case REG_GP_R15:
+ return 1;
+ default:
+ return 0;
+ }
+ }
+ } else {
+ /* check for caller saved */
+ if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
+ switch (reg->index) {
+ case REG_GP_RAX:
+ case REG_GP_RCX:
+ case REG_GP_RDX:
+ case REG_GP_RSI:
+ case REG_GP_RDI:
+ case REG_GP_R8:
+ case REG_GP_R9:
+ case REG_GP_R10:
+ case REG_GP_R11:
+ return 1;
+ default:
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
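+/* This encodes the System V AMD64 calling convention for the gp class:
+ * rbx, rbp and r12-r15 survive calls (callee saved), the rest are
+ * clobbered (caller saved). */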
+
const arch_isa_if_t amd64_isa_if = {
amd64_init,
- amd64_done,
- NULL, /* handle intrinsics */
- amd64_get_reg_class_for_mode,
- amd64_get_call_abi,
- amd64_get_reg_class_alignment,
+ amd64_finish,
amd64_get_backend_params,
- amd64_get_backend_irg_list,
- NULL, /* mark remat */
+ amd64_lower_for_target,
amd64_parse_asm_constraint,
amd64_is_valid_clobber,
- amd64_init_graph,
+ amd64_begin_codegeneration,
+ amd64_end_codegeneration,
+ NULL, /* init_graph */
+ amd64_get_call_abi,
+ NULL, /* mark remat */
NULL, /* get_pic_base */
+ be_new_spill,
+ be_new_reload,
+ amd64_register_saved_by,
+
+ NULL, /* handle intrinsics */
NULL, /* before_abi */
amd64_prepare_graph,
amd64_before_ra,
- amd64_after_ra,
amd64_finish_irg,
amd64_gen_routine,
};
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
void be_init_arch_amd64(void)
{
be_register_isa_if("amd64", &amd64_isa_if);