/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
/**
* @file
* @brief emit assembler for a backend graph
- * @version $Id: amd64_emitter.c 26746 2009-11-27 08:53:15Z matze $
*/
#include "config.h"
#include <limits.h>
+#include "be_t.h"
+#include "error.h"
#include "xmalloc.h"
#include "tv.h"
#include "iredges.h"
#include "irargs_t.h"
#include "irprog.h"
-#include "../besched.h"
+#include "besched.h"
+#include "begnuas.h"
+#include "beblocksched.h"
#include "amd64_emitter.h"
#include "gen_amd64_emitter.h"
+#include "gen_amd64_regalloc_if.h"
#include "amd64_nodes_attr.h"
#include "amd64_new_nodes.h"
-#define SNPRINTF_BUF_LEN 128
+#include "benode.h"
+
+/*************************************************************
+ * _ _ __ _ _
+ * (_) | | / _| | | | |
+ * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
+ * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
+ * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
+ * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
+ * | | | |
+ * |_| |_|
+ *************************************************************/
/**
- * Returns the register at in position pos.
+ * Returns the target block for a control flow node.
*/
-static const arch_register_t *get_in_reg(const ir_node *node, int pos)
+static ir_node *get_cfop_target_block(const ir_node *irn)
+{
+ return (ir_node*)get_irn_link(irn);
+}
+
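+/**
+ * Emits a single instruction line. The format string supports the
+ * following conversions (see the switch below):
+ *   %%             literal '%'
+ *   %C             immediate value from the node's attribute
+ *   %D<num>        output register <num> of the node
+ *   %E             ir_entity* argument
+ *   %L             target block of the control flow node
+ *   %O             offset of the node's SymConst attribute (if non-zero)
+ *   %R             arch_register_t* argument
+ *   %S<num> / %S*  input register <num> / input register given by an int argument
+ *   %d / %u / %s   int / unsigned / string argument
+ */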
+void amd64_emitf(ir_node const *const node, char const *fmt, ...)
{
- ir_node *op;
- const arch_register_t *reg = NULL;
+ va_list ap;
+ va_start(ap, fmt);
- assert(get_irn_arity(node) > pos && "Invalid IN position");
+ be_emit_char('\t');
+ for (;;) {
+ char const *start = fmt;
- /* The out register of the operator at position pos is the
- in register we need. */
- op = get_irn_n(node, pos);
+ while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
+ ++fmt;
+ if (fmt != start) {
+ be_emit_string_len(start, fmt - start);
+ }
- reg = arch_get_irn_register(op);
+ if (*fmt == '\n') {
+ be_emit_char('\n');
+ be_emit_write_line();
+ be_emit_char('\t');
+ ++fmt;
+ continue;
+ }
- assert(reg && "no in register found");
- return reg;
-}
+ if (*fmt == '\0')
+ break;
-/**
- * Returns the register at out position pos.
- */
-static const arch_register_t *get_out_reg(const ir_node *node, int pos)
-{
- ir_node *proj;
- const arch_register_t *reg = NULL;
-
- /* 1st case: irn is not of mode_T, so it has only */
- /* one OUT register -> good */
- /* 2nd case: irn is of mode_T -> collect all Projs and ask the */
- /* Proj with the corresponding projnum for the register */
-
- if (get_irn_mode(node) != mode_T) {
- reg = arch_get_irn_register(node);
- } else if (is_amd64_irn(node)) {
- reg = arch_irn_get_register(node, pos);
- } else {
- const ir_edge_t *edge;
+ ++fmt;
+
+ switch (*fmt++) {
+ arch_register_t const *reg;
+
+ case '%':
+ be_emit_char('%');
+ break;
+
+ case 'C': {
+ amd64_attr_t const *const attr = get_amd64_attr_const(node);
+ be_emit_irprintf("$0x%X", attr->ext.imm_value);
+ break;
+ }
+
+ case 'D':
+		if (*fmt < '0' || '9' < *fmt)
+ goto unknown;
+ reg = arch_get_irn_register_out(node, *fmt++ - '0');
+ goto emit_R;
+
+ case 'E': {
+ ir_entity const *const ent = va_arg(ap, ir_entity const*);
+ be_gas_emit_entity(ent);
+ break;
+ }
+
+ case 'L': {
+ ir_node *const block = get_cfop_target_block(node);
+ be_gas_emit_block_name(block);
+ break;
+ }
+
+ case 'O': {
+ amd64_SymConst_attr_t const *const attr = get_amd64_SymConst_attr_const(node);
+ if (attr->fp_offset)
+ be_emit_irprintf("%d", attr->fp_offset);
+ break;
+ }
+
+ case 'R':
+ reg = va_arg(ap, arch_register_t const*);
+emit_R:
+ be_emit_char('%');
+ be_emit_string(reg->name);
+ break;
- foreach_out_edge(node, edge) {
- proj = get_edge_src_irn(edge);
- assert(is_Proj(proj) && "non-Proj from mode_T node");
- if (get_Proj_proj(proj) == pos) {
- reg = arch_get_irn_register(proj);
+ case 'S': {
+ int pos;
+ if ('0' <= *fmt && *fmt <= '9') {
+ pos = *fmt++ - '0';
+ } else if (*fmt == '*') {
+ ++fmt;
+ pos = va_arg(ap, int);
+ } else {
+ goto unknown;
+ }
+ reg = arch_get_irn_register_in(node, pos);
+ goto emit_R;
+ }
+
+ case 'd': {
+ int const num = va_arg(ap, int);
+ be_emit_irprintf("%d", num);
break;
}
+
+ case 's': {
+ char const *const str = va_arg(ap, char const*);
+ be_emit_string(str);
+ break;
+ }
+
+ case 'u': {
+ unsigned const num = va_arg(ap, unsigned);
+ be_emit_irprintf("%u", num);
+ break;
+ }
+
+ default:
+unknown:
+ panic("unknown format conversion");
}
}
- assert(reg && "no out register found");
- return reg;
+ be_emit_finish_line_gas(node);
+ va_end(ap);
}
-/*************************************************************
- * _ _ __ _ _
- * (_) | | / _| | | | |
- * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
- * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
- * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
- * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
- * | | | |
- * |_| |_|
- *************************************************************/
+/***********************************************************************************
+ * _ __ _
+ * (_) / _| | |
+ * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
+ * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
+ * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
+ * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
+ *
+ ***********************************************************************************/
-void amd64_emit_immediate(const ir_node *node)
+/**
+ * Default emitter for anything that we don't want to generate code for.
+ */
+static void emit_nothing(const ir_node *node)
{
(void) node;
- /* TODO */
}
-void amd64_emit_source_register(const ir_node *node, int pos)
+/**
+ * Emit a SymConst.
+ */
+static void emit_amd64_SymConst(const ir_node *irn)
{
- const arch_register_t *reg = get_in_reg(node, pos);
- be_emit_string(arch_register_get_name(reg));
+ const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(irn);
+#if 0
+ sym_or_tv_t key, *entry;
+ unsigned label;
+
+ key.u.id = get_entity_ld_ident(attr->entity);
+ key.is_ident = 1;
+ key.label = 0;
+ entry = set_insert(sym_or_tv_t, sym_or_tv, &key, sizeof(key), hash_ptr(key.u.generic));
+ if (entry->label == 0) {
+ /* allocate a label */
+ entry->label = get_unique_label();
+ }
+ label = entry->label;
+#endif
+
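+	/* load the entity's address as an immediate into the result register */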
+ amd64_emitf(irn, "mov $%E, %D0", attr->entity);
}
-void amd64_emit_dest_register(const ir_node *node, int pos)
+/**
+ * Emit a Conv.
+ */
+static void emit_amd64_Conv(const ir_node *irn)
{
- const arch_register_t *reg = get_out_reg(node, pos);
- be_emit_string(arch_register_get_name(reg));
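+	/* for now a Conv is emitted as a plain register move, without explicit
+	 * sign or zero extension */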
+ amd64_emitf(irn, "mov %S0, %D0");
}
+
/**
- * Returns the target label for a control flow node.
+ * Returns the next block in a block schedule.
*/
-static void amd64_emit_cfop_target(const ir_node *node)
+static ir_node *sched_next_block(const ir_node *block)
{
- ir_node *block = get_irn_link(node);
-
- be_emit_irprintf("BLOCK_%ld", get_irn_node_nr(block));
+ return (ir_node*)get_irn_link(block);
}
-/***********************************************************************************
- * _ __ _
- * (_) / _| | |
- * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
- * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
- * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
- * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
- *
- ***********************************************************************************/
-
/**
- * Emits code for a unconditional jump.
+ * Emit a Jmp.
*/
-static void emit_Jmp(const ir_node *node)
+static void emit_amd64_Jmp(const ir_node *node)
{
- ir_node *block;
+ ir_node *block, *next_block;
	/* for now, the code works for scheduled and non-scheduled blocks */
block = get_nodes_block(node);
- be_emit_cstring("\tjmp ");
- amd64_emit_cfop_target(node);
+ /* we have a block schedule */
+ next_block = sched_next_block(block);
+ if (get_cfop_target_block(node) != next_block) {
+ amd64_emitf(node, "jmp %L");
+ } else if (be_options.verbose_asm) {
+ amd64_emitf(node, "/* fallthrough to %L */");
+ }
+}
+
+/**
+ * Emit a Compare with conditional branch.
+ */
+static void emit_amd64_Jcc(const ir_node *irn)
+{
+ const ir_node *proj_true = NULL;
+ const ir_node *proj_false = NULL;
+ const ir_node *block;
+ const ir_node *next_block;
+ const char *suffix;
+ const amd64_attr_t *attr = get_amd64_attr_const(irn);
+ ir_relation relation = attr->ext.relation;
+ ir_node *op1 = get_irn_n(irn, 0);
+ const amd64_attr_t *cmp_attr = get_amd64_attr_const(op1);
+ bool is_signed = !cmp_attr->data.cmp_unsigned;
+
+ assert(is_amd64_Cmp(op1));
+
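+	/* determine the Projs of the true and the false branch */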
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ long nr = get_Proj_proj(proj);
+ if (nr == pn_Cond_true) {
+ proj_true = proj;
+ } else {
+ proj_false = proj;
+ }
+ }
+
+ if (cmp_attr->data.ins_permuted) {
+ relation = get_inversed_relation(relation);
+ }
+
+	/* for now, the code works for scheduled and non-scheduled blocks */
+ block = get_nodes_block(irn);
+
+ /* we have a block schedule */
+ next_block = sched_next_block(block);
+
+ assert(relation != ir_relation_false);
+ assert(relation != ir_relation_true);
+
+ if (get_cfop_target_block(proj_true) == next_block) {
+ /* exchange both proj's so the second one can be omitted */
+ const ir_node *t = proj_true;
+
+ proj_true = proj_false;
+ proj_false = t;
+ relation = get_negated_relation(relation);
+ }
+
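+	/* translate the relation into an x86 condition code suffix; the Cmp's
+	 * signedness selects between signed and unsigned conditions */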
+ switch (relation & ir_relation_less_equal_greater) {
+ case ir_relation_equal: suffix = "e"; break;
+ case ir_relation_less: suffix = is_signed ? "l" : "b"; break;
+ case ir_relation_less_equal: suffix = is_signed ? "le" : "be"; break;
+ case ir_relation_greater: suffix = is_signed ? "g" : "a"; break;
+ case ir_relation_greater_equal: suffix = is_signed ? "ge" : "ae"; break;
+ case ir_relation_less_greater: suffix = "ne"; break;
+ case ir_relation_less_equal_greater: suffix = "mp"; break;
+ default: panic("Cmp has unsupported pnc");
+ }
+
+ /* emit the true proj */
+ amd64_emitf(proj_true, "j%s %L", suffix);
+
+ if (get_cfop_target_block(proj_false) != next_block) {
+ amd64_emitf(proj_false, "jmp %L");
+ } else if (be_options.verbose_asm) {
+ amd64_emitf(proj_false, "/* fallthrough to %L */");
+ }
+}
+
+/**
+ * Emits code for a call.
+ */
+static void emit_be_Call(const ir_node *node)
+{
+ ir_entity *entity = be_Call_get_entity(node);
+
+	/* the System V AMD64 ABI passes the number of vector registers used by a
+	 * variadic call in %al, so the register must be set before the call */
+	if (get_method_variadicity(be_Call_get_type((ir_node *) node))) {
+ /* But this still is a hack... */
+ amd64_emitf(node, "xor %%rax, %%rax");
+ }
+
+ if (entity) {
+ amd64_emitf(node, "call %E", entity);
+ } else {
+ be_emit_pad_comment();
+ be_emit_cstring("/* FIXME: call NULL entity?! */\n");
+ }
+}
+
+/**
+ * emit copy node
+ */
+static void emit_be_Copy(const ir_node *irn)
+{
+ ir_mode *mode = get_irn_mode(irn);
+
+ if (arch_get_irn_register_in(irn, 0) == arch_get_irn_register_out(irn, 0)) {
+ /* omitted Copy */
+ return;
+ }
+
+ if (mode_is_float(mode)) {
+ panic("move not supported for FP");
+ } else if (mode_is_data(mode)) {
+ amd64_emitf(irn, "mov %S0, %D0");
+ } else {
+ panic("move not supported for this mode");
+ }
+}
+
+static void emit_be_Perm(const ir_node *node)
+{
+ const arch_register_t *in0, *in1;
+
+ in0 = arch_get_irn_register(get_irn_n(node, 0));
+ in1 = arch_get_irn_register(get_irn_n(node, 1));
+
+ arch_register_class_t const* const cls0 = in0->reg_class;
+ assert(cls0 == in1->reg_class && "Register class mismatch at Perm");
+
+	if (cls0 != &amd64_reg_classes[CLASS_amd64_gp]) {
+		panic("unexpected register class in be_Perm (%+F)", node);
+	}
+
+	amd64_emitf(node, "xchg %R, %R", in0, in1);
+}
+
+static void emit_amd64_FrameAddr(const ir_node *irn)
+{
+ const amd64_SymConst_attr_t *attr =
+ (const amd64_SymConst_attr_t*) get_amd64_attr_const(irn);
+
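+	/* materialise the frame address: copy the base register, then add the
+	 * frame offset */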
+ amd64_emitf(irn, "mov %S0, %D0");
+ amd64_emitf(irn, "add $%u, %D0", attr->fp_offset);
+}
+
+/**
+ * Emits code to increase stack pointer.
+ */
+static void emit_be_IncSP(const ir_node *node)
+{
+ int offs = be_get_IncSP_offset(node);
+
+ if (offs == 0)
+ return;
+
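+	/* the stack grows towards lower addresses, so allocating space
+	 * (a positive offset) means subtracting from the stack pointer */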
+	if (offs > 0) {
+		amd64_emitf(node, "sub $%d, %D0", offs);
+	} else {
+		amd64_emitf(node, "add $%d, %D0", -offs);
+	}
+}
+
+/**
+ * Emits code for a return.
+ */
+static void emit_be_Return(const ir_node *node)
+{
+ be_emit_cstring("\tret");
be_emit_finish_line_gas(node);
}
+
+static void emit_amd64_binop_op(const ir_node *irn, int second_op)
+{
+	if (is_amd64_Add(irn)) {
+		amd64_emitf(irn, "add %S*, %D0", second_op);
+	} else if (is_amd64_Sub(irn)) {
+		/* there is no reverse subtract: negate the subtrahend, add it to the
+		 * destination and negate it again to restore its original value */
+		amd64_emitf(irn, "neg %S*", second_op);
+		amd64_emitf(irn, "add %S*, %D0", second_op);
+		amd64_emitf(irn, "neg %S*", second_op);
+	}
+}
+
+/**
+ * Emits an arithmetic operation that handles arbitrary input registers.
+ */
+static void emit_amd64_binop(const ir_node *irn)
+{
+ const arch_register_t *reg_s1 = arch_get_irn_register_in(irn, 0);
+ const arch_register_t *reg_s2 = arch_get_irn_register_in(irn, 1);
+ const arch_register_t *reg_d1 = arch_get_irn_register_out(irn, 0);
+
+	/* position of the input that still has to be applied to the destination:
+	 * if the destination already holds the first operand, apply the second */
+	int second_op = 1;
+
+	if (reg_d1 != reg_s1 && reg_d1 != reg_s2) {
+		/* destination is a third register: move the first operand into it,
+		 * then apply the second operand */
+		amd64_emitf(irn, "mov %R, %R", reg_s1, reg_d1);
+		second_op = 1;
+	} else if (reg_d1 == reg_s2 && reg_d1 != reg_s1) {
+		/* destination aliases the second operand: apply the first one
+		 * (only correct for commutative operations such as Add) */
+		second_op = 0;
+	}
+
+ emit_amd64_binop_op(irn, second_op);
+}
+
+/**
+ * The type of an emitter function.
+ */
+typedef void (emit_func)(const ir_node *irn);
+
+/**
+ * Set a node emitter. Make it a bit more type safe.
+ */
+static inline void set_emitter(ir_op *op, emit_func emit_node)
+{
+	op->ops.generic = (op_func)emit_node;
+}
+
/**
* Enters the emitter functions for handled nodes into the generic
* pointer of an opcode.
*/
static void amd64_register_emitters(void)
{
-
-/* some convienience macros to register additional emitter functions
- (other than the generated ones) */
-#define amd64_EMIT(a) op_amd64_##a->ops.generic = (op_func)emit_amd64_##a
-#define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
-#define BE_EMIT(a) op_be_##a->ops.generic = (op_func)emit_be_##a
-
/* first clear the generic function pointer for all ops */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
/* register all emitter functions defined in spec */
amd64_register_spec_emitters();
- /* register addtional emitter functions if needed */
- EMIT(Jmp);
-
-#undef amd64_EMIT
-#undef BE_EMIT
-#undef EMIT
+ set_emitter(op_amd64_SymConst, emit_amd64_SymConst);
+ set_emitter(op_amd64_Jmp, emit_amd64_Jmp);
+ set_emitter(op_amd64_Jcc, emit_amd64_Jcc);
+ set_emitter(op_amd64_Conv, emit_amd64_Conv);
+ set_emitter(op_amd64_FrameAddr, emit_amd64_FrameAddr);
+ set_emitter(op_be_Return, emit_be_Return);
+ set_emitter(op_be_Call, emit_be_Call);
+ set_emitter(op_be_Copy, emit_be_Copy);
+ set_emitter(op_be_IncSP, emit_be_IncSP);
+ set_emitter(op_be_Perm, emit_be_Perm);
+
+ set_emitter(op_amd64_Add, emit_amd64_binop);
+ set_emitter(op_amd64_Sub, emit_amd64_binop);
+
+ set_emitter(op_be_Start, emit_nothing);
+ set_emitter(op_be_Keep, emit_nothing);
+ set_emitter(op_Phi, emit_nothing);
}
typedef void (*emit_func_ptr) (const ir_node *);
*/
static void amd64_gen_block(ir_node *block, void *data)
{
- ir_node *node;
(void) data;
if (! is_Block(block))
return;
- be_emit_cstring("BLOCK_");
- be_emit_irprintf("%ld:\n", get_irn_node_nr(block));
- be_emit_write_line();
+ be_gas_begin_block(block, true);
sched_foreach(block, node) {
amd64_emit_node(node);
}
-/**
- * Emits code for function start.
- */
-static void amd64_emit_func_prolog(ir_graph *irg)
-{
- const char *irg_name = get_entity_name(get_irg_entity(irg));
-
- /* TODO: emit function header */
- be_emit_cstring("/* start of ");
- be_emit_string(irg_name);
- be_emit_cstring(" */\n");
- be_emit_write_line();
-}
-
-/**
- * Emits code for function end
- */
-static void amd64_emit_func_epilog(ir_graph *irg)
-{
- const char *irg_name = get_entity_name(get_irg_entity(irg));
-
- /* TODO: emit function end */
- be_emit_cstring("/* end of ");
- be_emit_string(irg_name);
- be_emit_cstring(" */\n");
- be_emit_write_line();
-}
-
/**
* Sets labels for control flow nodes (jump target)
* TODO: Jump optimization
/**
* Main driver
*/
-void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg)
+void amd64_gen_routine(ir_graph *irg)
{
- (void)cg;
+ ir_entity *entity = get_irg_entity(irg);
+ ir_node **blk_sched;
+ size_t i, n;
/* register all emitter functions */
amd64_register_emitters();
- amd64_emit_func_prolog(irg);
+ blk_sched = be_create_block_schedule(irg);
+
+ be_gas_emit_function_prolog(entity, 4, NULL);
+
irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
- irg_walk_blkwise_graph(irg, NULL, amd64_gen_block, NULL);
- amd64_emit_func_epilog(irg);
+
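+	/* store each block's schedule successor in its link field, so the jump
+	 * emitters can detect and omit fallthrough jumps */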
+ n = ARR_LEN(blk_sched);
+ for (i = 0; i < n; i++) {
+ ir_node *block = blk_sched[i];
+ ir_node *next = (i + 1) < n ? blk_sched[i+1] : NULL;
+
+ set_irn_link(block, next);
+ }
+
+ for (i = 0; i < n; ++i) {
+ ir_node *block = blk_sched[i];
+
+ amd64_gen_block(block, 0);
+ }
+
+ be_gas_emit_function_epilog(entity);
}