/**
 * Emit the encoding of a binary operation whose right operand is an
 * Immediate node.
 *
 * @param node       the binop; its left operand supplies the ModRM register
 * @param opcode_ax  opcode of the short "eax, imm32" form
 * @param opcode     opcode of the generic "r/m32, imm" form
 * @param ruval      the /digit opcode extension placed in the ModRM reg field
 */
static void bemit_binop_with_imm(
 const ir_node *node,
 unsigned char opcode_ax,
 unsigned char opcode, unsigned char ruval)
{
 /* Use in-reg, because some instructions (cmp, test) have no out-reg. */
 const ir_node *op = get_irn_n(node, n_ia32_binary_right);
 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
 unsigned size;

 /* Some instructions (test) have no short form with 32bit value + 8bit
  * immediate. A symconst needs a relocation and hence the full 32 bits
  * as well. */
 if (attr->symconst != NULL || opcode & SIGNEXT_IMM) {
 size = 4;
 } else {
 /* check for sign extension */
 size = get_signed_imm_size(attr->offset);
 }

 switch (size) {
 case 1:
 /* imm8 form: setting SIGNEXT_IMM in the opcode apparently selects the
  * sign-extended 8bit immediate encoding (e.g. 0x81 -> 0x83) */
 bemit8(opcode | SIGNEXT_IMM);
 /* cmp has this special mode */
 if (get_ia32_op_type(node) == ia32_AddrModeS) {
 bemit_mod_am(ruval, node);
 } else {
 const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
 bemit_modru(reg, ruval);
 }
 bemit8((unsigned char)attr->offset);
 return;
 case 2:
 case 4:
 /* check for eax variant: this variant is shorter for 32bit immediates only */
 if (get_ia32_op_type(node) == ia32_AddrModeS) {
 bemit8(opcode);
 bemit_mod_am(ruval, node);
 } else {
 const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
 if (reg->index == REG_EAX) {
 bemit8(opcode_ax);
 } else {
 bemit8(opcode);
 bemit_modru(reg, ruval);
 }
 }
 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
 return;
 }
 panic("invalid imm size?!?");
}
+
+/**
+ * Emits a binop.
+ */
+static void bemit_binop_2(const ir_node *node, unsigned code)
+{
+ const arch_register_t *out = get_in_reg(node, n_ia32_binary_left);
+ ia32_op_type_t am_type = get_ia32_op_type(node);
+ unsigned char d = 0;
+ const arch_register_t *op2;
+
+ switch (am_type) {
+ case ia32_AddrModeS:
+ d = 2;
+ /* FALLTHROUGH */
+ case ia32_AddrModeD:
+ bemit8(code | d);
+ bemit_mod_am(reg_gp_map[out->index], node);
+ return;
+ case ia32_Normal:
+ bemit8(code);
+ op2 = get_in_reg(node, n_ia32_binary_right);
+ bemit_modrr(out, op2);
+ return;
+ }
+ panic("invalid address mode");
+}
+
+/**
+ * Emit a binop.
+ */
+static void bemit_binop(const ir_node *node, const unsigned char opcodes[4])
+{
+ ir_node *right = get_irn_n(node, n_ia32_binary_right);
+ if (is_ia32_Immediate(right)) {
+ bemit_binop_with_imm(node, opcodes[1], opcodes[2], opcodes[3]);
+ } else {
+ bemit_binop_2(node, opcodes[0]);
+ }
+}
+
+/**
+ * Emit an unop.
+ */
+static void bemit_unop(const ir_node *node, unsigned char code, unsigned char ext, int input)
+{
+ bemit8(code);
+ if (get_ia32_op_type(node) == ia32_Normal) {
+ const arch_register_t *in = get_in_reg(node, input);
+ bemit_modru(in, ext);
+ } else {
+ bemit_mod_am(ext, node);
+ }
+}
+
+static void bemit_unop_reg(const ir_node *node, unsigned char code, int input)
+{
+ const arch_register_t *out = get_out_reg(node, 0);
+ bemit_unop(node, code, reg_gp_map[out->index], input);
+}
+
+static void bemit_immediate(const ir_node *node, bool relative)
+{
+ const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
+ bemit_entity(attr->symconst, attr->sc_sign, attr->offset, relative);
+}
+
+static void bemit_copy(const ir_node *copy)
+{
+ const arch_register_t *in = get_in_reg(copy, 0);
+ const arch_register_t *out = get_out_reg(copy, 0);
+
+ if (in == out || is_unknown_reg(in))
+ return;
+ /* copies of vf nodes aren't real... */
+ if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
+ return;
+
+ if (get_irn_mode(copy) == mode_E) {
+ panic("NIY");
+ } else {
+ assert(arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_gp]);
+ bemit8(0x89);
+ bemit_modrr(out, in);
+ }
+}
+
+static void bemit_xor0(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, 0);
+ bemit8(0x31);
+ bemit_modrr(out, out);
+}
+
+static void bemit_mov_const(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, 0);
+ bemit8(0xB8 + reg_gp_map[out->index]);
+ bemit_immediate(node, false);
+}
+
/**
 * Creates a function for a Binop with 3 possible encodings.
 *
 * Table columns (see bemit_binop):
 *   op0      opcode of the register/memory form
 *   op1      opcode of the short "eax, imm" form
 *   op2      opcode of the generic "r/m, imm" form
 *   op2_ext  the /digit ModRM extension used with op2
 */
#define BINOP(op, op0, op1, op2, op2_ext) \
static void bemit_ ## op(const ir_node *node) { \
 static const unsigned char op ## _codes[] = {op0, op1, op2, op2_ext}; \
 bemit_binop(node, op ## _codes); \
}

/* insn def eax,imm imm */
BINOP(add, 0x01, 0x05, 0x81, 0)
BINOP(or, 0x09, 0x0D, 0x81, 1)
BINOP(adc, 0x11, 0x15, 0x81, 2)
BINOP(sbb, 0x19, 0x1D, 0x81, 3)
BINOP(and, 0x21, 0x25, 0x81, 4)
BINOP(sub, 0x29, 0x2D, 0x81, 5)
BINOP(xor, 0x31, 0x35, 0x81, 6)
BINOP(cmp, 0x39, 0x3D, 0x81, 7)
BINOP(test, 0x85, 0xA9, 0xF7, 0)
+
/**
 * Creates a function for an Unop with code /ext encoding.
 *
 *   code   the opcode byte
 *   ext    the /digit placed in the ModRM reg field
 *   input  index of the operand input on the node
 */
#define UNOP(op, code, ext, input) \
static void bemit_ ## op(const ir_node *node) { \
 bemit_unop(node, code, ext, input); \
}

UNOP(not, 0xF7, 2, n_ia32_Not_val)
UNOP(neg, 0xF7, 3, n_ia32_Neg_val)
UNOP(mul, 0xF7, 4, n_ia32_Mul_right)
UNOP(imul1op, 0xF7, 5, n_ia32_IMul1OP_right)
UNOP(div, 0xF7, 6, n_ia32_Div_divisor)
UNOP(idiv, 0xF7, 7, n_ia32_IDiv_divisor)

/* TODO: am support for IJmp */
UNOP(ijmp, 0xFF, 4, n_ia32_IJmp_target)
+
/*
 * SHIFT creates an emitter for a shift/rotate instruction acting on the
 * output register. Three encodings are used:
 *   0xD1 /ext  shift by 1
 *   0xC1 /ext  shift by imm8
 *   0xD3 /ext  shift by cl
 * ext is the /digit ModRM extension selecting the operation.
 */
#define SHIFT(op, ext) \
static void bemit_##op(const ir_node *node) \
{ \
 const arch_register_t *out = get_out_reg(node, 0); \
 ir_node *count = get_irn_n(node, 1); \
 if (is_ia32_Immediate(count)) { \
 int offset = get_ia32_immediate_attr_const(count)->offset; \
 if (offset == 1) { \
 bemit8(0xD1); \
 bemit_modru(out, ext); \
 } else { \
 bemit8(0xC1); \
 bemit_modru(out, ext); \
 bemit8(offset); \
 } \
 } else { \
 bemit8(0xD3); \
 bemit_modru(out, ext); \
 } \
}

SHIFT(rol, 0)
SHIFT(ror, 1)
SHIFT(shl, 4)
SHIFT(shr, 5)
SHIFT(sar, 7)
+
+static void bemit_imul(const ir_node *node)
+{
+ ir_node *right = get_irn_n(node, n_ia32_IMul_right);
+ /* Do we need the immediate form? */
+ if (is_ia32_Immediate(right)) {
+ int imm = get_ia32_immediate_attr_const(right)->offset;
+ if (get_signed_imm_size(imm) == 1) {
+ bemit_unop_reg(node, 0x6B, n_ia32_IMul_left);
+ bemit8(imm);
+ } else {
+ bemit_unop_reg(node, 0x69, n_ia32_IMul_left);
+ bemit32(imm);
+ }
+ } else {
+ bemit8(0x0F);
+ bemit_unop_reg(node, 0xAF, n_ia32_IMul_right);
+ }
+}
+
+static void bemit_dec(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, pn_ia32_Dec_res);
+ bemit8(0x48 + reg_gp_map[out->index]);
+}
+
+static void bemit_inc(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, pn_ia32_Inc_res);
+ bemit8(0x40 + reg_gp_map[out->index]);
+}
+
+/**
+ * Emit a Lea.
+ */
+static void bemit_lea(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, 0);
+ bemit8(0x8D);
+ bemit_mod_am(reg_gp_map[out->index], node);
+}
+
/**
 * Emit a single opcode: instructions that consist of exactly one fixed
 * byte and take no operands.
 */
#define EMIT_SINGLEOP(op, code) \
static void bemit_ ## op(const ir_node *node) { \
 (void) node; \
 bemit8(code); \
}

//EMIT_SINGLEOP(daa, 0x27)
//EMIT_SINGLEOP(das, 0x2F)
//EMIT_SINGLEOP(aaa, 0x37)
//EMIT_SINGLEOP(aas, 0x3F)
//EMIT_SINGLEOP(nop, 0x90)
EMIT_SINGLEOP(cwtl, 0x98)
EMIT_SINGLEOP(cltd, 0x99)
//EMIT_SINGLEOP(fwait, 0x9B)
EMIT_SINGLEOP(sahf, 0x9E)
//EMIT_SINGLEOP(popf, 0x9D)
EMIT_SINGLEOP(int3, 0xCC)
//EMIT_SINGLEOP(iret, 0xCF)
//EMIT_SINGLEOP(xlat, 0xD7)
//EMIT_SINGLEOP(lock, 0xF0)
EMIT_SINGLEOP(rep, 0xF3)
//EMIT_SINGLEOP(halt, 0xF4)
EMIT_SINGLEOP(cmc, 0xF5)
EMIT_SINGLEOP(stc, 0xF9)
//EMIT_SINGLEOP(cli, 0xFA)
//EMIT_SINGLEOP(sti, 0xFB)
//EMIT_SINGLEOP(std, 0xFD)
+
+/**
+ * Emits a MOV out, [MEM].
+ */
+static void bemit_load(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, 0);
+
+ if (out->index == REG_EAX) {
+ ir_entity *ent = get_ia32_am_sc(node);
+ int offs = get_ia32_am_offs_int(node);
+ ir_node *base = get_irn_n(node, n_ia32_base);
+ int has_base = !is_ia32_NoReg_GP(base);
+ ir_node *index = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(index);
+
+ if (ent == NULL && !has_base && !has_index) {
+ /* load from constant address to EAX can be encoded
+ as 0xA1 [offset] */
+ bemit8(0xA1);
+ bemit_entity(NULL, 0, offs, false);
+ return;
+ }
+ }
+ bemit8(0x8B);
+ bemit_mod_am(reg_gp_map[out->index], node);
+}
+
/**
 * Emits a MOV [mem], in.
 *
 * Three encodings: C7 /0 imm32 for immediate values, the short
 * "mov moffs32, eax" form (0xA3) when storing eax to a plain absolute
 * address, and 89 /r otherwise.
 */
static void bemit_store(const ir_node *node)
{
 const ir_node *value = get_irn_n(node, n_ia32_Store_val);

 if (is_ia32_Immediate(value)) {
 /* mov r/m32, imm32 */
 bemit8(0xC7);
 bemit_mod_am(0, node);
 bemit_immediate(value, false);
 } else {
 const arch_register_t *in = get_in_reg(node, n_ia32_Store_val);

 if (in->index == REG_EAX) {
 ir_entity *ent = get_ia32_am_sc(node);
 int offs = get_ia32_am_offs_int(node);
 ir_node *base = get_irn_n(node, n_ia32_base);
 int has_base = !is_ia32_NoReg_GP(base);
 ir_node *index = get_irn_n(node, n_ia32_index);
 int has_index = !is_ia32_NoReg_GP(index);

 if (ent == NULL && !has_base && !has_index) {
 /* store to constant address from EAX can be encoded as
 0xA3 [offset]*/
 bemit8(0xA3);
 bemit_entity(NULL, 0, offs, false);
 return;
 }
 }
 /* mov r/m32, r32 */
 bemit8(0x89);
 bemit_mod_am(reg_gp_map[in->index], node);
 }
}
+
+static void bemit_conv_i2i(const ir_node *node)
+{
+ ir_mode *smaller_mode = get_ia32_ls_mode(node);
+ unsigned opcode;
+
+ bemit8(0x0F);
+ /* 8 16 bit source
+ * movzx B6 B7
+ * movsx BE BF
+ */
+ opcode = 0xB6;
+ if (mode_is_signed(smaller_mode)) opcode |= 0x08;
+ if (get_mode_size_bits(smaller_mode) == 16) opcode |= 0x01;
+ bemit_unop_reg(node, opcode, n_ia32_Conv_I2I_val);
+}
+
+/**
+ * Emit a Push.
+ */
+static void bemit_push(const ir_node *node)
+{
+ const ir_node *value = get_irn_n(node, n_ia32_Push_val);
+
+ if (is_ia32_Immediate(value)) {
+ const ia32_immediate_attr_t *attr
+ = get_ia32_immediate_attr_const(value);
+ unsigned size = get_unsigned_imm_size(attr->offset);
+ if (attr->symconst)
+ size = 4;
+ switch (size) {
+ case 1:
+ bemit8(0x6A);
+ bemit8((unsigned char)attr->offset);
+ break;
+ case 2:
+ case 4:
+ bemit8(0x68);
+ bemit_immediate(value, false);
+ break;
+ }
+ } else if (is_ia32_NoReg_GP(value)) {
+ bemit8(0xFF);
+ bemit_mod_am(6, node);
+ } else {
+ const arch_register_t *reg = get_in_reg(node, n_ia32_Push_val);
+ bemit8(0x50 + reg_gp_map[reg->index]);
+ }
+}
+
+/**
+ * Emit a Pop.
+ */
+static void bemit_pop(const ir_node *node)
+{
+ const arch_register_t *reg = get_out_reg(node, pn_ia32_Pop_res);
+ bemit8(0x58 + reg_gp_map[reg->index]);
+}
+
+static void bemit_popmem(const ir_node *node)
+{
+ bemit8(0x8F);
+ bemit_mod_am(0, node);
+}
+
+static void bemit_call(const ir_node *node)
+{
+ ir_node *proc = get_irn_n(node, n_ia32_Call_addr);
+
+ if (is_ia32_Immediate(proc)) {
+ bemit8(0xE8);
+ bemit_immediate(proc, true);
+ } else {
+ bemit_unop(node, 0xFF, 2, n_ia32_Call_addr);
+ }
+}
+
+static void bemit_jmp(const ir_node *dest_block)
+{
+ bemit8(0xE9);
+ bemit_jmp_destination(dest_block);
+}
+
+static void bemit_jump(const ir_node *node)
+{
+ if (can_be_fallthrough(node))
+ return;
+
+ bemit_jmp(get_cfop_target_block(node));
+}
+
/**
 * Emit a conditional jump (two-byte opcode 0F 80+cc, rel32) to the given
 * destination block.
 *
 * @param pnc         the (finalized) ia32 condition code
 * @param dest_block  the jump target block
 */
static void bemit_jcc(int pnc, const ir_node *dest_block)
{
 unsigned char cc;

 if (pnc == ia32_pn_Cmp_parity) {
 /* jp */
 cc = 0x0A;
 } else {
 /* float and unsigned compares use the unsigned cc table,
  * everything else the signed one */
 if (pnc & ia32_pn_Cmp_float || pnc & ia32_pn_Cmp_unsigned) {
 cc = pnc_map_unsigned[pnc & 0x07];
 } else {
 cc = pnc_map_signed[pnc & 0x07];
 }
 }
 /* 0xFF marks unmapped entries in the cc tables */
 assert(cc != 0xFF);

 bemit8(0x0F);
 bemit8(0x80 + cc);
 bemit_jmp_destination(dest_block);
}
+
+static void bemit_ia32_jcc(const ir_node *node)
+{
+ int pnc = get_ia32_condcode(node);
+ int need_parity_label = 0;
+ const ir_node *proj_true;
+ const ir_node *proj_false;
+ const ir_node *dest_true;
+ const ir_node *dest_false;
+ const ir_node *block;
+
+ pnc = determine_final_pnc(node, 0, pnc);
+
+ /* get both Projs */
+ proj_true = get_proj(node, pn_ia32_Jcc_true);
+ assert(proj_true && "Jcc without true Proj");
+
+ proj_false = get_proj(node, pn_ia32_Jcc_false);
+ assert(proj_false && "Jcc without false Proj");
+
+ block = get_nodes_block(node);
+
+ if (can_be_fallthrough(proj_true)) {
+ /* exchange both proj's so the second one can be omitted */
+ const ir_node *t = proj_true;
+
+ proj_true = proj_false;
+ proj_false = t;
+ pnc = ia32_get_negated_pnc(pnc);
+ }
+
+ dest_true = get_cfop_target_block(proj_true);
+ dest_false = get_cfop_target_block(proj_false);
+
+ if (pnc & ia32_pn_Cmp_float) {
+ panic("Float jump NIY");
+ /* Some floating point comparisons require a test of the parity flag,
+ * which indicates that the result is unordered */
+ switch (pnc & 15) {
+ case pn_Cmp_Uo: {
+ ia32_emitf(proj_true, "\tjp %L\n");
+ break;
+ }
+
+ case pn_Cmp_Leg:
+ ia32_emitf(proj_true, "\tjnp %L\n");
+ break;
+
+ case pn_Cmp_Eq:
+ case pn_Cmp_Lt:
+ case pn_Cmp_Le:
+ /* we need a local label if the false proj is a fallthrough
+ * as the falseblock might have no label emitted then */
+ if (can_be_fallthrough(proj_false)) {
+ need_parity_label = 1;
+ ia32_emitf(proj_false, "\tjp 1f\n");
+ } else {
+ ia32_emitf(proj_false, "\tjp %L\n");
+ }
+ goto emit_jcc;
+
+ case pn_Cmp_Ug:
+ case pn_Cmp_Uge:
+ case pn_Cmp_Ne:
+ ia32_emitf(proj_true, "\tjp %L\n");
+ goto emit_jcc;
+
+ default:
+ goto emit_jcc;
+ }
+ } else {
+emit_jcc:
+ bemit_jcc(pnc, dest_true);
+ }
+
+ if (need_parity_label) {
+ panic("parity label NIY");
+ }
+
+ /* the second Proj might be a fallthrough */
+ if (can_be_fallthrough(proj_false)) {
+ /* it's a fallthrough */
+ } else {
+ bemit_jmp(dest_false);
+ }
+}
+
+/**
+ * Emits a return.
+ */
+static void bemit_return(const ir_node *node)
+{
+ unsigned pop = be_Return_get_pop(node);
+ if (pop > 0 || be_Return_get_emit_pop(node)) {
+ bemit8(0xC2);
+ assert(pop <= 0xffff);
+ bemit16(pop);
+ } else {
+ bemit8(0xC3);
+ }
+}
+
+static void bemit_incsp(const ir_node *node)
+{
+ int offs;
+ const arch_register_t *reg;
+ unsigned size;
+ unsigned ext;
+
+ offs = be_get_IncSP_offset(node);
+ if (offs == 0)
+ return;
+
+ if (offs > 0) {
+ ext = 5; /* sub */
+ } else {
+ ext = 0; /* add */
+ offs = -offs;
+ }
+
+ size = get_signed_imm_size(offs);
+ bemit8(size == 1 ? 0x83 : 0x81);
+
+ reg = get_out_reg(node, 0);
+ bemit_modru(reg, ext);
+
+ if (size == 1) {
+ bemit8(offs);
+ } else {
+ bemit32(offs);
+ }
+}
+
/**
 * The type of an emitter function: takes the node to emit, returns nothing.
 */
typedef void (*emit_func) (const ir_node *);

/**
 * Set a node emitter. Make it a bit more type safe.
 *
 * Stores the emitter in the opcode's generic function pointer, from where
 * the emission driver dispatches per node opcode.
 */
static void register_emitter(ir_op *op, emit_func func)
{
 op->ops.generic = (op_func) func;
}
+
/**
 * Register the binary-emitter callback for every opcode this backend can
 * encode directly; everything listed at the bottom emits nothing.
 */
static void ia32_register_binary_emitters(void)
{
 /* first clear the generic function pointer for all ops */
 clear_irp_opcodes_generic_func();

 /* benode emitter */
 register_emitter(op_be_Copy, bemit_copy);
 register_emitter(op_be_CopyKeep, bemit_copy);
 register_emitter(op_be_IncSP, bemit_incsp);
 register_emitter(op_be_Return, bemit_return);
 register_emitter(op_ia32_Adc, bemit_adc);
 register_emitter(op_ia32_Add, bemit_add);
 register_emitter(op_ia32_And, bemit_and);
 register_emitter(op_ia32_Breakpoint, bemit_int3);
 register_emitter(op_ia32_Call, bemit_call);
 register_emitter(op_ia32_Cltd, bemit_cltd);
 register_emitter(op_ia32_Cmc, bemit_cmc);
 register_emitter(op_ia32_Cmp, bemit_cmp);
 register_emitter(op_ia32_Const, bemit_mov_const);
 register_emitter(op_ia32_Conv_I2I, bemit_conv_i2i);
 register_emitter(op_ia32_Conv_I2I8Bit, bemit_conv_i2i);
 register_emitter(op_ia32_Cwtl, bemit_cwtl);
 register_emitter(op_ia32_Dec, bemit_dec);
 register_emitter(op_ia32_Div, bemit_div);
 register_emitter(op_ia32_IDiv, bemit_idiv);
 register_emitter(op_ia32_IJmp, bemit_ijmp);
 register_emitter(op_ia32_IMul1OP, bemit_imul1op);
 register_emitter(op_ia32_IMul, bemit_imul);
 register_emitter(op_ia32_Inc, bemit_inc);
 register_emitter(op_ia32_Jcc, bemit_ia32_jcc);
 register_emitter(op_ia32_Jmp, bemit_jump);
 register_emitter(op_ia32_Lea, bemit_lea);
 register_emitter(op_ia32_Load, bemit_load);
 register_emitter(op_ia32_Mul, bemit_mul);
 register_emitter(op_ia32_Neg, bemit_neg);
 register_emitter(op_ia32_Not, bemit_not);
 register_emitter(op_ia32_Or, bemit_or);
 register_emitter(op_ia32_Pop, bemit_pop);
 register_emitter(op_ia32_PopEbp, bemit_pop);
 register_emitter(op_ia32_PopMem, bemit_popmem);
 register_emitter(op_ia32_Push, bemit_push);
 register_emitter(op_ia32_RepPrefix, bemit_rep);
 register_emitter(op_ia32_Rol, bemit_rol);
 register_emitter(op_ia32_Ror, bemit_ror);
 register_emitter(op_ia32_Sahf, bemit_sahf);
 register_emitter(op_ia32_Sar, bemit_sar);
 register_emitter(op_ia32_Sbb, bemit_sbb);
 register_emitter(op_ia32_Shl, bemit_shl);
 register_emitter(op_ia32_Shr, bemit_shr);
 register_emitter(op_ia32_Stc, bemit_stc);
 register_emitter(op_ia32_Store, bemit_store);
 register_emitter(op_ia32_Sub, bemit_sub);
 register_emitter(op_ia32_Test, bemit_test);
 register_emitter(op_ia32_Xor, bemit_xor);
 register_emitter(op_ia32_Xor0, bemit_xor0);

 /* ignore the following nodes */
 register_emitter(op_ia32_ProduceVal, emit_Nothing);
 register_emitter(op_be_Barrier, emit_Nothing);
 register_emitter(op_be_Keep, emit_Nothing);
 register_emitter(op_be_Start, emit_Nothing);
 register_emitter(op_Phi, emit_Nothing);
 register_emitter(op_Start, emit_Nothing);
}
+
+static void gen_binary_block(ir_node *block)
+{
+ ir_node *node;
+
+ ia32_emit_block_header(block);
+
+ /* emit the contents of the block */
+ sched_foreach(block, node) {
+ ia32_emit_node(node);
+ }
+}
+
+void ia32_gen_binary_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
+{
+ ir_entity *entity = get_irg_entity(irg);