+ ir_entity *ent = get_ia32_am_sc(node);
+ int offs = get_ia32_am_offs_int(node);
+ ir_node *base = get_irn_n(node, n_ia32_base);
+ int has_base = !is_ia32_NoReg_GP(base);
+ ir_node *index = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(index);
+ unsigned modrm = 0;
+ unsigned sib = 0;
+ unsigned emitoffs = 0;
+ bool emitsib = false;
+
+ /* set the mod part depending on displacement */
+ if (ent != NULL) {
+ modrm |= MOD_IND_WORD_OFS;
+ emitoffs = 32;
+ } else if (offs == 0) {
+ modrm |= MOD_IND;
+ emitoffs = 0;
+ } else if (offs >= -127 && offs <= 128) {
+ modrm |= MOD_IND_BYTE_OFS;
+ emitoffs = 8;
+ } else {
+ modrm |= MOD_IND_WORD_OFS;
+ emitoffs = 32;
+ }
+
+ /* determine if we need a SIB byte */
+ if (has_index) {
+ int scale;
+ const arch_register_t *reg_index = arch_get_irn_register(index);
+ assert(reg_index->index != REG_ESP);
+ sib |= ENC_INDEX(reg_gp_map[reg_index->index]);
+
+ if (has_base) {
+ const arch_register_t *reg = arch_get_irn_register(base);
+ sib |= ENC_BASE(reg_gp_map[reg->index]);
+ } else {
+ /* use the EBP encoding if NO base register */
+ sib |= 0x05;
+ }
+
+ scale = get_ia32_am_scale(node);
+ assert(scale < 4);
+ sib |= ENC_SCALE(scale);
+ emitsib = true;
+ }
+
+ /* determine modrm byte */
+ if (emitsib) {
+ /* R/M set to ESP means SIB in 32bit mode */
+ modrm |= ENC_RM(0x04);
+ } else if (has_base) {
+ const arch_register_t *reg = arch_get_irn_register(base);
+ if (reg->index == REG_ESP) {
+ /* for the above reason we are forced to emit a sib
+ when base is ESP. Only the base is used */
+ sib = ENC_BASE(0x04);
+ emitsib = true;
+
+ /* we are forced to emit a 8bit offset as EBP base without
+ offset is a special case for SIB without base register */
+ } else if (reg->index == REG_EBP && emitoffs == 0) {
+ assert(GET_MODE(modrm) == MOD_IND);
+ emitoffs = 8;
+ modrm |= MOD_IND_BYTE_OFS;
+ }
+ modrm |= ENC_RM(reg_gp_map[reg->index]);
+ } else {
+ /* only displacement: Use EBP + disp encoding in 32bit mode */
+ if (emitoffs == 0) {
+ emitoffs = 8;
+ modrm = MOD_IND_BYTE_OFS;
+ }
+ modrm |= ENC_RM(0x05);
+ }
+
+ modrm |= ENC_REG(reg);
+
+ bemit8(modrm);
+ if (emitsib)
+ bemit8(sib);
+
+ /* emit displacement */
+ if (emitoffs == 8) {
+ bemit8((unsigned) offs);
+ } else if (emitoffs == 32) {
+ bemit_entity(ent, is_ia32_am_sc_sign(node), offs, false);
+ }
+}
+
+/**
+ * Emits a binop.
+ */
+static void bemit_binop_2(const ir_node *node, unsigned code)
+{
+ const arch_register_t *out = get_in_reg(node, n_ia32_binary_left);
+ ia32_op_type_t am_type = get_ia32_op_type(node);
+ unsigned char d = 0;
+ const arch_register_t *op2;
+
+ switch (am_type) {
+ case ia32_AddrModeS:
+ d = 2;
+ /* FALLTHROUGH */
+ case ia32_AddrModeD:
+ bemit8(code | d);
+ bemit_mod_am(reg_gp_map[out->index], node);
+ return;
+ case ia32_Normal:
+ bemit8(code);
+ op2 = get_in_reg(node, n_ia32_binary_right);
+ bemit_modrr(out, op2);
+ return;
+ }
+ panic("invalid address mode");
+}
+
+/**
+ * Emit a binop.
+ */
+static void bemit_binop(const ir_node *node, const unsigned char opcodes[4])
+{
+ ir_node *right = get_irn_n(node, n_ia32_binary_right);
+ if (is_ia32_Immediate(right)) {
+ /* there's a shorter variant with DEST=EAX */
+ const arch_register_t *reg = get_out_reg(node, 0);
+ if (reg->index == REG_EAX)
+
+ bemit_binop_with_imm(node, opcodes[1], opcodes[2], opcodes[3]);
+ } else {
+ bemit_binop_2(node, opcodes[0]);
+ }
+}
+
+/**
+ * Emit an unop.
+ */
+static void bemit_unop(const ir_node *node, unsigned char code, unsigned char ext, int input)
+{
+ ia32_op_type_t am_type = get_ia32_op_type(node);
+
+ bemit8(code);
+ if (am_type == ia32_AddrModeD) {
+ bemit8(code);
+ bemit_mod_am(ext, node);
+ } else {
+ const arch_register_t *in = get_in_reg(node, input);
+ assert(am_type == ia32_Normal);
+ bemit_modru(in, ext);
+ }
+}
+
+
+ /** Emits the entity/offset immediate attached to an Immediate node. */
+ static void bemit_immediate(const ir_node *node, bool relative)
+ {
+     const ia32_immediate_attr_t *imm = get_ia32_immediate_attr_const(node);
+     bemit_entity(imm->symconst, imm->sc_sign, imm->offset, relative);
+ }
+
+ /** Emits a register-to-register copy (mov) for a be_Copy node. */
+ static void bemit_copy(const ir_node *copy)
+ {
+     const ir_node         *src_node = be_get_Copy_op(copy);
+     const arch_register_t *src      = arch_get_irn_register(src_node);
+     const arch_register_t *dst      = arch_get_irn_register(copy);
+
+     /* nothing to emit for no-op copies or unknown sources */
+     if (src == dst || is_unknown_reg(src))
+         return;
+     /* copies of vf nodes aren't real... */
+     if (arch_register_get_class(src) == &ia32_reg_classes[CLASS_ia32_vfp])
+         return;
+
+     if (get_irn_mode(copy) == mode_E)
+         panic("NIY");
+
+     assert(arch_register_get_class(src) == &ia32_reg_classes[CLASS_ia32_gp]);
+     bemit8(0x89); /* mov r/m32, r32 */
+     bemit_modrr(dst, src);
+ }
+
+ /** Emits "xor reg, reg" to zero the output register. */
+ static void bemit_xor0(const ir_node *node)
+ {
+     const arch_register_t *dst = get_out_reg(node, 0);
+     bemit8(0x31); /* xor r/m32, r32 */
+     bemit_modrr(dst, dst);
+ }
+
+ /** Emits "mov reg, imm32" (opcode 0xB8+reg) for a Const node. */
+ static void bemit_mov_const(const ir_node *node)
+ {
+     const arch_register_t *dst = get_out_reg(node, 0);
+     bemit8(0xB8 + reg_gp_map[dst->index]);
+     bemit_immediate(node, false);
+ }
+
+/**
+ * Creates a function for a Binop with 3 possible encodings.
+ */
+#define BINOP(op, op0, op1, op2, op2_ext) \
+static void bemit_ ## op(const ir_node *node) { \
+ static const unsigned char op ## _codes[] = {op0, op1, op2, op2_ext}; \
+ bemit_binop(node, op ## _codes); \
+}
+
+/* insn def eax,imm imm */
+BINOP(add, 0x01, 0x05, 0x81, 0 )
+BINOP(or, 0x09, 0x0D, 0x81, 1 )
+BINOP(adc, 0x11, 0x15, 0x81, 2 )
+BINOP(sbb, 0x19, 0x1D, 0x81, 3 )
+BINOP(and, 0x21, 0x25, 0x81, 4 )
+BINOP(sub, 0x29, 0x2D, 0x81, 5 )
+BINOP(xor, 0x31, 0x35, 0x81, 6 )
+BINOP(cmp, 0x39, 0x3D, 0x81, 7 )
+
+/**
+ * Creates a function for an Unop with code /ext encoding.
+ */
+#define UNOP(op, code, ext, input) \
+static void bemit_ ## op(const ir_node *node) { \
+ bemit_unop(node, code, ext, input); \
+}
+
+UNOP(not, 0xF7, 2, n_ia32_unary_op)
+UNOP(neg, 0xF7, 3, n_ia32_unary_op)
+UNOP(mul, 0xF7, 4, n_ia32_binary_right)
+UNOP(imul1op, 0xF7, 5, n_ia32_binary_right)
+UNOP(div, 0xF7, 6, n_ia32_unary_op)
+UNOP(idiv, 0xF7, 7, n_ia32_unary_op)
+
+UNOP(ijmp, 0xFF, 4, n_ia32_unary_op)
+
+/**
+ * Emit a Lea.
+ */
+static void bemit_lea(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, 0);
+ bemit8(0x8D);
+ bemit_mod_am(reg_gp_map[out->index], node);
+}
+
+/**
+ * Emit a single optcode.
+ */
+#define EMIT_SINGLEOP(op, code) \
+static void bemit_ ## op(const ir_node *node) { \
+ (void) node; \
+ bemit8(code); \
+}
+
+//EMIT_SINGLEOP(daa, 0x27)
+//EMIT_SINGLEOP(das, 0x2F)
+//EMIT_SINGLEOP(aaa, 0x37)
+//EMIT_SINGLEOP(aas, 0x3F)
+//EMIT_SINGLEOP(nop, 0x90)
+EMIT_SINGLEOP(cwde, 0x98)
+EMIT_SINGLEOP(cltd, 0x99)
+//EMIT_SINGLEOP(fwait, 0x9B)
+EMIT_SINGLEOP(sahf, 0x9E)
+//EMIT_SINGLEOP(popf, 0x9D)
+EMIT_SINGLEOP(int3, 0xCC)
+//EMIT_SINGLEOP(iret, 0xCF)
+//EMIT_SINGLEOP(xlat, 0xD7)
+//EMIT_SINGLEOP(lock, 0xF0)
+EMIT_SINGLEOP(rep, 0xF3)
+//EMIT_SINGLEOP(halt, 0xF4)
+EMIT_SINGLEOP(cmc, 0xF5)
+EMIT_SINGLEOP(stc, 0xF9)
+//EMIT_SINGLEOP(cli, 0xFA)
+//EMIT_SINGLEOP(sti, 0xFB)
+//EMIT_SINGLEOP(std, 0xFD)
+
+/**
+ * Emits a MOV out, [MEM].
+ */
+static void bemit_load(const ir_node *node)
+{
+ const arch_register_t *out = get_out_reg(node, 0);
+
+ if (out->index == REG_EAX) {
+ ir_entity *ent = get_ia32_am_sc(node);
+ int offs = get_ia32_am_offs_int(node);
+ ir_node *base = get_irn_n(node, n_ia32_base);
+ int has_base = !is_ia32_NoReg_GP(base);
+ ir_node *index = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(index);
+
+ if (ent == NULL && !has_base && !has_index) {
+ /* load from constant address to EAX can be encoded
+ as 0xA1 [offset] */
+ bemit8(0xA1);
+ bemit_entity(NULL, 0, offs, false);
+ return;
+ }
+ }
+ bemit8(0x8B);
+ bemit_mod_am(reg_gp_map[out->index], node);
+}
+
+/**
+ * Emits a MOV [mem], in.
+ */
+static void bemit_store(const ir_node *node)
+{
+ const ir_node *value = get_irn_n(node, n_ia32_Store_val);
+
+ if (is_ia32_Immediate(value)) {
+ bemit8(0xC7);
+ bemit_mod_am(0, node);
+ bemit_immediate(value, false);
+ } else {
+ const arch_register_t *in = get_in_reg(node, n_ia32_Store_val);
+
+ if (in->index == REG_EAX) {
+ ir_entity *ent = get_ia32_am_sc(node);
+ int offs = get_ia32_am_offs_int(node);
+ ir_node *base = get_irn_n(node, n_ia32_base);
+ int has_base = !is_ia32_NoReg_GP(base);
+ ir_node *index = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(index);
+
+ if (ent == NULL && !has_base && !has_index) {
+ /* store to constant address from EAX can be encoded as
+ 0xA3 [offset]*/
+ bemit8(0xA3);
+ bemit_entity(NULL, 0, offs, false);
+ return;
+ }
+ }
+ bemit8(0x89);
+ bemit_mod_am(reg_gp_map[in->index], node);
+ }
+}
+
+/**
+ * Emit a Push.
+ */
+static void bemit_push(const ir_node *node)
+{
+ const ir_node *value = get_irn_n(node, n_ia32_Push_val);
+
+ if (is_ia32_Immediate(value)) {
+ const ia32_immediate_attr_t *attr
+ = get_ia32_immediate_attr_const(value);
+ unsigned size = get_unsigned_imm_size(attr->offset);
+ if (attr->symconst)
+ size = 4;
+ switch (size) {
+ case 1:
+ bemit8(0x6A);
+ bemit8((unsigned char)attr->offset);
+ break;
+ case 2:
+ case 4:
+ bemit8(0x68);
+ bemit_immediate(value, false);
+ break;
+ }
+ } else {
+ bemit8(0xFF);
+ bemit_mod_am(6, node);
+ }
+}
+
+/**
+ * Emit a Pop.
+ */
+static void bemit_pop(const ir_node *node)
+{
+ const arch_register_t *reg = get_out_reg(node, pn_ia32_Pop_res);
+ if (get_ia32_op_type(node) == ia32_Normal)
+ bemit8(0x58 + reg_gp_map[reg->index]);
+ else {
+ bemit8(0x8F);
+ bemit_mod_am(0, node);
+ }
+}
+
+ /** Emits a Call; only direct (rel32) calls are implemented. */
+ static void bemit_call(const ir_node *node)
+ {
+     ir_node *callee = get_irn_n(node, n_ia32_Call_addr);
+
+     if (!is_ia32_Immediate(callee))
+         panic("indirect call NIY");
+
+     bemit8(0xE8); /* call rel32 */
+     bemit_immediate(callee, true);
+ }
+
+/**
+ * Emits a return.
+ */
+static void bemit_return(const ir_node *node)
+{
+ unsigned pop = be_Return_get_pop(node);
+ if (pop > 0 || be_Return_get_emit_pop(node)) {
+ bemit8(0xC2);
+ assert(pop <= 0xffff);
+ bemit16(pop);
+ } else {
+ bemit8(0xC3);
+ }
+}
+
+ /**
+  * Emits code for an IncSP node: "sub esp, imm" for a positive offset,
+  * "add esp, imm" for a negative one, nothing for offset 0.
+  */
+ static void bemit_incsp(const ir_node *node)
+ {
+     int offs = be_get_IncSP_offset(node);
+
+     /* BUG FIX: the old code emitted the opcode byte before checking
+      * for a zero offset (leaving a stray 0x81 in the stream), compared
+      * the immediate size against 8 instead of 1 (get_signed_imm_size
+      * returns byte counts, as the old "size == 1" opcode selection
+      * shows) so a 4-byte immediate always followed the imm8 opcode,
+      * and computed the size before negating negative offsets */
+     if (offs == 0)
+         return;
+
+     unsigned ext = 5; /* /5 = sub */
+     if (offs < 0) {
+         ext  = 0; /* /0 = add */
+         offs = -offs;
+     }
+
+     unsigned size = get_signed_imm_size(offs);
+     bemit8(size == 1 ? 0x83 : 0x81); /* 0x83 = grp1 imm8, 0x81 = grp1 imm32 */
+
+     const arch_register_t *reg = get_out_reg(node, 0);
+     bemit_modru(reg, ext);
+
+     if (size == 1) {
+         bemit8((unsigned) offs);
+     } else {
+         bemit32(offs);
+     }
+ }
+
+/**
+ * The type of a emitter function.
+ */
+typedef void (*emit_func) (const ir_node *);
+
+/**
+ * Set a node emitter. Make it a bit more type safe.
+ */
+static void register_emitter(ir_op *op, emit_func func)
+{
+ op->ops.generic = (op_func) func;
+}
+
+ /**
+  * Registers the binary emitter callback for every opcode handled by
+  * the binary emitter; all other opcodes keep a cleared generic
+  * function pointer.
+  */
+ static void ia32_register_binary_emitters(void)
+ {
+     /* first clear the generic function pointer for all ops */
+     clear_irp_opcodes_generic_func();
+
+     /* benode emitter */
+     register_emitter(op_be_Copy, bemit_copy);
+     register_emitter(op_be_Return, bemit_return);
+     register_emitter(op_be_IncSP, bemit_incsp);
+     register_emitter(op_ia32_Add, bemit_add);
+     register_emitter(op_ia32_Adc, bemit_adc);
+     register_emitter(op_ia32_And, bemit_and);
+     register_emitter(op_ia32_Or, bemit_or);
+     register_emitter(op_ia32_Cmp, bemit_cmp);
+     register_emitter(op_ia32_Call, bemit_call);
+     register_emitter(op_ia32_Cltd, bemit_cltd);
+     register_emitter(op_ia32_Cmc, bemit_cmc);
+     register_emitter(op_ia32_Stc, bemit_stc);
+     register_emitter(op_ia32_RepPrefix, bemit_rep);
+     register_emitter(op_ia32_Breakpoint, bemit_int3);
+     register_emitter(op_ia32_Sahf, bemit_sahf);
+     /* BUG FIX: this line registered op_ia32_Cltd a second time with
+      * bemit_cwde, silently overwriting bemit_cltd above, so Cltd nodes
+      * emitted cwde (0x98) instead of cltd (0x99). cwde belongs to the
+      * sign-extension node — TODO(review): confirm its name is
+      * op_ia32_Cwtl in this tree. */
+     register_emitter(op_ia32_Cwtl, bemit_cwde);
+     register_emitter(op_ia32_Sub, bemit_sub);
+     register_emitter(op_ia32_Sbb, bemit_sbb);
+     register_emitter(op_ia32_Xor0, bemit_xor0);
+     register_emitter(op_ia32_Xor, bemit_xor);
+     register_emitter(op_ia32_Const, bemit_mov_const);
+     register_emitter(op_ia32_Lea, bemit_lea);
+     register_emitter(op_ia32_Load, bemit_load);
+     register_emitter(op_ia32_Not, bemit_not);
+     register_emitter(op_ia32_Neg, bemit_neg);
+     register_emitter(op_ia32_Push, bemit_push);
+     register_emitter(op_ia32_Pop, bemit_pop);
+     register_emitter(op_ia32_Store, bemit_store);
+     register_emitter(op_ia32_Mul, bemit_mul);
+     register_emitter(op_ia32_IMul1OP, bemit_imul1op);
+     register_emitter(op_ia32_Div, bemit_div);
+     register_emitter(op_ia32_IDiv, bemit_idiv);
+     register_emitter(op_ia32_IJmp, bemit_ijmp);
+
+     /* ignore the following nodes */
+     register_emitter(op_ia32_ProduceVal, emit_Nothing);
+     register_emitter(op_be_Barrier, emit_Nothing);
+     register_emitter(op_be_Keep, emit_Nothing);
+     register_emitter(op_be_Start, emit_Nothing);
+     register_emitter(op_Phi, emit_Nothing);
+     register_emitter(op_Start, emit_Nothing);
+ }
+
+ /**
+  * Emits the binary code for one basic block: the block header followed
+  * by every node in scheduling order.
+  */
+ static void gen_binary_block(ir_node *block)
+ {
+ ir_node *node;
+
+ ia32_emit_block_header(block);
+
+ /* emit the contents of the block */
+ sched_foreach(block, node) {
+ ia32_emit_node(node);
+ }
+ }
+
+void ia32_gen_binary_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
+{
+ ir_entity *entity = get_irg_entity(irg);