2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief This file implements the ia32 node emitter.
23 * @author Christian Wuerdig, Matthias Braun
25 * Summary table for x86 floatingpoint compares:
26 * (remember effect of unordered on x86: ZF=1, PF=1, CF=1)
34 * pnc_Leg => NP (ordered)
56 #include "iredges_t.h"
60 #include "raw_bitset.h"
69 #include "beemitter.h"
73 #include "ia32_emitter.h"
74 #include "ia32_common_transform.h"
75 #include "gen_ia32_emitter.h"
76 #include "gen_ia32_regalloc_if.h"
77 #include "ia32_nodes_attr.h"
78 #include "ia32_new_nodes.h"
79 #include "ia32_architecture.h"
80 #include "bearch_ia32_t.h"
/* Emitter-wide state, set up once per compilation unit / graph. */
82 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* ISA description of the ia32 backend currently being emitted for. */
84 static const ia32_isa_t *isa;
/* label used as PIC base (emitted/consumed by emit_ia32_GetEIP). */
85 static char pic_base_label[128];
/* counter handing out unique exception-label ids (see ia32_assign_exc_label). */
86 static ir_label_t exc_label_id;
/* when non-zero, spills/reloads/remats get xchg no-op markers (debug aid). */
87 static int mark_spill_reload = 0;
/* stack-frame addressing state used for dwarf call-frame information. */
90 static bool sp_relative;
91 static int frame_type_size;
92 static int callframe_offset;
94 /** Return the next block in Block schedule */
95 static ir_node *get_prev_block_sched(const ir_node *block)
97 return (ir_node*)get_irn_link(block);
100 /** Checks if the current block is a fall-through target. */
101 static int is_fallthrough(const ir_node *cfgpred)
/* NOTE(review): this extract is missing lines (braces/returns) — verify
 * against the full source. */
105 if (!is_Proj(cfgpred))
107 pred = get_Proj_pred(cfgpred);
/* SwitchJmp targets are reached indirectly, never by fall-through */
108 if (is_ia32_SwitchJmp(pred))
115 * returns non-zero if the given block needs a label
116 * because of being a jump-target (and not a fall-through)
118 static int block_needs_label(const ir_node *block)
/* NOTE(review): this extract is missing lines (braces/returns) — verify
 * against the full source. */
121 int n_cfgpreds = get_Block_n_cfgpreds(block);
/* block has an entity — presumably its address is referenced elsewhere,
 * so a label is required; TODO confirm against full source */
123 if (get_Block_entity(block) != NULL)
126 if (n_cfgpreds == 0) {
128 } else if (n_cfgpreds == 1) {
129 ir_node *cfgpred = get_Block_cfgpred(block, 0);
130 ir_node *cfgpred_block = get_nodes_block(cfgpred);
/* single predecessor directly precedes us in the schedule and falls
 * through: no label needed */
132 if (get_prev_block_sched(block) == cfgpred_block
133 && is_fallthrough(cfgpred)) {
/**
 * Writes "<private prefix><prefix><id>" into buf, where id is a number that
 * will not be used a second time.
 *
 * @param buf     destination buffer
 * @param buflen  size of the destination buffer
 * @param prefix  label prefix to append the unique number to
 * @return buf (for convenient nesting in calls)
 */
static char *get_unique_label(char *buf, size_t buflen, const char *prefix)
{
	/* monotonically increasing counter shared by all label requests */
	static unsigned long id = 0;
	snprintf(buf, buflen, "%s%s%lu", be_gas_get_private_prefix(), prefix, ++id);
	/* fix: the function is non-void — falling off the end without
	 * returning the buffer is undefined behavior for callers that use
	 * the result */
	return buf;
}
152 * Emit the name of the 8bit low register
154 static void emit_8bit_register(const arch_register_t *reg)
/* only eax/ebx/ecx/edx have addressable 8bit sub-registers */
156 assert(reg->index == REG_GP_EAX || reg->index == REG_GP_EBX
157 || reg->index == REG_GP_ECX || reg->index == REG_GP_EDX);
160 be_emit_char(reg->name[1]); /* get the basic name of the register */
165 * Emit the name of the 8bit high register
167 static void emit_8bit_register_high(const arch_register_t *reg)
/* only eax/ebx/ecx/edx have an addressable high byte (ah/bh/ch/dh) */
169 assert(reg->index == REG_GP_EAX || reg->index == REG_GP_EBX
170 || reg->index == REG_GP_ECX || reg->index == REG_GP_EDX);
173 be_emit_char(reg->name[1]); /* get the basic name of the register */
/* Emit the 16bit name of a general purpose register (e.g. "ax" for "eax"). */
177 static void emit_16bit_register(const arch_register_t *reg)
180 be_emit_string(reg->name + 1); /* skip the 'e' prefix of the 32bit names */
184 * emit a register, possible shortened by a mode
186 * @param reg the register
187 * @param mode the mode of the register or NULL for full register
189 static void emit_register(const arch_register_t *reg, const ir_mode *mode)
/* NOTE(review): this extract is missing lines (NULL-mode check, switch
 * header, '%' prefix emission) — verify against the full source. */
192 int size = get_mode_size_bits(mode);
194 case 8: emit_8bit_register(reg); return;
195 case 16: emit_16bit_register(reg); return;
197 assert(mode_is_float(mode) || size == 32);
/* fall back to the full register name */
201 be_emit_string(reg->name);
/* Emit an entity name, decorated with TLS relocation suffixes and/or a
 * PIC-base adjustment where required. */
204 static void ia32_emit_entity(ir_entity *entity, int no_pic_adjust)
206 be_gas_emit_entity(entity);
/* thread-local entities need a TLS relocation suffix */
208 if (get_entity_owner(entity) == get_tls_type()) {
/* no local definition: address resolved via the indirect GOT entry */
209 if (!entity_has_definition(entity)) {
210 be_emit_cstring("@INDNTPOFF");
212 be_emit_cstring("@NTPOFF");
/* in PIC mode addresses are expressed relative to the pic base label */
216 if (do_pic && !no_pic_adjust) {
218 be_emit_string(pic_base_label);
/* Emit the value of an ia32_Immediate node (symbol and/or offset)
 * without the leading '$'. */
222 static void emit_ia32_Immediate_no_prefix(const ir_node *node)
224 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
226 if (attr->symconst != NULL) {
229 ia32_emit_entity(attr->symconst, attr->no_pic_adjust);
/* print the offset: signed relative to a symbol, as hex otherwise */
231 if (attr->symconst == NULL || attr->offset != 0) {
232 if (attr->symconst != NULL) {
233 be_emit_irprintf("%+d", attr->offset);
235 be_emit_irprintf("0x%X", attr->offset);
/* Emit an immediate operand. NOTE(review): the emission of the leading
 * '$' appears to be missing from this extract — verify against full source. */
240 static void emit_ia32_Immediate(const ir_node *node)
243 emit_ia32_Immediate_no_prefix(node);
/* Emit the AT&T operand-size suffix (b/w/l, "ll" for 64bit) for an
 * integer or reference mode. */
246 static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
248 assert(mode_is_int(mode) || mode_is_reference(mode));
249 switch (get_mode_size_bits(mode)) {
250 case 8: be_emit_char('b'); return;
251 case 16: be_emit_char('w'); return;
252 case 32: be_emit_char('l'); return;
253 /* gas docu says q is the suffix but gcc, objdump and icc use ll
255 case 64: be_emit_cstring("ll"); return;
257 panic("Can't output mode_suffix for %+F", mode);
/* Emit the x87 memory-operand size suffix for a node, derived from its
 * load/store mode. Nothing is printed for register-form instructions. */
260 static void ia32_emit_x87_mode_suffix(ir_node const *const node)
264 /* we only need to emit the mode on address mode */
265 if (get_ia32_op_type(node) == ia32_Normal)
268 mode = get_ia32_ls_mode(node);
269 assert(mode != NULL);
/* float memory operands: s (32bit), l (64bit), t (80bit extended) */
271 if (mode_is_float(mode)) {
272 switch (get_mode_size_bits(mode)) {
273 case 32: be_emit_char('s'); return;
274 case 64: be_emit_char('l'); return;
275 /* long doubles have different sizes due to alignment on different
279 case 128: be_emit_char('t'); return;
/* integer memory operands (fild/fist family): s/l/ll */
282 assert(mode_is_int(mode) || mode_is_reference(mode));
283 switch (get_mode_size_bits(mode)) {
284 case 16: be_emit_char('s'); return;
285 case 32: be_emit_char('l'); return;
286 /* gas docu says q is the suffix but gcc, objdump and icc use ll
288 case 64: be_emit_cstring("ll"); return;
291 panic("Can't output mode_suffix for %+F", mode);
/* Map a float mode to the SSE instruction suffix character.
 * NOTE(review): the 32bit/64bit cases appear to be missing from this
 * extract — verify against the full source. */
294 static char get_xmm_mode_suffix(ir_mode *mode)
296 assert(mode_is_float(mode));
297 switch (get_mode_size_bits(mode)) {
300 default: panic("Invalid XMM mode");
304 static void ia32_emit_xmm_mode_suffix(ir_node const *const node)
306 ir_mode *mode = get_ia32_ls_mode(node);
307 assert(mode != NULL);
308 be_emit_char(get_xmm_mode_suffix(mode));
312 * Returns the target block for a control flow node.
314 static ir_node *get_cfop_target_block(const ir_node *irn)
316 assert(get_irn_mode(irn) == mode_X);
317 return (ir_node*)get_irn_link(irn);
321 * Emits the target label for a control flow node.
323 static void ia32_emit_cfop_target(const ir_node *node)
325 ir_node *block = get_cfop_target_block(node);
326 be_gas_emit_block_name(block);
330 * Emit the suffix for a compare instruction.
332 static void ia32_emit_condition_code(ia32_condition_code_t cc)
/* maps ia32 condition codes to gas jcc/setcc/cmovcc suffixes; the float
 * variants share the suffixes of their unsigned integer counterparts */
335 case ia32_cc_overflow: be_emit_cstring("o"); return;
336 case ia32_cc_not_overflow: be_emit_cstring("no"); return;
337 case ia32_cc_float_below:
338 case ia32_cc_float_unordered_below:
339 case ia32_cc_below: be_emit_cstring("b"); return;
340 case ia32_cc_float_above_equal:
341 case ia32_cc_float_unordered_above_equal:
342 case ia32_cc_above_equal: be_emit_cstring("ae"); return;
343 case ia32_cc_float_equal:
344 case ia32_cc_equal: be_emit_cstring("e"); return;
345 case ia32_cc_float_not_equal:
346 case ia32_cc_not_equal: be_emit_cstring("ne"); return;
347 case ia32_cc_float_below_equal:
348 case ia32_cc_float_unordered_below_equal:
349 case ia32_cc_below_equal: be_emit_cstring("be"); return;
350 case ia32_cc_float_above:
351 case ia32_cc_float_unordered_above:
352 case ia32_cc_above: be_emit_cstring("a"); return;
353 case ia32_cc_sign: be_emit_cstring("s"); return;
354 case ia32_cc_not_sign: be_emit_cstring("ns"); return;
355 case ia32_cc_parity: be_emit_cstring("p"); return;
356 case ia32_cc_not_parity: be_emit_cstring("np"); return;
357 case ia32_cc_less: be_emit_cstring("l"); return;
358 case ia32_cc_greater_equal: be_emit_cstring("ge"); return;
359 case ia32_cc_less_equal: be_emit_cstring("le"); return;
360 case ia32_cc_greater: be_emit_cstring("g"); return;
/* pseudo codes must have been resolved before emitting (parity handling) */
361 case ia32_cc_float_parity_cases:
362 case ia32_cc_additional_float_cases:
365 panic("Invalid ia32 condition code");
368 typedef enum ia32_emit_mod_t {
/* Format modifiers parsed by ia32_emitf: '#' -> EMIT_RESPECT_LS,
 * '*' -> EMIT_ALTERNATE_AM, '>' -> EMIT_HIGH_REG, '<' -> EMIT_LOW_REG,
 * '^' -> EMIT_16BIT_REG (see the modifier switch in ia32_emitf). */
370 EMIT_RESPECT_LS = 1U << 0,
371 EMIT_ALTERNATE_AM = 1U << 1,
373 EMIT_HIGH_REG = 1U << 3,
374 EMIT_LOW_REG = 1U << 4,
375 EMIT_16BIT_REG = 1U << 5
377 ENUM_BITSET(ia32_emit_mod_t)
380 * Emits address mode.
382 static void ia32_emit_am(ir_node const *const node)
/* emits an AT&T-style address: "[%gs:][sym][+offs](base,index,scale)" */
384 ir_entity *ent = get_ia32_am_sc(node);
385 int offs = get_ia32_am_offs_int(node);
386 ir_node *base = get_irn_n(node, n_ia32_base);
387 int has_base = !is_ia32_NoReg_GP(base);
388 ir_node *idx = get_irn_n(node, n_ia32_index);
389 int has_index = !is_ia32_NoReg_GP(idx);
391 /* just to be sure... */
392 assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);
/* thread-local accesses go through the %gs segment */
394 if (get_ia32_am_tls_segment(node))
395 be_emit_cstring("%gs:");
399 const ia32_attr_t *attr = get_ia32_attr_const(node);
400 if (is_ia32_am_sc_sign(node))
402 ia32_emit_entity(ent, attr->data.am_sc_no_pic_adjust);
405 /* also handle special case if nothing is set */
406 if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
/* offset is signed relative to a symbol, plain otherwise */
408 be_emit_irprintf("%+d", offs);
410 be_emit_irprintf("%d", offs);
414 if (has_base || has_index) {
419 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_base);
420 emit_register(reg, NULL);
423 /* emit index + scale */
425 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_index);
428 emit_register(reg, NULL);
/* scale is stored as a shift amount; print the byte factor */
430 scale = get_ia32_am_scale(node);
432 be_emit_irprintf(",%d", 1 << scale);
439 static ia32_condition_code_t determine_final_cc(ir_node const *node, int flags_pos, ia32_condition_code_t cc);
/**
 * printf-like emitter for one assembly line. Plain text is copied through;
 * the '%'-conversions visible in this extract include:
 *   %AM  address mode, %AS<n>/%S<n> source operands, %D<n> destination
 *   register, %I immediate, %L jump target, %M mode suffix, %P condition
 *   code, %R register argument, %s string, %u/%d numbers, plus x87
 *   conversions (F, FM, FP, FR, FX, F0-F2),
 * with modifiers '*', '#', 'l', '>', '<', '^' (see ia32_emit_mod_t).
 * NOTE(review): many lines (outer loop, va_start/va_end, several switch
 * labels and braces) are missing from this extract — verify against the
 * full source before relying on the exact structure.
 */
441 void ia32_emitf(ir_node const *const node, char const *fmt, ...)
448 const char *start = fmt;
449 ia32_emit_mod_t mod = EMIT_NONE;
/* copy plain text up to the next conversion, newline or end of string */
451 while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
454 be_emit_string_len(start, fmt - start);
459 be_emit_write_line();
/* collect modifier flags preceding the conversion character */
473 case '*': mod |= EMIT_ALTERNATE_AM; break;
474 case '#': mod |= EMIT_RESPECT_LS; break;
475 case 'l': mod |= EMIT_LONG; break;
476 case '>': mod |= EMIT_HIGH_REG; break;
477 case '<': mod |= EMIT_LOW_REG; break;
478 case '^': mod |= EMIT_16BIT_REG; break;
487 arch_register_t const *reg;
497 if (get_ia32_op_type(node) == ia32_AddrModeS) {
500 assert(get_ia32_op_type(node) == ia32_Normal);
/* x87 register-form: print the two stack registers involved */
501 ia32_x87_attr_t const *const x87_attr = get_ia32_x87_attr_const(node);
502 arch_register_t const *const out = x87_attr->x87[2];
503 arch_register_t const * in = x87_attr->x87[1];
505 in = x87_attr->x87[0];
506 be_emit_irprintf("%%%s, %%%s", in->name, out->name);
512 if (mod & EMIT_ALTERNATE_AM)
518 reg = va_arg(ap, const arch_register_t*);
519 if (get_ia32_op_type(node) == ia32_AddrModeS) {
526 if (get_ia32_op_type(node) == ia32_AddrModeS) {
530 assert(get_ia32_op_type(node) == ia32_Normal);
534 default: goto unknown;
/* binary operation: right operand may be an immediate or register */
540 imm = get_irn_n(node, n_ia32_binary_right);
541 if (is_ia32_Immediate(imm)) {
542 emit_ia32_Immediate(imm);
543 be_emit_cstring(", ");
544 if (get_ia32_op_type(node) == ia32_AddrModeS) {
547 assert(get_ia32_op_type(node) == ia32_Normal);
548 reg = arch_get_irn_register_in(node, n_ia32_binary_left);
549 emit_register(reg, get_ia32_ls_mode(node));
552 if (get_ia32_op_type(node) == ia32_AddrModeS) {
555 assert(get_ia32_op_type(node) == ia32_Normal);
556 reg = arch_get_irn_register_in(node, n_ia32_binary_right);
557 emit_register(reg, get_ia32_ls_mode(node));
559 be_emit_cstring(", ");
560 reg = arch_get_irn_register_in(node, n_ia32_binary_left);
561 emit_register(reg, get_ia32_ls_mode(node));
/* %D<n>: n-th output register */
566 if (*fmt < '0' || '9' < *fmt)
568 reg = arch_get_irn_register_out(node, *fmt++ - '0');
574 ia32_emit_x87_mode_suffix(node);
575 } else if (*fmt == 'P') {
577 ia32_x87_attr_t const *const attr = get_ia32_x87_attr_const(node);
580 } else if (*fmt == 'R') {
582 /* NOTE: Work around a gas quirk for non-commutative operations if the
583 * destination register is not %st0. In this case r/non-r is swapped.
584 * %st0 = %st0 - %st1 -> fsub %st1, %st0 (as expected)
585 * %st0 = %st1 - %st0 -> fsubr %st1, %st0 (as expected)
586 * %st1 = %st0 - %st1 -> fsub %st0, %st1 (expected: fsubr)
587 * %st1 = %st1 - %st0 -> fsubr %st0, %st1 (expected: fsub)
588 * In fact this corresponds to the encoding of the instruction:
589 * - The r suffix selects whether %st0 is on the left (no r) or on the
590 * right (r) side of the executed operation.
591 * - The placement of %st0 selects whether the result is written to
592 * %st0 (right) or the other register (left).
593 * This results in testing whether the left operand register is %st0
594 * instead of the expected test whether the output register equals the
595 * left operand register. */
596 ia32_x87_attr_t const *const attr = get_ia32_x87_attr_const(node);
597 if (get_ia32_op_type(node) == ia32_Normal ?
598 attr->x87[0] != &ia32_registers[REG_ST0] :
599 attr->attr.data.ins_permuted)
601 } else if (*fmt == 'X') {
603 ia32_emit_xmm_mode_suffix(node);
604 } else if ('0' <= *fmt && *fmt <= '2') {
605 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
607 be_emit_string(attr->x87[*fmt++ - '0']->name);
/* %I: immediate; '*' suppresses the '$' prefix */
616 if (!(mod & EMIT_ALTERNATE_AM))
618 emit_ia32_Immediate_no_prefix(imm);
/* %L: jump target label */
622 ia32_emit_cfop_target(node);
/* %M: operand-size suffix from the load/store mode */
626 ir_mode *mode = get_ia32_ls_mode(node);
629 if (mod & EMIT_RESPECT_LS) {
630 if (get_mode_size_bits(mode) == 32)
631 be_emit_char(mode_is_signed(mode) ? 's' : 'z');
632 be_emit_char(mode_is_signed(mode) ? 's' : 'z');
634 ia32_emit_mode_suffix_mode(mode);
/* %P: condition code, either va_arg or taken from the node */
639 ia32_condition_code_t cc;
642 cc = (ia32_condition_code_t)va_arg(ap, int);
643 } else if ('0' <= *fmt && *fmt <= '9') {
644 cc = get_ia32_condcode(node);
645 cc = determine_final_cc(node, *fmt - '0', cc);
650 ia32_emit_condition_code(cc);
/* %R: register passed as va_arg, honoring size modifiers */
655 reg = va_arg(ap, const arch_register_t*);
657 if (mod & EMIT_ALTERNATE_AM)
659 if (mod & EMIT_HIGH_REG) {
660 emit_8bit_register_high(reg);
661 } else if (mod & EMIT_LOW_REG) {
662 emit_8bit_register(reg);
663 } else if (mod & EMIT_16BIT_REG) {
664 emit_16bit_register(reg);
666 emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);
/* %S<n>: n-th input operand (immediate or register) */
674 if (*fmt < '0' || '9' < *fmt)
678 imm = get_irn_n(node, pos);
679 if (is_ia32_Immediate(imm)) {
682 reg = arch_get_irn_register_in(node, pos);
/* %s: plain C string */
688 const char *str = va_arg(ap, const char*);
/* %u / %d: unsigned/signed number, 'l' modifier widens to long */
694 if (mod & EMIT_LONG) {
695 unsigned long num = va_arg(ap, unsigned long);
696 be_emit_irprintf("%lu", num);
698 unsigned num = va_arg(ap, unsigned);
699 be_emit_irprintf("%u", num);
704 if (mod & EMIT_LONG) {
705 long num = va_arg(ap, long);
706 be_emit_irprintf("%ld", num);
708 int num = va_arg(ap, int);
709 be_emit_irprintf("%d", num);
715 panic("unknown format conversion");
719 be_emit_finish_line_gas(node);
/* Emits an integer multiply, choosing between the 2- and 3-operand forms. */
723 static void emit_ia32_IMul(const ir_node *node)
725 ir_node *left = get_irn_n(node, n_ia32_IMul_left);
726 const arch_register_t *out_reg = arch_get_irn_register_out(node, pn_ia32_IMul_res);
728 /* do we need the 3-address form? */
/* yes when the left operand was folded away (memory operand) or lives in
 * a different register than the result */
729 if (is_ia32_NoReg_GP(left) ||
730 arch_get_irn_register_in(node, n_ia32_IMul_left) != out_reg) {
731 ia32_emitf(node, "imul%M %#S4, %#AS3, %#D0");
733 ia32_emitf(node, "imul%M %#AS4, %#S3");
738 * walks up a tree of copies/perms/spills/reloads to find the original value
739 * that is moved around
741 static ir_node *find_original_value(ir_node *node)
/* uses the visited flag for cycle protection; the caller must call
 * inc_irg_visited() beforehand (see determine_final_cc) */
743 if (irn_visited(node))
746 mark_irn_visited(node);
747 if (be_is_Copy(node)) {
748 return find_original_value(be_get_Copy_op(node));
749 } else if (be_is_CopyKeep(node)) {
750 return find_original_value(be_get_CopyKeep_op(node));
751 } else if (is_Proj(node)) {
752 ir_node *pred = get_Proj_pred(node);
753 if (be_is_Perm(pred)) {
754 return find_original_value(get_irn_n(pred, get_Proj_proj(node)));
/* +1: MemPerm projections are offset by one input — TODO confirm
 * against the MemPerm input layout */
755 } else if (be_is_MemPerm(pred)) {
756 return find_original_value(get_irn_n(pred, get_Proj_proj(node) + 1));
/* a reload: follow the memory dependency back to the store */
757 } else if (is_ia32_Load(pred)) {
758 return find_original_value(get_irn_n(pred, n_ia32_Load_mem));
759 } else if (is_ia32_Store(pred)) {
760 return find_original_value(get_irn_n(pred, n_ia32_Store_val));
764 } else if (is_Phi(node)) {
/* search all Phi inputs for a usable origin */
766 arity = get_irn_arity(node);
767 for (i = 0; i < arity; ++i) {
768 ir_node *in = get_irn_n(node, i);
769 ir_node *res = find_original_value(in);
/* Computes the condition code to emit for @p node, inverting @p cc when the
 * node producing the flags input had its operands permuted. */
780 static ia32_condition_code_t determine_final_cc(const ir_node *node,
781 int flags_pos, ia32_condition_code_t cc)
783 ir_node *flags = get_irn_n(node, flags_pos);
784 const ia32_attr_t *flags_attr;
785 flags = skip_Proj(flags);
/* flags produced by the x87 fnstsw/sahf route: track down the original
 * compare node to read its attributes */
787 if (is_ia32_Sahf(flags)) {
788 ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
789 if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
790 inc_irg_visited(current_ir_graph);
791 cmp = find_original_value(cmp);
793 assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));
796 flags_attr = get_ia32_attr_const(cmp);
798 flags_attr = get_ia32_attr_const(flags);
/* permuted compare operands invert the meaning of the condition */
801 if (flags_attr->data.ins_permuted)
802 cc = ia32_invert_condition_code(cc);
807 * Emits an exception label for a given node.
809 static void ia32_emit_exc_label(const ir_node *node)
811 be_emit_string(be_gas_insn_label_prefix());
812 be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
816 * Returns the Proj with projection number proj and NOT mode_M
818 static ir_node *get_proj(const ir_node *node, long proj)
/* NOTE(review): loop body lines (continue/return) are missing from this
 * extract — verify against the full source. */
822 assert(get_irn_mode(node) == mode_T && "expected mode_T node");
824 foreach_out_edge(node, edge) {
825 src = get_edge_src_irn(edge);
827 assert(is_Proj(src) && "Proj expected");
/* memory Projs are skipped */
828 if (get_irn_mode(src) == mode_M)
831 if (get_Proj_proj(src) == proj)
837 static int can_be_fallthrough(const ir_node *node)
839 ir_node *target_block = get_cfop_target_block(node);
840 ir_node *block = get_nodes_block(node);
841 return get_prev_block_sched(target_block) == block;
845 * Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
847 static void emit_ia32_Jcc(const ir_node *node)
849 int need_parity_label = 0;
850 ia32_condition_code_t cc = get_ia32_condcode(node);
851 const ir_node *proj_true;
852 const ir_node *proj_false;
854 cc = determine_final_cc(node, 0, cc);
857 proj_true = get_proj(node, pn_ia32_Jcc_true);
858 assert(proj_true && "Jcc without true Proj");
860 proj_false = get_proj(node, pn_ia32_Jcc_false);
861 assert(proj_false && "Jcc without false Proj");
/* if the true target directly follows, negate the condition so the
 * conditional branch jumps to the false target and the jmp is omitted */
863 if (can_be_fallthrough(proj_true)) {
864 /* exchange both proj's so the second one can be omitted */
865 const ir_node *t = proj_true;
867 proj_true = proj_false;
869 cc = ia32_negate_condition_code(cc);
872 if (cc & ia32_cc_float_parity_cases) {
873 /* Some floating point comparisons require a test of the parity flag,
874 * which indicates that the result is unordered */
875 if (cc & ia32_cc_negated) {
876 ia32_emitf(proj_true, "jp %L");
878 /* we need a local label if the false proj is a fallthrough
879 * as the falseblock might have no label emitted then */
880 if (can_be_fallthrough(proj_false)) {
881 need_parity_label = 1;
882 ia32_emitf(proj_false, "jp 1f");
884 ia32_emitf(proj_false, "jp %L");
888 ia32_emitf(proj_true, "j%PX %L", (int)cc);
889 if (need_parity_label) {
890 be_emit_cstring("1:\n");
891 be_emit_write_line();
894 /* the second Proj might be a fallthrough */
895 if (can_be_fallthrough(proj_false)) {
896 if (be_options.verbose_asm)
897 ia32_emitf(proj_false, "/* fallthrough to %L */");
899 ia32_emitf(proj_false, "jmp %L");
904 * Emits an ia32 Setcc. This is mostly easy but some floating point compares
907 static void emit_ia32_Setcc(const ir_node *node)
909 const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);
911 ia32_condition_code_t cc = get_ia32_condcode(node);
912 cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
/* unordered float results set PF: combine the setcc result with
 * setp/setnp using the low and high byte halves of the destination */
913 if (cc & ia32_cc_float_parity_cases) {
914 if (cc & ia32_cc_negated) {
915 ia32_emitf(node, "set%PX %<R", (int)cc, dreg);
916 ia32_emitf(node, "setp %>R", dreg);
917 ia32_emitf(node, "orb %>R, %<R", dreg, dreg);
919 ia32_emitf(node, "set%PX %<R", (int)cc, dreg);
920 ia32_emitf(node, "setnp %>R", dreg);
921 ia32_emitf(node, "andb %>R, %<R", dreg, dreg);
/* ordinary case: a single setcc on the (8bit) destination */
924 ia32_emitf(node, "set%PX %#R", (int)cc, dreg);
/* Emits a conditional move, shuffling registers so the false value ends up
 * in the output register before the cmov executes. */
928 static void emit_ia32_CMovcc(const ir_node *node)
930 const ia32_attr_t *attr = get_ia32_attr_const(node);
931 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_res);
932 ia32_condition_code_t cc = get_ia32_condcode(node);
933 const arch_register_t *in_true;
934 const arch_register_t *in_false;
936 cc = determine_final_cc(node, n_ia32_CMovcc_eflags, cc);
937 /* although you can't set ins_permuted in the constructor it might still
938 * be set by memory operand folding
939 * Permuting inputs of a cmov means the condition is negated!
941 if (attr->data.ins_permuted)
942 cc = ia32_negate_condition_code(cc);
944 in_true = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
945 in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));
947 /* should be same constraint fullfilled? */
948 if (out == in_false) {
949 /* yes -> nothing to do */
/* output equals the true value: swap true/false and negate the condition */
950 } else if (out == in_true) {
951 const arch_register_t *tmp;
953 assert(get_ia32_op_type(node) == ia32_Normal);
955 cc = ia32_negate_condition_code(cc);
/* otherwise: move the false value into out first */
962 ia32_emitf(node, "movl %R, %R", in_false, out);
965 if (cc & ia32_cc_float_parity_cases) {
966 panic("CMov with floatingpoint compare/parity not supported yet");
969 ia32_emitf(node, "cmov%PX %#AR, %#R", (int)cc, in_true, out);
973 * Emits code for a SwitchJmp
975 static void emit_ia32_SwitchJmp(const ir_node *node)
977 ir_entity *jump_table = get_ia32_am_sc(node);
978 const ir_switch_table *table = get_ia32_switch_table(node);
/* indirect jump through the jump table, then emit the table itself */
980 ia32_emitf(node, "jmp %*AM");
981 be_emit_jump_table(node, table, jump_table, get_cfop_target_block);
985 * Emits code for a unconditional jump.
987 static void emit_ia32_Jmp(const ir_node *node)
989 /* we have a block schedule */
990 if (can_be_fallthrough(node)) {
991 if (be_options.verbose_asm)
992 ia32_emitf(node, "/* fallthrough to %L */");
994 ia32_emitf(node, "jmp %L");
999 * Emit an inline assembler operand.
1001 * @param node the ia32_ASM node
1002 * @param s points to the operand (a %c)
1004 * @return pointer to the first char in s NOT in the current operand
/* NOTE(review): several lines (modifier parsing loop, early returns) are
 * missing from this extract — verify against the full source. */
1006 static const char* emit_asm_operand(const ir_node *node, const char *s)
1008 const ia32_attr_t *ia32_attr = get_ia32_attr_const(node);
1009 const ia32_asm_attr_t *attr = CONST_CAST_IA32_ATTR(ia32_asm_attr_t,
1011 const arch_register_t *reg;
1012 const ia32_asm_reg_t *asm_regs = attr->register_map;
1013 const ia32_asm_reg_t *asm_reg;
1022 /* parse modifiers */
1025 ir_fprintf(stderr, "Warning: asm text (%+F) ends with %%\n", node);
1050 "Warning: asm text (%+F) contains unknown modifier '%c' for asm op\n",
/* parse the operand number following the modifiers */
1057 if (sscanf(s, "%d%n", &num, &p) != 1) {
1058 ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",
1065 if (num < 0 || ARR_LEN(asm_regs) <= (size_t)num) {
1067 "Error: Custom assembler references invalid input/output (%+F)\n",
1071 asm_reg = & asm_regs[num];
1072 assert(asm_reg->valid);
/* resolve the operand: output register, input register or immediate */
1075 if (asm_reg->use_input == 0) {
1076 reg = arch_get_irn_register_out(node, asm_reg->inout_pos);
1078 ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
1080 /* might be an immediate value */
1081 if (is_ia32_Immediate(pred)) {
1082 emit_ia32_Immediate(pred);
1085 reg = arch_get_irn_register_in(node, asm_reg->inout_pos);
1089 "Warning: no register assigned for %d asm op (%+F)\n",
1094 if (asm_reg->memory) {
/* honor size modifiers (low byte / high byte / 16bit) when printing */
1099 if (modifier != 0) {
1102 emit_8bit_register(reg);
1105 emit_8bit_register_high(reg);
1108 emit_16bit_register(reg);
1111 panic("Invalid asm op modifier");
1114 emit_register(reg, asm_reg->memory ? mode_Iu : asm_reg->mode);
1117 if (asm_reg->memory) {
1125 * Emits code for an ASM pseudo op.
1127 static void emit_ia32_Asm(const ir_node *node)
1129 const void *gen_attr = get_irn_generic_attr_const(node);
1130 const ia32_asm_attr_t *attr
1131 = CONST_CAST_IA32_ATTR(ia32_asm_attr_t, gen_attr);
1132 ident *asm_text = attr->asm_text;
1133 const char *s = get_id_str(asm_text);
/* bracket the user assembly in #APP/#NO_APP, like gcc does */
1135 be_emit_cstring("#APP\n");
1136 be_emit_write_line();
/* NOTE(review): the copy loop over the asm text is missing from this
 * extract — verify against the full source. */
1143 s = emit_asm_operand(node, s);
1149 be_emit_cstring("\n#NO_APP\n");
1150 be_emit_write_line();
1155 * Emit movsb/w instructions to make mov count divideable by 4
1157 static void emit_CopyB_prolog(unsigned size)
/* NOTE(review): the tests selecting movsb/movsw from the low size bits
 * are missing from this extract — verify against the full source. */
1160 ia32_emitf(NULL, "movsb");
1162 ia32_emitf(NULL, "movsw");
1166 * Emit rep movsd instruction for memcopy.
1168 static void emit_ia32_CopyB(const ir_node *node)
1170 unsigned size = get_ia32_copyb_size(node);
1172 emit_CopyB_prolog(size);
1173 ia32_emitf(node, "rep movsd");
1177 * Emits unrolled memcopy.
1179 static void emit_ia32_CopyB_i(const ir_node *node)
1181 unsigned size = get_ia32_copyb_size(node);
1183 emit_CopyB_prolog(size);
/* NOTE(review): the loop emitting one movsd per remaining 4 bytes is
 * missing from this extract — verify against the full source. */
1187 ia32_emitf(NULL, "movsd");
1193 * Emit code for conversions (I, FP), (FP, I) and (FP, FP).
1195 static void emit_ia32_Conv_with_FP(const ir_node *node, const char* conv_f,
/* NOTE(review): the rest of the signature (conv_d parameter) is missing
 * from this extract — verify against the full source. */
1198 ir_mode *ls_mode = get_ia32_ls_mode(node);
1199 int ls_bits = get_mode_size_bits(ls_mode);
/* select the 32bit (single) or 64bit (double) cvt variant */
1200 const char *conv = ls_bits == 32 ? conv_f : conv_d;
1202 ia32_emitf(node, "cvt%s %AS3, %D0", conv);
/* int -> float conversion: cvtsi2ss / cvtsi2sd */
1205 static void emit_ia32_Conv_I2FP(const ir_node *node)
1207 emit_ia32_Conv_with_FP(node, "si2ss", "si2sd");
/* float -> int conversion: cvtss2si / cvtsd2si */
1210 static void emit_ia32_Conv_FP2I(const ir_node *node)
1212 emit_ia32_Conv_with_FP(node, "ss2si", "sd2si");
/* float <-> double conversion: cvtsd2ss / cvtss2sd */
1215 static void emit_ia32_Conv_FP2FP(const ir_node *node)
1217 emit_ia32_Conv_with_FP(node, "sd2ss", "ss2sd");
1221 * Emits code to increase stack pointer.
1223 static void emit_be_IncSP(const ir_node *node)
1225 int offs = be_get_IncSP_offset(node);
/* NOTE(review): the sign tests selecting subl (offs > 0) vs addl
 * (offs < 0) are missing from this extract — verify against full source. */
1231 ia32_emitf(node, "subl $%u, %D0", offs);
1233 ia32_emitf(node, "addl $%u, %D0", -offs);
1238 * Emits code for Copy/CopyKeep.
1240 static void Copy_emitter(const ir_node *node, const ir_node *op)
1242 const arch_register_t *in = arch_get_irn_register(op);
1243 const arch_register_t *out = arch_get_irn_register(node);
1248 /* copies of vf nodes aren't real... */
1249 if (in->reg_class == &ia32_reg_classes[CLASS_ia32_vfp])
/* remaining classes get a plain move — presumably only gp reaches this
 * point; TODO confirm against the full source */
1252 ia32_emitf(node, "movl %R, %R", in, out);
1255 static void emit_be_Copy(const ir_node *node)
1257 Copy_emitter(node, be_get_Copy_op(node));
1260 static void emit_be_CopyKeep(const ir_node *node)
1262 Copy_emitter(node, be_get_CopyKeep_op(node));
1266 * Emits code for exchange.
1268 static void emit_be_Perm(const ir_node *node)
1270 const arch_register_t *in0, *in1;
1272 in0 = arch_get_irn_register(get_irn_n(node, 0));
1273 in1 = arch_get_irn_register(get_irn_n(node, 1));
1275 arch_register_class_t const *const cls0 = in0->reg_class;
1276 assert(cls0 == in1->reg_class && "Register class mismatch at Perm");
/* gp registers can be swapped directly with xchg */
1278 if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
1279 ia32_emitf(node, "xchg %R, %R", in1, in0);
/* xmm has no exchange instruction: swap via the triple-xor trick */
1280 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
1281 ia32_emitf(NULL, "xorpd %R, %R", in1, in0);
1282 ia32_emitf(NULL, "xorpd %R, %R", in0, in1);
1283 ia32_emitf(node, "xorpd %R, %R", in1, in0);
/* vfp/st classes emit nothing here — presumably handled by the x87
 * simulation pass; TODO confirm against the full source */
1284 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
1286 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
1289 panic("unexpected register class in be_Perm (%+F)", node);
1293 /* helper function for emit_ia32_Minus64Bit */
1294 static void emit_mov(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1296 ia32_emitf(node, "movl %R, %R", src, dst);
1299 /* helper function for emit_ia32_Minus64Bit */
1300 static void emit_neg(const ir_node* node, const arch_register_t *reg)
1302 ia32_emitf(node, "negl %R", reg);
1305 /* helper function for emit_ia32_Minus64Bit */
1306 static void emit_sbb0(const ir_node* node, const arch_register_t *reg)
1308 ia32_emitf(node, "sbbl $0, %R", reg);
1311 /* helper function for emit_ia32_Minus64Bit */
1312 static void emit_sbb(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1314 ia32_emitf(node, "sbbl %R, %R", src, dst);
1317 /* helper function for emit_ia32_Minus64Bit */
1318 static void emit_xchg(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1320 ia32_emitf(node, "xchgl %R, %R", src, dst);
1323 /* helper function for emit_ia32_Minus64Bit */
1324 static void emit_zero(const ir_node* node, const arch_register_t *reg)
1326 ia32_emitf(node, "xorl %R, %R", reg, reg);
/* Negates a 64bit value held in a register pair: first shuffles the inputs
 * into the output registers, then emits a neg/sbb sequence. */
1329 static void emit_ia32_Minus64Bit(const ir_node *node)
1331 const arch_register_t *in_lo = arch_get_irn_register_in(node, 0);
1332 const arch_register_t *in_hi = arch_get_irn_register_in(node, 1);
1333 const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
1334 const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);
/* case analysis over the in/out register assignments (a,b = inputs;
 * c,d = distinct outputs) */
1336 if (out_lo == in_lo) {
1337 if (out_hi != in_hi) {
1338 /* a -> a, b -> d */
1341 /* a -> a, b -> b */
1344 } else if (out_lo == in_hi) {
1345 if (out_hi == in_lo) {
1346 /* a -> b, b -> a */
1347 emit_xchg(node, in_lo, in_hi);
1350 /* a -> b, b -> d */
1351 emit_mov(node, in_hi, out_hi);
1352 emit_mov(node, in_lo, out_lo);
1356 if (out_hi == in_lo) {
1357 /* a -> c, b -> a */
1358 emit_mov(node, in_lo, out_lo);
1360 } else if (out_hi == in_hi) {
1361 /* a -> c, b -> b */
1362 emit_mov(node, in_lo, out_lo);
1365 /* a -> c, b -> d */
1366 emit_mov(node, in_lo, out_lo);
/* standard sequence: negate both halves, then subtract the borrow */
1372 emit_neg( node, out_hi);
1373 emit_neg( node, out_lo);
1374 emit_sbb0(node, out_hi);
/* alternative sequence used when out_hi can be freshly zeroed */
1378 emit_zero(node, out_hi);
1379 emit_neg( node, out_lo);
1380 emit_sbb( node, in_hi, out_hi);
/* Loads the current instruction pointer: the call to the immediately
 * following label pushes EIP, which is then popped into the destination. */
1383 static void emit_ia32_GetEIP(const ir_node *node)
1385 ia32_emitf(node, "call %s", pic_base_label);
1386 be_emit_irprintf("%s:\n", pic_base_label);
1387 be_emit_write_line();
1388 ia32_emitf(node, "popl %D0");
/* Walks attr->count frame links upwards: %D0 follows the chained frame
 * pointers while %S1 counts down, looping on a local block label. */
1391 static void emit_ia32_ClimbFrame(const ir_node *node)
1393 const ia32_climbframe_attr_t *attr = get_ia32_climbframe_attr_const(node);
1395 ia32_emitf(node, "movl %S0, %D0");
1396 ia32_emitf(node, "movl $%u, %S1", attr->count);
/* loop head label */
1397 be_gas_emit_block_name(node);
1398 be_emit_cstring(":\n");
1399 be_emit_write_line();
/* dereference one frame link and decrement the counter */
1400 ia32_emitf(node, "movl (%D0), %D0");
1401 ia32_emitf(node, "dec %S1");
1402 be_emit_cstring("\tjnz ");
1403 be_gas_emit_block_name(node);
1404 be_emit_finish_line_gas(node);
1407 static void emit_be_Return(const ir_node *node)
1409 unsigned pop = be_Return_get_pop(node);
1411 if (pop > 0 || be_Return_get_emit_pop(node)) {
1412 ia32_emitf(node, "ret $%u", pop);
1414 ia32_emitf(node, "ret");
/* Emitter callback for nodes that produce no assembly at all. */
1418 static void emit_Nothing(const ir_node *node)
1425 * Enters the emitter functions for handled nodes into the generic
1426 * pointer of an opcode.
1428 static void ia32_register_emitters(void)
1430 #define IA32_EMIT(a) op_ia32_##a->ops.generic = (op_func)emit_ia32_##a
1431 #define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
1432 #define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
1433 #define BE_EMIT(a) op_be_##a->ops.generic = (op_func)emit_be_##a
1434 #define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
1436 /* first clear the generic function pointer for all ops */
1437 ir_clear_opcodes_generic_func();
1439 /* register all emitter functions defined in spec */
1440 ia32_register_spec_emitters();
1442 /* other ia32 emitter functions */
/* NOTE(review): several registration lines are missing from this
 * extract — verify against the full source. */
1445 IA32_EMIT(Conv_FP2FP);
1446 IA32_EMIT(Conv_FP2I);
1447 IA32_EMIT(Conv_I2FP);
1454 IA32_EMIT(Minus64Bit);
1455 IA32_EMIT(SwitchJmp);
1456 IA32_EMIT(ClimbFrame);
1459 /* benode emitter */
/* Signature of the per-opcode emitter callbacks stored in ops.generic. */
1478 typedef void (*emit_func_ptr) (const ir_node *);
1481 * Assign and emit an exception label if the current instruction can fail.
1483 static void ia32_assign_exc_label(ir_node *node)
1485 /* assign a new ID to the instruction */
1486 set_ia32_exc_label_id(node, ++exc_label_id);
/* print the label itself, followed by an informational comment */
1488 ia32_emit_exc_label(node);
1490 be_emit_pad_comment();
1491 be_emit_cstring("/* exception to Block ");
1492 ia32_emit_cfop_target(node);
1493 be_emit_cstring(" */\n");
1494 be_emit_write_line();
1498 * Emits code for a node.
1500 static void ia32_emit_node(ir_node *node)
1502 ir_op *op = get_irn_op(node);
1504 DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
1506 if (is_ia32_irn(node)) {
1507 if (get_ia32_exc_label(node)) {
1508 /* emit the exception label of this instruction */
1509 ia32_assign_exc_label(node);
/* debugging aid: mark spills/reloads/remats with harmless xchg no-ops */
1511 if (mark_spill_reload) {
1512 if (is_ia32_is_spill(node)) {
1513 ia32_emitf(NULL, "xchg %ebx, %ebx /* spill mark */");
1515 if (is_ia32_is_reload(node)) {
1516 ia32_emitf(NULL, "xchg %edx, %edx /* reload mark */");
1518 if (is_ia32_is_remat(node)) {
1519 ia32_emitf(NULL, "xchg %ecx, %ecx /* remat mark */");
/* dispatch through the emitter registered on the opcode */
1523 if (op->ops.generic) {
1524 emit_func_ptr func = (emit_func_ptr) op->ops.generic;
1526 be_dwarf_location(get_irn_dbg_info(node));
1531 ir_fprintf(stderr, "Error: No emit handler for node %+F (%+G, graph %+F)\n", node, node, current_ir_graph);
/* track stack pointer changes for dwarf call-frame information */
1536 int sp_change = arch_get_sp_bias(node);
1537 if (sp_change != 0) {
1538 assert(sp_change != SP_BIAS_RESET);
1539 callframe_offset += sp_change;
1540 be_dwarf_callframe_offset(callframe_offset);
1546 * Emits gas alignment directives
1548 static void ia32_emit_alignment(unsigned align, unsigned skip)
1550 ia32_emitf(NULL, ".p2align %u,,%u", align, skip);
1554 * Emits gas alignment directives for Labels depended on cpu architecture.
1556 static void ia32_emit_align_label(void)
/* alignment parameters come from the selected CPU's code generation config */
1558 unsigned align = ia32_cg_config.label_alignment;
1559 unsigned maximum_skip = ia32_cg_config.label_alignment_max_skip;
1560 ia32_emit_alignment(align, maximum_skip);
1564 * Test whether a block should be aligned.
1565 * For cpus in the P4/Athlon class it is useful to align jump labels to
1566 * 16 bytes. However we should only do that if the alignment nops before the
1567 * label aren't executed more often than we have jumps to the label.
1569 static int should_align_block(const ir_node *block)
1571 static const double DELTA = .0001;
1572 ir_node *prev = get_prev_block_sched(block);
1573 double prev_freq = 0; /**< execfreq of the fallthrough block */
1574 double jmp_freq = 0; /**< execfreq of all non-fallthrough blocks */
/* factor <= 0 disables label alignment entirely */
1578 if (ia32_cg_config.label_alignment_factor <= 0)
1581 block_freq = get_block_execfreq(block);
1582 if (block_freq < DELTA)
/* split predecessor frequencies into fallthrough vs. jump contributions */
1585 n_cfgpreds = get_Block_n_cfgpreds(block);
1586 for (i = 0; i < n_cfgpreds; ++i) {
1587 const ir_node *pred = get_Block_cfgpred_block(block, i);
1588 double pred_freq = get_block_execfreq(pred);
1591 prev_freq += pred_freq;
1593 jmp_freq += pred_freq;
1597 if (prev_freq < DELTA && !(jmp_freq < DELTA))
/* align only when jumps dominate fallthrough by the configured factor */
1600 jmp_freq /= prev_freq;
1602 return jmp_freq > ia32_cg_config.label_alignment_factor;
1606 * Emit the block header for a block.
1608 * @param block the block
1609 * @param prev_block the previous block
1611 static void ia32_emit_block_header(ir_node *block)
1613 ir_graph *irg = current_ir_graph;
1614 int need_label = block_needs_label(block);
/* the end block carries no code and needs no header */
1616 if (block == get_irg_end_block(irg))
1619 if (ia32_cg_config.label_alignment > 0) {
1620 /* align the current block if:
1621 * a) if should be aligned due to its execution frequency
1622 * b) there is no fall-through here
1624 if (should_align_block(block)) {
1625 ia32_emit_align_label();
1627 /* if the predecessor block has no fall-through,
1628 we can always align the label. */
1630 int has_fallthrough = 0;
1632 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1633 ir_node *cfg_pred = get_Block_cfgpred(block, i);
1634 if (can_be_fallthrough(cfg_pred)) {
1635 has_fallthrough = 1;
1640 if (!has_fallthrough)
1641 ia32_emit_align_label();
1645 be_gas_begin_block(block, need_label);
1649 * Walks over the nodes in a block connected by scheduling edges
1650 * and emits code for each node.
1652 static void ia32_gen_block(ir_node *block)
1654 ia32_emit_block_header(block);
1657 ir_graph *irg = get_irn_irg(block);
1658 callframe_offset = 4; /* 4 bytes for the return address */
1659 /* ESP guessing, TODO perform a real ESP simulation */
1660 if (block != get_irg_start_block(irg)) {
1661 callframe_offset += frame_type_size;
1663 be_dwarf_callframe_offset(callframe_offset);
1666 /* emit the contents of the block */
1667 be_dwarf_location(get_irn_dbg_info(block));
1668 sched_foreach(block, node) {
1669 ia32_emit_node(node);
/** One row of the per-function exception table written after the epilog. */
1673 typedef struct exc_entry {
1674 ir_node *exc_instr; /**< The instruction that can issue an exception. */
1675 ir_node *block; /**< The block to call then. */
1680 * Sets labels for control flow nodes (jump target).
1681 * Links control predecessors to their destination blocks.
1683 static void ia32_gen_labels(ir_node *block, void *data)
1685 exc_entry **exc_list = (exc_entry**)data;
1689 for (n = get_Block_n_cfgpreds(block) - 1; n >= 0; --n) {
1690 pred = get_Block_cfgpred(block, n);
/* remember the jump target on the control-flow predecessor's link field */
1691 set_irn_link(pred, block);
/* instructions marked as throwing are collected for the exception table */
1693 pred = skip_Proj(pred);
1694 if (is_ia32_irn(pred) && get_ia32_exc_label(pred)) {
1699 ARR_APP1(exc_entry, *exc_list, e);
1700 set_irn_link(pred, block);
1706 * Compare two exception_entries.
/* qsort comparator: orders exc_entries by ascending exception label id,
 * which corresponds to ascending code addresses */
1708 static int cmp_exc_entry(const void *a, const void *b)
1710 const exc_entry *ea = (const exc_entry*)a;
1711 const exc_entry *eb = (const exc_entry*)b;
1713 if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
/** Build the dwarf parameter-debug-info array for @p irg: one slot per method
 * parameter, filled with the frame entity of each parameter (reg left NULL).
 * Caller owns the returned XMALLOC'd array. */
1718 static parameter_dbg_info_t *construct_parameter_infos(ir_graph *irg)
1720 ir_entity *entity = get_irg_entity(irg);
1721 ir_type *type = get_entity_type(entity);
1722 size_t n_params = get_method_n_params(type);
1723 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
1724 ir_type *arg_type = layout->arg_type;
1725 size_t n_members = get_compound_n_members(arg_type);
1726 parameter_dbg_info_t *infos = XMALLOCNZ(parameter_dbg_info_t, n_params);
1729 for (i = 0; i < n_members; ++i) {
1730 ir_entity *member = get_compound_member(arg_type, i);
1732 if (!is_parameter_entity(member))
/* the vararg start marker has no parameter slot */
1734 param = get_entity_parameter_number(member);
1735 if (param == IR_VA_START_PARAMETER_NUMBER)
1737 assert(infos[param].entity == NULL && infos[param].reg == NULL);
1738 infos[param].reg = NULL;
1739 infos[param].entity = member;
1746 * Main driver. Emits the code for one routine.
1748 void ia32_gen_routine(ir_graph *irg)
1750 ir_entity *entity = get_irg_entity(irg);
1751 exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
1752 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1753 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
1754 ir_node **blk_sched = irg_data->blk_sched;
1755 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
1756 parameter_dbg_info_t *infos;
1759 isa = (ia32_isa_t*) arch_env;
1760 do_pic = be_options.pic;
1762 be_gas_elf_type_char = '@';
1764 ia32_register_emitters();
1766 get_unique_label(pic_base_label, sizeof(pic_base_label), "PIC_BASE");
1768 infos = construct_parameter_infos(irg);
1769 be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment,
/* remember the stack layout for dwarf call-frame emission below */
1773 sp_relative = layout->sp_relative;
1774 if (layout->sp_relative) {
1775 ir_type *frame_type = get_irg_frame_type(irg);
1776 frame_type_size = get_type_size_bytes(frame_type);
1777 be_dwarf_callframe_register(&ia32_registers[REG_ESP]);
1779 /* well not entirely correct here, we should emit this after the
1780 * "movl esp, ebp" */
1781 be_dwarf_callframe_register(&ia32_registers[REG_EBP]);
1782 /* TODO: do not hardcode the following */
1783 be_dwarf_callframe_offset(8);
1784 be_dwarf_callframe_spilloffset(&ia32_registers[REG_EBP], -8);
1787 /* we use links to point to target blocks */
1788 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1789 irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
1791 /* initialize next block links */
1792 n = ARR_LEN(blk_sched);
1793 for (i = 0; i < n; ++i) {
1794 ir_node *block = blk_sched[i];
1795 ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
1797 set_irn_link(block, prev);
/* emit all blocks in scheduled order */
1800 for (i = 0; i < n; ++i) {
1801 ir_node *block = blk_sched[i];
1803 ia32_gen_block(block);
1806 be_gas_emit_function_epilog(entity);
1808 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1810 /* Sort the exception table using the exception label id's.
1811 Those are ascending with ascending addresses. */
1812 qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
/* write out the (instruction label, handler block) pairs */
1816 for (e = 0; e < ARR_LEN(exc_list); ++e) {
1817 be_emit_cstring("\t.long ");
1818 ia32_emit_exc_label(exc_list[e].exc_instr);
1820 be_emit_cstring("\t.long ");
1821 be_gas_emit_block_name(exc_list[e].block);
1825 DEL_ARR_F(exc_list);
/** Command line options understood by this emitter. */
1828 static const lc_opt_table_entry_t ia32_emitter_options[] = {
1829 LC_OPT_ENT_BOOL("mark_spill_reload", "mark spills and reloads with ud opcodes", &mark_spill_reload),
1833 /* ==== Experimental binary emitter ==== */
/* Maps ia32 backend register indices to x86 machine encodings. */
1835 static unsigned char reg_gp_map[N_ia32_gp_REGS];
1836 //static unsigned char reg_mmx_map[N_ia32_mmx_REGS];
1837 //static unsigned char reg_sse_map[N_ia32_xmm_REGS];
/** Fill reg_gp_map with the hardware encoding of each GP register. */
1839 static void build_reg_map(void)
1841 reg_gp_map[REG_GP_EAX] = 0x0;
1842 reg_gp_map[REG_GP_ECX] = 0x1;
1843 reg_gp_map[REG_GP_EDX] = 0x2;
1844 reg_gp_map[REG_GP_EBX] = 0x3;
1845 reg_gp_map[REG_GP_ESP] = 0x4;
1846 reg_gp_map[REG_GP_EBP] = 0x5;
1847 reg_gp_map[REG_GP_ESI] = 0x6;
1848 reg_gp_map[REG_GP_EDI] = 0x7;
1851 /** Returns the encoding for a pnc field. */
1852 static unsigned char pnc2cc(ia32_condition_code_t cc)
1857 /** Sign extension bit values for binops */
1859 UNSIGNED_IMM = 0, /**< unsigned immediate */
1860 SIGNEXT_IMM = 2, /**< sign extended immediate */
1863 /** The mod encoding of the ModR/M */
1865 MOD_IND = 0x00, /**< [reg1] */
1866 MOD_IND_BYTE_OFS = 0x40, /**< [reg1 + byte ofs] */
1867 MOD_IND_WORD_OFS = 0x80, /**< [reg1 + word ofs] */
1868 MOD_REG = 0xC0 /**< reg1 */
1871 /** create R/M encoding for ModR/M */
1872 #define ENC_RM(x) (x)
1873 /** create REG encoding for ModR/M */
1874 #define ENC_REG(x) ((x) << 3)
1876 /** create encoding for a SIB byte */
1877 #define ENC_SIB(scale, index, base) ((scale) << 6 | (index) << 3 | (base))
1879 /* Note: The following routines are supposed to append bytes, words, dwords
1880 to the output stream.
1881 Currently the implementation is stupid in that it still creates output
1882 for an "assembler" in the form of .byte, .long
1883 We will change this when enough infrastructure is there to create complete
1884 machine code in memory/object files */
/** Append one byte to the output stream (as a ".byte" directive for now). */
1886 static void bemit8(const unsigned char byte)
1888 be_emit_irprintf("\t.byte 0x%x\n", byte);
1889 be_emit_write_line();
/** Append a 16bit word to the output stream (as a ".word" directive). */
1892 static void bemit16(const unsigned short u16)
1894 be_emit_irprintf("\t.word 0x%x\n", u16);
1895 be_emit_write_line();
/** Append a 32bit dword to the output stream (as a ".long" directive). */
1898 static void bemit32(const unsigned u32)
1900 be_emit_irprintf("\t.long 0x%x\n", u32);
1901 be_emit_write_line();
1905 * Emit address of an entity. If @p is_relative is true then a relative
1906 * offset from behind the address to the entity is created.
1908 static void bemit_entity(ir_entity *entity, bool entity_sign, int offset,
1911 if (entity == NULL) {
1916 /* the final version should remember the position in the bytestream
1917 and patch it with the correct address at linktime... */
1918 be_emit_cstring("\t.long ")
1921 be_gas_emit_entity(entity);
/* thread-local entities need a TLS relocation suffix */
1923 if (get_entity_owner(entity) == get_tls_type()) {
1924 if (!entity_has_definition(entity)) {
1925 be_emit_cstring("@INDNTPOFF");
1927 be_emit_cstring("@NTPOFF");
/* "-." makes the reference relative to the current location */
1932 be_emit_cstring("-.");
1937 be_emit_irprintf("%+d", offset);
1940 be_emit_write_line();
/** Emit a 32bit displacement to @p dest_block, relative to the end of the
 * 4-byte field itself (hence the "- . - 4"). */
1943 static void bemit_jmp_destination(const ir_node *dest_block)
1945 be_emit_cstring("\t.long ");
1946 be_gas_emit_block_name(dest_block);
1947 be_emit_cstring(" - . - 4\n");
1948 be_emit_write_line();
1951 /* end emit routines, all emitters following here should only use the functions
/** Selects the low (AL..BL) or high (AH..BH) half of an 8bit register. */
1954 typedef enum reg_modifier {
1959 /** Create a ModR/M byte for src1,src2 registers */
1960 static void bemit_modrr(const arch_register_t *src1,
1961 const arch_register_t *src2)
1963 unsigned char modrm = MOD_REG;
1964 modrm |= ENC_RM(reg_gp_map[src1->index]);
1965 modrm |= ENC_REG(reg_gp_map[src2->index]);
1969 /** Create a ModR/M8 byte for src1,src2 registers */
1970 static void bemit_modrr8(reg_modifier_t high_part1, const arch_register_t *src1,
1971 reg_modifier_t high_part2, const arch_register_t *src2)
1973 unsigned char modrm = MOD_REG;
/* +4 selects the high-byte encoding (AH/CH/DH/BH occupy codes 4..7) */
1974 modrm |= ENC_RM(reg_gp_map[src1->index] + (high_part1 == REG_HIGH ? 4 : 0));
1975 modrm |= ENC_REG(reg_gp_map[src2->index] + (high_part2 == REG_HIGH ? 4 : 0));
1979 /** Create a ModR/M byte for one register and extension */
1980 static void bemit_modru(const arch_register_t *reg, unsigned ext)
1982 unsigned char modrm = MOD_REG;
1984 modrm |= ENC_RM(reg_gp_map[reg->index]);
1985 modrm |= ENC_REG(ext);
1989 /** Create a ModR/M8 byte for one register */
1990 static void bemit_modrm8(reg_modifier_t high_part, const arch_register_t *reg)
1992 unsigned char modrm = MOD_REG;
/* only EAX..EBX have byte-addressable halves in 32bit mode */
1993 assert(reg_gp_map[reg->index] < 4);
1994 modrm |= ENC_RM(reg_gp_map[reg->index] + (high_part == REG_HIGH ? 4 : 0));
/**
 * Calculate the size of a signed immediate in bytes.
 *
 * @param offset  the signed immediate value
 * @return 1 if the value fits into a sign-extended byte, 2 if it fits into a
 *         sign-extended 16bit word, 4 otherwise
 */
static unsigned get_signed_imm_size(int offset)
{
	if (-128 <= offset && offset < 128) {
		return 1;
	} else if (-32768 <= offset && offset < 32768) {
		return 2;
	} else {
		return 4;
	}
}
2016 * Emit an address mode.
2018 * @param reg content of the reg field: either a register index or an opcode extension
2019 * @param node the node
2021 static void bemit_mod_am(unsigned reg, const ir_node *node)
2023 ir_entity *ent = get_ia32_am_sc(node);
2024 int offs = get_ia32_am_offs_int(node);
2025 ir_node *base = get_irn_n(node, n_ia32_base);
2026 int has_base = !is_ia32_NoReg_GP(base);
2027 ir_node *idx = get_irn_n(node, n_ia32_index);
2028 int has_index = !is_ia32_NoReg_GP(idx);
2031 unsigned emitoffs = 0;
2032 bool emitsib = false;
2035 /* set the mod part depending on displacement */
/* an entity reference always needs the full 32bit displacement form */
2037 modrm |= MOD_IND_WORD_OFS;
2039 } else if (offs == 0) {
2042 } else if (-128 <= offs && offs < 128) {
2043 modrm |= MOD_IND_BYTE_OFS;
2046 modrm |= MOD_IND_WORD_OFS;
2051 const arch_register_t *base_reg = arch_get_irn_register(base);
2052 base_enc = reg_gp_map[base_reg->index];
2054 /* Use the EBP encoding + MOD_IND if NO base register. There is
2055 * always a 32bit offset present in this case. */
2061 /* Determine if we need a SIB byte. */
2063 const arch_register_t *reg_index = arch_get_irn_register(idx);
2064 int scale = get_ia32_am_scale(node);
2066 /* R/M set to ESP means SIB in 32bit mode. */
2067 modrm |= ENC_RM(0x04);
2068 sib = ENC_SIB(scale, reg_gp_map[reg_index->index], base_enc);
2070 } else if (base_enc == 0x04) {
2071 /* for the above reason we are forced to emit a SIB when base is ESP.
2072 * Only the base is used, index must be ESP too, which means no index.
2074 modrm |= ENC_RM(0x04);
2075 sib = ENC_SIB(0, 0x04, 0x04);
2078 modrm |= ENC_RM(base_enc);
2081 /* We are forced to emit an 8bit offset as EBP base without offset is a
2082 * special case for SIB without base register. */
2083 if (base_enc == 0x05 && emitoffs == 0) {
2084 modrm |= MOD_IND_BYTE_OFS;
2088 modrm |= ENC_REG(reg);
2094 /* emit displacement */
2095 if (emitoffs == 8) {
2096 bemit8((unsigned) offs);
2097 } else if (emitoffs == 32) {
2098 bemit_entity(ent, is_ia32_am_sc_sign(node), offs, false);
2103 * Emit a binop with an immediate operand.
2105 * @param node the node to emit
2106 * @param opcode_eax the opcode for the op eax, imm variant
2107 * @param opcode the opcode for the reg, imm variant
2108 * @param ruval the opcode extension for opcode
2110 static void bemit_binop_with_imm(
2111 const ir_node *node,
2112 unsigned char opcode_ax,
2113 unsigned char opcode, unsigned char ruval)
2115 /* Use in-reg, because some instructions (cmp, test) have no out-reg. */
2116 const ir_node *op = get_irn_n(node, n_ia32_binary_right);
2117 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
2120 /* Some instructions (test) have no short form with 32bit value + 8bit
/* symconst addresses are link-time values, so the full 32bit form is forced */
2122 if (attr->symconst != NULL || opcode & SIGNEXT_IMM) {
2125 /* check for sign extension */
2126 size = get_signed_imm_size(attr->offset);
/* 8bit immediate: use the sign-extended short encoding */
2131 bemit8(opcode | SIGNEXT_IMM);
2132 /* cmp has this special mode */
2133 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2134 bemit_mod_am(ruval, node);
2136 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2137 bemit_modru(reg, ruval);
2139 bemit8((unsigned char)attr->offset);
2143 /* check for eax variant: this variant is shorter for 32bit immediates only */
2144 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2146 bemit_mod_am(ruval, node);
2148 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2149 if (reg->index == REG_GP_EAX) {
2153 bemit_modru(reg, ruval);
2156 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
2159 panic("invalid imm size?!?");
/** Emit the register/address-mode form of a binop: opcode byte @p code was
 * already chosen by the caller; this writes the ModR/M (and AM bytes). */
2165 static void bemit_binop_2(const ir_node *node, unsigned code)
2167 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
2169 if (get_ia32_op_type(node) == ia32_Normal) {
2170 const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
2171 bemit_modrr(op2, out);
2173 bemit_mod_am(reg_gp_map[out->index], node);
/** Emit a binop, dispatching between immediate and register right operand.
 * @p opcodes holds {reg-form, eax-imm, reg-imm, imm opcode extension}. */
2180 static void bemit_binop(const ir_node *node, const unsigned char opcodes[4])
2182 ir_node *right = get_irn_n(node, n_ia32_binary_right);
2183 if (is_ia32_Immediate(right)) {
2184 bemit_binop_with_imm(node, opcodes[1], opcodes[2], opcodes[3]);
2186 bemit_binop_2(node, opcodes[0]);
/** Emit a unop: opcode @p code with opcode extension @p ext in the reg field,
 * operand taken from input @p input (register or address mode). */
2193 static void bemit_unop(const ir_node *node, unsigned char code, unsigned char ext, int input)
2196 if (get_ia32_op_type(node) == ia32_Normal) {
2197 const arch_register_t *in = arch_get_irn_register_in(node, input);
2198 bemit_modru(in, ext);
2200 bemit_mod_am(ext, node);
/** Emit a unop whose reg field holds the output register. */
2204 static void bemit_unop_reg(const ir_node *node, unsigned char code, int input)
2206 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2207 bemit_unop(node, code, reg_gp_map[out->index], input);
/** Emit a unop on a memory operand; @p code is the 8bit opcode, code+1 the
 * 16/32bit variant selected by the load/store mode size. */
2210 static void bemit_unop_mem(const ir_node *node, unsigned char code, unsigned char ext)
2212 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));
2215 bemit8(size == 8 ? code : code + 1);
2216 bemit_mod_am(ext, node);
/** Emit a two-byte (0x0F-prefixed) unop with register output. */
2219 static void bemit_0f_unop_reg(ir_node const *const node, unsigned char const code, int const input)
2222 bemit_unop_reg(node, code, input);
/** Emit the 32bit immediate (entity and/or offset) of an Immediate node. */
2225 static void bemit_immediate(const ir_node *node, bool relative)
2227 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
2228 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, relative);
/** Emit a register-to-register copy (mov) for a be_Copy node. */
2231 static void bemit_copy(const ir_node *copy)
2233 const arch_register_t *in = arch_get_irn_register_in(copy, 0);
2234 const arch_register_t *out = arch_get_irn_register_out(copy, 0);
2238 /* copies of vf nodes aren't real... */
2239 if (in->reg_class == &ia32_reg_classes[CLASS_ia32_vfp])
2242 assert(in->reg_class == &ia32_reg_classes[CLASS_ia32_gp]);
2244 bemit_modrr(in, out);
/** Emit a register swap for a be_Perm node. */
2247 static void bemit_perm(const ir_node *node)
2249 const arch_register_t *in0 = arch_get_irn_register(get_irn_n(node, 0));
2250 const arch_register_t *in1 = arch_get_irn_register(get_irn_n(node, 1));
2251 const arch_register_class_t *cls0 = in0->reg_class;
2253 assert(cls0 == in1->reg_class && "Register class mismatch at Perm");
2255 if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
/* 0x90+reg is the one-byte "xchg eax, reg" form */
2256 if (in0->index == REG_GP_EAX) {
2257 bemit8(0x90 + reg_gp_map[in1->index]);
2258 } else if (in1->index == REG_GP_EAX) {
2259 bemit8(0x90 + reg_gp_map[in0->index]);
2262 bemit_modrr(in0, in1);
2264 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
2265 panic("unimplemented"); // TODO implement
2266 //ia32_emitf(NULL, "xorpd %R, %R", in1, in0);
2267 //ia32_emitf(NULL, "xorpd %R, %R", in0, in1);
2268 //ia32_emitf(node, "xorpd %R, %R", in1, in0);
2269 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
2271 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
2274 panic("unexpected register class in be_Perm (%+F)", node);
/** Emit "xor reg, reg", the canonical way to zero a register. */
2278 static void bemit_xor0(const ir_node *node)
2280 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2282 bemit_modrr(out, out);
/** Emit "mov $imm, reg" (0xB8+reg short form) for a Const node. */
2285 static void bemit_mov_const(const ir_node *node)
2287 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2288 bemit8(0xB8 + reg_gp_map[out->index]);
2289 bemit_immediate(node, false);
2293 * Creates a function for a Binop with 3 possible encodings.
2295 #define BINOP(op, op0, op1, op2, op2_ext) \
2296 static void bemit_ ## op(const ir_node *node) { \
2297 static const unsigned char op ## _codes[] = {op0, op1, op2, op2_ext}; \
2298 bemit_binop(node, op ## _codes); \
2301 /* insn def eax,imm imm */
2302 BINOP(add, 0x03, 0x05, 0x81, 0)
2303 BINOP(or, 0x0B, 0x0D, 0x81, 1)
2304 BINOP(adc, 0x13, 0x15, 0x81, 2)
2305 BINOP(sbb, 0x1B, 0x1D, 0x81, 3)
2306 BINOP(and, 0x23, 0x25, 0x81, 4)
2307 BINOP(sub, 0x2B, 0x2D, 0x81, 5)
2308 BINOP(xor, 0x33, 0x35, 0x81, 6)
2309 BINOP(test, 0x85, 0xA9, 0xF7, 0)
/* Creates emitters for binops whose destination is a memory operand; @p ext
 * is the opcode extension placed in the ModR/M reg field. */
2311 #define BINOPMEM(op, ext) \
2312 static void bemit_##op(const ir_node *node) \
2315 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2318 val = get_irn_n(node, n_ia32_unary_op); \
2319 if (is_ia32_Immediate(val)) { \
2320 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(val); \
2321 int offset = attr->offset; \
/* prefer the short sign-extended imm8 form when the value allows it */ \
2322 if (attr->symconst == NULL && get_signed_imm_size(offset) == 1) { \
2324 bemit_mod_am(ext, node); \
2328 bemit_mod_am(ext, node); \
2332 bemit_entity(attr->symconst, attr->sc_sign, offset, false); \
2336 bemit8(ext << 3 | 1); \
2337 bemit_mod_am(reg_gp_map[arch_get_irn_register(val)->index], node); \
2341 static void bemit_##op##8bit(const ir_node *node) \
2343 ir_node *val = get_irn_n(node, n_ia32_unary_op); \
2344 if (is_ia32_Immediate(val)) { \
2346 bemit_mod_am(ext, node); \
2347 bemit8(get_ia32_immediate_attr_const(val)->offset); \
2350 bemit_mod_am(reg_gp_map[arch_get_irn_register(val)->index], node); \
2362 * Creates a function for an Unop with code /ext encoding.
2364 #define UNOP(op, code, ext, input) \
2365 static void bemit_ ## op(const ir_node *node) { \
2366 bemit_unop(node, code, ext, input); \
2369 UNOP(not, 0xF7, 2, n_ia32_Not_val)
2370 UNOP(neg, 0xF7, 3, n_ia32_Neg_val)
2371 UNOP(mul, 0xF7, 4, n_ia32_Mul_right)
2372 UNOP(imul1op, 0xF7, 5, n_ia32_IMul1OP_right)
2373 UNOP(div, 0xF7, 6, n_ia32_Div_divisor)
2374 UNOP(idiv, 0xF7, 7, n_ia32_IDiv_divisor)
2376 /* TODO: am support for IJmp */
2377 UNOP(ijmp, 0xFF, 4, n_ia32_IJmp_target)
/* Creates emitters for shift/rotate ops: register form plus a *mem variant.
 * Shift-by-1 uses the short 0xD1 encoding, other immediates 0xC1, and a
 * variable count (in CL) the 0xD3 encoding. */
2379 #define SHIFT(op, ext) \
2380 static void bemit_##op(const ir_node *node) \
2382 const arch_register_t *out = arch_get_irn_register_out(node, 0); \
2383 ir_node *count = get_irn_n(node, 1); \
2384 if (is_ia32_Immediate(count)) { \
2385 int offset = get_ia32_immediate_attr_const(count)->offset; \
2386 if (offset == 1) { \
2388 bemit_modru(out, ext); \
2391 bemit_modru(out, ext); \
2396 bemit_modru(out, ext); \
2400 static void bemit_##op##mem(const ir_node *node) \
2403 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2406 count = get_irn_n(node, 1); \
2407 if (is_ia32_Immediate(count)) { \
2408 int offset = get_ia32_immediate_attr_const(count)->offset; \
2409 if (offset == 1) { \
2410 bemit8(size == 8 ? 0xD0 : 0xD1); \
2411 bemit_mod_am(ext, node); \
2413 bemit8(size == 8 ? 0xC0 : 0xC1); \
2414 bemit_mod_am(ext, node); \
2418 bemit8(size == 8 ? 0xD2 : 0xD3); \
2419 bemit_mod_am(ext, node); \
/** Emit shld (double precision shift left); count is immediate or CL. */
2429 static void bemit_shld(const ir_node *node)
2431 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_ShlD_val_low);
2432 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_ShlD_res);
2433 ir_node *count = get_irn_n(node, n_ia32_ShlD_count);
2435 if (is_ia32_Immediate(count)) {
2437 bemit_modrr(out, in);
2438 bemit8(get_ia32_immediate_attr_const(count)->offset);
2441 bemit_modrr(out, in);
/** Emit shrd (double precision shift right); count is immediate or CL. */
2445 static void bemit_shrd(const ir_node *node)
2447 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_ShrD_val_low);
2448 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_ShrD_res);
2449 ir_node *count = get_irn_n(node, n_ia32_ShrD_count);
2451 if (is_ia32_Immediate(count)) {
2453 bemit_modrr(out, in);
2454 bemit8(get_ia32_immediate_attr_const(count)->offset);
2457 bemit_modrr(out, in);
/** Emit "sbb reg, reg": subtracts the carry flag from zero (reg = -CF). */
2461 static void bemit_sbb0(ir_node const *const node)
2463 arch_register_t const *const out = arch_get_irn_register_out(node, pn_ia32_Sbb0_res);
2464 unsigned char const reg = reg_gp_map[out->index];
2466 bemit8(MOD_REG | ENC_REG(reg) | ENC_RM(reg));
2470 * binary emitter for setcc.
2472 static void bemit_setcc(const ir_node *node)
2474 const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);
2476 ia32_condition_code_t cc = get_ia32_condcode(node);
2477 cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
/* Float compares leave "unordered" in the parity flag; those conditions
 * need a second setcc on PF plus an or/and to merge both byte halves. */
2478 if (cc & ia32_cc_float_parity_cases) {
2479 if (cc & ia32_cc_negated) {
2482 bemit8(0x90 | pnc2cc(cc));
2483 bemit_modrm8(REG_LOW, dreg);
2488 bemit_modrm8(REG_HIGH, dreg);
2490 /* orb %>dreg, %<dreg */
2492 bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
2496 bemit8(0x90 | pnc2cc(cc));
2497 bemit_modrm8(REG_LOW, dreg);
2502 bemit_modrm8(REG_HIGH, dreg);
2504 /* andb %>dreg, %<dreg */
2506 bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
/* ordinary integer condition: a single setcc (0x0F 0x90|cc) suffices */
2511 bemit8(0x90 | pnc2cc(cc));
2512 bemit_modrm8(REG_LOW, dreg);
/** Emit bsf (bit scan forward): 0x0F 0xBC. */
2516 static void bemit_bsf(ir_node const *const node)
2518 bemit_0f_unop_reg(node, 0xBC, n_ia32_Bsf_operand);
/** Emit bsr (bit scan reverse): 0x0F 0xBD. */
2521 static void bemit_bsr(ir_node const *const node)
2523 bemit_0f_unop_reg(node, 0xBD, n_ia32_Bsr_operand);
/** Emit bswap. */
2526 static void bemit_bswap(ir_node const *const node)
2529 bemit_modru(arch_get_irn_register_out(node, pn_ia32_Bswap_res), 1);
/** Emit bt (bit test) with either an imm8 or a register bit index. */
2532 static void bemit_bt(ir_node const *const node)
2535 arch_register_t const *const lreg = arch_get_irn_register_in(node, n_ia32_Bt_left);
2536 ir_node const *const right = get_irn_n(node, n_ia32_Bt_right);
2537 if (is_ia32_Immediate(right)) {
2538 ia32_immediate_attr_t const *const attr = get_ia32_immediate_attr_const(right);
2539 int const offset = attr->offset;
2540 assert(!attr->symconst);
2541 assert(get_signed_imm_size(offset) == 1);
2543 bemit_modru(lreg, 4);
2547 bemit_modrr(lreg, arch_get_irn_register(right));
/** Emit a conditional move (cmovcc), materializing the false value first
 * when register allocation did not already place it in the output. */
2551 static void bemit_cmovcc(const ir_node *node)
2553 const ia32_attr_t *attr = get_ia32_attr_const(node);
2554 int ins_permuted = attr->data.ins_permuted;
2555 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_res);
2556 ia32_condition_code_t cc = get_ia32_condcode(node);
2557 const arch_register_t *in_true;
2558 const arch_register_t *in_false;
2560 cc = determine_final_cc(node, n_ia32_CMovcc_eflags, cc);
2562 in_true = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
2563 in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));
2565 /* should be same constraint fulfilled? */
2566 if (out == in_false) {
2567 /* yes -> nothing to do */
2568 } else if (out == in_true) {
/* output already holds the true value: invert the condition instead */
2569 assert(get_ia32_op_type(node) == ia32_Normal);
2570 ins_permuted = !ins_permuted;
2574 bemit8(0x8B); // mov %in_false, %out
2575 bemit_modrr(in_false, out);
2579 cc = ia32_negate_condition_code(cc);
2581 if (cc & ia32_cc_float_parity_cases)
2582 panic("cmov can't handle parity float cases");
2585 bemit8(0x40 | pnc2cc(cc));
2586 if (get_ia32_op_type(node) == ia32_Normal) {
2587 bemit_modrr(in_true, out);
2589 bemit_mod_am(reg_gp_map[out->index], node);
/** Emit cmp: picks imm8 / imm16 / imm32 / eax-short / reg / AM encodings.
 * Largely parallels bemit_binop_with_imm with opcode extension 7. */
2593 static void bemit_cmp(const ir_node *node)
2595 unsigned ls_size = get_mode_size_bits(get_ia32_ls_mode(node));
2601 right = get_irn_n(node, n_ia32_binary_right);
2602 if (is_ia32_Immediate(right)) {
2603 /* Use in-reg, because some instructions (cmp, test) have no out-reg. */
2604 const ir_node *op = get_irn_n(node, n_ia32_binary_right);
2605 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
/* symconst forces the full 32bit immediate encoding */
2608 if (attr->symconst != NULL) {
2611 /* check for sign extension */
2612 size = get_signed_imm_size(attr->offset);
2617 bemit8(0x81 | SIGNEXT_IMM);
2618 /* cmp has this special mode */
2619 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2620 bemit_mod_am(7, node);
2622 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2623 bemit_modru(reg, 7);
2625 bemit8((unsigned char)attr->offset);
2629 /* check for eax variant: this variant is shorter for 32bit immediates only */
2630 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2632 bemit_mod_am(7, node);
2634 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2635 if (reg->index == REG_GP_EAX) {
2639 bemit_modru(reg, 7);
2642 if (ls_size == 16) {
2643 bemit16(attr->offset);
2645 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
2649 panic("invalid imm size?!?");
/* register/address-mode right operand */
2651 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
2653 if (get_ia32_op_type(node) == ia32_Normal) {
2654 const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
2655 bemit_modrr(op2, out);
2657 bemit_mod_am(reg_gp_map[out->index], node);
/** Emit the 8bit variant of cmp (imm, eax-short, reg or AM forms). */
2662 static void bemit_cmp8bit(const ir_node *node)
2664 ir_node *right = get_irn_n(node, n_ia32_binary_right);
2665 if (is_ia32_Immediate(right)) {
2666 if (get_ia32_op_type(node) == ia32_Normal) {
2667 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
2668 if (out->index == REG_GP_EAX) {
2672 bemit_modru(out, 7);
2676 bemit_mod_am(7, node);
2678 bemit8(get_ia32_immediate_attr_const(right)->offset);
2680 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
2682 if (get_ia32_op_type(node) == ia32_Normal) {
2683 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Cmp_right);
2684 bemit_modrr(out, in);
2686 bemit_mod_am(reg_gp_map[out->index], node);
/** Emit the 8bit variant of test (imm, eax-short, reg or AM forms). */
2691 static void bemit_test8bit(const ir_node *node)
2693 ir_node *right = get_irn_n(node, n_ia32_Test8Bit_right);
2694 if (is_ia32_Immediate(right)) {
2695 if (get_ia32_op_type(node) == ia32_Normal) {
2696 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
2697 if (out->index == REG_GP_EAX) {
2701 bemit_modru(out, 0);
2705 bemit_mod_am(0, node);
2707 bemit8(get_ia32_immediate_attr_const(right)->offset);
2709 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
2711 if (get_ia32_op_type(node) == ia32_Normal) {
2712 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Test8Bit_right);
2713 bemit_modrr(out, in);
2715 bemit_mod_am(reg_gp_map[out->index], node);
/** Emit imul: 0x6B (imm8), 0x69 (imm32) or 0x0F 0xAF (register) form. */
2720 static void bemit_imul(const ir_node *node)
2722 ir_node *right = get_irn_n(node, n_ia32_IMul_right);
2723 /* Do we need the immediate form? */
2724 if (is_ia32_Immediate(right)) {
2725 int imm = get_ia32_immediate_attr_const(right)->offset;
2726 if (get_signed_imm_size(imm) == 1) {
2727 bemit_unop_reg(node, 0x6B, n_ia32_IMul_left);
2730 bemit_unop_reg(node, 0x69, n_ia32_IMul_left);
2734 bemit_0f_unop_reg(node, 0xAF, n_ia32_IMul_right);
/** Emit dec via its one-byte 0x48+reg short form. */
2738 static void bemit_dec(const ir_node *node)
2740 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Dec_res);
2741 bemit8(0x48 + reg_gp_map[out->index]);
/** Emit inc via its one-byte 0x40+reg short form. */
2744 static void bemit_inc(const ir_node *node)
2746 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Inc_res);
2747 bemit8(0x40 + reg_gp_map[out->index]);
/* Creates emitters for unops on memory operands (not/neg/inc/dec). */
2750 #define UNOPMEM(op, code, ext) \
2751 static void bemit_##op(const ir_node *node) \
2753 bemit_unop_mem(node, code, ext); \
2756 UNOPMEM(notmem, 0xF6, 2)
2757 UNOPMEM(negmem, 0xF6, 3)
2758 UNOPMEM(incmem, 0xFE, 0)
2759 UNOPMEM(decmem, 0xFE, 1)
/** Emit a load of the TLS base: gs-prefixed mov from absolute address 0. */
2761 static void bemit_ldtls(const ir_node *node)
2763 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2765 bemit8(0x65); // gs:
2766 if (out->index == REG_GP_EAX) {
2767 bemit8(0xA1); // movl 0, %eax
2769 bemit8(0x8B); // movl 0, %reg
2770 bemit8(MOD_IND | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x05));
/** Emit lea: computes the address mode into the output register. */
2778 static void bemit_lea(const ir_node *node)
2780 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2782 bemit_mod_am(reg_gp_map[out->index], node);
2785 /* helper function for bemit_minus64bit */
2786 static void bemit_helper_mov(const arch_register_t *src, const arch_register_t *dst)
2788 bemit8(0x8B); // movl %src, %dst
2789 bemit_modrr(src, dst);
2792 /* helper function for bemit_minus64bit */
2793 static void bemit_helper_neg(const arch_register_t *reg)
2795 bemit8(0xF7); // negl %reg
2796 bemit_modru(reg, 3);
2799 /* helper function for bemit_minus64bit */
/* 0x83 /3 is "sbb r/m32, imm8"; the trailing immediate byte follows */
2800 static void bemit_helper_sbb0(const arch_register_t *reg)
2802 bemit8(0x83); // sbbl $0, %reg
2803 bemit_modru(reg, 3);
2807 /* helper function for bemit_minus64bit */
2808 static void bemit_helper_sbb(const arch_register_t *src, const arch_register_t *dst)
2810 bemit8(0x1B); // sbbl %src, %dst
2811 bemit_modrr(src, dst);
2814 /* helper function for bemit_minus64bit */
/* 0x90+reg is the short "xchg eax, reg" form; otherwise use 0x87 + ModR/M */
2815 static void bemit_helper_xchg(const arch_register_t *src, const arch_register_t *dst)
2817 if (src->index == REG_GP_EAX) {
2818 bemit8(0x90 + reg_gp_map[dst->index]); // xchgl %eax, %dst
2819 } else if (dst->index == REG_GP_EAX) {
2820 bemit8(0x90 + reg_gp_map[src->index]); // xchgl %src, %eax
2822 bemit8(0x87); // xchgl %src, %dst
2823 bemit_modrr(src, dst);
2827 /* helper function for bemit_minus64bit */
2828 static void bemit_helper_zero(const arch_register_t *reg)
2830 bemit8(0x33); // xorl %reg, %reg
2831 bemit_modrr(reg, reg);
2834 static void bemit_minus64bit(const ir_node *node)
2836 const arch_register_t *in_lo = arch_get_irn_register_in(node, 0);
2837 const arch_register_t *in_hi = arch_get_irn_register_in(node, 1);
2838 const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
2839 const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);
2841 if (out_lo == in_lo) {
2842 if (out_hi != in_hi) {
2843 /* a -> a, b -> d */
2846 /* a -> a, b -> b */
2849 } else if (out_lo == in_hi) {
2850 if (out_hi == in_lo) {
2851 /* a -> b, b -> a */
2852 bemit_helper_xchg(in_lo, in_hi);
2855 /* a -> b, b -> d */
2856 bemit_helper_mov(in_hi, out_hi);
2857 bemit_helper_mov(in_lo, out_lo);
2861 if (out_hi == in_lo) {
2862 /* a -> c, b -> a */
2863 bemit_helper_mov(in_lo, out_lo);
2865 } else if (out_hi == in_hi) {
2866 /* a -> c, b -> b */
2867 bemit_helper_mov(in_lo, out_lo);
2870 /* a -> c, b -> d */
2871 bemit_helper_mov(in_lo, out_lo);
2877 bemit_helper_neg( out_hi);
2878 bemit_helper_neg( out_lo);
2879 bemit_helper_sbb0(out_hi);
2883 bemit_helper_zero(out_hi);
2884 bemit_helper_neg( out_lo);
2885 bemit_helper_sbb( in_hi, out_hi);
2889 * Emit a single opcode.
2891 #define EMIT_SINGLEOP(op, code) \
2892 static void bemit_ ## op(const ir_node *node) { \
2897 //EMIT_SINGLEOP(daa, 0x27)
2898 //EMIT_SINGLEOP(das, 0x2F)
2899 //EMIT_SINGLEOP(aaa, 0x37)
2900 //EMIT_SINGLEOP(aas, 0x3F)
2901 //EMIT_SINGLEOP(nop, 0x90)
2902 EMIT_SINGLEOP(cwtl, 0x98)
2903 EMIT_SINGLEOP(cltd, 0x99)
2904 //EMIT_SINGLEOP(fwait, 0x9B)
2905 EMIT_SINGLEOP(sahf, 0x9E)
2906 //EMIT_SINGLEOP(popf, 0x9D)
2907 EMIT_SINGLEOP(leave, 0xC9)
2908 EMIT_SINGLEOP(int3, 0xCC)
2909 //EMIT_SINGLEOP(iret, 0xCF)
2910 //EMIT_SINGLEOP(xlat, 0xD7)
2911 //EMIT_SINGLEOP(lock, 0xF0)
2912 EMIT_SINGLEOP(rep, 0xF3)
2913 //EMIT_SINGLEOP(halt, 0xF4)
2914 EMIT_SINGLEOP(cmc, 0xF5)
2915 EMIT_SINGLEOP(stc, 0xF9)
2916 //EMIT_SINGLEOP(cli, 0xFA)
2917 //EMIT_SINGLEOP(sti, 0xFB)
2918 //EMIT_SINGLEOP(std, 0xFD)
2921 * Emits a MOV out, [MEM].
2923 static void bemit_load(const ir_node *node)
2925 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2927 if (out->index == REG_GP_EAX) {
2928 ir_node *base = get_irn_n(node, n_ia32_base);
2929 int has_base = !is_ia32_NoReg_GP(base);
2930 ir_node *idx = get_irn_n(node, n_ia32_index);
2931 int has_index = !is_ia32_NoReg_GP(idx);
2932 if (!has_base && !has_index) {
2933 ir_entity *ent = get_ia32_am_sc(node);
2934 int offs = get_ia32_am_offs_int(node);
2935 /* load from constant address to EAX can be encoded
2938 bemit_entity(ent, 0, offs, false);
2943 bemit_mod_am(reg_gp_map[out->index], node);
2947 * Emits a MOV [mem], in.
2949 static void bemit_store(const ir_node *node)
2951 const ir_node *value = get_irn_n(node, n_ia32_Store_val);
2952 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));
2954 if (is_ia32_Immediate(value)) {
2957 bemit_mod_am(0, node);
2958 bemit8(get_ia32_immediate_attr_const(value)->offset);
2959 } else if (size == 16) {
2962 bemit_mod_am(0, node);
2963 bemit16(get_ia32_immediate_attr_const(value)->offset);
2966 bemit_mod_am(0, node);
2967 bemit_immediate(value, false);
2970 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Store_val);
2972 if (in->index == REG_GP_EAX) {
2973 ir_node *base = get_irn_n(node, n_ia32_base);
2974 int has_base = !is_ia32_NoReg_GP(base);
2975 ir_node *idx = get_irn_n(node, n_ia32_index);
2976 int has_index = !is_ia32_NoReg_GP(idx);
2977 if (!has_base && !has_index) {
2978 ir_entity *ent = get_ia32_am_sc(node);
2979 int offs = get_ia32_am_offs_int(node);
2980 /* store to constant address from EAX can be encoded as
2981 * 0xA2/0xA3 [offset]*/
2989 bemit_entity(ent, 0, offs, false);
3001 bemit_mod_am(reg_gp_map[in->index], node);
3005 static void bemit_conv_i2i(const ir_node *node)
3010 ir_mode *const smaller_mode = get_ia32_ls_mode(node);
3011 unsigned opcode = 0xB6;
3012 if (mode_is_signed(smaller_mode)) opcode |= 0x08;
3013 if (get_mode_size_bits(smaller_mode) == 16) opcode |= 0x01;
3014 bemit_0f_unop_reg(node, opcode, n_ia32_Conv_I2I_val);
3017 static void bemit_popcnt(ir_node const *const node)
3020 bemit_0f_unop_reg(node, 0xB8, n_ia32_Popcnt_operand);
3026 static void bemit_push(const ir_node *node)
3028 const ir_node *value = get_irn_n(node, n_ia32_Push_val);
3030 if (is_ia32_Immediate(value)) {
3031 const ia32_immediate_attr_t *attr
3032 = get_ia32_immediate_attr_const(value);
3033 unsigned size = get_signed_imm_size(attr->offset);
3039 bemit8((unsigned char)attr->offset);
3044 bemit_immediate(value, false);
3047 } else if (is_ia32_NoReg_GP(value)) {
3049 bemit_mod_am(6, node);
3051 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_Push_val);
3052 bemit8(0x50 + reg_gp_map[reg->index]);
3059 static void bemit_pop(const ir_node *node)
3061 const arch_register_t *reg = arch_get_irn_register_out(node, pn_ia32_Pop_res);
3062 bemit8(0x58 + reg_gp_map[reg->index]);
3065 static void bemit_popmem(const ir_node *node)
3068 bemit_mod_am(0, node);
3071 static void bemit_call(const ir_node *node)
3073 ir_node *proc = get_irn_n(node, n_ia32_Call_addr);
3075 if (is_ia32_Immediate(proc)) {
3077 bemit_immediate(proc, true);
3079 bemit_unop(node, 0xFF, 2, n_ia32_Call_addr);
3083 static void bemit_jmp(const ir_node *dest_block)
3086 bemit_jmp_destination(dest_block);
3089 static void bemit_jump(const ir_node *node)
3091 if (can_be_fallthrough(node))
3094 bemit_jmp(get_cfop_target_block(node));
3097 static void bemit_jcc(ia32_condition_code_t pnc, const ir_node *dest_block)
3099 unsigned char cc = pnc2cc(pnc);
3102 bemit_jmp_destination(dest_block);
3105 static void bemit_jp(bool odd, const ir_node *dest_block)
3109 bemit_jmp_destination(dest_block);
3112 static void bemit_ia32_jcc(const ir_node *node)
3114 ia32_condition_code_t cc = get_ia32_condcode(node);
3115 const ir_node *proj_true;
3116 const ir_node *proj_false;
3117 const ir_node *dest_true;
3118 const ir_node *dest_false;
3120 cc = determine_final_cc(node, 0, cc);
3122 /* get both Projs */
3123 proj_true = get_proj(node, pn_ia32_Jcc_true);
3124 assert(proj_true && "Jcc without true Proj");
3126 proj_false = get_proj(node, pn_ia32_Jcc_false);
3127 assert(proj_false && "Jcc without false Proj");
3129 if (can_be_fallthrough(proj_true)) {
3130 /* exchange both proj's so the second one can be omitted */
3131 const ir_node *t = proj_true;
3133 proj_true = proj_false;
3135 cc = ia32_negate_condition_code(cc);
3138 dest_true = get_cfop_target_block(proj_true);
3139 dest_false = get_cfop_target_block(proj_false);
3141 if (cc & ia32_cc_float_parity_cases) {
3142 /* Some floating point comparisons require a test of the parity flag,
3143 * which indicates that the result is unordered */
3144 if (cc & ia32_cc_negated) {
3145 bemit_jp(false, dest_true);
3147 /* we need a local label if the false proj is a fallthrough
3148 * as the falseblock might have no label emitted then */
3149 if (can_be_fallthrough(proj_false)) {
3151 bemit8(0x06); // jp + 6
3153 bemit_jp(false, dest_false);
3157 bemit_jcc(cc, dest_true);
3159 /* the second Proj might be a fallthrough */
3160 if (can_be_fallthrough(proj_false)) {
3161 /* it's a fallthrough */
3163 bemit_jmp(dest_false);
3167 static void bemit_switchjmp(const ir_node *node)
3169 ir_entity *jump_table = get_ia32_am_sc(node);
3170 const ir_switch_table *table = get_ia32_switch_table(node);
3172 bemit8(0xFF); // jmp *tbl.label(,%in,4)
3173 bemit_mod_am(0x05, node);
3175 be_emit_jump_table(node, table, jump_table, get_cfop_target_block);
3181 static void bemit_return(const ir_node *node)
3183 unsigned pop = be_Return_get_pop(node);
3184 if (pop > 0 || be_Return_get_emit_pop(node)) {
3186 assert(pop <= 0xffff);
3193 static void bemit_subsp(const ir_node *node)
3195 const arch_register_t *out;
3198 /* mov %esp, %out */
3200 out = arch_get_irn_register_out(node, 1);
3201 bemit8(MOD_REG | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x04));
3204 static void bemit_incsp(const ir_node *node)
3207 const arch_register_t *reg;
3211 offs = be_get_IncSP_offset(node);
3222 size = get_signed_imm_size(offs);
3223 bemit8(size == 1 ? 0x83 : 0x81);
3225 reg = arch_get_irn_register_out(node, 0);
3226 bemit_modru(reg, ext);
3235 static void bemit_copybi(const ir_node *node)
3237 unsigned size = get_ia32_copyb_size(node);
3239 bemit8(0xA4); // movsb
3242 bemit8(0xA5); // movsw
3246 bemit8(0xA5); // movsl
3250 static void bemit_fbinop(ir_node const *const node, unsigned const op_fwd, unsigned const op_rev)
3252 ia32_x87_attr_t const *const attr = get_ia32_x87_attr_const(node);
3253 arch_register_t const *const st0 = &ia32_registers[REG_ST0];
3254 if (get_ia32_op_type(node) == ia32_Normal) {
3255 arch_register_t const *const out = attr->x87[2];
3256 assert(out == attr->x87[0] || out == attr->x87[1]);
3257 assert(!attr->attr.data.ins_permuted);
3259 unsigned char op0 = 0xD8;
3260 if (out != st0) op0 |= 0x04;
3261 if (attr->pop) op0 |= 0x02;
3264 unsigned op = op_rev;
3265 arch_register_t const *reg = attr->x87[0];
3270 bemit8(MOD_REG | ENC_REG(op) | ENC_RM(reg->index));
3272 assert(attr->x87[2] == st0);
3275 unsigned const size = get_mode_size_bits(get_ia32_ls_mode(node));
3276 bemit8(size == 32 ? 0xD8 : 0xDC);
3277 bemit_mod_am(attr->attr.data.ins_permuted ? op_rev : op_fwd, node);
3281 static void bemit_fop_reg(ir_node const *const node, unsigned char const op0, unsigned char const op1)
3284 bemit8(op1 + get_ia32_x87_attr_const(node)->x87[0]->index);
3287 static void bemit_fabs(const ir_node *node)
3295 static void bemit_fadd(const ir_node *node)
3297 bemit_fbinop(node, 0, 0);
3300 static void bemit_fchs(const ir_node *node)
3308 static void bemit_fdiv(const ir_node *node)
3310 bemit_fbinop(node, 6, 7);
3313 static void bemit_ffreep(ir_node const *const node)
3315 bemit_fop_reg(node, 0xDF, 0xC0);
3318 static void bemit_fild(const ir_node *node)
3320 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3322 bemit8(0xDF); // filds
3323 bemit_mod_am(0, node);
3327 bemit8(0xDB); // fildl
3328 bemit_mod_am(0, node);
3332 bemit8(0xDF); // fildll
3333 bemit_mod_am(5, node);
3337 panic("invalid mode size");
3341 static void bemit_fist(const ir_node *node)
3344 unsigned const size = get_mode_size_bits(get_ia32_ls_mode(node));
3346 case 16: bemit8(0xDF); op = 2; break; // fist[p]s
3347 case 32: bemit8(0xDB); op = 2; break; // fist[p]l
3348 case 64: bemit8(0xDF); op = 6; break; // fistpll
3349 default: panic("invalid mode size");
3351 if (get_ia32_x87_attr_const(node)->pop)
3353 // There is only a pop variant for 64 bit integer store.
3354 assert(size < 64 || get_ia32_x87_attr_const(node)->pop);
3355 bemit_mod_am(op, node);
3358 static void bemit_fisttp(ir_node const *const node)
3360 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3361 case 16: bemit8(0xDF); break; // fisttps
3362 case 32: bemit8(0xDB); break; // fisttpl
3363 case 64: bemit8(0xDD); break; // fisttpll
3364 default: panic("Invalid mode size");
3366 bemit_mod_am(1, node);
3369 static void bemit_fld(const ir_node *node)
3371 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3373 bemit8(0xD9); // flds
3374 bemit_mod_am(0, node);
3378 bemit8(0xDD); // fldl
3379 bemit_mod_am(0, node);
3384 bemit8(0xDB); // fldt
3385 bemit_mod_am(5, node);
3389 panic("invalid mode size");
3393 static void bemit_fld1(const ir_node *node)
3397 bemit8(0xE8); // fld1
3400 static void bemit_fldcw(const ir_node *node)
3402 bemit8(0xD9); // fldcw
3403 bemit_mod_am(5, node);
3406 static void bemit_fldz(const ir_node *node)
3410 bemit8(0xEE); // fldz
3413 static void bemit_fmul(const ir_node *node)
3415 bemit_fbinop(node, 1, 1);
3418 static void bemit_fpop(const ir_node *node)
3420 bemit_fop_reg(node, 0xDD, 0xD8);
3423 static void bemit_fpush(const ir_node *node)
3425 bemit_fop_reg(node, 0xD9, 0xC0);
3428 static void bemit_fpushcopy(const ir_node *node)
3430 bemit_fop_reg(node, 0xD9, 0xC0);
3433 static void bemit_fst(const ir_node *node)
3436 unsigned const size = get_mode_size_bits(get_ia32_ls_mode(node));
3438 case 32: bemit8(0xD9); op = 2; break; // fst[p]s
3439 case 64: bemit8(0xDD); op = 2; break; // fst[p]l
3441 case 96: bemit8(0xDB); op = 6; break; // fstpt
3442 default: panic("invalid mode size");
3444 if (get_ia32_x87_attr_const(node)->pop)
3446 // There is only a pop variant for long double store.
3447 assert(size < 80 || get_ia32_x87_attr_const(node)->pop);
3448 bemit_mod_am(op, node);
3451 static void bemit_fsub(const ir_node *node)
3453 bemit_fbinop(node, 4, 5);
3456 static void bemit_fnstcw(const ir_node *node)
3458 bemit8(0xD9); // fnstcw
3459 bemit_mod_am(7, node);
3462 static void bemit_fnstsw(void)
3464 bemit8(0xDF); // fnstsw %ax
3468 static void bemit_ftstfnstsw(const ir_node *node)
3472 bemit8(0xD9); // ftst
3477 static void bemit_fucomi(const ir_node *node)
3479 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3480 bemit8(attr->pop ? 0xDF : 0xDB); // fucom[p]i
3481 bemit8(0xE8 + attr->x87[1]->index);
3484 static void bemit_fucomfnstsw(const ir_node *node)
3486 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3487 bemit8(0xDD); // fucom[p]
3488 bemit8((attr->pop ? 0xE8 : 0xE0) + attr->x87[1]->index);
3492 static void bemit_fucomppfnstsw(const ir_node *node)
3496 bemit8(0xDA); // fucompp
3501 static void bemit_fxch(const ir_node *node)
3503 bemit_fop_reg(node, 0xD9, 0xC8);
3507 * The type of a emitter function.
3509 typedef void (*emit_func) (const ir_node *);
3512 * Set a node emitter. Make it a bit more type safe.
3514 static void register_emitter(ir_op *op, emit_func func)
3516 op->ops.generic = (op_func) func;
3519 static void ia32_register_binary_emitters(void)
3521 /* first clear the generic function pointer for all ops */
3522 ir_clear_opcodes_generic_func();
3524 /* benode emitter */
3525 register_emitter(op_be_Copy, bemit_copy);
3526 register_emitter(op_be_CopyKeep, bemit_copy);
3527 register_emitter(op_be_IncSP, bemit_incsp);
3528 register_emitter(op_be_Perm, bemit_perm);
3529 register_emitter(op_be_Return, bemit_return);
3530 register_emitter(op_ia32_Adc, bemit_adc);
3531 register_emitter(op_ia32_Add, bemit_add);
3532 register_emitter(op_ia32_AddMem, bemit_addmem);
3533 register_emitter(op_ia32_AddMem8Bit, bemit_addmem8bit);
3534 register_emitter(op_ia32_And, bemit_and);
3535 register_emitter(op_ia32_AndMem, bemit_andmem);
3536 register_emitter(op_ia32_AndMem8Bit, bemit_andmem8bit);
3537 register_emitter(op_ia32_Asm, emit_ia32_Asm); // TODO implement binary emitter
3538 register_emitter(op_ia32_Breakpoint, bemit_int3);
3539 register_emitter(op_ia32_Bsf, bemit_bsf);
3540 register_emitter(op_ia32_Bsr, bemit_bsr);
3541 register_emitter(op_ia32_Bswap, bemit_bswap);
3542 register_emitter(op_ia32_Bt, bemit_bt);
3543 register_emitter(op_ia32_CMovcc, bemit_cmovcc);
3544 register_emitter(op_ia32_Call, bemit_call);
3545 register_emitter(op_ia32_Cltd, bemit_cltd);
3546 register_emitter(op_ia32_Cmc, bemit_cmc);
3547 register_emitter(op_ia32_Cmp, bemit_cmp);
3548 register_emitter(op_ia32_Cmp8Bit, bemit_cmp8bit);
3549 register_emitter(op_ia32_Const, bemit_mov_const);
3550 register_emitter(op_ia32_Conv_I2I, bemit_conv_i2i);
3551 register_emitter(op_ia32_Conv_I2I8Bit, bemit_conv_i2i);
3552 register_emitter(op_ia32_CopyB_i, bemit_copybi);
3553 register_emitter(op_ia32_Cwtl, bemit_cwtl);
3554 register_emitter(op_ia32_Dec, bemit_dec);
3555 register_emitter(op_ia32_DecMem, bemit_decmem);
3556 register_emitter(op_ia32_Div, bemit_div);
3557 register_emitter(op_ia32_FldCW, bemit_fldcw);
3558 register_emitter(op_ia32_FnstCW, bemit_fnstcw);
3559 register_emitter(op_ia32_FtstFnstsw, bemit_ftstfnstsw);
3560 register_emitter(op_ia32_FucomFnstsw, bemit_fucomfnstsw);
3561 register_emitter(op_ia32_Fucomi, bemit_fucomi);
3562 register_emitter(op_ia32_FucomppFnstsw, bemit_fucomppfnstsw);
3563 register_emitter(op_ia32_IDiv, bemit_idiv);
3564 register_emitter(op_ia32_IJmp, bemit_ijmp);
3565 register_emitter(op_ia32_IMul, bemit_imul);
3566 register_emitter(op_ia32_IMul1OP, bemit_imul1op);
3567 register_emitter(op_ia32_Inc, bemit_inc);
3568 register_emitter(op_ia32_IncMem, bemit_incmem);
3569 register_emitter(op_ia32_Jcc, bemit_ia32_jcc);
3570 register_emitter(op_ia32_Jmp, bemit_jump);
3571 register_emitter(op_ia32_LdTls, bemit_ldtls);
3572 register_emitter(op_ia32_Lea, bemit_lea);
3573 register_emitter(op_ia32_Leave, bemit_leave);
3574 register_emitter(op_ia32_Load, bemit_load);
3575 register_emitter(op_ia32_Minus64Bit, bemit_minus64bit);
3576 register_emitter(op_ia32_Mul, bemit_mul);
3577 register_emitter(op_ia32_Neg, bemit_neg);
3578 register_emitter(op_ia32_NegMem, bemit_negmem);
3579 register_emitter(op_ia32_Not, bemit_not);
3580 register_emitter(op_ia32_NotMem, bemit_notmem);
3581 register_emitter(op_ia32_Or, bemit_or);
3582 register_emitter(op_ia32_OrMem, bemit_ormem);
3583 register_emitter(op_ia32_OrMem8Bit, bemit_ormem8bit);
3584 register_emitter(op_ia32_Pop, bemit_pop);
3585 register_emitter(op_ia32_PopEbp, bemit_pop);
3586 register_emitter(op_ia32_PopMem, bemit_popmem);
3587 register_emitter(op_ia32_Popcnt, bemit_popcnt);
3588 register_emitter(op_ia32_Push, bemit_push);
3589 register_emitter(op_ia32_RepPrefix, bemit_rep);
3590 register_emitter(op_ia32_Rol, bemit_rol);
3591 register_emitter(op_ia32_RolMem, bemit_rolmem);
3592 register_emitter(op_ia32_Ror, bemit_ror);
3593 register_emitter(op_ia32_RorMem, bemit_rormem);
3594 register_emitter(op_ia32_Sahf, bemit_sahf);
3595 register_emitter(op_ia32_Sar, bemit_sar);
3596 register_emitter(op_ia32_SarMem, bemit_sarmem);
3597 register_emitter(op_ia32_Sbb, bemit_sbb);
3598 register_emitter(op_ia32_Sbb0, bemit_sbb0);
3599 register_emitter(op_ia32_Setcc, bemit_setcc);
3600 register_emitter(op_ia32_Shl, bemit_shl);
3601 register_emitter(op_ia32_ShlD, bemit_shld);
3602 register_emitter(op_ia32_ShlMem, bemit_shlmem);
3603 register_emitter(op_ia32_Shr, bemit_shr);
3604 register_emitter(op_ia32_ShrD, bemit_shrd);
3605 register_emitter(op_ia32_ShrMem, bemit_shrmem);
3606 register_emitter(op_ia32_Stc, bemit_stc);
3607 register_emitter(op_ia32_Store, bemit_store);
3608 register_emitter(op_ia32_Store8Bit, bemit_store);
3609 register_emitter(op_ia32_Sub, bemit_sub);
3610 register_emitter(op_ia32_SubMem, bemit_submem);
3611 register_emitter(op_ia32_SubMem8Bit, bemit_submem8bit);
3612 register_emitter(op_ia32_SubSP, bemit_subsp);
3613 register_emitter(op_ia32_SwitchJmp, bemit_switchjmp);
3614 register_emitter(op_ia32_Test, bemit_test);
3615 register_emitter(op_ia32_Test8Bit, bemit_test8bit);
3616 register_emitter(op_ia32_Xor, bemit_xor);
3617 register_emitter(op_ia32_Xor0, bemit_xor0);
3618 register_emitter(op_ia32_XorMem, bemit_xormem);
3619 register_emitter(op_ia32_XorMem8Bit, bemit_xormem8bit);
3620 register_emitter(op_ia32_fabs, bemit_fabs);
3621 register_emitter(op_ia32_fadd, bemit_fadd);
3622 register_emitter(op_ia32_fchs, bemit_fchs);
3623 register_emitter(op_ia32_fdiv, bemit_fdiv);
3624 register_emitter(op_ia32_ffreep, bemit_ffreep);
3625 register_emitter(op_ia32_fild, bemit_fild);
3626 register_emitter(op_ia32_fist, bemit_fist);
3627 register_emitter(op_ia32_fisttp, bemit_fisttp);
3628 register_emitter(op_ia32_fld, bemit_fld);
3629 register_emitter(op_ia32_fld1, bemit_fld1);
3630 register_emitter(op_ia32_fldz, bemit_fldz);
3631 register_emitter(op_ia32_fmul, bemit_fmul);
3632 register_emitter(op_ia32_fpop, bemit_fpop);
3633 register_emitter(op_ia32_fpush, bemit_fpush);
3634 register_emitter(op_ia32_fpushCopy, bemit_fpushcopy);
3635 register_emitter(op_ia32_fst, bemit_fst);
3636 register_emitter(op_ia32_fsub, bemit_fsub);
3637 register_emitter(op_ia32_fxch, bemit_fxch);
3639 /* ignore the following nodes */
3640 register_emitter(op_ia32_ProduceVal, emit_Nothing);
3641 register_emitter(op_ia32_Unknown, emit_Nothing);
3642 register_emitter(op_be_Keep, emit_Nothing);
3643 register_emitter(op_be_Start, emit_Nothing);
3644 register_emitter(op_Phi, emit_Nothing);
3645 register_emitter(op_Start, emit_Nothing);
3648 static void gen_binary_block(ir_node *block)
3650 ia32_emit_block_header(block);
3652 /* emit the contents of the block */
3653 sched_foreach(block, node) {
3654 ia32_emit_node(node);
3658 void ia32_gen_binary_routine(ir_graph *irg)
3660 ir_entity *entity = get_irg_entity(irg);
3661 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
3662 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
3663 ir_node **blk_sched = irg_data->blk_sched;
3665 parameter_dbg_info_t *infos;
3667 isa = (ia32_isa_t*) arch_env;
3669 ia32_register_binary_emitters();
3671 infos = construct_parameter_infos(irg);
3672 be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment,
3676 /* we use links to point to target blocks */
3677 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
3678 irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);
3680 /* initialize next block links */
3681 n = ARR_LEN(blk_sched);
3682 for (i = 0; i < n; ++i) {
3683 ir_node *block = blk_sched[i];
3684 ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
3686 set_irn_link(block, prev);
3689 for (i = 0; i < n; ++i) {
3690 ir_node *block = blk_sched[i];
3691 gen_binary_block(block);
3694 be_gas_emit_function_epilog(entity);
3696 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
3700 void ia32_init_emitter(void)
3702 lc_opt_entry_t *be_grp;
3703 lc_opt_entry_t *ia32_grp;
3705 be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
3706 ia32_grp = lc_opt_get_grp(be_grp, "ia32");
3708 lc_opt_add_table(ia32_grp, ia32_emitter_options);
3712 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.emitter");