2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the ia32 node emitter.
23 * @author Christian Wuerdig, Matthias Braun
25 * Summary table for x86 floatingpoint compares:
26 * (remember effect of unordered on x86: ZF=1, PF=1, CF=1)
34 * pnc_Leg => NP (ordered)
56 #include "iredges_t.h"
60 #include "raw_bitset.h"
68 #include "be_dbgout.h"
69 #include "beemitter.h"
72 #include "be_dbgout.h"
74 #include "ia32_emitter.h"
75 #include "ia32_common_transform.h"
76 #include "gen_ia32_emitter.h"
77 #include "gen_ia32_regalloc_if.h"
78 #include "ia32_nodes_attr.h"
79 #include "ia32_new_nodes.h"
80 #include "ia32_architecture.h"
81 #include "bearch_ia32_t.h"
/* File-scope state of the ia32 emitter.
 * NOTE(review): the embedded original line numbers jump (83, 85, 87, ...),
 * so some lines of the original file are missing from this listing. */
83 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* length used for locally formatted strings (see get_unique_label) */
85 #define SNPRINTF_BUF_LEN 128
87 static const ia32_isa_t *isa;
/* label emitted by emit_ia32_GetEIP and referenced for PIC address
 * adjustment in ia32_emit_entity */
88 static char pic_base_label[128];
89 static ir_label_t exc_label_id;
/* presumably enables marking of spill/reload instructions in the output
 * — TODO confirm against full source */
90 static int mark_spill_reload = 0;
93 /** Return the next block in Block schedule */
94 static ir_node *get_prev_block_sched(const ir_node *block)
96 return (ir_node*)get_irn_link(block);
99 /** Checks if the current block is a fall-through target. */
/* NOTE(review): line numbers jump below — the return statements of both
 * functions are missing from this listing; verdicts cannot be read off. */
100 static int is_fallthrough(const ir_node *cfgpred)
104 if (!is_Proj(cfgpred))
106 pred = get_Proj_pred(cfgpred);
/* a SwitchJmp predecessor is treated specially here (jump-table target) */
107 if (is_ia32_SwitchJmp(pred))
114 * returns non-zero if the given block needs a label
115 * because of being a jump-target (and not a fall-through)
117 static int block_needs_label(const ir_node *block)
120 int n_cfgpreds = get_Block_n_cfgpreds(block);
/* blocks carrying an entity are handled first */
122 if (get_Block_entity(block) != NULL)
125 if (n_cfgpreds == 0) {
127 } else if (n_cfgpreds == 1) {
128 ir_node *cfgpred = get_Block_cfgpred(block, 0);
129 ir_node *cfgpred_block = get_nodes_block(cfgpred);
/* single predecessor scheduled directly before us that falls through:
 * no label needed */
131 if (get_prev_block_sched(block) == cfgpred_block
132 && is_fallthrough(cfgpred)) {
/**
 * Build a fresh label name by appending a counter value (never reused)
 * to the private prefix and @p prefix.
 *
 * @param buf     destination buffer
 * @param buflen  size of @p buf (output is truncated to fit)
 * @param prefix  label stem to append the counter to
 * @return @p buf
 */
static char *get_unique_label(char *buf, size_t buflen, const char *prefix)
{
	static unsigned long id = 0;
	++id;
	snprintf(buf, buflen, "%s%s%lu", be_gas_get_private_prefix(), prefix, id);
	return buf;
}
151 * Emit the name of the 8bit low register
/* NOTE(review): several be_emit_char lines (e.g. the '%' prefix and size
 * letter) appear to be missing from this listing — line numbers jump. */
153 static void emit_8bit_register(const arch_register_t *reg)
155 const char *reg_name = arch_register_get_name(reg);
/* only eax/ebx/ecx/edx have an 8bit alias */
156 assert(reg->index == REG_GP_EAX || reg->index == REG_GP_EBX
157 || reg->index == REG_GP_ECX || reg->index == REG_GP_EDX);
160 be_emit_char(reg_name[1]); /* get the basic name of the register */
165 * Emit the name of the 8bit high register
167 static void emit_8bit_register_high(const arch_register_t *reg)
169 const char *reg_name = arch_register_get_name(reg);
170 assert(reg->index == REG_GP_EAX || reg->index == REG_GP_EBX
171 || reg->index == REG_GP_ECX || reg->index == REG_GP_EDX);
174 be_emit_char(reg_name[1]); /* get the basic name of the register */
/* Emit the 16bit name of a GP register. */
178 static void emit_16bit_register(const arch_register_t *reg)
180 const char *reg_name = arch_register_get_name(reg);
183 be_emit_string(reg_name+1); /* skip the 'e' prefix of the 32bit names */
187 * emit a register, possible shortened by a mode
189 * @param reg the register
190 * @param mode the mode of the register or NULL for full register
192 static void emit_register(const arch_register_t *reg, const ir_mode *mode)
194 const char *reg_name;
/* with a mode given, dispatch on its bit size (switch head not visible) */
197 int size = get_mode_size_bits(mode);
199 case 8: emit_8bit_register(reg); return;
200 case 16: emit_16bit_register(reg); return;
202 assert(mode_is_float(mode) || size == 32);
205 reg_name = arch_register_get_name(reg);
208 be_emit_string(reg_name);
211 void ia32_emit_source_register(const ir_node *node, int pos)
213 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
215 emit_register(reg, NULL);
/* Emit an entity name plus TLS/PIC decorations.
 * NOTE(review): line numbers jump — the else branches and the character
 * emitted between entity and pic_base_label are missing from this listing. */
218 static void ia32_emit_entity(ir_entity *entity, int no_pic_adjust)
220 be_gas_emit_entity(entity);
/* thread-local entities need a TLS relocation suffix */
222 if (get_entity_owner(entity) == get_tls_type()) {
223 if (get_entity_visibility(entity) == ir_visibility_external) {
224 be_emit_cstring("@INDNTPOFF");
226 be_emit_cstring("@NTPOFF");
/* position-independent code: adjust relative to the PIC base label */
230 if (do_pic && !no_pic_adjust) {
232 be_emit_string(pic_base_label);
/* Emit an immediate (symconst and/or offset) without the AT&T '$' prefix. */
236 static void emit_ia32_Immediate_no_prefix(const ir_node *node)
238 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
240 if (attr->symconst != NULL) {
243 ia32_emit_entity(attr->symconst, attr->no_pic_adjust);
/* pure offsets print as hex, symconst+offset as signed addend */
245 if (attr->symconst == NULL || attr->offset != 0) {
246 if (attr->symconst != NULL) {
247 be_emit_irprintf("%+d", attr->offset);
249 be_emit_irprintf("0x%X", attr->offset);
/* Emit an immediate; presumably prefixes it with '$' (the prefix-emitting
 * line is not visible here) — TODO confirm against full source. */
254 static void emit_ia32_Immediate(const ir_node *node)
257 emit_ia32_Immediate_no_prefix(node);
273 void ia32_emit_8bit_high_source_register(const ir_node *node, int pos)
275 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
276 emit_8bit_register_high(reg);
/* Emit input @p pos as immediate or as 16bit register name.
 * NOTE(review): the early return after the immediate case is missing from
 * this listing (line numbers jump 284 -> 288). */
279 void ia32_emit_16bit_source_register_or_immediate(const ir_node *node, int pos)
281 const arch_register_t *reg;
282 const ir_node *in = get_irn_n(node, pos);
283 if (is_ia32_Immediate(in)) {
284 emit_ia32_Immediate(in);
288 reg = arch_get_irn_register_in(node, pos);
289 emit_16bit_register(reg);
292 void ia32_emit_dest_register(const ir_node *node, int pos)
294 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
296 emit_register(reg, NULL);
299 void ia32_emit_dest_register_size(const ir_node *node, int pos)
301 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
303 emit_register(reg, get_ia32_ls_mode(node));
306 void ia32_emit_8bit_dest_register(const ir_node *node, int pos)
308 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
310 emit_register(reg, mode_Bu);
/* Emit the x87 register name assigned to operand @p pos of @p node.
 * NOTE(review): throughout this group, interior lines ('{', '}', '%'
 * emission, early returns) are missing from this listing. */
313 void ia32_emit_x87_register(const ir_node *node, int pos)
315 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
319 be_emit_string(attr->x87[pos]->name);
/* Emit the gas operand-size suffix (b/w/l/ll) for an integer mode. */
322 static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
324 assert(mode_is_int(mode) || mode_is_reference(mode));
325 switch (get_mode_size_bits(mode)) {
326 case 8: be_emit_char('b'); return;
327 case 16: be_emit_char('w'); return;
328 case 32: be_emit_char('l'); return;
329 /* gas docu says q is the suffix but gcc, objdump and icc use ll
331 case 64: be_emit_cstring("ll"); return;
333 panic("Can't output mode_suffix for %+F", mode);
336 void ia32_emit_mode_suffix(const ir_node *node)
338 ir_mode *mode = get_ia32_ls_mode(node);
342 ia32_emit_mode_suffix_mode(mode);
/* Emit the x87 size suffix; only needed for address-mode operands. */
345 void ia32_emit_x87_mode_suffix(const ir_node *node)
349 /* we only need to emit the mode on address mode */
350 if (get_ia32_op_type(node) == ia32_Normal)
353 mode = get_ia32_ls_mode(node);
354 assert(mode != NULL);
356 if (mode_is_float(mode)) {
357 switch (get_mode_size_bits(mode)) {
358 case 32: be_emit_char('s'); return;
359 case 64: be_emit_char('l'); return;
360 /* long doubles have different sizes due to alignment on different
364 case 128: be_emit_char('t'); return;
367 assert(mode_is_int(mode) || mode_is_reference(mode));
368 switch (get_mode_size_bits(mode)) {
369 case 16: be_emit_char('s'); return;
370 case 32: be_emit_char('l'); return;
371 /* gas docu says q is the suffix but gcc, objdump and icc use ll
373 case 64: be_emit_cstring("ll"); return;
376 panic("Can't output mode_suffix for %+F", mode);
/* Map a float mode to the SSE suffix character; the 32/64-bit case lines
 * are not visible here. */
379 static char get_xmm_mode_suffix(ir_mode *mode)
381 assert(mode_is_float(mode));
382 switch (get_mode_size_bits(mode)) {
385 default: panic("Invalid XMM mode");
389 void ia32_emit_xmm_mode_suffix(const ir_node *node)
391 ir_mode *mode = get_ia32_ls_mode(node);
392 assert(mode != NULL);
394 be_emit_char(get_xmm_mode_suffix(mode));
/* NOTE(review): identical body to ia32_emit_xmm_mode_suffix above; the
 * "_s" variant's distinguishing code (if any) is not visible here. */
397 void ia32_emit_xmm_mode_suffix_s(const ir_node *node)
399 ir_mode *mode = get_ia32_ls_mode(node);
400 assert(mode != NULL);
401 be_emit_char(get_xmm_mode_suffix(mode));
/* Emit movs*/movz* extension suffix: s/z plus the source-size letter. */
404 void ia32_emit_extend_suffix(const ir_node *node)
406 ir_mode *mode = get_ia32_ls_mode(node);
407 if (get_mode_size_bits(mode) == 32)
409 be_emit_char(mode_is_signed(mode) ? 's' : 'z');
410 ia32_emit_mode_suffix_mode(mode);
426 * Returns the target block for a control flow node.
428 static ir_node *get_cfop_target_block(const ir_node *irn)
430 assert(get_irn_mode(irn) == mode_X);
431 return (ir_node*)get_irn_link(irn);
435 * Emits the target label for a control flow node.
437 static void ia32_emit_cfop_target(const ir_node *node)
439 ir_node *block = get_cfop_target_block(node);
440 be_gas_emit_block_name(block);
444 * Emit the suffix for a compare instruction.
/* NOTE(review): the 'switch (cc) {' head, closing brace and any break
 * before the panic are missing from this listing. Unordered-FP cases map
 * to the unsigned-style suffixes (b/ae/be/a), matching x86 flag behavior
 * described in the file header (unordered => ZF=PF=CF=1). */
446 static void ia32_emit_condition_code(ia32_condition_code_t cc)
449 case ia32_cc_overflow: be_emit_cstring("o"); return;
450 case ia32_cc_not_overflow: be_emit_cstring("no"); return;
451 case ia32_cc_float_below:
452 case ia32_cc_float_unordered_below:
453 case ia32_cc_below: be_emit_cstring("b"); return;
454 case ia32_cc_float_above_equal:
455 case ia32_cc_float_unordered_above_equal:
456 case ia32_cc_above_equal: be_emit_cstring("ae"); return;
457 case ia32_cc_float_equal:
458 case ia32_cc_equal: be_emit_cstring("e"); return;
459 case ia32_cc_float_not_equal:
460 case ia32_cc_not_equal: be_emit_cstring("ne"); return;
461 case ia32_cc_float_below_equal:
462 case ia32_cc_float_unordered_below_equal:
463 case ia32_cc_below_equal: be_emit_cstring("be"); return;
464 case ia32_cc_float_above:
465 case ia32_cc_float_unordered_above:
466 case ia32_cc_above: be_emit_cstring("a"); return;
467 case ia32_cc_sign: be_emit_cstring("s"); return;
468 case ia32_cc_not_sign: be_emit_cstring("ns"); return;
469 case ia32_cc_parity: be_emit_cstring("p"); return;
470 case ia32_cc_not_parity: be_emit_cstring("np"); return;
471 case ia32_cc_less: be_emit_cstring("l"); return;
472 case ia32_cc_greater_equal: be_emit_cstring("ge"); return;
473 case ia32_cc_less_equal: be_emit_cstring("le"); return;
474 case ia32_cc_greater: be_emit_cstring("g"); return;
/* composite masks have no single suffix and fall through to the panic */
475 case ia32_cc_float_parity_cases:
476 case ia32_cc_additional_float_cases:
479 panic("Invalid ia32 condition code");
/* Modifier flags parsed by ia32_emitf ('#', '*', '>', '<', 'l').
 * NOTE(review): the EMIT_NONE member and the 1U << 2 member (EMIT_LONG,
 * tested below via "mod & EMIT_LONG") are missing from this listing. */
482 typedef enum ia32_emit_mod_t {
484 EMIT_RESPECT_LS = 1U << 0,
485 EMIT_ALTERNATE_AM = 1U << 1,
487 EMIT_HIGH_REG = 1U << 3,
488 EMIT_LOW_REG = 1U << 4
490 ENUM_BITSET(ia32_emit_mod_t)
493 * Emits address mode.
/* NOTE(review): interior lines are missing throughout (braces, the '('
 * and ')' emission around base/index, the else branches). */
495 void ia32_emit_am(const ir_node *node)
497 ir_entity *ent = get_ia32_am_sc(node);
498 int offs = get_ia32_am_offs_int(node);
499 ir_node *base = get_irn_n(node, n_ia32_base);
500 int has_base = !is_ia32_NoReg_GP(base);
501 ir_node *idx = get_irn_n(node, n_ia32_index);
502 int has_index = !is_ia32_NoReg_GP(idx);
504 /* just to be sure... */
505 assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);
/* thread-local addresses go through the %gs segment */
507 if (get_ia32_am_tls_segment(node))
508 be_emit_cstring("%gs:");
/* emit the symconst part, if any */
512 const ia32_attr_t *attr = get_ia32_attr_const(node);
513 if (is_ia32_am_sc_sign(node))
515 ia32_emit_entity(ent, attr->data.am_sc_no_pic_adjust);
518 /* also handle special case if nothing is set */
519 if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
521 be_emit_irprintf("%+d", offs);
523 be_emit_irprintf("%d", offs);
527 if (has_base || has_index) {
/* emit base register */
532 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_base);
533 emit_register(reg, NULL);
536 /* emit index + scale */
538 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_index);
541 emit_register(reg, NULL);
543 scale = get_ia32_am_scale(node);
/* scale is stored as a shift amount; print the factor 1/2/4/8 */
545 be_emit_irprintf(",%d", 1 << scale);
/* printf-style instruction emitter; format directives documented below.
 * NOTE(review): this function is heavily truncated in this listing — the
 * while loop head, va_start/va_end, many case labels and closing braces
 * are missing. Comments below only describe what is visible. */
553 * fmt parameter output
554 * ---- ---------------------- ---------------------------------------------
556 * %AM <node> address mode of the node
557 * %AR const arch_register_t* address mode of the node or register
558 * %ASx <node> address mode of the node or source register x
559 * %Dx <node> destination register x
560 * %I <node> immediate of the node
561 * %L <node> control flow target of the node
562 * %M <node> mode suffix of the node
563 * %P int condition code
564 * %R const arch_register_t* register
565 * %Sx <node> source register x
566 * %s const char* string
567 * %u unsigned int unsigned int
568 * %d signed int signed int
571 * # modifier for %ASx, %D, %R, and %S uses ls mode of node to alter register width
572 * * modifier does not prefix immediates with $, but AM with *
573 * l modifier for %lu and %ld
574 * > modifier to output high 8bit register (ah, bh)
575 * < modifier to output low 8bit register (al, bl)
577 static void ia32_emitf(const ir_node *node, const char *fmt, ...)
583 const char *start = fmt;
584 ia32_emit_mod_t mod = EMIT_NONE;
/* copy plain text up to the next directive / newline */
586 while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
589 be_emit_string_len(start, fmt - start);
593 be_emit_finish_line_gas(node);
/* parse modifier characters following '%' */
606 case '*': mod |= EMIT_ALTERNATE_AM; break;
607 case '#': mod |= EMIT_RESPECT_LS; break;
608 case 'l': mod |= EMIT_LONG; break;
609 case '>': mod |= EMIT_HIGH_REG; break;
610 case '<': mod |= EMIT_LOW_REG; break;
619 arch_register_t const *reg;
/* 'A' directives: address mode variants */
630 if (mod & EMIT_ALTERNATE_AM)
636 reg = va_arg(ap, const arch_register_t*);
637 if (get_ia32_op_type(node) == ia32_AddrModeS) {
644 if (get_ia32_op_type(node) == ia32_AddrModeS) {
648 assert(get_ia32_op_type(node) == ia32_Normal);
652 default: goto unknown;
/* 'D': destination register, digit selects the out position */
658 if (*fmt < '0' || '9' <= *fmt)
660 reg = arch_get_irn_register_out(node, *fmt++ - '0');
/* 'I': immediate; '$' prefix suppressed by the '*' modifier */
666 if (!(mod & EMIT_ALTERNATE_AM))
668 emit_ia32_Immediate_no_prefix(imm);
672 ia32_emit_cfop_target(node);
676 ia32_emit_mode_suffix_mode(get_ia32_ls_mode(node));
680 ia32_condition_code_t cc = va_arg(ap, ia32_condition_code_t);
681 ia32_emit_condition_code(cc);
686 reg = va_arg(ap, const arch_register_t*);
688 if (mod & EMIT_ALTERNATE_AM)
690 if (mod & EMIT_HIGH_REG) {
691 emit_8bit_register_high(reg);
692 } else if (mod & EMIT_LOW_REG) {
693 emit_8bit_register(reg);
695 emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);
/* 'S': source operand, digit selects the in position */
703 if (*fmt < '0' || '9' <= *fmt)
707 imm = get_irn_n(node, pos);
708 if (is_ia32_Immediate(imm)) {
711 reg = arch_get_irn_register_in(node, pos);
717 const char *str = va_arg(ap, const char*);
723 if (mod & EMIT_LONG) {
724 unsigned long num = va_arg(ap, unsigned long);
725 be_emit_irprintf("%lu", num);
727 unsigned num = va_arg(ap, unsigned);
728 be_emit_irprintf("%u", num);
733 if (mod & EMIT_LONG) {
734 long num = va_arg(ap, long);
735 be_emit_irprintf("%ld", num);
737 int num = va_arg(ap, int);
738 be_emit_irprintf("%d", num);
744 panic("unknown format conversion in ia32_emitf()");
752 * Emits registers and/or address mode of a binary operation.
754 void ia32_emit_binop(const ir_node *node)
756 if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
757 ia32_emitf(node, "%#S4, %#AS3");
759 ia32_emitf(node, "%#AS4, %#S3");
764 * Emits registers and/or address mode of a binary operation.
/* NOTE(review): the switch cases (ia32_Normal / address modes), several
 * branches and the default label are missing from this listing. */
766 void ia32_emit_x87_binop(const ir_node *node)
768 switch (get_ia32_op_type(node)) {
/* normal case: operands come from the virtual x87 register assignment */
771 const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
772 const arch_register_t *in1 = x87_attr->x87[0];
773 const arch_register_t *in = x87_attr->x87[1];
774 const arch_register_t *out = x87_attr->x87[2];
778 } else if (out == in) {
783 be_emit_string(arch_register_get_name(in));
784 be_emit_cstring(", %");
785 be_emit_string(arch_register_get_name(out));
793 assert(0 && "unsupported op type");
798 * Emits registers and/or address mode of a unary operation.
/* NOTE(review): the code selecting 'fmt' for the register vs. address
 * mode case is missing from this listing. */
800 void ia32_emit_unop(const ir_node *node, int pos)
804 ia32_emitf(node, fmt);
807 static void emit_ia32_IMul(const ir_node *node)
809 ir_node *left = get_irn_n(node, n_ia32_IMul_left);
810 const arch_register_t *out_reg = arch_get_irn_register_out(node, pn_ia32_IMul_res);
812 /* do we need the 3-address form? */
813 if (is_ia32_NoReg_GP(left) ||
814 arch_get_irn_register_in(node, n_ia32_IMul_left) != out_reg) {
815 ia32_emitf(node, "\timul%M %#S4, %#AS3, %#D0\n");
817 ia32_emitf(node, "\timul%M %#AS4, %#S3\n");
822 * walks up a tree of copies/perms/spills/reloads to find the original value
823 * that is moved around
/* NOTE(review): several return statements (visited case, fallback cases,
 * Phi loop tail) are missing from this listing. Uses the irg visited flag
 * for cycle avoidance; caller resets it (see determine_final_cc). */
825 static ir_node *find_original_value(ir_node *node)
827 if (irn_visited(node))
830 mark_irn_visited(node);
831 if (be_is_Copy(node)) {
832 return find_original_value(be_get_Copy_op(node));
833 } else if (be_is_CopyKeep(node)) {
834 return find_original_value(be_get_CopyKeep_op(node));
835 } else if (is_Proj(node)) {
836 ir_node *pred = get_Proj_pred(node);
837 if (be_is_Perm(pred)) {
838 return find_original_value(get_irn_n(pred, get_Proj_proj(node)));
839 } else if (be_is_MemPerm(pred)) {
/* MemPerm: +1 skips the frame pointer input — TODO confirm */
840 return find_original_value(get_irn_n(pred, get_Proj_proj(node) + 1));
841 } else if (is_ia32_Load(pred)) {
/* a reload: follow the memory chain back to the matching store */
842 return find_original_value(get_irn_n(pred, n_ia32_Load_mem));
843 } else if (is_ia32_Store(pred)) {
844 return find_original_value(get_irn_n(pred, n_ia32_Store_val));
848 } else if (is_Phi(node)) {
850 arity = get_irn_arity(node);
851 for (i = 0; i < arity; ++i) {
852 ir_node *in = get_irn_n(node, i);
853 ir_node *res = find_original_value(in);
/* Determine the effective condition code for @p node, taking Sahf-based
 * float compares and permuted operands into account.
 * NOTE(review): the final 'return cc;' is not visible in this listing. */
864 static int determine_final_cc(const ir_node *node, int flags_pos, int cc)
866 ir_node *flags = get_irn_n(node, flags_pos);
867 const ia32_attr_t *flags_attr;
868 flags = skip_Proj(flags);
870 if (is_ia32_Sahf(flags)) {
871 ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
872 if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
873 || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
/* flags value was moved around: trace it back to the real compare */
874 inc_irg_visited(current_ir_graph);
875 cmp = find_original_value(cmp);
877 assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
878 || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));
881 flags_attr = get_ia32_attr_const(cmp);
883 flags_attr = get_ia32_attr_const(flags);
/* swapped compare operands invert the condition */
886 if (flags_attr->data.ins_permuted)
887 cc = ia32_invert_condition_code(cc);
891 void ia32_emit_cmp_suffix_node(const ir_node *node, int flags_pos)
893 ia32_condition_code_t cc = get_ia32_condcode(node);
894 cc = determine_final_cc(node, flags_pos, cc);
896 ia32_emit_condition_code(cc);
900 * Emits an exception label for a given node.
902 static void ia32_emit_exc_label(const ir_node *node)
904 be_emit_string(be_gas_insn_label_prefix());
905 be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
909 * Returns the Proj with projection number proj and NOT mode_M
/* NOTE(review): the loop tail (continue / return src) and the final
 * return are missing from this listing. */
911 static ir_node *get_proj(const ir_node *node, long proj)
913 const ir_edge_t *edge;
916 assert(get_irn_mode(node) == mode_T && "expected mode_T node");
918 foreach_out_edge(node, edge) {
919 src = get_edge_src_irn(edge);
921 assert(is_Proj(src) && "Proj expected");
/* memory Projs are skipped */
922 if (get_irn_mode(src) == mode_M)
925 if (get_Proj_proj(src) == proj)
931 static int can_be_fallthrough(const ir_node *node)
933 ir_node *target_block = get_cfop_target_block(node);
934 ir_node *block = get_nodes_block(node);
935 return get_prev_block_sched(target_block) == block;
939 * Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
/* NOTE(review): several lines (the proj swap's second assignment, else
 * keywords, closing braces) are missing from this listing. */
941 static void emit_ia32_Jcc(const ir_node *node)
943 int need_parity_label = 0;
944 ia32_condition_code_t cc = get_ia32_condcode(node);
945 const ir_node *proj_true;
946 const ir_node *proj_false;
948 cc = determine_final_cc(node, 0, cc);
951 proj_true = get_proj(node, pn_ia32_Jcc_true);
952 assert(proj_true && "Jcc without true Proj");
954 proj_false = get_proj(node, pn_ia32_Jcc_false);
955 assert(proj_false && "Jcc without false Proj");
957 if (can_be_fallthrough(proj_true)) {
958 /* exchange both proj's so the second one can be omitted */
959 const ir_node *t = proj_true;
961 proj_true = proj_false;
963 cc = ia32_negate_condition_code(cc);
966 if (cc & ia32_cc_float_parity_cases) {
967 /* Some floating point comparisons require a test of the parity flag,
968 * which indicates that the result is unordered */
969 if (cc & ia32_cc_negated) {
970 ia32_emitf(proj_true, "\tjp %L\n");
972 /* we need a local label if the false proj is a fallthrough
973 * as the falseblock might have no label emitted then */
974 if (can_be_fallthrough(proj_false)) {
975 need_parity_label = 1;
976 ia32_emitf(proj_false, "\tjp 1f\n");
978 ia32_emitf(proj_false, "\tjp %L\n");
982 ia32_emitf(proj_true, "\tj%P %L\n", cc);
983 if (need_parity_label) {
/* gas local label matching the "jp 1f" above */
984 ia32_emitf(NULL, "1:\n");
987 /* the second Proj might be a fallthrough */
988 if (can_be_fallthrough(proj_false)) {
989 ia32_emitf(proj_false, "\t/* fallthrough to %L */\n");
991 ia32_emitf(proj_false, "\tjmp %L\n");
996 * Emits an ia32 Setcc. This is mostly easy but some floating point compares
/* NOTE(review): else keywords / early return lines are missing from this
 * listing. Parity cases need a second setcc on the high 8bit register,
 * combined with or/and into the low 8bit result. */
999 static void emit_ia32_Setcc(const ir_node *node)
1001 const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);
1003 ia32_condition_code_t cc = get_ia32_condcode(node);
1004 cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
1005 if (cc & ia32_cc_float_parity_cases) {
1006 if (cc & ia32_cc_negated) {
1007 ia32_emitf(node, "\tset%P %<R\n", cc, dreg);
1008 ia32_emitf(node, "\tsetp %>R\n", dreg);
1009 ia32_emitf(node, "\torb %>R, %<R\n", dreg, dreg);
1011 ia32_emitf(node, "\tset%P %<R\n", cc, dreg);
1012 ia32_emitf(node, "\tsetnp %>R\n", dreg);
1013 ia32_emitf(node, "\tandb %>R, %<R\n", dreg, dreg);
1016 ia32_emitf(node, "\tset%P %#R\n", cc, dreg);
/* Emits a conditional move.
 * NOTE(review): several branch bodies (the in_true/in_false swap, else
 * keywords, closing braces) are missing from this listing. */
1020 static void emit_ia32_CMovcc(const ir_node *node)
1022 const ia32_attr_t *attr = get_ia32_attr_const(node);
1023 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_res);
1024 ia32_condition_code_t cc = get_ia32_condcode(node);
1025 const arch_register_t *in_true;
1026 const arch_register_t *in_false;
1028 cc = determine_final_cc(node, n_ia32_CMovcc_eflags, cc);
1029 /* although you can't set ins_permuted in the constructor it might still
1030 * be set by memory operand folding
1031 * Permuting inputs of a cmov means the condition is negated!
1033 if (attr->data.ins_permuted)
1034 cc = ia32_negate_condition_code(cc);
1036 in_true = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
1037 in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));
1039 /* should be same constraint fullfilled? */
1040 if (out == in_false) {
1041 /* yes -> nothing to do */
1042 } else if (out == in_true) {
1043 const arch_register_t *tmp;
1045 assert(get_ia32_op_type(node) == ia32_Normal);
1047 cc = ia32_negate_condition_code(cc);
/* out is distinct from both inputs: load the false value first */
1054 ia32_emitf(node, "\tmovl %R, %R\n", in_false, out);
1057 if (cc & ia32_cc_float_parity_cases) {
1058 panic("CMov with floatingpoint compare/parity not supported yet");
1061 ia32_emitf(node, "\tcmov%P %#AR, %#R\n", cc, in_true, out);
1065 * Emits code for a SwitchJmp
1067 static void emit_ia32_SwitchJmp(const ir_node *node)
1069 ir_entity *jump_table = get_ia32_am_sc(node);
1070 const ir_switch_table *table = get_ia32_switch_table(node);
1072 ia32_emitf(node, "\tjmp %*AM\n");
1073 be_emit_jump_table(node, table, jump_table, get_cfop_target_block);
1077 * Emits code for a unconditional jump.
1079 static void emit_ia32_Jmp(const ir_node *node)
1081 /* we have a block schedule */
1082 if (can_be_fallthrough(node)) {
1083 ia32_emitf(node, "\t/* fallthrough to %L */\n");
1085 ia32_emitf(node, "\tjmp %L\n");
1090 * Emit an inline assembler operand.
1092 * @param node the ia32_ASM node
1093 * @param s points to the operand (a %c)
1095 * @return pointer to the first char in s NOT in the current operand
/* NOTE(review): this function is heavily truncated in this listing —
 * the modifier switch, several returns and closing braces are missing. */
1097 static const char* emit_asm_operand(const ir_node *node, const char *s)
1099 const ia32_attr_t *ia32_attr = get_ia32_attr_const(node);
1100 const ia32_asm_attr_t *attr = CONST_CAST_IA32_ATTR(ia32_asm_attr_t,
1102 const arch_register_t *reg;
1103 const ia32_asm_reg_t *asm_regs = attr->register_map;
1104 const ia32_asm_reg_t *asm_reg;
1113 /* parse modifiers */
1116 ir_fprintf(stderr, "Warning: asm text (%+F) ends with %%\n", node);
1141 "Warning: asm text (%+F) contains unknown modifier '%c' for asm op\n",
/* parse the operand number */
1148 if (sscanf(s, "%d%n", &num, &p) != 1) {
1149 ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",
1156 if (num < 0 || ARR_LEN(asm_regs) <= (size_t)num) {
1158 "Error: Custom assembler references invalid input/output (%+F)\n",
1162 asm_reg = & asm_regs[num];
1163 assert(asm_reg->valid);
/* resolve operand to a register (or immediate for inputs) */
1166 if (asm_reg->use_input == 0) {
1167 reg = arch_get_irn_register_out(node, asm_reg->inout_pos);
1169 ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
1171 /* might be an immediate value */
1172 if (is_ia32_Immediate(pred)) {
1173 emit_ia32_Immediate(pred);
1176 reg = arch_get_irn_register_in(node, asm_reg->inout_pos);
1180 "Warning: no register assigned for %d asm op (%+F)\n",
1185 if (asm_reg->memory) {
/* apply size modifiers (b/h/w) to the register name */
1190 if (modifier != 0) {
1193 emit_8bit_register(reg);
1196 emit_8bit_register_high(reg);
1199 emit_16bit_register(reg);
1202 panic("Invalid asm op modifier");
1205 emit_register(reg, asm_reg->memory ? mode_Iu : asm_reg->mode);
1208 if (asm_reg->memory) {
1216 * Emits code for an ASM pseudo op.
/* NOTE(review): the character-copy loop around emit_asm_operand is
 * missing from this listing. */
1218 static void emit_ia32_Asm(const ir_node *node)
1220 const void *gen_attr = get_irn_generic_attr_const(node);
1221 const ia32_asm_attr_t *attr
1222 = CONST_CAST_IA32_ATTR(ia32_asm_attr_t, gen_attr);
1223 ident *asm_text = attr->asm_text;
1224 const char *s = get_id_str(asm_text);
/* gas markers delimiting hand-written assembler */
1226 ia32_emitf(node, "#APP\t\n");
1233 s = emit_asm_operand(node, s);
1239 ia32_emitf(NULL, "\n#NO_APP\n");
1244 * Emit movsb/w instructions to make mov count divideable by 4
/* NOTE(review): the conditions testing the low bits of @p size are
 * missing from this listing. */
1246 static void emit_CopyB_prolog(unsigned size)
1249 ia32_emitf(NULL, "\tmovsb\n");
1251 ia32_emitf(NULL, "\tmovsw\n");
1255 * Emit rep movsd instruction for memcopy.
1257 static void emit_ia32_CopyB(const ir_node *node)
1259 unsigned size = get_ia32_copyb_size(node);
1261 emit_CopyB_prolog(size);
1262 ia32_emitf(node, "\trep movsd\n");
1266 * Emits unrolled memcopy.
/* NOTE(review): the loop emitting one movsd per 4 bytes is missing from
 * this listing — only the prolog and a single movsd are visible. */
1268 static void emit_ia32_CopyB_i(const ir_node *node)
1270 unsigned size = get_ia32_copyb_size(node);
1272 emit_CopyB_prolog(size);
1276 ia32_emitf(NULL, "\tmovsd\n");
1282 * Emit code for conversions (I, FP), (FP, I) and (FP, FP).
1284 static void emit_ia32_Conv_with_FP(const ir_node *node, const char* conv_f,
1287 ir_mode *ls_mode = get_ia32_ls_mode(node);
1288 int ls_bits = get_mode_size_bits(ls_mode);
1289 const char *conv = ls_bits == 32 ? conv_f : conv_d;
1291 ia32_emitf(node, "\tcvt%s %AS3, %D0\n", conv);
1294 static void emit_ia32_Conv_I2FP(const ir_node *node)
1296 emit_ia32_Conv_with_FP(node, "si2ss", "si2sd");
1299 static void emit_ia32_Conv_FP2I(const ir_node *node)
1301 emit_ia32_Conv_with_FP(node, "ss2si", "sd2si");
1304 static void emit_ia32_Conv_FP2FP(const ir_node *node)
1306 emit_ia32_Conv_with_FP(node, "sd2ss", "ss2sd");
1310 * Emits code for an Int conversion.
1312 static void emit_ia32_Conv_I2I(const ir_node *node)
1314 ir_mode *smaller_mode = get_ia32_ls_mode(node);
1315 int signed_mode = mode_is_signed(smaller_mode);
1316 const char *sign_suffix;
1318 assert(!mode_is_float(smaller_mode));
1320 sign_suffix = signed_mode ? "s" : "z";
1321 ia32_emitf(node, "\tmov%s%Ml %#AS3, %D0\n", sign_suffix);
1327 static void emit_ia32_Call(const ir_node *node)
1329 /* Special case: Call must not have its immediates prefixed by $, instead
1330 * address mode is prefixed by *. */
1331 ia32_emitf(node, "\tcall %*AS3\n");
1336 * Emits code to increase stack pointer.
/* NOTE(review): the zero-offset early return and the if/else keywords
 * selecting sub vs. add are missing from this listing. */
1338 static void emit_be_IncSP(const ir_node *node)
1340 int offs = be_get_IncSP_offset(node);
/* positive offset grows the frame (subl), negative shrinks it (addl) */
1346 ia32_emitf(node, "\tsubl $%u, %D0\n", offs);
1348 ia32_emitf(node, "\taddl $%u, %D0\n", -offs);
1353 * Emits code for Copy/CopyKeep.
/* NOTE(review): the in == out early-return check is missing from this
 * listing (line numbers jump 1358 -> 1363). */
1355 static void Copy_emitter(const ir_node *node, const ir_node *op)
1357 const arch_register_t *in = arch_get_irn_register(op);
1358 const arch_register_t *out = arch_get_irn_register(node);
1363 /* copies of vf nodes aren't real... */
1364 if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
1367 ia32_emitf(node, "\tmovl %R, %R\n", in, out);
1370 static void emit_be_Copy(const ir_node *node)
1372 Copy_emitter(node, be_get_Copy_op(node));
1375 static void emit_be_CopyKeep(const ir_node *node)
1377 Copy_emitter(node, be_get_CopyKeep_op(node));
1381 * Emits code for exchange.
/* NOTE(review): the bodies of the vfp and st cases are missing from this
 * listing. xmm swap uses the classic triple-xor trick (no spare reg). */
1383 static void emit_be_Perm(const ir_node *node)
1385 const arch_register_t *in0, *in1;
1386 const arch_register_class_t *cls0, *cls1;
1388 in0 = arch_get_irn_register(get_irn_n(node, 0));
1389 in1 = arch_get_irn_register(get_irn_n(node, 1));
1391 cls0 = arch_register_get_class(in0);
1392 cls1 = arch_register_get_class(in1);
1394 assert(cls0 == cls1 && "Register class mismatch at Perm");
1396 if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
1397 ia32_emitf(node, "\txchg %R, %R\n", in1, in0);
1398 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
1399 ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
1400 ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
1401 ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
1402 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
1404 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
1407 panic("unexpected register class in be_Perm (%+F)", node);
1412 * Emits code for Constant loading.
1414 static void emit_ia32_Const(const ir_node *node)
1416 ia32_emitf(node, "\tmovl %I, %D0\n");
1419 /* helper function for emit_ia32_Minus64Bit */
1420 static void emit_mov(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1422 ia32_emitf(node, "\tmovl %R, %R\n", src, dst);
1425 /* helper function for emit_ia32_Minus64Bit */
1426 static void emit_neg(const ir_node* node, const arch_register_t *reg)
1428 ia32_emitf(node, "\tnegl %R\n", reg);
1431 /* helper function for emit_ia32_Minus64Bit */
1432 static void emit_sbb0(const ir_node* node, const arch_register_t *reg)
1434 ia32_emitf(node, "\tsbbl $0, %R\n", reg);
1437 /* helper function for emit_ia32_Minus64Bit */
1438 static void emit_sbb(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1440 ia32_emitf(node, "\tsbbl %R, %R\n", src, dst);
1443 /* helper function for emit_ia32_Minus64Bit */
1444 static void emit_xchg(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1446 ia32_emitf(node, "\txchgl %R, %R\n", src, dst);
1449 /* helper function for emit_ia32_Minus64Bit */
1450 static void emit_zero(const ir_node* node, const arch_register_t *reg)
1452 ia32_emitf(node, "\txorl %R, %R\n", reg, reg);
/* Emits a 64bit negation spread over a lo/hi register pair, moving the
 * inputs into the output registers first without clobbering them.
 * NOTE(review): the labels/gotos joining the move cases to the two
 * negation sequences at the bottom are missing from this listing. */
1455 static void emit_ia32_Minus64Bit(const ir_node *node)
1457 const arch_register_t *in_lo = arch_get_irn_register_in(node, 0);
1458 const arch_register_t *in_hi = arch_get_irn_register_in(node, 1);
1459 const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
1460 const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);
1462 if (out_lo == in_lo) {
1463 if (out_hi != in_hi) {
1464 /* a -> a, b -> d */
1467 /* a -> a, b -> b */
1470 } else if (out_lo == in_hi) {
1471 if (out_hi == in_lo) {
1472 /* a -> b, b -> a */
1473 emit_xchg(node, in_lo, in_hi);
1476 /* a -> b, b -> d */
1477 emit_mov(node, in_hi, out_hi);
1478 emit_mov(node, in_lo, out_lo);
1482 if (out_hi == in_lo) {
1483 /* a -> c, b -> a */
1484 emit_mov(node, in_lo, out_lo);
1486 } else if (out_hi == in_hi) {
1487 /* a -> c, b -> b */
1488 emit_mov(node, in_lo, out_lo);
1491 /* a -> c, b -> d */
1492 emit_mov(node, in_lo, out_lo);
/* variant 1: neg hi, neg lo, sbb $0 into hi */
1498 emit_neg( node, out_hi);
1499 emit_neg( node, out_lo);
1500 emit_sbb0(node, out_hi);
/* variant 2: zero hi, neg lo, sbb in_hi into hi */
1504 emit_zero(node, out_hi);
1505 emit_neg( node, out_lo);
1506 emit_sbb( node, in_hi, out_hi);
/*
 * Emit the classic call/pop sequence that loads the current instruction
 * pointer into the destination register; used as the PIC base address.
 */
1509 static void emit_ia32_GetEIP(const ir_node *node)
1511 ia32_emitf(node, "\tcall %s\n", pic_base_label);
1512 ia32_emitf(NULL, "%s:\n", pic_base_label);
1513 ia32_emitf(node, "\tpopl %D0\n");
/*
 * Emit a small loop that follows attr->count frame pointers upwards:
 * %D0 starts at %S0 and is re-loaded from memory each iteration while
 * the counter in %S1 is decremented until it reaches zero.
 */
1516 static void emit_ia32_ClimbFrame(const ir_node *node)
1518 const ia32_climbframe_attr_t *attr = get_ia32_climbframe_attr_const(node);
1520 ia32_emitf(node, "\tmovl %S0, %D0\n");
1521 ia32_emitf(node, "\tmovl $%u, %S1\n", attr->count);
/* loop head label (named after this node's block) */
1522 be_gas_emit_block_name(node);
1523 be_emit_cstring(":\n");
1524 be_emit_write_line();
1525 ia32_emitf(node, "\tmovl (%D0), %D0\n");
1526 ia32_emitf(node, "\tdec %S1\n");
1527 be_emit_cstring("\tjnz ");
1528 be_gas_emit_block_name(node);
1529 be_emit_finish_line_gas(node);
/* Emit a function return; use "ret $pop" when caller arguments must be
 * popped from the stack by the callee. */
1532 static void emit_be_Return(const ir_node *node)
1534 unsigned pop = be_Return_get_pop(node);
1536 if (pop > 0 || be_Return_get_emit_pop(node)) {
1537 ia32_emitf(node, "\tret $%u\n", pop);
1539 ia32_emitf(node, "\tret\n");
1543 static void emit_Nothing(const ir_node *node)
1550 * Enters the emitter functions for handled nodes into the generic
1551 * pointer of an opcode.
1553 static void ia32_register_emitters(void)
/* Convenience macros mapping opcodes to their emit functions:
 * IA32_EMIT2(a,b): ia32 op a is emitted by emit_ia32_b
 * IA32_EMIT(a):    ia32 op a is emitted by emit_ia32_a
 * EMIT/IGN:        plain firm ops; BE_EMIT/BE_IGN: backend (be) ops */
1555 #define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
1556 #define IA32_EMIT(a) IA32_EMIT2(a,a)
1557 #define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
1558 #define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
1559 #define BE_EMIT(a) op_be_##a->ops.generic = (op_func)emit_be_##a
1560 #define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
1562 /* first clear the generic function pointer for all ops */
1563 clear_irp_opcodes_generic_func();
1565 /* register all emitter functions defined in spec */
1566 ia32_register_spec_emitters();
1568 /* other ia32 emitter functions */
1569 IA32_EMIT2(Conv_I2I8Bit, Conv_I2I)
1574 IA32_EMIT(Conv_FP2FP);
1575 IA32_EMIT(Conv_FP2I);
1576 IA32_EMIT(Conv_I2FP);
1577 IA32_EMIT(Conv_I2I);
1584 IA32_EMIT(Minus64Bit);
1585 IA32_EMIT(SwitchJmp);
1586 IA32_EMIT(ClimbFrame);
1589 /* benode emitter */
/* Signature of a per-opcode emit callback. */
1609 typedef void (*emit_func_ptr) (const ir_node *);
1612 * Assign and emit an exception label if the current instruction can fail.
1614 static void ia32_assign_exc_label(ir_node *node)
1616 /* assign a new ID to the instruction */
1617 set_ia32_exc_label_id(node, ++exc_label_id);
/* print the label itself, followed by a comment naming the handler block */
1619 ia32_emit_exc_label(node);
1621 be_emit_pad_comment();
1622 be_emit_cstring("/* exception to Block ");
1623 ia32_emit_cfop_target(node);
1624 be_emit_cstring(" */\n");
1625 be_emit_write_line();
1629 * Emits code for a node.
1631 static void ia32_emit_node(ir_node *node)
1633 ir_op *op = get_irn_op(node);
1635 DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
1637 if (is_ia32_irn(node)) {
1638 if (get_ia32_exc_label(node)) {
1639 /* emit the exception label of this instruction */
1640 ia32_assign_exc_label(node);
/* Optionally tag spills/reloads/remats with distinctive no-op xchg
 * instructions so they can be spotted in instruction traces. */
1642 if (mark_spill_reload) {
1643 if (is_ia32_is_spill(node)) {
1644 ia32_emitf(NULL, "\txchg %ebx, %ebx /* spill mark */\n");
1646 if (is_ia32_is_reload(node)) {
1647 ia32_emitf(NULL, "\txchg %edx, %edx /* reload mark */\n");
1649 if (is_ia32_is_remat(node)) {
1650 ia32_emitf(NULL, "\txchg %ecx, %ecx /* remat mark */\n");
/* dispatch through the per-opcode generic function pointer */
1654 if (op->ops.generic) {
1655 emit_func_ptr func = (emit_func_ptr) op->ops.generic;
1657 be_dbg_set_dbg_info(get_irn_dbg_info(node));
1662 ir_fprintf(stderr, "Error: No emit handler for node %+F (%+G, graph %+F)\n", node, node, current_ir_graph);
1668 * Emits gas alignment directives
1670 static void ia32_emit_alignment(unsigned align, unsigned skip)
1672 ia32_emitf(NULL, "\t.p2align %u,,%u\n", align, skip);
1676 * Emits gas alignment directives for Labels depended on cpu architecture.
1678 static void ia32_emit_align_label(void)
1680 unsigned align = ia32_cg_config.label_alignment;
1681 unsigned maximum_skip = ia32_cg_config.label_alignment_max_skip;
1682 ia32_emit_alignment(align, maximum_skip);
1686 * Test whether a block should be aligned.
1687 * For cpus in the P4/Athlon class it is useful to align jump labels to
1688 * 16 bytes. However we should only do that if the alignment nops before the
1689 * label aren't executed more often than we have jumps to the label.
1691 static int should_align_block(const ir_node *block)
/* DELTA: threshold below which an execution frequency counts as zero */
1693 static const double DELTA = .0001;
1694 ir_graph *irg = get_irn_irg(block);
1695 ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
1696 ir_node *prev = get_prev_block_sched(block);
1698 double prev_freq = 0; /**< execfreq of the fallthrough block */
1699 double jmp_freq = 0; /**< execfreq of all non-fallthrough blocks */
/* without frequency info or with alignment disabled, never align */
1702 if (exec_freq == NULL)
1704 if (ia32_cg_config.label_alignment_factor <= 0)
1707 block_freq = get_block_execfreq(exec_freq, block);
1708 if (block_freq < DELTA)
/* split predecessor frequency into fallthrough vs. jump edges */
1711 n_cfgpreds = get_Block_n_cfgpreds(block);
1712 for (i = 0; i < n_cfgpreds; ++i) {
1713 const ir_node *pred = get_Block_cfgpred_block(block, i);
1714 double pred_freq = get_block_execfreq(exec_freq, pred);
1717 prev_freq += pred_freq;
1719 jmp_freq += pred_freq;
1723 if (prev_freq < DELTA && !(jmp_freq < DELTA))
/* align when jumps dominate the fallthrough by the configured factor */
1726 jmp_freq /= prev_freq;
1728 return jmp_freq > ia32_cg_config.label_alignment_factor;
1732 * Emit the block header for a block.
1734 * @param block the block
1735 * @param prev_block the previous block
1737 static void ia32_emit_block_header(ir_node *block)
1739 ir_graph *irg = current_ir_graph;
1740 int need_label = block_needs_label(block);
1741 ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
/* the end block carries no code, hence no header */
1744 if (block == get_irg_end_block(irg))
1747 if (ia32_cg_config.label_alignment > 0) {
1748 /* align the current block if:
1749 * a) if should be aligned due to its execution frequency
1750 * b) there is no fall-through here
1752 if (should_align_block(block)) {
1753 ia32_emit_align_label();
1755 /* if the predecessor block has no fall-through,
1756 we can always align the label. */
1758 int has_fallthrough = 0;
1760 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1761 ir_node *cfg_pred = get_Block_cfgpred(block, i);
1762 if (can_be_fallthrough(cfg_pred)) {
1763 has_fallthrough = 1;
1768 if (!has_fallthrough)
1769 ia32_emit_align_label();
1774 be_gas_emit_block_name(block);
1777 be_emit_pad_comment();
1778 be_emit_cstring(" /* ");
1780 be_emit_cstring("\t/* ");
1781 be_gas_emit_block_name(block);
1782 be_emit_cstring(": ");
/* comment: list predecessor block numbers and execution frequency */
1785 be_emit_cstring("preds:");
1787 /* emit list of pred blocks in comment */
1788 arity = get_irn_arity(block);
1790 be_emit_cstring(" none");
1793 for (i = 0; i < arity; ++i) {
1794 ir_node *predblock = get_Block_cfgpred_block(block, i);
1795 be_emit_irprintf(" %d", get_irn_node_nr(predblock));
1798 if (exec_freq != NULL) {
1799 be_emit_irprintf(", freq: %f",
1800 get_block_execfreq(exec_freq, block));
1802 be_emit_cstring(" */\n");
1803 be_emit_write_line();
1807 * Walks over the nodes in a block connected by scheduling edges
1808 * and emits code for each node.
1810 static void ia32_gen_block(ir_node *block)
1814 ia32_emit_block_header(block);
1816 /* emit the contents of the block */
1817 be_dbg_set_dbg_info(get_irn_dbg_info(block));
1818 sched_foreach(block, node) {
1819 ia32_emit_node(node);
/* One row of the exception table: instruction -> handler block. */
1823 typedef struct exc_entry {
1824 ir_node *exc_instr; /**< The instruction that can issue an exception. */
1825 ir_node *block; /**< The block to call then. */
1830 * Sets labels for control flow nodes (jump target).
1831 * Links control predecessors to their destination blocks.
1833 static void ia32_gen_labels(ir_node *block, void *data)
1835 exc_entry **exc_list = (exc_entry**)data;
1839 for (n = get_Block_n_cfgpreds(block) - 1; n >= 0; --n) {
1840 pred = get_Block_cfgpred(block, n);
1841 set_irn_link(pred, block);
/* collect instructions marked as potentially raising exceptions */
1843 pred = skip_Proj(pred);
1844 if (is_ia32_irn(pred) && get_ia32_exc_label(pred)) {
1849 ARR_APP1(exc_entry, *exc_list, e);
1850 set_irn_link(pred, block);
1856 * Compare two exception_entries.
/* qsort comparator: orders by ascending exception label id. */
1858 static int cmp_exc_entry(const void *a, const void *b)
1860 const exc_entry *ea = (const exc_entry*)a;
1861 const exc_entry *eb = (const exc_entry*)b;
1863 if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
1869 * Main driver. Emits the code for one routine.
1871 void ia32_gen_routine(ir_graph *irg)
1873 ir_entity *entity = get_irg_entity(irg);
1874 exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
1875 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1876 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
1877 ir_node **blk_sched = irg_data->blk_sched;
1880 isa = (ia32_isa_t*) arch_env;
1881 do_pic = be_get_irg_options(irg)->pic;
1883 be_gas_elf_type_char = '@';
1885 ia32_register_emitters();
1887 get_unique_label(pic_base_label, sizeof(pic_base_label), "PIC_BASE");
1889 be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
1891 /* we use links to point to target blocks */
1892 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1893 irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
1895 /* initialize next block links */
1896 n = ARR_LEN(blk_sched);
1897 for (i = 0; i < n; ++i) {
1898 ir_node *block = blk_sched[i];
1899 ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
1901 set_irn_link(block, prev);
/* emit the blocks in schedule order */
1904 for (i = 0; i < n; ++i) {
1905 ir_node *block = blk_sched[i];
1907 ia32_gen_block(block);
1910 be_gas_emit_function_epilog(entity);
1912 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1914 /* Sort the exception table using the exception label id's.
1915 Those are ascending with ascending addresses. */
1916 qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
/* write the exception table: pairs of (instruction label, handler block) */
1920 for (e = 0; e < ARR_LEN(exc_list); ++e) {
1921 be_emit_cstring("\t.long ");
1922 ia32_emit_exc_label(exc_list[e].exc_instr);
1924 be_emit_cstring("\t.long ");
1925 be_gas_emit_block_name(exc_list[e].block);
1929 DEL_ARR_F(exc_list);
/* Command line options understood by this emitter. */
1932 static const lc_opt_table_entry_t ia32_emitter_options[] = {
1933 LC_OPT_ENT_BOOL("mark_spill_reload", "mark spills and reloads with ud opcodes", &mark_spill_reload),
1937 /* ==== Experimental binary emitter ==== */
/* Maps backend gp register indices to the x86 hardware register numbers. */
1939 static unsigned char reg_gp_map[N_ia32_gp_REGS];
1940 //static unsigned char reg_mmx_map[N_ia32_mmx_REGS];
1941 //static unsigned char reg_sse_map[N_ia32_xmm_REGS];
/* Fill reg_gp_map with the standard ia32 register encodings. */
1943 static void build_reg_map(void)
1945 reg_gp_map[REG_GP_EAX] = 0x0;
1946 reg_gp_map[REG_GP_ECX] = 0x1;
1947 reg_gp_map[REG_GP_EDX] = 0x2;
1948 reg_gp_map[REG_GP_EBX] = 0x3;
1949 reg_gp_map[REG_GP_ESP] = 0x4;
1950 reg_gp_map[REG_GP_EBP] = 0x5;
1951 reg_gp_map[REG_GP_ESI] = 0x6;
1952 reg_gp_map[REG_GP_EDI] = 0x7;
1955 /** Returns the hardware condition-code encoding for a pnc field. */
1956 static unsigned char pnc2cc(ia32_condition_code_t cc)
1961 /** Sign extension bit values for binops */
1963 UNSIGNED_IMM = 0, /**< unsigned immediate */
1964 SIGNEXT_IMM = 2, /**< sign extended immediate */
1967 /** The mod encoding of the ModR/M */
1969 MOD_IND = 0x00, /**< [reg1] */
1970 MOD_IND_BYTE_OFS = 0x40, /**< [reg1 + byte ofs] */
1971 MOD_IND_WORD_OFS = 0x80, /**< [reg1 + word ofs] */
1972 MOD_REG = 0xC0 /**< reg1 */
1975 /** create R/M encoding for ModR/M */
1976 #define ENC_RM(x) (x)
1977 /** create REG encoding for ModR/M */
1978 #define ENC_REG(x) ((x) << 3)
1980 /** create encoding for a SIB byte */
1981 #define ENC_SIB(scale, index, base) ((scale) << 6 | (index) << 3 | (base))
1983 /* Note: The following routines are supposed to append bytes, words, dwords
1984 to the output stream.
1985 Currently the implementation is stupid in that it still creates output
1986 for an "assembler" in the form of .byte, .long
1987 We will change this when enough infrastructure is there to create complete
1988 machine code in memory/object files */
/* Append one byte to the output stream (currently via a .byte directive). */
1990 static void bemit8(const unsigned char byte)
1992 be_emit_irprintf("\t.byte 0x%x\n", byte);
1993 be_emit_write_line();
/* Append a 16bit word to the output stream (.word directive). */
1996 static void bemit16(const unsigned short u16)
1998 be_emit_irprintf("\t.word 0x%x\n", u16);
1999 be_emit_write_line();
/* Append a 32bit dword to the output stream (.long directive). */
2002 static void bemit32(const unsigned u32)
2004 be_emit_irprintf("\t.long 0x%x\n", u32);
2005 be_emit_write_line();
2009 * Emit address of an entity. If @p is_relative is true then a relative
2010 * offset from behind the address to the entity is created.
2012 static void bemit_entity(ir_entity *entity, bool entity_sign, int offset,
2015 if (entity == NULL) {
2020 /* the final version should remember the position in the bytestream
2021 and patch it with the correct address at linktime... */
2022 be_emit_cstring("\t.long ");
2025 be_gas_emit_entity(entity);
/* TLS entities need the proper thread-pointer-relative relocation suffix */
2027 if (get_entity_owner(entity) == get_tls_type()) {
2028 if (get_entity_visibility(entity) == ir_visibility_external) {
2029 be_emit_cstring("@INDNTPOFF");
2031 be_emit_cstring("@NTPOFF");
/* relative: subtract the current location counter */
2036 be_emit_cstring("-.");
2041 be_emit_irprintf("%+d", offset);
2044 be_emit_write_line();
/* Emit the 32bit pc-relative displacement to a jump target block
 * (relative to the end of the 4-byte field, hence the "- . - 4"). */
2047 static void bemit_jmp_destination(const ir_node *dest_block)
2049 be_emit_cstring("\t.long ");
2050 be_gas_emit_block_name(dest_block);
2051 be_emit_cstring(" - . - 4\n");
2052 be_emit_write_line();
2055 /* end emit routines, all emitters following here should only use the functions
/* Selects the low (al..) or high (ah..) byte half of a gp register. */
2058 typedef enum reg_modifier {
2063 /** Create a ModR/M byte for src1,src2 registers */
2064 static void bemit_modrr(const arch_register_t *src1,
2065 const arch_register_t *src2)
2067 unsigned char modrm = MOD_REG;
2068 modrm |= ENC_RM(reg_gp_map[src1->index]);
2069 modrm |= ENC_REG(reg_gp_map[src2->index]);
2073 /** Create a ModR/M8 byte for src1,src2 registers */
2074 static void bemit_modrr8(reg_modifier_t high_part1, const arch_register_t *src1,
2075 reg_modifier_t high_part2, const arch_register_t *src2)
2077 unsigned char modrm = MOD_REG;
/* +4 selects the high-byte register (ah/ch/dh/bh) in the encoding */
2078 modrm |= ENC_RM(reg_gp_map[src1->index] + (high_part1 == REG_HIGH ? 4 : 0));
2079 modrm |= ENC_REG(reg_gp_map[src2->index] + (high_part2 == REG_HIGH ? 4 : 0));
2083 /** Create a ModR/M byte for one register and extension */
2084 static void bemit_modru(const arch_register_t *reg, unsigned ext)
2086 unsigned char modrm = MOD_REG;
2088 modrm |= ENC_RM(reg_gp_map[reg->index]);
2089 modrm |= ENC_REG(ext);
2093 /** Create a ModR/M8 byte for one register */
2094 static void bemit_modrm8(reg_modifier_t high_part, const arch_register_t *reg)
2096 unsigned char modrm = MOD_REG;
/* only eax/ecx/edx/ebx have byte-addressable halves */
2097 assert(reg_gp_map[reg->index] < 4);
2098 modrm |= ENC_RM(reg_gp_map[reg->index] + (high_part == REG_HIGH ? 4 : 0));
2104 * Calculate the size of a signed immediate in bytes.
2106 * @param offset an offset
2108 static unsigned get_signed_imm_size(int offset)
2110 if (-128 <= offset && offset < 128) {
2112 } else if (-32768 <= offset && offset < 32768) {
2120 * Emit an address mode.
2122 * @param reg content of the reg field: either a register index or an opcode extension
2123 * @param node the node
2125 static void bemit_mod_am(unsigned reg, const ir_node *node)
2127 ir_entity *ent = get_ia32_am_sc(node);
2128 int offs = get_ia32_am_offs_int(node);
2129 ir_node *base = get_irn_n(node, n_ia32_base);
2130 int has_base = !is_ia32_NoReg_GP(base);
2131 ir_node *idx = get_irn_n(node, n_ia32_index);
2132 int has_index = !is_ia32_NoReg_GP(idx);
2135 unsigned emitoffs = 0;
2136 bool emitsib = false;
2139 /* set the mod part depending on displacement */
2141 modrm |= MOD_IND_WORD_OFS;
2143 } else if (offs == 0) {
2146 } else if (-128 <= offs && offs < 128) {
2147 modrm |= MOD_IND_BYTE_OFS;
2150 modrm |= MOD_IND_WORD_OFS;
2155 const arch_register_t *base_reg = arch_get_irn_register(base);
2156 base_enc = reg_gp_map[base_reg->index];
2158 /* Use the EBP encoding + MOD_IND if NO base register. There is
2159 * always a 32bit offset present in this case. */
2165 /* Determine if we need a SIB byte. */
2167 const arch_register_t *reg_index = arch_get_irn_register(idx);
2168 int scale = get_ia32_am_scale(node);
2170 /* R/M set to ESP means SIB in 32bit mode. */
2171 modrm |= ENC_RM(0x04);
2172 sib = ENC_SIB(scale, reg_gp_map[reg_index->index], base_enc);
2174 } else if (base_enc == 0x04) {
2175 /* for the above reason we are forced to emit a SIB when base is ESP.
2176 * Only the base is used, index must be ESP too, which means no index.
2178 modrm |= ENC_RM(0x04);
2179 sib = ENC_SIB(0, 0x04, 0x04);
2182 modrm |= ENC_RM(base_enc);
2185 /* We are forced to emit an 8bit offset as EBP base without offset is a
2186 * special case for SIB without base register. */
2187 if (base_enc == 0x05 && emitoffs == 0) {
2188 modrm |= MOD_IND_BYTE_OFS;
2192 modrm |= ENC_REG(reg);
2198 /* emit displacement */
2199 if (emitoffs == 8) {
2200 bemit8((unsigned) offs);
2201 } else if (emitoffs == 32) {
2202 bemit_entity(ent, is_ia32_am_sc_sign(node), offs, false);
2207 * Emit a binop with an immediate operand.
2209 * @param node the node to emit
2210 * @param opcode_ax the opcode for the op eax, imm variant
2211 * @param opcode the opcode for the reg, imm variant
2212 * @param ruval the opcode extension for opcode
2214 static void bemit_binop_with_imm(
2215 const ir_node *node,
2216 unsigned char opcode_ax,
2217 unsigned char opcode, unsigned char ruval)
2219 /* Use in-reg, because some instructions (cmp, test) have no out-reg. */
2220 const ir_node *op = get_irn_n(node, n_ia32_binary_right);
2221 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
2224 /* Some instructions (test) have no short form with 32bit value + 8bit
2226 if (attr->symconst != NULL || opcode & SIGNEXT_IMM) {
2229 /* check for sign extension */
2230 size = get_signed_imm_size(attr->offset);
/* 8bit immediate: use the sign-extended short form of the opcode */
2235 bemit8(opcode | SIGNEXT_IMM);
2236 /* cmp has this special mode */
2237 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2238 bemit_mod_am(ruval, node);
2240 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2241 bemit_modru(reg, ruval);
2243 bemit8((unsigned char)attr->offset);
2247 /* check for eax variant: this variant is shorter for 32bit immediates only */
2248 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2250 bemit_mod_am(ruval, node);
2252 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2253 if (reg->index == REG_GP_EAX) {
2257 bemit_modru(reg, ruval);
2260 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
2263 panic("invalid imm size?!?");
/* Emit the reg,reg / reg,mem form of a binop (opcode already chosen). */
2269 static void bemit_binop_2(const ir_node *node, unsigned code)
2271 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
2273 if (get_ia32_op_type(node) == ia32_Normal) {
2274 const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
2275 bemit_modrr(op2, out);
2277 bemit_mod_am(reg_gp_map[out->index], node);
/* Emit a binop, choosing the immediate or register form of the opcode.
 * opcodes[] = { reg form, eax+imm form, reg+imm form, imm opcode ext }. */
2284 static void bemit_binop(const ir_node *node, const unsigned char opcodes[4])
2286 ir_node *right = get_irn_n(node, n_ia32_binary_right);
2287 if (is_ia32_Immediate(right)) {
2288 bemit_binop_with_imm(node, opcodes[1], opcodes[2], opcodes[3]);
2290 bemit_binop_2(node, opcodes[0]);
/* Emit a unop: opcode byte plus ModR/M with opcode extension ext;
 * uses address mode when the node is not in register form. */
2297 static void bemit_unop(const ir_node *node, unsigned char code, unsigned char ext, int input)
2300 if (get_ia32_op_type(node) == ia32_Normal) {
2301 const arch_register_t *in = arch_get_irn_register_in(node, input);
2302 bemit_modru(in, ext);
2304 bemit_mod_am(ext, node);
/* Unop variant where the ModR/M reg field holds the output register. */
2308 static void bemit_unop_reg(const ir_node *node, unsigned char code, int input)
2310 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2311 bemit_unop(node, code, reg_gp_map[out->index], input);
/* Unop on a memory operand; code is the 8bit opcode, code+1 the wider one. */
2314 static void bemit_unop_mem(const ir_node *node, unsigned char code, unsigned char ext)
2316 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));
2319 bemit8(size == 8 ? code : code + 1);
2320 bemit_mod_am(ext, node);
/* Emit the immediate value (entity and/or offset) of an Immediate node. */
2323 static void bemit_immediate(const ir_node *node, bool relative)
2325 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
2326 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, relative);
/* Emit a register-to-register copy for a be_Copy node. */
2329 static void bemit_copy(const ir_node *copy)
2331 const arch_register_t *in = arch_get_irn_register_in(copy, 0);
2332 const arch_register_t *out = arch_get_irn_register_out(copy, 0);
2336 /* copies of vf nodes aren't real... */
2337 if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
/* only gp copies are supported by the binary emitter */
2340 assert(arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_gp]);
2342 bemit_modrr(in, out);
/* Emit a register swap (be_Perm). For gp registers the one-byte
 * 0x90+reg "xchg eax, reg" form is used when eax is involved. */
2345 static void bemit_perm(const ir_node *node)
2347 const arch_register_t *in0 = arch_get_irn_register(get_irn_n(node, 0));
2348 const arch_register_t *in1 = arch_get_irn_register(get_irn_n(node, 1));
2349 const arch_register_class_t *cls0 = arch_register_get_class(in0);
2351 assert(cls0 == arch_register_get_class(in1) && "Register class mismatch at Perm");
2353 if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
2354 if (in0->index == REG_GP_EAX) {
2355 bemit8(0x90 + reg_gp_map[in1->index]);
2356 } else if (in1->index == REG_GP_EAX) {
2357 bemit8(0x90 + reg_gp_map[in0->index]);
2360 bemit_modrr(in0, in1);
2362 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
2363 panic("unimplemented"); // TODO implement
2364 //ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
2365 //ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
2366 //ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
2367 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
2369 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
2372 panic("unexpected register class in be_Perm (%+F)", node);
/* Emit "xor reg, reg" to zero the output register. */
2376 static void bemit_xor0(const ir_node *node)
2378 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2380 bemit_modrr(out, out);
/* Emit "mov $imm, reg" using the short 0xB8+reg encoding. */
2383 static void bemit_mov_const(const ir_node *node)
2385 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2386 bemit8(0xB8 + reg_gp_map[out->index]);
2387 bemit_immediate(node, false);
2391 * Creates a function for a Binop with 3 possible encodings.
2393 #define BINOP(op, op0, op1, op2, op2_ext) \
2394 static void bemit_ ## op(const ir_node *node) { \
2395 static const unsigned char op ## _codes[] = {op0, op1, op2, op2_ext}; \
2396 bemit_binop(node, op ## _codes); \
2399 /* insn def eax,imm imm */
2400 BINOP(add, 0x03, 0x05, 0x81, 0)
2401 BINOP(or, 0x0B, 0x0D, 0x81, 1)
2402 BINOP(adc, 0x13, 0x15, 0x81, 2)
2403 BINOP(sbb, 0x1B, 0x1D, 0x81, 3)
2404 BINOP(and, 0x23, 0x25, 0x81, 4)
2405 BINOP(sub, 0x2B, 0x2D, 0x81, 5)
2406 BINOP(xor, 0x33, 0x35, 0x81, 6)
2407 BINOP(test, 0x85, 0xA9, 0xF7, 0)
/* Creates emit functions for binops with a memory destination, in a wide
 * (bemit_op) and an 8bit (bemit_op8bit) variant; ext is the opcode
 * extension placed in the ModR/M reg field. */
2409 #define BINOPMEM(op, ext) \
2410 static void bemit_##op(const ir_node *node) \
2413 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2416 val = get_irn_n(node, n_ia32_unary_op); \
2417 if (is_ia32_Immediate(val)) { \
2418 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(val); \
2419 int offset = attr->offset; \
2420 if (attr->symconst == NULL && get_signed_imm_size(offset) == 1) { \
2422 bemit_mod_am(ext, node); \
2426 bemit_mod_am(ext, node); \
2430 bemit_entity(attr->symconst, attr->sc_sign, offset, false); \
2434 bemit8(ext << 3 | 1); \
2435 bemit_mod_am(reg_gp_map[arch_get_irn_register_out(val, 0)->index], node); \
2439 static void bemit_##op##8bit(const ir_node *node) \
2441 ir_node *val = get_irn_n(node, n_ia32_unary_op); \
2442 if (is_ia32_Immediate(val)) { \
2444 bemit_mod_am(ext, node); \
2445 bemit8(get_ia32_immediate_attr_const(val)->offset); \
2448 bemit_mod_am(reg_gp_map[arch_get_irn_register_out(val, 0)->index], node); \
2460 * Creates a function for an Unop with code /ext encoding.
2462 #define UNOP(op, code, ext, input) \
2463 static void bemit_ ## op(const ir_node *node) { \
2464 bemit_unop(node, code, ext, input); \
2467 UNOP(not, 0xF7, 2, n_ia32_Not_val)
2468 UNOP(neg, 0xF7, 3, n_ia32_Neg_val)
2469 UNOP(mul, 0xF7, 4, n_ia32_Mul_right)
2470 UNOP(imul1op, 0xF7, 5, n_ia32_IMul1OP_right)
2471 UNOP(div, 0xF7, 6, n_ia32_Div_divisor)
2472 UNOP(idiv, 0xF7, 7, n_ia32_IDiv_divisor)
2474 /* TODO: am support for IJmp */
2475 UNOP(ijmp, 0xFF, 4, n_ia32_IJmp_target)
/* Creates emit functions for shift/rotate ops (register and memory
 * destination variants). The encodings follow the x86 shift group:
 * shift-by-1, shift-by-imm8 and shift-by-cl forms. */
2477 #define SHIFT(op, ext) \
2478 static void bemit_##op(const ir_node *node) \
2480 const arch_register_t *out = arch_get_irn_register_out(node, 0); \
2481 ir_node *count = get_irn_n(node, 1); \
2482 if (is_ia32_Immediate(count)) { \
2483 int offset = get_ia32_immediate_attr_const(count)->offset; \
2484 if (offset == 1) { \
2486 bemit_modru(out, ext); \
2489 bemit_modru(out, ext); \
2494 bemit_modru(out, ext); \
2498 static void bemit_##op##mem(const ir_node *node) \
2501 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2504 count = get_irn_n(node, 1); \
2505 if (is_ia32_Immediate(count)) { \
2506 int offset = get_ia32_immediate_attr_const(count)->offset; \
2507 if (offset == 1) { \
2508 bemit8(size == 8 ? 0xD0 : 0xD1); \
2509 bemit_mod_am(ext, node); \
2511 bemit8(size == 8 ? 0xC0 : 0xC1); \
2512 bemit_mod_am(ext, node); \
2516 bemit8(size == 8 ? 0xD2 : 0xD3); \
2517 bemit_mod_am(ext, node); \
/* Emit a double-precision left shift (shld), immediate or %cl count. */
2527 static void bemit_shld(const ir_node *node)
2529 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_ShlD_val_low);
2530 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_ShlD_res);
2531 ir_node *count = get_irn_n(node, n_ia32_ShlD_count);
2533 if (is_ia32_Immediate(count)) {
2535 bemit_modrr(out, in);
2536 bemit8(get_ia32_immediate_attr_const(count)->offset);
2539 bemit_modrr(out, in);
/* Emit a double-precision right shift (shrd), immediate or %cl count. */
2543 static void bemit_shrd(const ir_node *node)
2545 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_ShrD_val_low);
2546 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_ShrD_res);
2547 ir_node *count = get_irn_n(node, n_ia32_ShrD_count);
2549 if (is_ia32_Immediate(count)) {
2551 bemit_modrr(out, in);
2552 bemit8(get_ia32_immediate_attr_const(count)->offset);
2555 bemit_modrr(out, in);
2560 * binary emitter for setcc.
2562 static void bemit_setcc(const ir_node *node)
2564 const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);
2566 ia32_condition_code_t cc = get_ia32_condcode(node);
2567 cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
/* Float compares leave the parity flag set on "unordered"; combine the
 * base setcc result with a setp/setnp on the high byte half. */
2568 if (cc & ia32_cc_float_parity_cases) {
2569 if (cc & ia32_cc_negated) {
/* set<cc> %<dreg */
2572 bemit8(0x90 | pnc2cc(cc));
2573 bemit_modrm8(REG_LOW, dreg);
/* setp %>dreg */
2578 bemit_modrm8(REG_HIGH, dreg);
2580 /* orb %>dreg, %<dreg */
2582 bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
/* set<cc> %<dreg */
2586 bemit8(0x90 | pnc2cc(cc));
2587 bemit_modrm8(REG_LOW, dreg);
/* setnp %>dreg */
2592 bemit_modrm8(REG_HIGH, dreg);
2594 /* andb %>dreg, %<dreg */
2596 bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
/* plain integer case: a single set<cc> */
2601 bemit8(0x90 | pnc2cc(cc));
2602 bemit_modrm8(REG_LOW, dreg);
/* Binary emitter for CMovcc: mov the false value into the output (unless
 * register allocation already placed it there), then a conditional move
 * (0x0F 0x40+cc) of the true value. */
2606 static void bemit_cmovcc(const ir_node *node)
2608 const ia32_attr_t *attr = get_ia32_attr_const(node);
2609 int ins_permuted = attr->data.ins_permuted;
2610 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_res);
2611 ia32_condition_code_t cc = get_ia32_condcode(node);
2612 const arch_register_t *in_true;
2613 const arch_register_t *in_false;
2615 cc = determine_final_cc(node, n_ia32_CMovcc_eflags, cc);
2617 in_true = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
2618 in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));
2620 /* should be same constraint fulfilled? */
2621 if (out == in_false) {
2622 /* yes -> nothing to do */
2623 } else if (out == in_true) {
/* out already holds the true value: swap the condition instead */
2624 assert(get_ia32_op_type(node) == ia32_Normal);
2625 ins_permuted = !ins_permuted;
2629 bemit8(0x8B); // mov %in_false, %out
2630 bemit_modrr(in_false, out);
2634 cc = ia32_negate_condition_code(cc);
2636 if (cc & ia32_cc_float_parity_cases)
2637 panic("cmov can't handle parity float cases");
2640 bemit8(0x40 | pnc2cc(cc));
2641 if (get_ia32_op_type(node) == ia32_Normal) {
2642 bemit_modrr(in_true, out);
2644 bemit_mod_am(reg_gp_map[out->index], node);
/* Binary emitter for cmp: handles immediate right operands (8bit
 * sign-extended, eax short form, or full-width) and register/memory
 * operands; /7 is the cmp opcode extension. */
2648 static void bemit_cmp(const ir_node *node)
2650 unsigned ls_size = get_mode_size_bits(get_ia32_ls_mode(node));
2656 right = get_irn_n(node, n_ia32_binary_right);
2657 if (is_ia32_Immediate(right)) {
2658 /* Use in-reg, because some instructions (cmp, test) have no out-reg. */
2659 const ir_node *op = get_irn_n(node, n_ia32_binary_right);
2660 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
/* symconst immediates always need the full 32bit form */
2663 if (attr->symconst != NULL) {
2666 /* check for sign extension */
2667 size = get_signed_imm_size(attr->offset);
/* 8bit: 0x83 /7 with sign-extended imm8 */
2672 bemit8(0x81 | SIGNEXT_IMM);
2673 /* cmp has this special mode */
2674 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2675 bemit_mod_am(7, node);
2677 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2678 bemit_modru(reg, 7);
2680 bemit8((unsigned char)attr->offset);
2684 /* check for eax variant: this variant is shorter for 32bit immediates only */
2685 if (get_ia32_op_type(node) == ia32_AddrModeS) {
2687 bemit_mod_am(7, node);
2689 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2690 if (reg->index == REG_GP_EAX) {
2694 bemit_modru(reg, 7);
2697 if (ls_size == 16) {
2698 bemit16(attr->offset);
2700 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
2704 panic("invalid imm size?!?");
/* register/memory right operand */
2706 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
2708 if (get_ia32_op_type(node) == ia32_Normal) {
2709 const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
2710 bemit_modrr(op2, out);
2712 bemit_mod_am(reg_gp_map[out->index], node);
/* Binary emitter for the 8bit cmp variant (cmpb). */
2717 static void bemit_cmp8bit(const ir_node *node)
2719 ir_node *right = get_irn_n(node, n_ia32_binary_right);
2720 if (is_ia32_Immediate(right)) {
2721 if (get_ia32_op_type(node) == ia32_Normal) {
2722 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
/* cmpb $imm, %al has a dedicated short encoding */
2723 if (out->index == REG_GP_EAX) {
2727 bemit_modru(out, 7);
2731 bemit_mod_am(7, node);
2733 bemit8(get_ia32_immediate_attr_const(right)->offset);
2735 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
2737 if (get_ia32_op_type(node) == ia32_Normal) {
2738 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Cmp_right);
2739 bemit_modrr(out, in);
2741 bemit_mod_am(reg_gp_map[out->index], node);
/* Binary emitter for the 8bit test variant (testb); /0 extension. */
2746 static void bemit_test8bit(const ir_node *node)
2748 ir_node *right = get_irn_n(node, n_ia32_Test8Bit_right);
2749 if (is_ia32_Immediate(right)) {
2750 if (get_ia32_op_type(node) == ia32_Normal) {
2751 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
/* testb $imm, %al has a dedicated short encoding */
2752 if (out->index == REG_GP_EAX) {
2756 bemit_modru(out, 0);
2760 bemit_mod_am(0, node);
2762 bemit8(get_ia32_immediate_attr_const(right)->offset);
2764 const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
2766 if (get_ia32_op_type(node) == ia32_Normal) {
2767 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Test8Bit_right);
2768 bemit_modrr(out, in);
2770 bemit_mod_am(reg_gp_map[out->index], node);
/* Binary emitter for imul: 0x6B (imm8) / 0x69 (imm32) immediate forms,
 * otherwise the two-byte 0x0F 0xAF register form. */
2775 static void bemit_imul(const ir_node *node)
2777 ir_node *right = get_irn_n(node, n_ia32_IMul_right);
2778 /* Do we need the immediate form? */
2779 if (is_ia32_Immediate(right)) {
2780 int imm = get_ia32_immediate_attr_const(right)->offset;
2781 if (get_signed_imm_size(imm) == 1) {
2782 bemit_unop_reg(node, 0x6B, n_ia32_IMul_left);
2785 bemit_unop_reg(node, 0x69, n_ia32_IMul_left);
2790 bemit_unop_reg(node, 0xAF, n_ia32_IMul_right);
/* Emit the one-byte "dec reg" encoding (0x48+reg). */
2794 static void bemit_dec(const ir_node *node)
2796 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Dec_res);
2797 bemit8(0x48 + reg_gp_map[out->index]);
/* Emit the one-byte "inc reg" encoding (0x40+reg). */
2800 static void bemit_inc(const ir_node *node)
2802 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Inc_res);
2803 bemit8(0x40 + reg_gp_map[out->index]);
/* Creates emit functions for unops with a memory operand. */
2806 #define UNOPMEM(op, code, ext) \
2807 static void bemit_##op(const ir_node *node) \
2809 bemit_unop_mem(node, code, ext); \
2812 UNOPMEM(notmem, 0xF6, 2)
2813 UNOPMEM(negmem, 0xF6, 3)
2814 UNOPMEM(incmem, 0xFE, 0)
2815 UNOPMEM(decmem, 0xFE, 1)
/* Emit a load of the TLS base: gs-prefixed "movl gs:0, %reg"
 * (short 0xA1 form for eax, 0x8B + disp32-only ModR/M otherwise). */
2817 static void bemit_ldtls(const ir_node *node)
2819 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2821 bemit8(0x65); // gs:
2822 if (out->index == REG_GP_EAX) {
2823 bemit8(0xA1); // movl 0, %eax
2825 bemit8(0x8B); // movl 0, %reg
2826 bemit8(MOD_IND | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x05));
/* Emit "lea <am>, %out": address computation via the node's address mode. */
2834 static void bemit_lea(const ir_node *node)
2836 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2838 bemit_mod_am(reg_gp_map[out->index], node);
/* helper function for bemit_minus64bit */
/* Emits a register-to-register move (0x8B = mov r32, r/m32). */
2841 /* helper function for bemit_minus64bit */
2842 static void bemit_helper_mov(const arch_register_t *src, const arch_register_t *dst)
2844 bemit8(0x8B); // movl %src, %dst
2845 bemit_modrr(src, dst);

/* Emits a register negate (0xF7 /3 = neg r/m32). */
2848 /* helper function for bemit_minus64bit */
2849 static void bemit_helper_neg(const arch_register_t *reg)
2851 bemit8(0xF7); // negl %reg
2852 bemit_modru(reg, 3);

/* Emits sbb $0, reg (0x83 /3 with an imm8 of 0).
 * NOTE(review): the trailing bemit8(0) for the immediate is elided here. */
2855 /* helper function for bemit_minus64bit */
2856 static void bemit_helper_sbb0(const arch_register_t *reg)
2858 bemit8(0x83); // sbbl $0, %reg
2859 bemit_modru(reg, 3);

/* Emits sbb src, dst (0x1B = sbb r32, r/m32). */
2863 /* helper function for bemit_minus64bit */
2864 static void bemit_helper_sbb(const arch_register_t *src, const arch_register_t *dst)
2866 bemit8(0x1B); // sbbl %src, %dst
2867 bemit_modrr(src, dst);

/* Emits a register exchange; uses the one-byte 0x90+reg form when one
 * operand is EAX, otherwise the generic 0x87 xchg. */
2870 /* helper function for bemit_minus64bit */
2871 static void bemit_helper_xchg(const arch_register_t *src, const arch_register_t *dst)
2873 if (src->index == REG_GP_EAX) {
2874 bemit8(0x90 + reg_gp_map[dst->index]); // xchgl %eax, %dst
2875 } else if (dst->index == REG_GP_EAX) {
2876 bemit8(0x90 + reg_gp_map[src->index]); // xchgl %src, %eax
2878 bemit8(0x87); // xchgl %src, %dst
2879 bemit_modrr(src, dst);

/* Zeroes a register via xor reg, reg (0x33). */
2883 /* helper function for bemit_minus64bit */
2884 static void bemit_helper_zero(const arch_register_t *reg)
2886 bemit8(0x33); // xorl %reg, %reg
2887 bemit_modrr(reg, reg);
/**
 * Emits the negation of a 64-bit value held in a register pair
 * (lo = input/output 0, hi = input/output 1).
 * First moves the inputs into the output registers, distinguishing every
 * possible register-overlap case so no value is clobbered before use,
 * then performs the 64-bit negate. Two negate sequences appear below:
 * neg hi; neg lo; sbb $0, hi — and — zero hi; neg lo; sbb in_hi, hi.
 * NOTE(review): the branch selecting between the two sequences is elided
 * in this listing — confirm against the full source.
 */
2890 static void bemit_minus64bit(const ir_node *node)
2892 const arch_register_t *in_lo = arch_get_irn_register_in(node, 0);
2893 const arch_register_t *in_hi = arch_get_irn_register_in(node, 1);
2894 const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
2895 const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);
2897 if (out_lo == in_lo) {
2898 if (out_hi != in_hi) {
2899 /* a -> a, b -> d */
2902 /* a -> a, b -> b */
2905 } else if (out_lo == in_hi) {
2906 if (out_hi == in_lo) {
2907 /* a -> b, b -> a */
2908 bemit_helper_xchg(in_lo, in_hi);
2911 /* a -> b, b -> d */
2912 bemit_helper_mov(in_hi, out_hi);
2913 bemit_helper_mov(in_lo, out_lo);
2917 if (out_hi == in_lo) {
2918 /* a -> c, b -> a */
2919 bemit_helper_mov(in_lo, out_lo);
2921 } else if (out_hi == in_hi) {
2922 /* a -> c, b -> b */
2923 bemit_helper_mov(in_lo, out_lo);
2926 /* a -> c, b -> d */
2927 bemit_helper_mov(in_lo, out_lo);
/* negate sequence 1: two's complement of the pair in place */
2933 bemit_helper_neg( out_hi);
2934 bemit_helper_neg( out_lo);
2935 bemit_helper_sbb0(out_hi);
/* negate sequence 2: rebuild hi from a zero and the original in_hi */
2939 bemit_helper_zero(out_hi);
2940 bemit_helper_neg( out_lo);
2941 bemit_helper_sbb( in_hi, out_hi);
2945 * Emit a single opcode.
/* Defines a trivial emitter that outputs exactly one opcode byte.
 * The commented-out instantiations document encodings that are
 * deliberately not generated by this backend. */
2947 #define EMIT_SINGLEOP(op, code) \
2948 static void bemit_ ## op(const ir_node *node) { \
2953 //EMIT_SINGLEOP(daa,  0x27)
2954 //EMIT_SINGLEOP(das,  0x2F)
2955 //EMIT_SINGLEOP(aaa,  0x37)
2956 //EMIT_SINGLEOP(aas,  0x3F)
2957 //EMIT_SINGLEOP(nop,  0x90)
2958 EMIT_SINGLEOP(cwtl,  0x98)
2959 EMIT_SINGLEOP(cltd,  0x99)
2960 //EMIT_SINGLEOP(fwait, 0x9B)
2961 EMIT_SINGLEOP(sahf,  0x9E)
2962 //EMIT_SINGLEOP(popf, 0x9D)
2963 EMIT_SINGLEOP(leave, 0xC9)
2964 EMIT_SINGLEOP(int3,  0xCC)
2965 //EMIT_SINGLEOP(iret, 0xCF)
2966 //EMIT_SINGLEOP(xlat, 0xD7)
2967 //EMIT_SINGLEOP(lock, 0xF0)
2968 EMIT_SINGLEOP(rep,   0xF3)
2969 //EMIT_SINGLEOP(halt, 0xF4)
2970 EMIT_SINGLEOP(cmc,   0xF5)
2971 EMIT_SINGLEOP(stc,   0xF9)
2972 //EMIT_SINGLEOP(cli,  0xFA)
2973 //EMIT_SINGLEOP(sti,  0xFB)
2974 //EMIT_SINGLEOP(std,  0xFD)
2977 * Emits a MOV out, [MEM].
/* When the destination is EAX and the address has neither base nor index
 * register (pure displacement/entity), the short moffs encoding is used;
 * otherwise the generic mov with a full ModRM address mode.
 * NOTE(review): the moffs opcode byte (0xA1) and the generic mov opcode
 * emission are elided in this listing. */
2979 static void bemit_load(const ir_node *node)
2981 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2983 if (out->index == REG_GP_EAX) {
2984 ir_node *base = get_irn_n(node, n_ia32_base);
2985 int has_base = !is_ia32_NoReg_GP(base);
2986 ir_node *idx = get_irn_n(node, n_ia32_index);
2987 int has_index = !is_ia32_NoReg_GP(idx);
2988 if (!has_base && !has_index) {
2989 ir_entity *ent = get_ia32_am_sc(node);
2990 int offs = get_ia32_am_offs_int(node);
2991 /* load from constant address to EAX can be encoded
2994 bemit_entity(ent, 0, offs, false);
2999 bemit_mod_am(reg_gp_map[out->index], node);
3003 * Emits a MOV [mem], in.
/* Three immediate sub-cases by store size (8/16/32 bit — the size checks
 * for 8 and 32 bit are elided in this listing), then the register case,
 * which mirrors bemit_load: an EAX store to a pure displacement address
 * uses the short moffs form, anything else the generic ModRM form. */
3005 static void bemit_store(const ir_node *node)
3007 const ir_node *value = get_irn_n(node, n_ia32_Store_val);
3008 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));
3010 if (is_ia32_Immediate(value)) {
/* 8-bit immediate store: /0 + imm8 */
3013 bemit_mod_am(0, node);
3014 bemit8(get_ia32_immediate_attr_const(value)->offset);
3015 } else if (size == 16) {
/* 16-bit immediate store: /0 + imm16 */
3018 bemit_mod_am(0, node);
3019 bemit16(get_ia32_immediate_attr_const(value)->offset);
/* 32-bit immediate store: /0 + imm32 (relocatable via bemit_immediate) */
3022 bemit_mod_am(0, node);
3023 bemit_immediate(value, false);
3026 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Store_val);
3028 if (in->index == REG_GP_EAX) {
3029 ir_node *base = get_irn_n(node, n_ia32_base);
3030 int has_base = !is_ia32_NoReg_GP(base);
3031 ir_node *idx = get_irn_n(node, n_ia32_index);
3032 int has_index = !is_ia32_NoReg_GP(idx);
3033 if (!has_base && !has_index) {
3034 ir_entity *ent = get_ia32_am_sc(node);
3035 int offs = get_ia32_am_offs_int(node);
3036 /* store to constant address from EAX can be encoded as
3037 * 0xA2/0xA3 [offset]*/
3045 bemit_entity(ent, 0, offs, false);
3057 bemit_mod_am(reg_gp_map[in->index], node);
/**
 * Emits a zero/sign extension (movzx/movsx).
 * Starting from a base opcode (elided in this listing), bit 3 selects
 * sign extension (movsx) and bit 0 selects a 16-bit source (word form).
 */
3061 static void bemit_conv_i2i(const ir_node *node)
3063 ir_mode *smaller_mode = get_ia32_ls_mode(node);
3072 if (mode_is_signed(smaller_mode)) opcode |= 0x08;
3073 if (get_mode_size_bits(smaller_mode) == 16) opcode |= 0x01;
3074 bemit_unop_reg(node, opcode, n_ia32_Conv_I2I_val);
/**
 * Emits a PUSH.
 * Immediates use the imm8 form when they fit in a signed byte (imm32
 * otherwise); a memory operand uses the /6 ModRM form; a register uses
 * the short one-byte encoding 0x50+reg.
 * NOTE(review): the immediate opcode bytes (0x6A/0x68) and the 0xFF for
 * the memory form are elided in this listing.
 */
3080 static void bemit_push(const ir_node *node)
3082 const ir_node *value = get_irn_n(node, n_ia32_Push_val);
3084 if (is_ia32_Immediate(value)) {
3085 const ia32_immediate_attr_t *attr
3086 = get_ia32_immediate_attr_const(value);
3087 unsigned size = get_signed_imm_size(attr->offset);
3093 bemit8((unsigned char)attr->offset);
3098 bemit_immediate(value, false);
3101 } else if (is_ia32_NoReg_GP(value)) {
3103 bemit_mod_am(6, node);
3105 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_Push_val);
3106 bemit8(0x50 + reg_gp_map[reg->index]);
/** Emits a POP into a register using the short one-byte form 0x58+reg. */
3113 static void bemit_pop(const ir_node *node)
3115 const arch_register_t *reg = arch_get_irn_register_out(node, pn_ia32_Pop_res);
3116 bemit8(0x58 + reg_gp_map[reg->index]);

/** Emits a POP into memory via the /0 ModRM form (opcode byte elided here). */
3119 static void bemit_popmem(const ir_node *node)
3122 bemit_mod_am(0, node);
/**
 * Emits a CALL: a direct call with a relocatable immediate target, or
 * the indirect FF /2 form for register/memory targets.
 * NOTE(review): the direct-call opcode byte (presumably 0xE8) is elided
 * in this listing.
 */
3125 static void bemit_call(const ir_node *node)
3127 ir_node *proc = get_irn_n(node, n_ia32_Call_addr);
3129 if (is_ia32_Immediate(proc)) {
3131 bemit_immediate(proc, true);
3133 bemit_unop(node, 0xFF, 2, n_ia32_Call_addr);
/** Emits an unconditional jump to the given block (destination is a
 *  fixup resolved by bemit_jmp_destination; opcode byte elided here). */
3137 static void bemit_jmp(const ir_node *dest_block)
3140 bemit_jmp_destination(dest_block);

/** Emits a Jmp node — omitted entirely when the target is the
 *  fall-through successor. */
3143 static void bemit_jump(const ir_node *node)
3145 if (can_be_fallthrough(node))
3148 bemit_jmp(get_cfop_target_block(node));

/** Emits a conditional jump for the given pn_Cmp-style condition;
 *  pnc2cc() maps it to the x86 condition-code nibble. */
3151 static void bemit_jcc(int pnc, const ir_node *dest_block)
3153 unsigned char cc = pnc2cc(pnc);
3156 bemit_jmp_destination(dest_block);

/** Emits a jump on the parity flag (jp/jnp selected by 'odd'). */
3159 static void bemit_jp(bool odd, const ir_node *dest_block)
3163 bemit_jmp_destination(dest_block);
/**
 * Emits the code for a Jcc node: a conditional jump to the true target,
 * optionally followed by an unconditional jump to the false target.
 * Swaps the projections (negating the condition) so the true Proj can
 * never be the fall-through, and inserts an extra parity-flag jump for
 * floating-point conditions where unordered results set PF.
 */
3166 static void bemit_ia32_jcc(const ir_node *node)
3168 ia32_condition_code_t cc = get_ia32_condcode(node);
3169 const ir_node *proj_true;
3170 const ir_node *proj_false;
3171 const ir_node *dest_true;
3172 const ir_node *dest_false;
3174 cc = determine_final_cc(node, 0, cc);
3176 /* get both Projs */
3177 proj_true = get_proj(node, pn_ia32_Jcc_true);
3178 assert(proj_true && "Jcc without true Proj");
3180 proj_false = get_proj(node, pn_ia32_Jcc_false);
3181 assert(proj_false && "Jcc without false Proj");
3183 if (can_be_fallthrough(proj_true)) {
3184 /* exchange both proj's so the second one can be omitted */
3185 const ir_node *t = proj_true;
3187 proj_true = proj_false;
3189 cc = ia32_negate_condition_code(cc);
3192 dest_true = get_cfop_target_block(proj_true);
3193 dest_false = get_cfop_target_block(proj_false);
3195 if (cc & ia32_cc_float_parity_cases) {
3196 /* Some floating point comparisons require a test of the parity flag,
3197 * which indicates that the result is unordered */
3198 if (cc & ia32_cc_negated) {
3199 bemit_jp(false, dest_true);
3201 /* we need a local label if the false proj is a fallthrough
3202 * as the falseblock might have no label emitted then */
3203 if (can_be_fallthrough(proj_false)) {
3205 bemit8(0x06); // jp + 6
3207 bemit_jp(false, dest_false);
3211 bemit_jcc(cc, dest_true);
3213 /* the second Proj might be a fallthrough */
3214 if (can_be_fallthrough(proj_false)) {
3215 /* it's a fallthrough */
3217 bemit_jmp(dest_false);
/**
 * Emits an indirect jump through a jump table (FF /5: jmp *table(,%in,4))
 * followed by the jump-table data itself.
 */
3221 static void bemit_switchjmp(const ir_node *node)
3223 ir_entity *jump_table = get_ia32_am_sc(node);
3224 const ir_switch_table *table = get_ia32_switch_table(node);
3226 bemit8(0xFF); // jmp *tbl.label(,%in,4)
3227 bemit_mod_am(0x05, node);
3229 be_emit_jump_table(node, table, jump_table, get_cfop_target_block);
/**
 * Emits a RET; uses the ret-imm16 form when the callee pops arguments
 * (pop count must fit in 16 bits). Opcode bytes are elided in this listing.
 */
3235 static void bemit_return(const ir_node *node)
3237 unsigned pop = be_Return_get_pop(node);
3238 if (pop > 0 || be_Return_get_emit_pop(node)) {
3240 assert(pop <= 0xffff);
/**
 * Emits a SubSP (stack allocation): the subtraction itself is elided in
 * this listing; the visible part copies %esp into result register 1
 * (mov reg, r/m with rm = 0x04 = ESP).
 */
3247 static void bemit_subsp(const ir_node *node)
3249 const arch_register_t *out;
3252 /* mov %esp, %out */
3254 out = arch_get_irn_register_out(node, 1);
3255 bemit8(MOD_REG | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x04));
/**
 * Emits a stack-pointer adjustment (add/sub of an immediate on a register).
 * Chooses the imm8 group opcode 0x83 when the offset fits in a signed
 * byte, 0x81 otherwise; 'ext' (set in elided lines) selects add vs. sub.
 */
3258 static void bemit_incsp(const ir_node *node)
3261 const arch_register_t *reg;
3265 offs = be_get_IncSP_offset(node);
3276 size = get_signed_imm_size(offs);
3277 bemit8(size == 1 ? 0x83 : 0x81);
3279 reg = arch_get_irn_register_out(node, 0);
3280 bemit_modru(reg, ext);
/**
 * Emits an inlined memory copy with a compile-time size: a sequence of
 * movsb/movsw/movsl string instructions (the loops/size decomposition
 * around them is elided in this listing).
 */
3289 static void bemit_copybi(const ir_node *node)
3291 unsigned size = get_ia32_copyb_size(node);
3293 bemit8(0xA4); // movsb
3296 bemit8(0xA5); // movsw
3300 bemit8(0xA5); // movsl
/**
 * Emits an x87 binary operation.
 * Register form: picks between 'code' and 'code_to' depending on whether
 * the destination is st(0) or another stack slot (the x87 attribute holds
 * the two sources in x87[0]/x87[1] and the result in x87[2]).
 * Address-mode form: the load size (32 vs. 64 bit) selects the opcode
 * group (escape bytes elided in this listing) and 'code' is reused as the
 * ModRM /digit extension.
 */
3304 static void bemit_fbinop(const ir_node *node, unsigned code, unsigned code_to)
3306 if (get_ia32_op_type(node) == ia32_Normal) {
3307 const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
3308 const arch_register_t *in1 = x87_attr->x87[0];
3309 const arch_register_t *in = x87_attr->x87[1];
3310 const arch_register_t *out = x87_attr->x87[2];
3314 } else if (out == in) {
3318 if (out->index == 0) {
3320 bemit8(MOD_REG | ENC_REG(code) | ENC_RM(in->index));
3323 bemit8(MOD_REG | ENC_REG(code_to) | ENC_RM(out->index));
3326 if (get_mode_size_bits(get_ia32_ls_mode(node)) == 32) {
3331 bemit_mod_am(code, node);

/** Emits the popping variant of an x87 binary op: one byte, base opcode
 *  plus the destination stack-slot index (escape byte elided here). */
3335 static void bemit_fbinopp(const ir_node *node, unsigned const code)
3337 const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
3338 const arch_register_t *out = x87_attr->x87[2];
3340 bemit8(code + out->index);
/** Emits fabs (opcode bytes elided in this listing). */
3343 static void bemit_fabs(const ir_node *node)

/** Emits fadd: D8 group, direction codes 0/0. */
3351 static void bemit_fadd(const ir_node *node)
3353 bemit_fbinop(node, 0, 0);

/** Emits faddp st(i), st: DE C0+i (escape byte emitted in bemit_fbinopp's caller path). */
3356 static void bemit_faddp(const ir_node *node)
3358 bemit_fbinopp(node, 0xC0);

/** Emits fchs (opcode bytes elided in this listing). */
3361 static void bemit_fchs(const ir_node *node)

/** Emits fdiv: direction codes /6 and /7 (reverse form for the to-sti case). */
3369 static void bemit_fdiv(const ir_node *node)
3371 bemit_fbinop(node, 6, 7);

/** Emits fdivp: base C0-offset 0xF8. */
3374 static void bemit_fdivp(const ir_node *node)
3376 bemit_fbinopp(node, 0xF8);

/** Emits fdivr: direction codes /7 and /6 (mirror of fdiv). */
3379 static void bemit_fdivr(const ir_node *node)
3381 bemit_fbinop(node, 7, 6);

/** Emits fdivrp: base C0-offset 0xF0. */
3384 static void bemit_fdivrp(const ir_node *node)
3386 bemit_fbinopp(node, 0xF0);

/**
 * Emits an integer load onto the x87 stack, selecting the encoding by
 * operand size: filds (DF /0), fildl (DB /0), fildll (DF /5).
 */
3389 static void bemit_fild(const ir_node *node)
3391 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3393 bemit8(0xDF); // filds
3394 bemit_mod_am(0, node);
3398 bemit8(0xDB); // fildl
3399 bemit_mod_am(0, node);
3403 bemit8(0xDF); // fildll
3404 bemit_mod_am(5, node);
3408 panic("invalid mode size");
/**
 * Emits a non-popping integer store from the x87 stack:
 * fists (DF /2) or fistl (DB /2), chosen by operand size.
 */
3412 static void bemit_fist(const ir_node *node)
3414 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3416 bemit8(0xDF); // fists
3420 bemit8(0xDB); // fistl
3424 panic("invalid mode size");
3426 bemit_mod_am(2, node);

/**
 * Emits a popping integer store from the x87 stack:
 * fistps (DF /3), fistpl (DB /3) or fistpll (DF /7), by operand size.
 */
3429 static void bemit_fistp(const ir_node *node)
3431 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3433 bemit8(0xDF); // fistps
3434 bemit_mod_am(3, node);
3438 bemit8(0xDB); // fistpl
3439 bemit_mod_am(3, node);
3443 bemit8(0xDF); // fistpll
3444 bemit_mod_am(7, node);
3448 panic("invalid mode size");

/**
 * Emits a floating-point load onto the x87 stack:
 * flds (D9 /0), fldl (DD /0) or fldt (DB /5), by operand size.
 */
3452 static void bemit_fld(const ir_node *node)
3454 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3456 bemit8(0xD9); // flds
3457 bemit_mod_am(0, node);
3461 bemit8(0xDD); // fldl
3462 bemit_mod_am(0, node);
3467 bemit8(0xDB); // fldt
3468 bemit_mod_am(5, node);
3472 panic("invalid mode size");
/** Emits fld1 (pushes 1.0; first opcode byte elided in this listing). */
3476 static void bemit_fld1(const ir_node *node)
3480 bemit8(0xE8); // fld1

/** Emits fldcw (load FPU control word from memory): D9 /5. */
3483 static void bemit_fldcw(const ir_node *node)
3485 bemit8(0xD9); // fldcw
3486 bemit_mod_am(5, node);

/** Emits fldz (pushes 0.0; first opcode byte elided in this listing). */
3489 static void bemit_fldz(const ir_node *node)
3493 bemit8(0xEE); // fldz

/** Emits fmul: direction codes 1/1. */
3496 static void bemit_fmul(const ir_node *node)
3498 bemit_fbinop(node, 1, 1);

/** Emits fmulp: base C0-offset 0xC8. */
3501 static void bemit_fmulp(const ir_node *node)
3503 bemit_fbinopp(node, 0xC8);

/** Emits an x87 pop of the given stack slot (0xD8 + slot; escape byte
 *  elided in this listing). */
3506 static void bemit_fpop(const ir_node *node)
3508 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3510 bemit8(0xD8 + attr->x87[0]->index);

/** Emits an x87 push (duplicate) of the given stack slot (0xC0 + slot). */
3513 static void bemit_fpush(const ir_node *node)
3515 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3517 bemit8(0xC0 + attr->x87[0]->index);

/** Emits a copying x87 push — same encoding as bemit_fpush. */
3520 static void bemit_fpushcopy(const ir_node *node)
3522 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3524 bemit8(0xC0 + attr->x87[0]->index);
/**
 * Emits a non-popping floating-point store:
 * fsts (D9 /2) or fstl (DD /2), by operand size.
 */
3527 static void bemit_fst(const ir_node *node)
3529 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3531 bemit8(0xD9); // fsts
3535 bemit8(0xDD); // fstl
3539 panic("invalid mode size");
3541 bemit_mod_am(2, node);

/**
 * Emits a popping floating-point store:
 * fstps (D9 /3), fstpl (DD /3) or fstpt (DB /7), by operand size.
 */
3544 static void bemit_fstp(const ir_node *node)
3546 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3548 bemit8(0xD9); // fstps
3549 bemit_mod_am(3, node);
3553 bemit8(0xDD); // fstpl
3554 bemit_mod_am(3, node);
3559 bemit8(0xDB); // fstpt
3560 bemit_mod_am(7, node);
3564 panic("invalid mode size");

/** Emits fsub: direction codes /4 and /5. */
3568 static void bemit_fsub(const ir_node *node)
3570 bemit_fbinop(node, 4, 5);

/** Emits fsubp: base C0-offset 0xE8. */
3573 static void bemit_fsubp(const ir_node *node)
3575 bemit_fbinopp(node, 0xE8);

/** Emits fsubr: direction codes /5 and /4 (mirror of fsub). */
3578 static void bemit_fsubr(const ir_node *node)
3580 bemit_fbinop(node, 5, 4);

/** Emits fsubrp: base C0-offset 0xE0. */
3583 static void bemit_fsubrp(const ir_node *node)
3585 bemit_fbinopp(node, 0xE0);
/** Emits fnstcw (store FPU control word to memory): D9 /7. */
3588 static void bemit_fnstcw(const ir_node *node)
3590 bemit8(0xD9); // fnstcw
3591 bemit_mod_am(7, node);

/** Emits fnstsw %ax (store FPU status word into AX; second byte elided). */
3594 static void bemit_fnstsw(void)
3596 bemit8(0xDF); // fnstsw %ax

/** Emits ftst followed by fnstsw %ax (compare st(0) against 0.0). */
3600 static void bemit_ftstfnstsw(const ir_node *node)
3604 bemit8(0xD9); // ftst

/** Emits fucomi st, st(i): DB E8+i — compare, result in EFLAGS. */
3609 static void bemit_fucomi(const ir_node *node)
3611 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3612 bemit8(0xDB); // fucomi
3613 bemit8(0xE8 + attr->x87[1]->index);

/** Emits fucomip st, st(i): DF E8+i — like fucomi but pops. */
3616 static void bemit_fucomip(const ir_node *node)
3618 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3619 bemit8(0xDF); // fucomip
3620 bemit8(0xE8 + attr->x87[1]->index);

/** Emits fucom st(i) (DD E0+i) followed by fnstsw (elided here). */
3623 static void bemit_fucomfnstsw(const ir_node *node)
3625 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3626 bemit8(0xDD); // fucom
3627 bemit8(0xE0 + attr->x87[1]->index);

/** Emits fucomp st(i) (DD E8+i) followed by fnstsw (elided here). */
3631 static void bemit_fucompfnstsw(const ir_node *node)
3633 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3634 bemit8(0xDD); // fucomp
3635 bemit8(0xE8 + attr->x87[1]->index);

/** Emits fucompp (compare and pop twice) followed by fnstsw (elided here). */
3639 static void bemit_fucomppfnstsw(const ir_node *node)
3643 bemit8(0xDA); // fucompp

/** Emits fxch st(i): C8 + slot index (escape byte elided here). */
3648 static void bemit_fxch(const ir_node *node)
3650 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node)
3652 bemit8(0xC8 + attr->x87[0]->index);
3656 * The type of a emitter function.
3658 typedef void (*emit_func) (const ir_node *);

/**
 * Sets a node emitter by storing it in the op's generic function
 * pointer. The wrapper exists only to make the cast explicit in one
 * place ("a bit more type safe").
 */
3661 * Set a node emitter. Make it a bit more type safe.
3663 static void register_emitter(ir_op *op, emit_func func)
3665 op->ops.generic = (op_func) func;
/**
 * Installs the binary (machine-code) emitter for every opcode this
 * backend can encounter: backend (be_*) nodes, ia32 integer and x87
 * nodes, and a set of nodes that deliberately emit nothing.
 */
3668 static void ia32_register_binary_emitters(void)
3670 /* first clear the generic function pointer for all ops */
3671 clear_irp_opcodes_generic_func();
3673 /* benode emitter */
3674 register_emitter(op_be_Copy, bemit_copy);
3675 register_emitter(op_be_CopyKeep, bemit_copy);
3676 register_emitter(op_be_IncSP, bemit_incsp);
3677 register_emitter(op_be_Perm, bemit_perm);
3678 register_emitter(op_be_Return, bemit_return);
/* ia32 integer nodes (alphabetical) */
3679 register_emitter(op_ia32_Adc, bemit_adc);
3680 register_emitter(op_ia32_Add, bemit_add);
3681 register_emitter(op_ia32_AddMem, bemit_addmem);
3682 register_emitter(op_ia32_AddMem8Bit, bemit_addmem8bit);
3683 register_emitter(op_ia32_And, bemit_and);
3684 register_emitter(op_ia32_AndMem, bemit_andmem);
3685 register_emitter(op_ia32_AndMem8Bit, bemit_andmem8bit);
3686 register_emitter(op_ia32_Breakpoint, bemit_int3);
3687 register_emitter(op_ia32_CMovcc, bemit_cmovcc);
3688 register_emitter(op_ia32_Call, bemit_call);
3689 register_emitter(op_ia32_Cltd, bemit_cltd);
3690 register_emitter(op_ia32_Cmc, bemit_cmc);
3691 register_emitter(op_ia32_Cmp, bemit_cmp);
3692 register_emitter(op_ia32_Cmp8Bit, bemit_cmp8bit);
3693 register_emitter(op_ia32_Const, bemit_mov_const);
3694 register_emitter(op_ia32_Conv_I2I, bemit_conv_i2i);
3695 register_emitter(op_ia32_Conv_I2I8Bit, bemit_conv_i2i);
3696 register_emitter(op_ia32_CopyB_i, bemit_copybi);
3697 register_emitter(op_ia32_Cwtl, bemit_cwtl);
3698 register_emitter(op_ia32_Dec, bemit_dec);
3699 register_emitter(op_ia32_DecMem, bemit_decmem);
3700 register_emitter(op_ia32_Div, bemit_div);
3701 register_emitter(op_ia32_FldCW, bemit_fldcw);
3702 register_emitter(op_ia32_FnstCW, bemit_fnstcw);
3703 register_emitter(op_ia32_FtstFnstsw, bemit_ftstfnstsw);
3704 register_emitter(op_ia32_FucomFnstsw, bemit_fucomfnstsw);
3705 register_emitter(op_ia32_Fucomi, bemit_fucomi);
3706 register_emitter(op_ia32_FucompFnstsw, bemit_fucompfnstsw);
3707 register_emitter(op_ia32_Fucompi, bemit_fucomip);
3708 register_emitter(op_ia32_FucomppFnstsw, bemit_fucomppfnstsw);
3709 register_emitter(op_ia32_IDiv, bemit_idiv);
3710 register_emitter(op_ia32_IJmp, bemit_ijmp);
3711 register_emitter(op_ia32_IMul, bemit_imul);
3712 register_emitter(op_ia32_IMul1OP, bemit_imul1op);
3713 register_emitter(op_ia32_Inc, bemit_inc);
3714 register_emitter(op_ia32_IncMem, bemit_incmem);
3715 register_emitter(op_ia32_Jcc, bemit_ia32_jcc);
3716 register_emitter(op_ia32_Jmp, bemit_jump);
3717 register_emitter(op_ia32_LdTls, bemit_ldtls);
3718 register_emitter(op_ia32_Lea, bemit_lea);
3719 register_emitter(op_ia32_Leave, bemit_leave);
3720 register_emitter(op_ia32_Load, bemit_load);
3721 register_emitter(op_ia32_Minus64Bit, bemit_minus64bit);
3722 register_emitter(op_ia32_Mul, bemit_mul);
3723 register_emitter(op_ia32_Neg, bemit_neg);
3724 register_emitter(op_ia32_NegMem, bemit_negmem);
3725 register_emitter(op_ia32_Not, bemit_not);
3726 register_emitter(op_ia32_NotMem, bemit_notmem);
3727 register_emitter(op_ia32_Or, bemit_or);
3728 register_emitter(op_ia32_OrMem, bemit_ormem);
3729 register_emitter(op_ia32_OrMem8Bit, bemit_ormem8bit);
3730 register_emitter(op_ia32_Pop, bemit_pop);
3731 register_emitter(op_ia32_PopEbp, bemit_pop);
3732 register_emitter(op_ia32_PopMem, bemit_popmem);
3733 register_emitter(op_ia32_Push, bemit_push);
3734 register_emitter(op_ia32_RepPrefix, bemit_rep);
3735 register_emitter(op_ia32_Rol, bemit_rol);
3736 register_emitter(op_ia32_RolMem, bemit_rolmem);
3737 register_emitter(op_ia32_Ror, bemit_ror);
3738 register_emitter(op_ia32_RorMem, bemit_rormem);
3739 register_emitter(op_ia32_Sahf, bemit_sahf);
3740 register_emitter(op_ia32_Sar, bemit_sar);
3741 register_emitter(op_ia32_SarMem, bemit_sarmem);
3742 register_emitter(op_ia32_Sbb, bemit_sbb);
3743 register_emitter(op_ia32_Setcc, bemit_setcc);
3744 register_emitter(op_ia32_Shl, bemit_shl);
3745 register_emitter(op_ia32_ShlD, bemit_shld);
3746 register_emitter(op_ia32_ShlMem, bemit_shlmem);
3747 register_emitter(op_ia32_Shr, bemit_shr);
3748 register_emitter(op_ia32_ShrD, bemit_shrd);
3749 register_emitter(op_ia32_ShrMem, bemit_shrmem);
3750 register_emitter(op_ia32_Stc, bemit_stc);
3751 register_emitter(op_ia32_Store, bemit_store);
3752 register_emitter(op_ia32_Store8Bit, bemit_store);
3753 register_emitter(op_ia32_Sub, bemit_sub);
3754 register_emitter(op_ia32_SubMem, bemit_submem);
3755 register_emitter(op_ia32_SubMem8Bit, bemit_submem8bit);
3756 register_emitter(op_ia32_SubSP, bemit_subsp);
3757 register_emitter(op_ia32_SwitchJmp, bemit_switchjmp);
3758 register_emitter(op_ia32_Test, bemit_test);
3759 register_emitter(op_ia32_Test8Bit, bemit_test8bit);
3760 register_emitter(op_ia32_Xor, bemit_xor);
3761 register_emitter(op_ia32_Xor0, bemit_xor0);
3762 register_emitter(op_ia32_XorMem, bemit_xormem);
3763 register_emitter(op_ia32_XorMem8Bit, bemit_xormem8bit);
/* ia32 x87 floating-point nodes */
3764 register_emitter(op_ia32_fabs, bemit_fabs);
3765 register_emitter(op_ia32_fadd, bemit_fadd);
3766 register_emitter(op_ia32_faddp, bemit_faddp);
3767 register_emitter(op_ia32_fchs, bemit_fchs);
3768 register_emitter(op_ia32_fdiv, bemit_fdiv);
3769 register_emitter(op_ia32_fdivp, bemit_fdivp);
3770 register_emitter(op_ia32_fdivr, bemit_fdivr);
3771 register_emitter(op_ia32_fdivrp, bemit_fdivrp);
3772 register_emitter(op_ia32_fild, bemit_fild);
3773 register_emitter(op_ia32_fist, bemit_fist);
3774 register_emitter(op_ia32_fistp, bemit_fistp);
3775 register_emitter(op_ia32_fld, bemit_fld);
3776 register_emitter(op_ia32_fld1, bemit_fld1);
3777 register_emitter(op_ia32_fldz, bemit_fldz);
3778 register_emitter(op_ia32_fmul, bemit_fmul);
3779 register_emitter(op_ia32_fmulp, bemit_fmulp);
3780 register_emitter(op_ia32_fpop, bemit_fpop);
3781 register_emitter(op_ia32_fpush, bemit_fpush);
3782 register_emitter(op_ia32_fpushCopy, bemit_fpushcopy);
3783 register_emitter(op_ia32_fst, bemit_fst);
3784 register_emitter(op_ia32_fstp, bemit_fstp);
3785 register_emitter(op_ia32_fsub, bemit_fsub);
3786 register_emitter(op_ia32_fsubp, bemit_fsubp);
3787 register_emitter(op_ia32_fsubr, bemit_fsubr);
3788 register_emitter(op_ia32_fsubrp, bemit_fsubrp);
3789 register_emitter(op_ia32_fxch, bemit_fxch);
3791 /* ignore the following nodes */
3792 register_emitter(op_ia32_ProduceVal, emit_Nothing);
3793 register_emitter(op_be_Keep, emit_Nothing);
3794 register_emitter(op_be_Start, emit_Nothing);
3795 register_emitter(op_Phi, emit_Nothing);
3796 register_emitter(op_Start, emit_Nothing);
/** Emits one block: its header (label/alignment) followed by every
 *  scheduled node in schedule order. */
3799 static void gen_binary_block(ir_node *block)
3803 ia32_emit_block_header(block);
3805 /* emit the contents of the block */
3806 sched_foreach(block, node) {
3807 ia32_emit_node(node);
/**
 * Driver for binary emission of one routine.
 * Registers the binary emitters, emits the function prolog, generates
 * labels for jump targets, records each block's schedule predecessor in
 * the IRN link (claimed via IR_RESOURCE_IRN_LINK and released at the
 * end), emits every block of the schedule, then the epilog.
 */
3811 void ia32_gen_binary_routine(ir_graph *irg)
3813 ir_entity *entity = get_irg_entity(irg);
3814 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
3815 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
3816 ir_node **blk_sched = irg_data->blk_sched;
3819 isa = (ia32_isa_t*) arch_env;
3821 ia32_register_binary_emitters();
3823 be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
3825 /* we use links to point to target blocks */
3826 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
3827 irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);
3829 /* initialize next block links */
3830 n = ARR_LEN(blk_sched);
3831 for (i = 0; i < n; ++i) {
3832 ir_node *block = blk_sched[i];
3833 ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
3835 set_irn_link(block, prev);
3838 for (i = 0; i < n; ++i) {
3839 ir_node *block = blk_sched[i];
3840 gen_binary_block(block);
3843 be_gas_emit_function_epilog(entity);
3845 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
3849 void ia32_init_emitter(void)
3851 lc_opt_entry_t *be_grp;
3852 lc_opt_entry_t *ia32_grp;
3854 be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
3855 ia32_grp = lc_opt_get_grp(be_grp, "ia32");
3857 lc_opt_add_table(ia32_grp, ia32_emitter_options);
3861 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.emitter");