2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the ia32 node emitter.
23 * @author Christian Wuerdig, Matthias Braun
25 * Summary table for x86 floatingpoint compares:
26 * (remember effect of unordered on x86: ZF=1, PF=1, CF=1)
34 * pnc_Leg => NP (ordered)
56 #include "iredges_t.h"
60 #include "raw_bitset.h"
69 #include "beemitter.h"
73 #include "ia32_emitter.h"
74 #include "ia32_common_transform.h"
75 #include "gen_ia32_emitter.h"
76 #include "gen_ia32_regalloc_if.h"
77 #include "ia32_nodes_attr.h"
78 #include "ia32_new_nodes.h"
79 #include "ia32_architecture.h"
80 #include "bearch_ia32_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* ISA description used during emission -- presumably set during emitter
 * setup (setup code not visible in this fragment). */
static const ia32_isa_t *isa;
/* name of the PIC base label; emitted and referenced by emit_ia32_GetEIP */
static char pic_base_label[128];
/* id source for exception labels -- presumably consumed where exception
 * labels are created; confirm against the full file */
static ir_label_t exc_label_id;
/* when non-zero, spill/reload instructions are marked in the output */
static int mark_spill_reload = 0;

/* NOTE(review): frame-layout state; assumed to be initialized before code
 * emission starts -- confirm against the (not visible) setup code. */
static bool sp_relative;
static int frame_type_size;
static int callframe_offset;
94 /** Return the next block in Block schedule */
95 static ir_node *get_prev_block_sched(const ir_node *block)
97 return (ir_node*)get_irn_link(block);
/** Checks if the current block is a fall-through target. */
static int is_fallthrough(const ir_node *cfgpred)
	/* non-Proj predecessors are filtered out first (result line not
	 * visible in this fragment) */
	if (!is_Proj(cfgpred))
	pred = get_Proj_pred(cfgpred);
	/* SwitchJmp predecessors are treated specially -- NOTE(review): the
	 * returned value for this case is not visible here */
	if (is_ia32_SwitchJmp(pred))
/**
 * returns non-zero if the given block needs a label
 * because of being a jump-target (and not a fall-through)
 */
static int block_needs_label(const ir_node *block)
	int n_cfgpreds = get_Block_n_cfgpreds(block);

	/* blocks carrying an entity are handled first (result line not visible) */
	if (get_Block_entity(block) != NULL)

	if (n_cfgpreds == 0) {
	} else if (n_cfgpreds == 1) {
		ir_node *cfgpred = get_Block_cfgpred(block, 0);
		ir_node *cfgpred_block = get_nodes_block(cfgpred);

		/* a single predecessor scheduled directly before this block that
		 * can fall through makes a label unnecessary */
		if (get_prev_block_sched(block) == cfgpred_block
		    && is_fallthrough(cfgpred)) {
/**
 * Add a number to a prefix. This number will not be used a second time.
 *
 * @param buf     buffer the label name is written into
 * @param buflen  size of @p buf in bytes (output is truncated to fit)
 * @param prefix  string placed between the private prefix and the id
 * @return @p buf -- restored: the fragment never returned a value although
 *         the function is declared char*, which is undefined behavior for
 *         any caller using the result
 */
static char *get_unique_label(char *buf, size_t buflen, const char *prefix)
{
	static unsigned long id = 0;
	snprintf(buf, buflen, "%s%s%lu", be_gas_get_private_prefix(), prefix, ++id);
	return buf;
}
/**
 * Emit the name of the 8bit low register
 */
static void emit_8bit_register(const arch_register_t *reg)
	const char *reg_name = arch_register_get_name(reg);
	/* only eax/ebx/ecx/edx have an addressable 8-bit part */
	assert(reg->index == REG_GP_EAX || reg->index == REG_GP_EBX
	       || reg->index == REG_GP_ECX || reg->index == REG_GP_EDX);

	/* NOTE(review): the '%' prefix and low-byte suffix emission lines are
	 * not visible in this fragment */
	be_emit_char(reg_name[1]); /* get the basic name of the register */
/**
 * Emit the name of the 8bit high register
 */
static void emit_8bit_register_high(const arch_register_t *reg)
	const char *reg_name = arch_register_get_name(reg);
	/* only eax/ebx/ecx/edx have an addressable 8-bit high part */
	assert(reg->index == REG_GP_EAX || reg->index == REG_GP_EBX
	       || reg->index == REG_GP_ECX || reg->index == REG_GP_EDX);

	/* NOTE(review): the '%' prefix and high-byte suffix emission lines are
	 * not visible in this fragment */
	be_emit_char(reg_name[1]); /* get the basic name of the register */
/** Emit the 16-bit name of a GP register (e.g. "ax" for "eax"). */
static void emit_16bit_register(const arch_register_t *reg)
	const char *reg_name = arch_register_get_name(reg);

	be_emit_string(reg_name+1); /* skip the 'e' prefix of the 32bit names */
/**
 * emit a register, possible shortened by a mode
 *
 * @param reg  the register
 * @param mode the mode of the register or NULL for full register
 */
static void emit_register(const arch_register_t *reg, const ir_mode *mode)
	const char *reg_name;

	/* a mode narrows the emitted name down to the 8/16-bit variant */
	int size = get_mode_size_bits(mode);
	case 8:  emit_8bit_register(reg);  return;
	case 16: emit_16bit_register(reg); return;

	/* anything else must be a float register or the full 32-bit width */
	assert(mode_is_float(mode) || size == 32);

	reg_name = arch_register_get_name(reg);

	be_emit_string(reg_name);
212 void ia32_emit_source_register(const ir_node *node, int pos)
214 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
216 emit_register(reg, NULL);
/** Emits an entity name plus any TLS/PIC relocation suffix it needs. */
static void ia32_emit_entity(ir_entity *entity, int no_pic_adjust)
	be_gas_emit_entity(entity);

	/* thread-local entities need a TLS relocation annotation */
	if (get_entity_owner(entity) == get_tls_type()) {
		if (!entity_has_definition(entity)) {
			/* externally defined TLS entity */
			be_emit_cstring("@INDNTPOFF");
			/* locally defined TLS entity (else path; keyword not visible) */
			be_emit_cstring("@NTPOFF");

	/* PIC mode: address relative to the PIC base label */
	if (do_pic && !no_pic_adjust) {
		be_emit_string(pic_base_label);
/** Emits an immediate value (symconst and/or offset) without the '$' prefix. */
static void emit_ia32_Immediate_no_prefix(const ir_node *node)
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);

	/* the symbolic part comes first ... */
	if (attr->symconst != NULL) {
		ia32_emit_entity(attr->symconst, attr->no_pic_adjust);
	/* ... then the numeric offset: signed relative to a symconst,
	 * plain hex otherwise */
	if (attr->symconst == NULL || attr->offset != 0) {
		if (attr->symconst != NULL) {
			be_emit_irprintf("%+d", attr->offset);
			be_emit_irprintf("0x%X", attr->offset);
/** Emits an immediate operand. */
static void emit_ia32_Immediate(const ir_node *node)
	/* NOTE(review): the '$' prefix emission line is not visible here */
	emit_ia32_Immediate_no_prefix(node);
261 void ia32_emit_8bit_source_register_or_immediate(const ir_node *node, int pos)
263 const arch_register_t *reg;
264 const ir_node *in = get_irn_n(node, pos);
265 if (is_ia32_Immediate(in)) {
266 emit_ia32_Immediate(in);
270 reg = arch_get_irn_register_in(node, pos);
271 emit_8bit_register(reg);
274 void ia32_emit_8bit_high_source_register(const ir_node *node, int pos)
276 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
277 emit_8bit_register_high(reg);
280 void ia32_emit_16bit_source_register_or_immediate(const ir_node *node, int pos)
282 const arch_register_t *reg;
283 const ir_node *in = get_irn_n(node, pos);
284 if (is_ia32_Immediate(in)) {
285 emit_ia32_Immediate(in);
289 reg = arch_get_irn_register_in(node, pos);
290 emit_16bit_register(reg);
293 void ia32_emit_dest_register(const ir_node *node, int pos)
295 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
297 emit_register(reg, NULL);
300 void ia32_emit_dest_register_size(const ir_node *node, int pos)
302 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
304 emit_register(reg, get_ia32_ls_mode(node));
307 void ia32_emit_8bit_dest_register(const ir_node *node, int pos)
309 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
311 emit_register(reg, mode_Bu);
/** Emits the name of the x87 register assigned to slot @p pos of the node. */
void ia32_emit_x87_register(const ir_node *node, int pos)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);

	/* NOTE(review): a bounds assert and the '%' prefix emission are
	 * presumably on lines not visible in this fragment */
	be_emit_string(attr->x87[pos]->name);
/** Emits the gas operand-size suffix (b/w/l/ll) for an integer mode. */
static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
	assert(mode_is_int(mode) || mode_is_reference(mode));
	switch (get_mode_size_bits(mode)) {
	case 8: be_emit_char('b'); return;
	case 16: be_emit_char('w'); return;
	case 32: be_emit_char('l'); return;
	/* gas docu says q is the suffix but gcc, objdump and icc use ll
	 * so we do the same */
	case 64: be_emit_cstring("ll"); return;

	panic("Can't output mode_suffix for %+F", mode);
/** Emits the size suffix for the node's load/store mode. */
void ia32_emit_mode_suffix(const ir_node *node)
	ir_mode *mode = get_ia32_ls_mode(node);
	/* NOTE(review): a guard (e.g. for NULL mode) is presumably on the
	 * lines missing between these two statements */
	ia32_emit_mode_suffix_mode(mode);
/** Emits the x87 operand-size suffix; only needed for address-mode operands. */
void ia32_emit_x87_mode_suffix(const ir_node *node)
	/* we only need to emit the mode on address mode */
	if (get_ia32_op_type(node) == ia32_Normal)

	mode = get_ia32_ls_mode(node);
	assert(mode != NULL);

	if (mode_is_float(mode)) {
		switch (get_mode_size_bits(mode)) {
		case 32: be_emit_char('s'); return;
		case 64: be_emit_char('l'); return;
		/* long doubles have different sizes due to alignment on different
		 * platforms (comment continuation missing in this fragment) */
		case 128: be_emit_char('t'); return;

	assert(mode_is_int(mode) || mode_is_reference(mode));
	switch (get_mode_size_bits(mode)) {
	case 16: be_emit_char('s'); return;
	case 32: be_emit_char('l'); return;
	/* gas docu says q is the suffix but gcc, objdump and icc use ll
	 * so we do the same */
	case 64: be_emit_cstring("ll"); return;

	panic("Can't output mode_suffix for %+F", mode);
/** Returns the SSE suffix character for a float mode. */
static char get_xmm_mode_suffix(ir_mode *mode)
	assert(mode_is_float(mode));
	switch (get_mode_size_bits(mode)) {
	/* NOTE(review): the 32-bit ('s') and 64-bit ('d') cases are on lines
	 * not visible in this fragment */
	default: panic("Invalid XMM mode");
390 void ia32_emit_xmm_mode_suffix(const ir_node *node)
392 ir_mode *mode = get_ia32_ls_mode(node);
393 assert(mode != NULL);
395 be_emit_char(get_xmm_mode_suffix(mode));
398 void ia32_emit_xmm_mode_suffix_s(const ir_node *node)
400 ir_mode *mode = get_ia32_ls_mode(node);
401 assert(mode != NULL);
402 be_emit_char(get_xmm_mode_suffix(mode));
405 void ia32_emit_extend_suffix(const ir_node *node)
407 ir_mode *mode = get_ia32_ls_mode(node);
408 if (get_mode_size_bits(mode) == 32)
410 be_emit_char(mode_is_signed(mode) ? 's' : 'z');
411 ia32_emit_mode_suffix_mode(mode);
414 void ia32_emit_source_register_or_immediate(const ir_node *node, int pos)
416 ir_node *in = get_irn_n(node, pos);
417 if (is_ia32_Immediate(in)) {
418 emit_ia32_Immediate(in);
420 const ir_mode *mode = get_ia32_ls_mode(node);
421 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
422 emit_register(reg, mode);
427 * Returns the target block for a control flow node.
429 static ir_node *get_cfop_target_block(const ir_node *irn)
431 assert(get_irn_mode(irn) == mode_X);
432 return (ir_node*)get_irn_link(irn);
436 * Emits the target label for a control flow node.
438 static void ia32_emit_cfop_target(const ir_node *node)
440 ir_node *block = get_cfop_target_block(node);
441 be_gas_emit_block_name(block);
/**
 * Emit the suffix for a compare instruction.
 */
static void ia32_emit_condition_code(ia32_condition_code_t cc)
	case ia32_cc_overflow:      be_emit_cstring("o");  return;
	case ia32_cc_not_overflow:  be_emit_cstring("no"); return;
	/* float cases share the unsigned suffixes, since unordered compares
	 * set CF (see the fp-compare summary at the top of this file) */
	case ia32_cc_float_below:
	case ia32_cc_float_unordered_below:
	case ia32_cc_below:         be_emit_cstring("b");  return;
	case ia32_cc_float_above_equal:
	case ia32_cc_float_unordered_above_equal:
	case ia32_cc_above_equal:   be_emit_cstring("ae"); return;
	case ia32_cc_float_equal:
	case ia32_cc_equal:         be_emit_cstring("e");  return;
	case ia32_cc_float_not_equal:
	case ia32_cc_not_equal:     be_emit_cstring("ne"); return;
	case ia32_cc_float_below_equal:
	case ia32_cc_float_unordered_below_equal:
	case ia32_cc_below_equal:   be_emit_cstring("be"); return;
	case ia32_cc_float_above:
	case ia32_cc_float_unordered_above:
	case ia32_cc_above:         be_emit_cstring("a");  return;
	case ia32_cc_sign:          be_emit_cstring("s");  return;
	case ia32_cc_not_sign:      be_emit_cstring("ns"); return;
	case ia32_cc_parity:        be_emit_cstring("p");  return;
	case ia32_cc_not_parity:    be_emit_cstring("np"); return;
	case ia32_cc_less:          be_emit_cstring("l");  return;
	case ia32_cc_greater_equal: be_emit_cstring("ge"); return;
	case ia32_cc_less_equal:    be_emit_cstring("le"); return;
	case ia32_cc_greater:       be_emit_cstring("g");  return;
	/* pseudo codes that must be lowered before emission */
	case ia32_cc_float_parity_cases:
	case ia32_cc_additional_float_cases:

	panic("Invalid ia32 condition code");
/* Modifier flags parsed from ia32_emitf() format directives
 * (see the format table further below). */
typedef enum ia32_emit_mod_t {
	EMIT_RESPECT_LS   = 1U << 0, /* '#': narrow register by the node's ls mode */
	EMIT_ALTERNATE_AM = 1U << 1, /* '*': '*' on AM, no '$' on immediates */
	/* NOTE(review): EMIT_NONE and the 1U << 2 flag ('l' modifier) are on
	 * lines not visible in this fragment */
	EMIT_HIGH_REG     = 1U << 3, /* '>': high 8-bit register (ah, bh) */
	EMIT_LOW_REG      = 1U << 4  /* '<': low 8-bit register (al, bl) */
ENUM_BITSET(ia32_emit_mod_t)
/**
 * Emits address mode.
 */
void ia32_emit_am(const ir_node *node)
	ir_entity *ent       = get_ia32_am_sc(node);
	int        offs      = get_ia32_am_offs_int(node);
	ir_node   *base      = get_irn_n(node, n_ia32_base);
	int        has_base  = !is_ia32_NoReg_GP(base);
	ir_node   *idx       = get_irn_n(node, n_ia32_index);
	int        has_index = !is_ia32_NoReg_GP(idx);

	/* just to be sure... */
	assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);

	/* thread-local accesses go through the %gs segment */
	if (get_ia32_am_tls_segment(node))
		be_emit_cstring("%gs:");

	/* symbolic displacement */
		const ia32_attr_t *attr = get_ia32_attr_const(node);
		if (is_ia32_am_sc_sign(node))
		ia32_emit_entity(ent, attr->data.am_sc_no_pic_adjust);

	/* also handle special case if nothing is set */
	if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
		be_emit_irprintf("%+d", offs);
		be_emit_irprintf("%d", offs);

	if (has_base || has_index) {
		const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_base);
		emit_register(reg, NULL);

	/* emit index + scale */
		const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_index);
		emit_register(reg, NULL);

		scale = get_ia32_am_scale(node);
		be_emit_irprintf(",%d", 1 << scale);
554 * fmt parameter output
555 * ---- ---------------------- ---------------------------------------------
557 * %AM <node> address mode of the node
558 * %AR const arch_register_t* address mode of the node or register
559 * %ASx <node> address mode of the node or source register x
560 * %Dx <node> destination register x
561 * %I <node> immediate of the node
562 * %L <node> control flow target of the node
563 * %M <node> mode suffix of the node
564 * %P int condition code
565 * %R const arch_register_t* register
566 * %Sx <node> source register x
567 * %s const char* string
568 * %u unsigned int unsigned int
569 * %d signed int signed int
572 * # modifier for %ASx, %D, %R, and %S uses ls mode of node to alter register width
573 * * modifier does not prefix immediates with $, but AM with *
574 * l modifier for %lu and %ld
575 * > modifier to output high 8bit register (ah, bh)
576 * < modifier to output low 8bit register (al, bl)
/**
 * Central printf-like instruction emitter; directives are documented in the
 * format table above. NOTE(review): most control-structure lines of this
 * function are not visible in this fragment -- only the case bodies are.
 */
static void ia32_emitf(const ir_node *node, const char *fmt, ...)
		/* copy plain text up to the next '%', '\n' or end of string */
		const char *start = fmt;
		ia32_emit_mod_t mod = EMIT_NONE;

		while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
		be_emit_string_len(start, fmt - start);
			be_emit_finish_line_gas(node);

		/* modifier characters preceding the directive letter */
			case '*': mod |= EMIT_ALTERNATE_AM; break;
			case '#': mod |= EMIT_RESPECT_LS; break;
			case 'l': mod |= EMIT_LONG; break;
			case '>': mod |= EMIT_HIGH_REG; break;
			case '<': mod |= EMIT_LOW_REG; break;

		/* 'A...': address mode of the node (or register/source fallback) */
			arch_register_t const *reg;
			if (mod & EMIT_ALTERNATE_AM)
			reg = va_arg(ap, const arch_register_t*);
			if (get_ia32_op_type(node) == ia32_AddrModeS) {
			if (get_ia32_op_type(node) == ia32_AddrModeS) {
			assert(get_ia32_op_type(node) == ia32_Normal);
			default: goto unknown;

		/* 'Dx': destination register x */
			if (*fmt < '0' || '9' <= *fmt)
			reg = arch_get_irn_register_out(node, *fmt++ - '0');

		/* 'I': immediate of the node */
			if (!(mod & EMIT_ALTERNATE_AM))
			emit_ia32_Immediate_no_prefix(imm);

		/* 'L': control flow target; 'M': mode suffix */
			ia32_emit_cfop_target(node);
			ia32_emit_mode_suffix_mode(get_ia32_ls_mode(node));

		/* 'P': condition code */
			ia32_condition_code_t cc = va_arg(ap, ia32_condition_code_t);
			ia32_emit_condition_code(cc);

		/* 'R': explicit register argument */
			reg = va_arg(ap, const arch_register_t*);
			if (mod & EMIT_ALTERNATE_AM)
			if (mod & EMIT_HIGH_REG) {
				emit_8bit_register_high(reg);
			} else if (mod & EMIT_LOW_REG) {
				emit_8bit_register(reg);
			emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);

		/* 'Sx': source register or immediate input x */
			if (*fmt < '0' || '9' <= *fmt)
			imm = get_irn_n(node, pos);
			if (is_ia32_Immediate(imm)) {
			reg = arch_get_irn_register_in(node, pos);

		/* 's', 'u'/'lu', 'd'/'ld': plain printf-like arguments */
			const char *str = va_arg(ap, const char*);

			if (mod & EMIT_LONG) {
				unsigned long num = va_arg(ap, unsigned long);
				be_emit_irprintf("%lu", num);
				unsigned num = va_arg(ap, unsigned);
				be_emit_irprintf("%u", num);

			if (mod & EMIT_LONG) {
				long num = va_arg(ap, long);
				be_emit_irprintf("%ld", num);
				int num = va_arg(ap, int);
				be_emit_irprintf("%d", num);

	panic("unknown format conversion in ia32_emitf()");
753 * Emits registers and/or address mode of a binary operation.
755 void ia32_emit_binop(const ir_node *node)
757 if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
758 ia32_emitf(node, "%#S4, %#AS3");
760 ia32_emitf(node, "%#AS4, %#S3");
/**
 * Emits registers and/or address mode of a binary operation.
 */
void ia32_emit_x87_binop(const ir_node *node)
	switch (get_ia32_op_type(node)) {
		/* normal mode: operands are x87 stack slots from the attribute */
		const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
		const arch_register_t *in1      = x87_attr->x87[0];
		const arch_register_t *in       = x87_attr->x87[1];
		const arch_register_t *out      = x87_attr->x87[2];

		/* NOTE(review): the surrounding if/else emitting the short forms
		 * is on lines not visible in this fragment */
		} else if (out == in) {

		be_emit_string(arch_register_get_name(in));
		be_emit_cstring(", %");
		be_emit_string(arch_register_get_name(out));

	assert(0 && "unsupported op type");
/**
 * Emits registers and/or address mode of a unary operation.
 */
void ia32_emit_unop(const ir_node *node, int pos)
	/* NOTE(review): fmt is presumably a small local format buffer built
	 * from @p pos; its construction lines are not visible here */
	ia32_emitf(node, fmt);
808 static void emit_ia32_IMul(const ir_node *node)
810 ir_node *left = get_irn_n(node, n_ia32_IMul_left);
811 const arch_register_t *out_reg = arch_get_irn_register_out(node, pn_ia32_IMul_res);
813 /* do we need the 3-address form? */
814 if (is_ia32_NoReg_GP(left) ||
815 arch_get_irn_register_in(node, n_ia32_IMul_left) != out_reg) {
816 ia32_emitf(node, "\timul%M %#S4, %#AS3, %#D0\n");
818 ia32_emitf(node, "\timul%M %#AS4, %#S3\n");
/**
 * walks up a tree of copies/perms/spills/reloads to find the original value
 * that is moved around
 */
static ir_node *find_original_value(ir_node *node)
	/* visited marking guards against cycles (e.g. through Phis) */
	if (irn_visited(node))

	mark_irn_visited(node);
	if (be_is_Copy(node)) {
		return find_original_value(be_get_Copy_op(node));
	} else if (be_is_CopyKeep(node)) {
		return find_original_value(be_get_CopyKeep_op(node));
	} else if (is_Proj(node)) {
		ir_node *pred = get_Proj_pred(node);
		if (be_is_Perm(pred)) {
			/* a Perm permutes its inputs: follow the input selected by
			 * this Proj's number */
			return find_original_value(get_irn_n(pred, get_Proj_proj(node)));
		} else if (be_is_MemPerm(pred)) {
			/* NOTE(review): the +1 presumably skips a leading non-value
			 * input of MemPerm -- confirm against MemPerm's input layout */
			return find_original_value(get_irn_n(pred, get_Proj_proj(node) + 1));
		} else if (is_ia32_Load(pred)) {
			/* reload: continue the search along the memory chain */
			return find_original_value(get_irn_n(pred, n_ia32_Load_mem));
		} else if (is_ia32_Store(pred)) {
			/* spill: continue at the stored value */
			return find_original_value(get_irn_n(pred, n_ia32_Store_val));

	} else if (is_Phi(node)) {
		/* try all Phi operands */
		arity = get_irn_arity(node);
		for (i = 0; i < arity; ++i) {
			ir_node *in  = get_irn_n(node, i);
			ir_node *res = find_original_value(in);
/**
 * Determines the condition code to actually emit for @p node, taking
 * permuted compare operands into account.
 */
static int determine_final_cc(const ir_node *node, int flags_pos, int cc)
	ir_node           *flags = get_irn_n(node, flags_pos);
	const ia32_attr_t *flags_attr;
	flags = skip_Proj(flags);

	if (is_ia32_Sahf(flags)) {
		/* flags come from a sahf: locate the x87 compare that produced the
		 * fnstsw value (possibly hidden behind copies/spills) */
		ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
		if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
				|| is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
			inc_irg_visited(current_ir_graph);
			cmp = find_original_value(cmp);
			assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
			       || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));

		flags_attr = get_ia32_attr_const(cmp);
		/* else path (keyword not visible): take the attr from flags itself */
		flags_attr = get_ia32_attr_const(flags);

	/* a compare with permuted operands inverts the condition */
	if (flags_attr->data.ins_permuted)
		cc = ia32_invert_condition_code(cc);
892 void ia32_emit_cmp_suffix_node(const ir_node *node, int flags_pos)
894 ia32_condition_code_t cc = get_ia32_condcode(node);
895 cc = determine_final_cc(node, flags_pos, cc);
897 ia32_emit_condition_code(cc);
/**
 * Emits an exception label for a given node.
 */
static void ia32_emit_exc_label(const ir_node *node)
	be_emit_string(be_gas_insn_label_prefix());
	be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
/**
 * Returns the Proj with projection number proj and NOT mode_M
 */
static ir_node *get_proj(const ir_node *node, long proj)
	const ir_edge_t *edge;

	assert(get_irn_mode(node) == mode_T && "expected mode_T node");

	foreach_out_edge(node, edge) {
		src = get_edge_src_irn(edge);

		assert(is_Proj(src) && "Proj expected");
		/* memory Projs are skipped */
		if (get_irn_mode(src) == mode_M)

		if (get_Proj_proj(src) == proj)
932 static int can_be_fallthrough(const ir_node *node)
934 ir_node *target_block = get_cfop_target_block(node);
935 ir_node *block = get_nodes_block(node);
936 return get_prev_block_sched(target_block) == block;
/**
 * Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
 */
static void emit_ia32_Jcc(const ir_node *node)
	int need_parity_label = 0;
	ia32_condition_code_t cc = get_ia32_condcode(node);
	const ir_node *proj_true;
	const ir_node *proj_false;

	cc = determine_final_cc(node, 0, cc);

	/* find out which Proj is the true one */
	proj_true = get_proj(node, pn_ia32_Jcc_true);
	assert(proj_true && "Jcc without true Proj");

	proj_false = get_proj(node, pn_ia32_Jcc_false);
	assert(proj_false && "Jcc without false Proj");

	if (can_be_fallthrough(proj_true)) {
		/* exchange both proj's so the second one can be omitted */
		const ir_node *t = proj_true;

		proj_true = proj_false;
		/* swapping the targets means negating the condition */
		cc = ia32_negate_condition_code(cc);

	if (cc & ia32_cc_float_parity_cases) {
		/* Some floating point comparisons require a test of the parity flag,
		 * which indicates that the result is unordered */
		if (cc & ia32_cc_negated) {
			ia32_emitf(proj_true, "\tjp %L\n");

			/* we need a local label if the false proj is a fallthrough
			 * as the falseblock might have no label emitted then */
			if (can_be_fallthrough(proj_false)) {
				need_parity_label = 1;
				ia32_emitf(proj_false, "\tjp 1f\n");
				ia32_emitf(proj_false, "\tjp %L\n");

	ia32_emitf(proj_true, "\tj%P %L\n", cc);
	if (need_parity_label) {
		ia32_emitf(NULL, "1:\n");

	/* the second Proj might be a fallthrough */
	if (can_be_fallthrough(proj_false)) {
		if (be_options.verbose_asm)
			ia32_emitf(proj_false, "\t/* fallthrough to %L */\n");
		ia32_emitf(proj_false, "\tjmp %L\n");
/**
 * Emits an ia32 Setcc. This is mostly easy but some floating point compares
 * additionally need the parity flag merged in (unordered results).
 */
static void emit_ia32_Setcc(const ir_node *node)
	const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);

	ia32_condition_code_t cc = get_ia32_condcode(node);
	cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
	if (cc & ia32_cc_float_parity_cases) {
		if (cc & ia32_cc_negated) {
			/* unordered must count as true: OR in the parity flag */
			ia32_emitf(node, "\tset%P %<R\n", cc, dreg);
			ia32_emitf(node, "\tsetp %>R\n", dreg);
			ia32_emitf(node, "\torb %>R, %<R\n", dreg, dreg);
			/* unordered must count as false: AND with inverted parity */
			ia32_emitf(node, "\tset%P %<R\n", cc, dreg);
			ia32_emitf(node, "\tsetnp %>R\n", dreg);
			ia32_emitf(node, "\tandb %>R, %<R\n", dreg, dreg);

	/* the easy case: a single setCC */
	ia32_emitf(node, "\tset%P %#R\n", cc, dreg);
/** Emits a conditional move, fixing up register aliasing between the inputs
 * and the output first. */
static void emit_ia32_CMovcc(const ir_node *node)
	const ia32_attr_t     *attr = get_ia32_attr_const(node);
	const arch_register_t *out  = arch_get_irn_register_out(node, pn_ia32_res);
	ia32_condition_code_t  cc   = get_ia32_condcode(node);
	const arch_register_t *in_true;
	const arch_register_t *in_false;

	cc = determine_final_cc(node, n_ia32_CMovcc_eflags, cc);

	/* although you can't set ins_permuted in the constructor it might still
	 * be set by memory operand folding
	 * Permuting inputs of a cmov means the condition is negated!
	 */
	if (attr->data.ins_permuted)
		cc = ia32_negate_condition_code(cc);

	in_true  = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
	in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));

	/* should be same constraint fullfilled? */
	if (out == in_false) {
		/* yes -> nothing to do */
	} else if (out == in_true) {
		const arch_register_t *tmp;

		/* swap roles: move the false value in instead and negate */
		assert(get_ia32_op_type(node) == ia32_Normal);

		cc = ia32_negate_condition_code(cc);

		/* out is neither input: load the false value first */
		ia32_emitf(node, "\tmovl %R, %R\n", in_false, out);

	if (cc & ia32_cc_float_parity_cases) {
		panic("CMov with floatingpoint compare/parity not supported yet");

	ia32_emitf(node, "\tcmov%P %#AR, %#R\n", cc, in_true, out);
1067 * Emits code for a SwitchJmp
1069 static void emit_ia32_SwitchJmp(const ir_node *node)
1071 ir_entity *jump_table = get_ia32_am_sc(node);
1072 const ir_switch_table *table = get_ia32_switch_table(node);
1074 ia32_emitf(node, "\tjmp %*AM\n");
1075 be_emit_jump_table(node, table, jump_table, get_cfop_target_block);
1079 * Emits code for a unconditional jump.
1081 static void emit_ia32_Jmp(const ir_node *node)
1083 /* we have a block schedule */
1084 if (can_be_fallthrough(node)) {
1085 if (be_options.verbose_asm)
1086 ia32_emitf(node, "\t/* fallthrough to %L */\n");
1088 ia32_emitf(node, "\tjmp %L\n");
/**
 * Emit an inline assembler operand.
 *
 * @param node the ia32_ASM node
 * @param s    points to the operand (a %c)
 *
 * @return pointer to the first char in s NOT in the current operand
 */
static const char* emit_asm_operand(const ir_node *node, const char *s)
	const ia32_attr_t     *ia32_attr = get_ia32_attr_const(node);
	const ia32_asm_attr_t *attr      = CONST_CAST_IA32_ATTR(ia32_asm_attr_t,

	const arch_register_t *reg;
	const ia32_asm_reg_t  *asm_regs = attr->register_map;
	const ia32_asm_reg_t  *asm_reg;

	/* parse modifiers */
		ir_fprintf(stderr, "Warning: asm text (%+F) ends with %%\n", node);
			"Warning: asm text (%+F) contains unknown modifier '%c' for asm op\n",

	/* parse the operand number */
	if (sscanf(s, "%d%n", &num, &p) != 1) {
		ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",

	if (num < 0 || ARR_LEN(asm_regs) <= (size_t)num) {
			"Error: Custom assembler references invalid input/output (%+F)\n",

	asm_reg = & asm_regs[num];
	assert(asm_reg->valid);

	/* get register */
	if (asm_reg->use_input == 0) {
		reg = arch_get_irn_register_out(node, asm_reg->inout_pos);
		ir_node *pred = get_irn_n(node, asm_reg->inout_pos);

		/* might be an immediate value */
		if (is_ia32_Immediate(pred)) {
			emit_ia32_Immediate(pred);
		reg = arch_get_irn_register_in(node, asm_reg->inout_pos);

			"Warning: no register assigned for %d asm op (%+F)\n",

	if (asm_reg->memory) {

	/* size-modified register names for the b/h/w modifiers */
	if (modifier != 0) {
			emit_8bit_register(reg);
			emit_8bit_register_high(reg);
			emit_16bit_register(reg);
		panic("Invalid asm op modifier");

	emit_register(reg, asm_reg->memory ? mode_Iu : asm_reg->mode);

	if (asm_reg->memory) {
/**
 * Emits code for an ASM pseudo op.
 */
static void emit_ia32_Asm(const ir_node *node)
	const void *gen_attr = get_irn_generic_attr_const(node);
	const ia32_asm_attr_t *attr
		= CONST_CAST_IA32_ATTR(ia32_asm_attr_t, gen_attr);
	ident *asm_text = attr->asm_text;
	const char *s = get_id_str(asm_text);

	ia32_emitf(node, "#APP\t\n");

	/* walk the asm text, expanding %-operands (loop structure not fully
	 * visible in this fragment) */
			s = emit_asm_operand(node, s);

	ia32_emitf(NULL, "\n#NO_APP\n");
/**
 * Emit movsb/w instructions to make mov count divideable by 4
 */
static void emit_CopyB_prolog(unsigned size)
	/* NOTE(review): the selection between the two (presumably on the low
	 * bits of size) is on lines not visible in this fragment */
	ia32_emitf(NULL, "\tmovsb\n");
	ia32_emitf(NULL, "\tmovsw\n");
1258 * Emit rep movsd instruction for memcopy.
1260 static void emit_ia32_CopyB(const ir_node *node)
1262 unsigned size = get_ia32_copyb_size(node);
1264 emit_CopyB_prolog(size);
1265 ia32_emitf(node, "\trep movsd\n");
/**
 * Emits unrolled memcopy.
 */
static void emit_ia32_CopyB_i(const ir_node *node)
	unsigned size = get_ia32_copyb_size(node);

	emit_CopyB_prolog(size);

	/* NOTE(review): the loop repeating this movsd for the remaining dwords
	 * is on lines not visible in this fragment */
	ia32_emitf(NULL, "\tmovsd\n");
1285 * Emit code for conversions (I, FP), (FP, I) and (FP, FP).
1287 static void emit_ia32_Conv_with_FP(const ir_node *node, const char* conv_f,
1290 ir_mode *ls_mode = get_ia32_ls_mode(node);
1291 int ls_bits = get_mode_size_bits(ls_mode);
1292 const char *conv = ls_bits == 32 ? conv_f : conv_d;
1294 ia32_emitf(node, "\tcvt%s %AS3, %D0\n", conv);
/** int -> float conversion (cvtsi2ss / cvtsi2sd). */
static void emit_ia32_Conv_I2FP(const ir_node *node)
	emit_ia32_Conv_with_FP(node, "si2ss", "si2sd");
/** float -> int conversion (cvtss2si / cvtsd2si). */
static void emit_ia32_Conv_FP2I(const ir_node *node)
	emit_ia32_Conv_with_FP(node, "ss2si", "sd2si");
/** float precision conversion (cvtsd2ss / cvtss2sd). */
static void emit_ia32_Conv_FP2FP(const ir_node *node)
	emit_ia32_Conv_with_FP(node, "sd2ss", "ss2sd");
1313 * Emits code for an Int conversion.
1315 static void emit_ia32_Conv_I2I(const ir_node *node)
1317 ir_mode *smaller_mode = get_ia32_ls_mode(node);
1318 int signed_mode = mode_is_signed(smaller_mode);
1319 const char *sign_suffix;
1321 assert(!mode_is_float(smaller_mode));
1323 sign_suffix = signed_mode ? "s" : "z";
1324 ia32_emitf(node, "\tmov%s%Ml %#AS3, %D0\n", sign_suffix);
/** Emits a call instruction. */
static void emit_ia32_Call(const ir_node *node)
	/* Special case: Call must not have its immediates prefixed by $, instead
	 * address mode is prefixed by *. */
	ia32_emitf(node, "\tcall %*AS3\n");
/**
 * Emits code to increase stack pointer.
 */
static void emit_be_IncSP(const ir_node *node)
	int offs = be_get_IncSP_offset(node);

	/* positive offset grows the stack (sub), negative shrinks it (add);
	 * NOTE(review): the zero-offset early-out and the if/else structure
	 * are on lines not visible in this fragment */
	ia32_emitf(node, "\tsubl $%u, %D0\n", offs);
	ia32_emitf(node, "\taddl $%u, %D0\n", -offs);
/**
 * Emits code for Copy/CopyKeep.
 */
static void Copy_emitter(const ir_node *node, const ir_node *op)
	const arch_register_t *in  = arch_get_irn_register(op);
	const arch_register_t *out = arch_get_irn_register(node);

	/* copies of vf nodes aren't real... */
	if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])

	ia32_emitf(node, "\tmovl %R, %R\n", in, out);
/** Emits a be_Copy via the shared Copy_emitter. */
static void emit_be_Copy(const ir_node *node)
	Copy_emitter(node, be_get_Copy_op(node));
/** Emits a be_CopyKeep via the shared Copy_emitter. */
static void emit_be_CopyKeep(const ir_node *node)
	Copy_emitter(node, be_get_CopyKeep_op(node));
/**
 * Emits code for exchange.
 */
static void emit_be_Perm(const ir_node *node)
	const arch_register_t *in0, *in1;
	const arch_register_class_t *cls0, *cls1;

	in0 = arch_get_irn_register(get_irn_n(node, 0));
	in1 = arch_get_irn_register(get_irn_n(node, 1));

	cls0 = arch_register_get_class(in0);
	cls1 = arch_register_get_class(in1);

	assert(cls0 == cls1 && "Register class mismatch at Perm");

	if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
		/* GP registers can be swapped directly */
		ia32_emitf(node, "\txchg %R, %R\n", in1, in0);
	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
		/* no xchg for xmm: swap via the classic triple-xor trick */
		ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
		ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
		ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
		/* (vfp and st branch bodies not visible in this fragment) */
	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
		panic("unexpected register class in be_Perm (%+F)", node);
/**
 * Emits code for Constant loading.
 */
static void emit_ia32_Const(const ir_node *node)
	ia32_emitf(node, "\tmovl %I, %D0\n");
/* helper function for emit_ia32_Minus64Bit: register-to-register move */
static void emit_mov(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
	ia32_emitf(node, "\tmovl %R, %R\n", src, dst);
/* helper function for emit_ia32_Minus64Bit: two's-complement negate */
static void emit_neg(const ir_node* node, const arch_register_t *reg)
	ia32_emitf(node, "\tnegl %R\n", reg);
/* helper function for emit_ia32_Minus64Bit: subtract only the borrow */
static void emit_sbb0(const ir_node* node, const arch_register_t *reg)
	ia32_emitf(node, "\tsbbl $0, %R\n", reg);
/* helper function for emit_ia32_Minus64Bit: subtract with borrow */
static void emit_sbb(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
	ia32_emitf(node, "\tsbbl %R, %R\n", src, dst);
/* helper function for emit_ia32_Minus64Bit: exchange two registers */
static void emit_xchg(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
	ia32_emitf(node, "\txchgl %R, %R\n", src, dst);
/* helper function for emit_ia32_Minus64Bit: clear a register (xor with self) */
static void emit_zero(const ir_node* node, const arch_register_t *reg)
	ia32_emitf(node, "\txorl %R, %R\n", reg, reg);
/** Emits a 64-bit negate for a lo/hi register pair (neg/sbb sequence). */
static void emit_ia32_Minus64Bit(const ir_node *node)
	const arch_register_t *in_lo  = arch_get_irn_register_in(node, 0);
	const arch_register_t *in_hi  = arch_get_irn_register_in(node, 1);
	const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
	const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);

	/* step 1: place the inputs into the output registers; the cases only
	 * differ in how in/out registers alias each other.
	 * NOTE(review): the control-transfer lines of this function are not
	 * visible in this fragment. */
	if (out_lo == in_lo) {
		if (out_hi != in_hi) {
			/* a -> a, b -> d */
			/* a -> a, b -> b */
	} else if (out_lo == in_hi) {
		if (out_hi == in_lo) {
			/* a -> b, b -> a */
			emit_xchg(node, in_lo, in_hi);
			/* a -> b, b -> d */
			emit_mov(node, in_hi, out_hi);
			emit_mov(node, in_lo, out_lo);

		if (out_hi == in_lo) {
			/* a -> c, b -> a */
			emit_mov(node, in_lo, out_lo);
		} else if (out_hi == in_hi) {
			/* a -> c, b -> b */
			emit_mov(node, in_lo, out_lo);
			/* a -> c, b -> d */
			emit_mov(node, in_lo, out_lo);

	/* variant 1: negate hi and lo, then fix hi by the borrow of lo */
	emit_neg( node, out_hi);
	emit_neg( node, out_lo);
	emit_sbb0(node, out_hi);

	/* variant 2: zero hi, negate lo, subtract in_hi with borrow */
	emit_zero(node, out_hi);
	emit_neg( node, out_lo);
	emit_sbb( node, in_hi, out_hi);
1512 static void emit_ia32_GetEIP(const ir_node *node)
1514 ia32_emitf(node, "\tcall %s\n", pic_base_label);
1515 ia32_emitf(NULL, "%s:\n", pic_base_label);
1516 ia32_emitf(node, "\tpopl %D0\n");
1519 static void emit_ia32_ClimbFrame(const ir_node *node)
1521 const ia32_climbframe_attr_t *attr = get_ia32_climbframe_attr_const(node);
1523 ia32_emitf(node, "\tmovl %S0, %D0\n");
1524 ia32_emitf(node, "\tmovl $%u, %S1\n", attr->count);
1525 be_gas_emit_block_name(node);
1526 be_emit_cstring(":\n");
1527 be_emit_write_line();
1528 ia32_emitf(node, "\tmovl (%D0), %D0\n");
1529 ia32_emitf(node, "\tdec %S1\n");
1530 be_emit_cstring("\tjnz ");
1531 be_gas_emit_block_name(node);
1532 be_emit_finish_line_gas(node);
1535 static void emit_be_Return(const ir_node *node)
1537 unsigned pop = be_Return_get_pop(node);
1539 if (pop > 0 || be_Return_get_emit_pop(node)) {
1540 ia32_emitf(node, "\tret $%u\n", pop);
1542 ia32_emitf(node, "\tret\n");
/** Emitter callback for nodes that produce no code.
 * NOTE(review): body elided in this extract; presumably an intentional no-op
 * (registered via the IGN/BE_IGN macros below) — confirm against full source. */
1546 static void emit_Nothing(const ir_node *node)
1553 * Enters the emitter functions for handled nodes into the generic
1554 * pointer of an opcode.
/** Install the emitter callbacks into each opcode's generic function pointer.
 * Spec-generated emitters are registered first, then the hand-written ones
 * defined in this file override/extend them.
 * NOTE(review): several IGN/BE_EMIT registration lines are elided in this
 * extract, as are the closing #undef lines. */
1556 static void ia32_register_emitters(void)
1558 #define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
1559 #define IA32_EMIT(a)    IA32_EMIT2(a,a)
1560 #define EMIT(a)         op_##a->ops.generic = (op_func)emit_##a
1561 #define IGN(a)          op_##a->ops.generic = (op_func)emit_Nothing
1562 #define BE_EMIT(a)      op_be_##a->ops.generic = (op_func)emit_be_##a
1563 #define BE_IGN(a)       op_be_##a->ops.generic = (op_func)emit_Nothing
1565 	/* first clear the generic function pointer for all ops */
1566 	ir_clear_opcodes_generic_func();
1568 	/* register all emitter functions defined in spec */
1569 	ia32_register_spec_emitters();
1571 	/* other ia32 emitter functions */
1572 	IA32_EMIT2(Conv_I2I8Bit, Conv_I2I);
1577 	IA32_EMIT(Conv_FP2FP);
1578 	IA32_EMIT(Conv_FP2I);
1579 	IA32_EMIT(Conv_I2FP);
1580 	IA32_EMIT(Conv_I2I);
1587 	IA32_EMIT(Minus64Bit);
1588 	IA32_EMIT(SwitchJmp);
1589 	IA32_EMIT(ClimbFrame);
1592 	/* benode emitter */
/** Signature of the per-opcode emitter callbacks registered above. */
1612 typedef void (*emit_func_ptr) (const ir_node *);
1615  * Assign and emit an exception label if the current instruction can fail.
/** Give @p node a fresh exception-label id, emit the label and a comment
 * naming the handler block the exception branches to. */
1617 static void ia32_assign_exc_label(ir_node *node)
1619 	/* assign a new ID to the instruction */
1620 	set_ia32_exc_label_id(node, ++exc_label_id);
1622 	ia32_emit_exc_label(node);
1624 	be_emit_pad_comment();
1625 	be_emit_cstring("/* exception to Block ");
1626 	ia32_emit_cfop_target(node);
1627 	be_emit_cstring(" */\n");
1628 	be_emit_write_line();
1632 * Emits code for a node.
/** Emit code for a single scheduled node: optional exception label and
 * spill/reload/remat marker xchg no-ops, then dispatch to the opcode's
 * registered emitter; afterwards track the stack-pointer bias for dwarf.
 * NOTE(review): braces/else-arms around the dispatch and error path are
 * elided in this extract. */
1634 static void ia32_emit_node(ir_node *node)
1636 	ir_op *op = get_irn_op(node);
1638 	DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
1640 	if (is_ia32_irn(node)) {
1641 		if (get_ia32_exc_label(node)) {
1642 			/* emit the exception label of this instruction */
1643 			ia32_assign_exc_label(node);
1645 		if (mark_spill_reload) {
			/* xchg reg,reg is a no-op; used as a debugger-visible marker */
1646 			if (is_ia32_is_spill(node)) {
1647 				ia32_emitf(NULL, "\txchg %ebx, %ebx        /* spill mark */\n");
1649 			if (is_ia32_is_reload(node)) {
1650 				ia32_emitf(NULL, "\txchg %edx, %edx        /* reload mark */\n");
1652 			if (is_ia32_is_remat(node)) {
1653 				ia32_emitf(NULL, "\txchg %ecx, %ecx        /* remat mark */\n");
1657 	if (op->ops.generic) {
1658 		emit_func_ptr func = (emit_func_ptr) op->ops.generic;
1660 		be_dwarf_location(get_irn_dbg_info(node));
1665 		ir_fprintf(stderr, "Error: No emit handler for node %+F (%+G, graph %+F)\n", node, node, current_ir_graph);
	/* keep the dwarf call-frame offset in sync with SP changes */
1670 		int sp_change = arch_get_sp_bias(node);
1671 		if (sp_change != 0) {
1672 			assert(sp_change != SP_BIAS_RESET);
1673 			callframe_offset += sp_change;
1674 			be_dwarf_callframe_offset(callframe_offset);
1680 * Emits gas alignment directives
1682 static void ia32_emit_alignment(unsigned align, unsigned skip)
1684 ia32_emitf(NULL, "\t.p2align %u,,%u\n", align, skip);
1688  * Emits gas alignment directives for labels, depending on the cpu architecture.
1690 static void ia32_emit_align_label(void)
1692 unsigned align = ia32_cg_config.label_alignment;
1693 unsigned maximum_skip = ia32_cg_config.label_alignment_max_skip;
1694 ia32_emit_alignment(align, maximum_skip);
1698 * Test whether a block should be aligned.
1699 * For cpus in the P4/Athlon class it is useful to align jump labels to
1700 * 16 bytes. However we should only do that if the alignment nops before the
1701 * label aren't executed more often than we have jumps to the label.
/** Decide whether @p block is worth aligning: compares the execution
 * frequency of jump predecessors against the fall-through predecessor and
 * aligns when the jump/fallthrough ratio exceeds label_alignment_factor.
 * NOTE(review): several lines (early "return 0;"s, loop braces, variable
 * declarations) are elided in this extract. */
1703 static int should_align_block(const ir_node *block)
1705 	static const double DELTA = .0001;
1706 	ir_graph     *irg       = get_irn_irg(block);
1707 	ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
1708 	ir_node      *prev      = get_prev_block_sched(block);
1710 	double        prev_freq = 0;  /**< execfreq of the fallthrough block */
1711 	double        jmp_freq  = 0;  /**< execfreq of all non-fallthrough blocks */
1714 	if (exec_freq == NULL)
1716 	if (ia32_cg_config.label_alignment_factor <= 0)
1719 	block_freq = get_block_execfreq(exec_freq, block);
1720 	if (block_freq < DELTA)
1723 	n_cfgpreds = get_Block_n_cfgpreds(block);
1724 	for (i = 0; i < n_cfgpreds; ++i) {
1725 		const ir_node *pred      = get_Block_cfgpred_block(block, i);
1726 		double         pred_freq = get_block_execfreq(exec_freq, pred);
1729 			prev_freq += pred_freq;
1731 			jmp_freq  += pred_freq;
	/* all incoming edges are jumps (no fall-through) -> do not align here */
1735 	if (prev_freq < DELTA && !(jmp_freq < DELTA))
1738 	jmp_freq /= prev_freq;
1740 	return jmp_freq > ia32_cg_config.label_alignment_factor;
1744 * Emit the block header for a block.
1746 * @param block the block
1747 * @param prev_block the previous block
/** Emit the header for @p block: optional alignment (by execution frequency,
 * or unconditionally when no predecessor falls through) and the block label.
 * NOTE(review): some braces/else lines are elided in this extract. */
1749 static void ia32_emit_block_header(ir_node *block)
1751 	ir_graph     *irg        = current_ir_graph;
1752 	int           need_label = block_needs_label(block);
	/* the end block contains no code -> nothing to emit */
1754 	if (block == get_irg_end_block(irg))
1757 	if (ia32_cg_config.label_alignment > 0) {
1758 		/* align the current block if:
1759 		 * a) if should be aligned due to its execution frequency
1760 		 * b) there is no fall-through here
1762 		if (should_align_block(block)) {
1763 			ia32_emit_align_label();
1765 			/* if the predecessor block has no fall-through,
1766 			   we can always align the label. */
1768 			int has_fallthrough = 0;
1770 			for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1771 				ir_node *cfg_pred = get_Block_cfgpred(block, i);
1772 				if (can_be_fallthrough(cfg_pred)) {
1773 					has_fallthrough = 1;
1778 			if (!has_fallthrough)
1779 				ia32_emit_align_label();
1783 	be_gas_begin_block(block, need_label);
1787 * Walks over the nodes in a block connected by scheduling edges
1788 * and emits code for each node.
/** Emit one basic block: header, dwarf call-frame bookkeeping (a rough
 * ESP guess, see TODO), then every node in schedule order. */
1790 static void ia32_gen_block(ir_node *block)
1794 	ia32_emit_block_header(block);
	/* sp-relative frames: (re)establish the callframe offset per block */
1797 		ir_graph *irg = get_irn_irg(block);
1798 		callframe_offset = 4; /* 4 bytes for the return address */
1799 		/* ESP guessing, TODO perform a real ESP simulation */
1800 		if (block != get_irg_start_block(irg)) {
1801 			callframe_offset += frame_type_size;
1803 		be_dwarf_callframe_offset(callframe_offset);
1806 	/* emit the contents of the block */
1807 	be_dwarf_location(get_irn_dbg_info(block));
1808 	sched_foreach(block, node) {
1809 		ia32_emit_node(node);
/** One entry of the exception table built while emitting:
 * maps a potentially-faulting instruction to its handler block.
 * NOTE(review): the closing "} exc_entry;" line is elided in this extract. */
1813 typedef struct exc_entry {
1814 	ir_node *exc_instr;  /** The instruction that can issue an exception. */
1815 	ir_node *block;      /** The block to call then. */
1820 * Sets labels for control flow nodes (jump target).
1821  * Links control predecessors to their destination blocks.
/** Block walker: link every control-flow predecessor to its destination
 * block (used as jump-target lookup while emitting) and collect
 * instructions carrying exception labels into *data (an exc_entry array).
 * NOTE(review): the declarations and the exc_entry initialisation lines
 * are elided in this extract. */
1823 static void ia32_gen_labels(ir_node *block, void *data)
1825 	exc_entry **exc_list = (exc_entry**)data;
1829 	for (n = get_Block_n_cfgpreds(block) - 1; n >= 0; --n) {
1830 		pred = get_Block_cfgpred(block, n);
1831 		set_irn_link(pred, block);
1833 		pred = skip_Proj(pred);
1834 		if (is_ia32_irn(pred) && get_ia32_exc_label(pred)) {
1839 			ARR_APP1(exc_entry, *exc_list, e);
1840 			set_irn_link(pred, block);
1846 * Compare two exception_entries.
/** qsort comparator ordering exc_entries by ascending exception-label id.
 * NOTE(review): the return statements are elided in this extract. */
1848 static int cmp_exc_entry(const void *a, const void *b)
1850 	const exc_entry *ea = (const exc_entry*)a;
1851 	const exc_entry *eb = (const exc_entry*)b;
1853 	if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
/** Build a zero-initialised per-parameter debug-info array for @p irg by
 * scanning the argument type's members for parameter entities.
 * The caller owns (and must free) the returned array.
 * NOTE(review): loop body braces, "continue" lines and the final
 * "return infos;" are elided in this extract. */
1858 static parameter_dbg_info_t *construct_parameter_infos(ir_graph *irg)
1860 	ir_entity            *entity    = get_irg_entity(irg);
1861 	ir_type              *type      = get_entity_type(entity);
1862 	size_t                n_params  = get_method_n_params(type);
1863 	be_stack_layout_t    *layout    = be_get_irg_stack_layout(irg);
1864 	ir_type              *arg_type  = layout->arg_type;
1865 	size_t                n_members = get_compound_n_members(arg_type);
1866 	parameter_dbg_info_t *infos     = XMALLOCNZ(parameter_dbg_info_t, n_params);
1869 	for (i = 0; i < n_members; ++i) {
1870 		ir_entity *member = get_compound_member(arg_type, i);
1872 		if (!is_parameter_entity(member))
1874 		param = get_entity_parameter_number(member);
		/* the va_start pseudo parameter has no slot in infos */
1875 		if (param == IR_VA_START_PARAMETER_NUMBER)
1877 		assert(infos[param].entity == NULL && infos[param].reg == NULL);
1878 		infos[param].reg    = NULL;
1879 		infos[param].entity = member;
1886 * Main driver. Emits the code for one routine.
/** Main driver: emits the complete code for one routine — prolog, all blocks
 * in schedule order, epilog, and finally the sorted exception table.
 * NOTE(review): various declarations, braces and the exception-table
 * begin/end emission lines are elided in this extract. */
1888 void ia32_gen_routine(ir_graph *irg)
1890 	ir_entity        *entity    = get_irg_entity(irg);
1891 	exc_entry        *exc_list  = NEW_ARR_F(exc_entry, 0);
1892 	const arch_env_t *arch_env  = be_get_irg_arch_env(irg);
1893 	ia32_irg_data_t  *irg_data  = ia32_get_irg_data(irg);
1894 	ir_node         **blk_sched = irg_data->blk_sched;
1895 	be_stack_layout_t *layout   = be_get_irg_stack_layout(irg);
1896 	parameter_dbg_info_t *infos;
1899 	isa    = (ia32_isa_t*) arch_env;
1900 	do_pic = be_options.pic;
1902 	be_gas_elf_type_char = '@';
1904 	ia32_register_emitters();
1906 	get_unique_label(pic_base_label, sizeof(pic_base_label), "PIC_BASE");
1908 	infos = construct_parameter_infos(irg);
1909 	be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment,
	/* set up dwarf call-frame description, depending on frame style */
1913 	sp_relative = layout->sp_relative;
1914 	if (layout->sp_relative) {
1915 		ir_type *frame_type = get_irg_frame_type(irg);
1916 		frame_type_size = get_type_size_bytes(frame_type);
1917 		be_dwarf_callframe_register(&ia32_registers[REG_ESP]);
1919 		/* well not entirely correct here, we should emit this after the
1920 		 * "movl esp, ebp" */
1921 		be_dwarf_callframe_register(&ia32_registers[REG_EBP]);
1922 		/* TODO: do not hardcode the following */
1923 		be_dwarf_callframe_offset(8);
1924 		be_dwarf_callframe_spilloffset(&ia32_registers[REG_EBP], -8);
1927 	/* we use links to point to target blocks */
1928 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1929 	irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
1931 	/* initialize next block links */
1932 	n = ARR_LEN(blk_sched);
1933 	for (i = 0; i < n; ++i) {
1934 		ir_node *block = blk_sched[i];
1935 		ir_node *prev  = i > 0 ? blk_sched[i-1] : NULL;
1937 		set_irn_link(block, prev);
1940 	for (i = 0; i < n; ++i) {
1941 		ir_node *block = blk_sched[i];
1943 		ia32_gen_block(block);
1946 	be_gas_emit_function_epilog(entity);
1948 	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1950 	/* Sort the exception table using the exception label id's.
1951 	   Those are ascending with ascending addresses. */
1952 	qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
1956 	for (e = 0; e < ARR_LEN(exc_list); ++e) {
1957 		be_emit_cstring("\t.long ");
1958 		ia32_emit_exc_label(exc_list[e].exc_instr);
1960 		be_emit_cstring("\t.long ");
1961 		be_gas_emit_block_name(exc_list[e].block);
1965 	DEL_ARR_F(exc_list);
/** Command-line options of this emitter.
 * NOTE(review): the LC_OPT_LAST terminator and closing brace are elided. */
1968 static const lc_opt_table_entry_t ia32_emitter_options[] = {
1969 	LC_OPT_ENT_BOOL("mark_spill_reload", "mark spills and reloads with ud opcodes", &mark_spill_reload),
1973 /* ==== Experimental binary emitter ==== */
/** Maps backend gp register indices to their x86 hardware encoding
 * (filled by build_reg_map below). mmx/sse maps are currently unused. */
1975 static unsigned char reg_gp_map[N_ia32_gp_REGS];
1976 //static unsigned char reg_mmx_map[N_ia32_mmx_REGS];
1977 //static unsigned char reg_sse_map[N_ia32_xmm_REGS];
1979 static void build_reg_map(void)
1981 reg_gp_map[REG_GP_EAX] = 0x0;
1982 reg_gp_map[REG_GP_ECX] = 0x1;
1983 reg_gp_map[REG_GP_EDX] = 0x2;
1984 reg_gp_map[REG_GP_EBX] = 0x3;
1985 reg_gp_map[REG_GP_ESP] = 0x4;
1986 reg_gp_map[REG_GP_EBP] = 0x5;
1987 reg_gp_map[REG_GP_ESI] = 0x6;
1988 reg_gp_map[REG_GP_EDI] = 0x7;
1991 /** Returns the encoding for a pnc field. */
/* NOTE(review): body elided in this extract; presumably maps the backend
 * condition code to the 4bit x86 cc encoding — confirm against full source. */
1992 static unsigned char pnc2cc(ia32_condition_code_t cc)
1997 /** Sign extension bit values for binops */
/* NOTE(review): the surrounding "enum { ... };" lines are elided.
 * SIGNEXT_IMM (bit 1) selects the sign-extended-imm8 opcode form. */
1999 	UNSIGNED_IMM = 0,  /**< unsigned immediate */
2000 	SIGNEXT_IMM  = 2,  /**< sign extended immediate */
2003 /** The mod encoding of the ModR/M */
/* Values occupy the top two bits (bits 6-7) of the ModR/M byte.
 * NOTE(review): the surrounding "enum { ... };" lines are elided. */
2005 	MOD_IND          = 0x00, /**< [reg1] */
2006 	MOD_IND_BYTE_OFS = 0x40, /**< [reg1 + byte ofs] */
2007 	MOD_IND_WORD_OFS = 0x80, /**< [reg1 + word ofs] */
2008 	MOD_REG          = 0xC0  /**< reg1 */
2011 /** create R/M encoding for ModR/M */
2012 #define ENC_RM(x) (x)
2013 /** create REG encoding for ModR/M */
2014 #define ENC_REG(x) ((x) << 3)
2016 /** create encoding for a SIB byte */
/* SIB layout: scale in bits 6-7, index in bits 3-5, base in bits 0-2. */
2017 #define ENC_SIB(scale, index, base) ((scale) << 6 | (index) << 3 | (base))
2019 /* Note: The following routines are supposed to append bytes, words, dwords
2020    to the output stream.
2021    Currently the implementation is stupid in that it still creates output
2022    for an "assembler" in the form of .byte, .long
2023    We will change this when enough infrastructure is there to create complete
2024    machine code in memory/object files */
/** Append one byte to the instruction stream (currently as a ".byte"
 *  directive for the assembler, see the note above). */
static void bemit8(const unsigned char byte)
{
	be_emit_irprintf("\t.byte 0x%x\n", byte);
	be_emit_write_line();
}
/** Append a 16bit word to the instruction stream (as a ".word" directive). */
static void bemit16(const unsigned short u16)
{
	be_emit_irprintf("\t.word 0x%x\n", u16);
	be_emit_write_line();
}
/** Append a 32bit word to the instruction stream (as a ".long" directive). */
static void bemit32(const unsigned u32)
{
	be_emit_irprintf("\t.long 0x%x\n", u32);
	be_emit_write_line();
}
2045 * Emit address of an entity. If @p is_relative is true then a relative
2046 * offset from behind the address to the entity is created.
/** Emit the 32bit address of @p entity (plus @p offset); TLS entities get
 * @INDNTPOFF/@NTPOFF relocations, and a relative reference subtracts the
 * current position ("-.").
 * NOTE(review): many lines are elided here — the NULL-entity path, braces,
 * and the conditions guarding the offset/relative emissions. */
2048 static void bemit_entity(ir_entity *entity, bool entity_sign, int offset,
2051 	if (entity == NULL) {
2056 	/* the final version should remember the position in the bytestream
2057 	   and patch it with the correct address at linktime... */
2058 	be_emit_cstring("\t.long ");
2061 	be_gas_emit_entity(entity);
2063 	if (get_entity_owner(entity) == get_tls_type()) {
2064 		if (!entity_has_definition(entity)) {
2065 			be_emit_cstring("@INDNTPOFF");
2067 			be_emit_cstring("@NTPOFF");
2072 		be_emit_cstring("-.");
2077 		be_emit_irprintf("%+d", offset);
2080 	be_emit_write_line();
2083 static void bemit_jmp_destination(const ir_node *dest_block)
2085 be_emit_cstring("\t.long ");
2086 be_gas_emit_block_name(dest_block);
2087 be_emit_cstring(" - . - 4\n");
2088 be_emit_write_line();
2091 /* end emit routines, all emitters following here should only use the functions
/* Selects the low (al..bl) or high (ah..bh) half of an 8bit gp register.
 * NOTE(review): the enumerator lines (REG_LOW/REG_HIGH, used below) and the
 * closing "} reg_modifier_t;" are elided in this extract. */
2094 typedef enum reg_modifier {
2099 /** Create a ModR/M byte for src1,src2 registers */
/** Create a ModR/M byte for src1 (R/M field), src2 (REG field) in
 * register-direct mode.
 * NOTE(review): the final "bemit8(modrm);" line is elided in this extract. */
2100 static void bemit_modrr(const arch_register_t *src1,
2101                         const arch_register_t *src2)
2103 	unsigned char modrm = MOD_REG;
2104 	modrm |= ENC_RM(reg_gp_map[src1->index]);
2105 	modrm |= ENC_REG(reg_gp_map[src2->index]);
2109 /** Create a ModR/M8 byte for src1,src2 registers */
/** Create a ModR/M byte for two 8bit registers; +4 selects the high-byte
 * encoding (ah/ch/dh/bh) when the modifier is REG_HIGH.
 * NOTE(review): the final "bemit8(modrm);" line is elided in this extract. */
2110 static void bemit_modrr8(reg_modifier_t high_part1, const arch_register_t *src1,
2111 						 reg_modifier_t high_part2, const arch_register_t *src2)
2113 	unsigned char modrm = MOD_REG;
2114 	modrm |= ENC_RM(reg_gp_map[src1->index] + (high_part1 == REG_HIGH ? 4 : 0));
2115 	modrm |= ENC_REG(reg_gp_map[src2->index] + (high_part2 == REG_HIGH ? 4 : 0));
2119 /** Create a ModR/M byte for one register and extension */
/** Create a ModR/M byte with @p reg in the R/M field and an opcode
 * extension @p ext in the REG field (register-direct mode).
 * NOTE(review): the final "bemit8(modrm);" line is elided in this extract. */
2120 static void bemit_modru(const arch_register_t *reg, unsigned ext)
2122 	unsigned char modrm = MOD_REG;
2124 	modrm |= ENC_RM(reg_gp_map[reg->index]);
2125 	modrm |= ENC_REG(ext);
2129 /** Create a ModR/M8 byte for one register */
/** Create a ModR/M byte for one 8bit register (R/M field only); requires a
 * register with an 8bit-addressable encoding (eax..ebx, hence the assert).
 * NOTE(review): the final "bemit8(modrm);" line is elided in this extract. */
2130 static void bemit_modrm8(reg_modifier_t high_part, const arch_register_t *reg)
2132 	unsigned char modrm = MOD_REG;
2133 	assert(reg_gp_map[reg->index] < 4);
2134 	modrm |= ENC_RM(reg_gp_map[reg->index] + (high_part == REG_HIGH ? 4 : 0));
2140 * Calculate the size of an signed immediate in bytes.
2142 * @param offset an offset
/**
 * Calculate the size of a signed immediate in bytes.
 *
 * The visible extract falls through without returning; this restores the
 * complete classification: 1 byte for imm8 range, 2 for imm16 range,
 * otherwise 4.
 *
 * @param offset  the immediate value
 * @return        1, 2 or 4 (bytes)
 */
static unsigned get_signed_imm_size(int offset)
{
	if (-128 <= offset && offset < 128) {
		return 1;
	} else if (-32768 <= offset && offset < 32768) {
		return 2;
	} else {
		return 4;
	}
}
2156 * Emit an address mode.
2158 * @param reg content of the reg field: either a register index or an opcode extension
2159 * @param node the node
/** Emit the ModR/M (+ optional SIB, displacement, entity reference) bytes
 * for @p node's address mode, with @p reg in the REG field.
 * NOTE(review): numerous lines (declarations, braces, emitoffs assignments,
 * the bemit8 calls emitting modrm/sib, the 32bit-displacement branch) are
 * elided in this extract. */
2161 static void bemit_mod_am(unsigned reg, const ir_node *node)
2163 	ir_entity *ent       = get_ia32_am_sc(node);
2164 	int        offs      = get_ia32_am_offs_int(node);
2165 	ir_node   *base      = get_irn_n(node, n_ia32_base);
2166 	int        has_base  = !is_ia32_NoReg_GP(base);
2167 	ir_node   *idx       = get_irn_n(node, n_ia32_index);
2168 	int        has_index = !is_ia32_NoReg_GP(idx);
2171 	unsigned   emitoffs  = 0;
2172 	bool       emitsib   = false;
2175 	/* set the mod part depending on displacement */
	/* an entity reference always forces a 32bit displacement */
2177 		modrm |= MOD_IND_WORD_OFS;
2179 	} else if (offs == 0) {
2182 	} else if (-128 <= offs && offs < 128) {
2183 		modrm |= MOD_IND_BYTE_OFS;
2186 		modrm |= MOD_IND_WORD_OFS;
2191 		const arch_register_t *base_reg = arch_get_irn_register(base);
2192 		base_enc = reg_gp_map[base_reg->index];
2194 		/* Use the EBP encoding + MOD_IND if NO base register. There is
2195 		 * always a 32bit offset present in this case. */
2201 	/* Determine if we need a SIB byte. */
2203 		const arch_register_t *reg_index = arch_get_irn_register(idx);
2204 		int                    scale     = get_ia32_am_scale(node);
2206 		/* R/M set to ESP means SIB in 32bit mode. */
2207 		modrm |= ENC_RM(0x04);
2208 		sib = ENC_SIB(scale, reg_gp_map[reg_index->index], base_enc);
2210 	} else if (base_enc == 0x04) {
2211 		/* for the above reason we are forced to emit a SIB when base is ESP.
2212 		 * Only the base is used, index must be ESP too, which means no index.
2214 		modrm |= ENC_RM(0x04);
2215 		sib = ENC_SIB(0, 0x04, 0x04);
2218 		modrm |= ENC_RM(base_enc);
2221 	/* We are forced to emit an 8bit offset as EBP base without offset is a
2222 	 * special case for SIB without base register. */
2223 	if (base_enc == 0x05 && emitoffs == 0) {
2224 		modrm |= MOD_IND_BYTE_OFS;
2228 	modrm |= ENC_REG(reg);
2234 	/* emit displacement */
2235 	if (emitoffs == 8) {
2236 		bemit8((unsigned) offs);
2237 	} else if (emitoffs == 32) {
2238 		bemit_entity(ent, is_ia32_am_sc_sign(node), offs, false);
2243 * Emit a binop with a immediate operand.
2245 * @param node the node to emit
2246 * @param opcode_eax the opcode for the op eax, imm variant
2247 * @param opcode the opcode for the reg, imm variant
2248 * @param ruval the opcode extension for opcode
/** Emit a binop whose right operand is an immediate: chooses the
 * sign-extended imm8 form, the short eax form, or the generic reg/mem form
 * depending on the immediate's size and the left operand.
 * NOTE(review): several lines (size declaration, the force-4-bytes branch,
 * case braces, the eax opcode emission, "break"s) are elided here. */
2250 static void bemit_binop_with_imm(
2251 	const ir_node *node,
2252 	unsigned char opcode_ax,
2253 	unsigned char opcode, unsigned char ruval)
2255 	/* Use in-reg, because some instructions (cmp, test) have no out-reg. */
2256 	const ir_node               *op   = get_irn_n(node, n_ia32_binary_right);
2257 	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
2260 	/* Some instructions (test) have no short form with 32bit value + 8bit
	/* symconsts always need a full 32bit slot for relocation */
2262 	if (attr->symconst != NULL || opcode & SIGNEXT_IMM) {
2265 		/* check for sign extension */
2266 		size = get_signed_imm_size(attr->offset);
2271 		bemit8(opcode | SIGNEXT_IMM);
2272 		/* cmp has this special mode */
2273 		if (get_ia32_op_type(node) == ia32_AddrModeS) {
2274 			bemit_mod_am(ruval, node);
2276 			const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2277 			bemit_modru(reg, ruval);
2279 		bemit8((unsigned char)attr->offset);
2283 		/* check for eax variant: this variant is shorter for 32bit immediates only */
2284 		if (get_ia32_op_type(node) == ia32_AddrModeS) {
2286 			bemit_mod_am(ruval, node);
2288 			const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2289 			if (reg->index == REG_GP_EAX) {
2293 				bemit_modru(reg, ruval);
2296 		bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
2299 		panic("invalid imm size?!?");
/** Emit the register/register or register/memory form of a binop with the
 * given opcode byte.
 * NOTE(review): the opcode emission line ("bemit8(code);") and else/braces
 * are elided in this extract. */
2305 static void bemit_binop_2(const ir_node *node, unsigned code)
2307 	const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
2309 	if (get_ia32_op_type(node) == ia32_Normal) {
2310 		const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
2311 		bemit_modrr(op2, out);
2313 		bemit_mod_am(reg_gp_map[out->index], node);
2320 static void bemit_binop(const ir_node *node, const unsigned char opcodes[4])
2322 ir_node *right = get_irn_n(node, n_ia32_binary_right);
2323 if (is_ia32_Immediate(right)) {
2324 bemit_binop_with_imm(node, opcodes[1], opcodes[2], opcodes[3]);
2326 bemit_binop_2(node, opcodes[0]);
/** Emit a unop: opcode byte @p code with extension @p ext, operand taken
 * from input @p input (register) or the node's address mode.
 * NOTE(review): the opcode emission line ("bemit8(code);") and else/braces
 * are elided in this extract. */
2333 static void bemit_unop(const ir_node *node, unsigned char code, unsigned char ext, int input)
2336 	if (get_ia32_op_type(node) == ia32_Normal) {
2337 		const arch_register_t *in = arch_get_irn_register_in(node, input);
2338 		bemit_modru(in, ext);
2340 		bemit_mod_am(ext, node);
2344 static void bemit_unop_reg(const ir_node *node, unsigned char code, int input)
2346 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2347 bemit_unop(node, code, reg_gp_map[out->index], input);
/** Emit a memory-destination unop; uses @p code for the 8bit form and
 * code+1 for the wider form.
 * NOTE(review): lines between the size query and the opcode emission
 * (presumably the 16bit operand-size prefix handling) are elided. */
2350 static void bemit_unop_mem(const ir_node *node, unsigned char code, unsigned char ext)
2352 	unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));
2355 	bemit8(size == 8 ? code : code + 1);
2356 	bemit_mod_am(ext, node);
2359 static void bemit_immediate(const ir_node *node, bool relative)
2361 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
2362 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, relative);
/** Emit a be_Copy: a gp register-to-register mov; vfp copies are virtual
 * and produce no code.
 * NOTE(review): the in==out early return and the mov opcode emission line
 * are elided in this extract. */
2365 static void bemit_copy(const ir_node *copy)
2367 	const arch_register_t *in  = arch_get_irn_register_in(copy, 0);
2368 	const arch_register_t *out = arch_get_irn_register_out(copy, 0);
2372 	/* copies of vf nodes aren't real... */
2373 	if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
2376 	assert(arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_gp]);
2378 	bemit_modrr(in, out);
/** Emit a be_Perm (register swap): gp registers use the short xchg-with-eax
 * form (0x90+reg) when possible, otherwise a generic xchg; xmm/vfp/st
 * classes are unimplemented/virtual.
 * NOTE(review): the xchg opcode emission, else-braces and the vfp/st case
 * bodies are elided in this extract. */
2383 static void bemit_perm(const ir_node *node)
2384 	const arch_register_t       *in0  = arch_get_irn_register(get_irn_n(node, 0));
2385 	const arch_register_t       *in1  = arch_get_irn_register(get_irn_n(node, 1));
2385 	const arch_register_class_t *cls0 = arch_register_get_class(in0);
2387 	assert(cls0 == arch_register_get_class(in1) && "Register class mismatch at Perm");
2389 	if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
2390 		if (in0->index == REG_GP_EAX) {
2391 			bemit8(0x90 + reg_gp_map[in1->index]);
2392 		} else if (in1->index == REG_GP_EAX) {
2393 			bemit8(0x90 + reg_gp_map[in0->index]);
2396 			bemit_modrr(in0, in1);
2398 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
2399 		panic("unimplemented"); // TODO implement
2400 		//ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
2401 		//ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
2402 		//ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
2403 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
2405 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
2408 		panic("unexpected register class in be_Perm (%+F)", node);
/** Emit the xor reg,reg zeroing idiom for the node's output register.
 * NOTE(review): the xor opcode emission line is elided in this extract. */
2412 static void bemit_xor0(const ir_node *node)
2414 	const arch_register_t *out = arch_get_irn_register_out(node, 0);
2416 	bemit_modrr(out, out);
2419 static void bemit_mov_const(const ir_node *node)
2421 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2422 bemit8(0xB8 + reg_gp_map[out->index]);
2423 bemit_immediate(node, false);
2427 * Creates a function for a Binop with 3 possible encodings.
/* BINOP(op, def, eax_imm, imm, imm_ext): generates bemit_<op> dispatching
 * through bemit_binop with the 4-opcode table.
 * NOTE(review): the macro's closing brace line is elided in this extract. */
2429 #define BINOP(op, op0, op1, op2, op2_ext)                                 \
2430 static void bemit_ ## op(const ir_node *node) {                           \
2431 	static const unsigned char op ## _codes[] = {op0, op1, op2, op2_ext}; \
2432 	bemit_binop(node, op ## _codes);                                      \
2435 /*    insn  def  eax,imm imm */
2436 BINOP(add,  0x03, 0x05, 0x81, 0)
2437 BINOP(or,   0x0B, 0x0D, 0x81, 1)
2438 BINOP(adc,  0x13, 0x15, 0x81, 2)
2439 BINOP(sbb,  0x1B, 0x1D, 0x81, 3)
2440 BINOP(and,  0x23, 0x25, 0x81, 4)
2441 BINOP(sub,  0x2B, 0x2D, 0x81, 5)
2442 BINOP(xor,  0x33, 0x35, 0x81, 6)
2443 BINOP(test, 0x85, 0xA9, 0xF7, 0)
/* BINOPMEM(op, ext): generates bemit_<op> and bemit_<op>8bit for binops with
 * a memory destination — immediate operands use 0x80/0x81/0x83 with /ext,
 * register operands use (ext<<3)|size.
 * NOTE(review): many macro-body lines (declarations, opcode bemit8 calls,
 * braces and instantiation lines) are elided in this extract. */
2445 #define BINOPMEM(op, ext) \
2446 static void bemit_##op(const ir_node *node) \
2449 	unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2452 	val = get_irn_n(node, n_ia32_unary_op); \
2453 	if (is_ia32_Immediate(val)) { \
2454 		const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(val); \
2455 		int                          offset = attr->offset; \
2456 		if (attr->symconst == NULL && get_signed_imm_size(offset) == 1) { \
2458 			bemit_mod_am(ext, node); \
2462 			bemit_mod_am(ext, node); \
2466 				bemit_entity(attr->symconst, attr->sc_sign, offset, false); \
2470 		bemit8(ext << 3 | 1); \
2471 		bemit_mod_am(reg_gp_map[arch_get_irn_register_out(val, 0)->index], node); \
2475 static void bemit_##op##8bit(const ir_node *node) \
2477 	ir_node *val = get_irn_n(node, n_ia32_unary_op); \
2478 	if (is_ia32_Immediate(val)) { \
2480 		bemit_mod_am(ext, node); \
2481 		bemit8(get_ia32_immediate_attr_const(val)->offset); \
2484 		bemit_mod_am(reg_gp_map[arch_get_irn_register_out(val, 0)->index], node); \
2496 * Creates a function for an Unop with code /ext encoding.
/* UNOP(op, code, ext, input): generates bemit_<op> as a thin wrapper around
 * bemit_unop with the given opcode byte and /ext extension.
 * NOTE(review): the macro's closing brace line is elided in this extract. */
2498 #define UNOP(op, code, ext, input)              \
2499 static void bemit_ ## op(const ir_node *node) { \
2500 	bemit_unop(node, code, ext, input);         \
2503 UNOP(not,     0xF7, 2, n_ia32_Not_val)
2504 UNOP(neg,     0xF7, 3, n_ia32_Neg_val)
2505 UNOP(mul,     0xF7, 4, n_ia32_Mul_right)
2506 UNOP(imul1op, 0xF7, 5, n_ia32_IMul1OP_right)
2507 UNOP(div,     0xF7, 6, n_ia32_Div_divisor)
2508 UNOP(idiv,    0xF7, 7, n_ia32_IDiv_divisor)
2510 /* TODO: am support for IJmp */
2511 UNOP(ijmp,    0xFF, 4, n_ia32_IJmp_target)
/* SHIFT(op, ext): generates bemit_<op> (register destination) and
 * bemit_<op>mem (memory destination). Shift-by-1 uses the short 0xD0/0xD1
 * forms, other immediates 0xC0/0xC1 + imm8, and the cl-count variant
 * 0xD2/0xD3.
 * NOTE(review): several macro-body lines (opcode bemit8 calls in the
 * register variant, the imm8 emissions, braces, instantiations) are elided. */
2513 #define SHIFT(op, ext) \
2514 static void bemit_##op(const ir_node *node) \
2516 	const arch_register_t *out   = arch_get_irn_register_out(node, 0); \
2517 	ir_node               *count = get_irn_n(node, 1); \
2518 	if (is_ia32_Immediate(count)) { \
2519 		int offset = get_ia32_immediate_attr_const(count)->offset; \
2520 		if (offset == 1) { \
2522 			bemit_modru(out, ext); \
2525 			bemit_modru(out, ext); \
2530 		bemit_modru(out, ext); \
2534 static void bemit_##op##mem(const ir_node *node) \
2537 	unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2540 	count = get_irn_n(node, 1); \
2541 	if (is_ia32_Immediate(count)) { \
2542 		int offset = get_ia32_immediate_attr_const(count)->offset; \
2543 		if (offset == 1) { \
2544 			bemit8(size == 8 ? 0xD0 : 0xD1); \
2545 			bemit_mod_am(ext, node); \
2547 			bemit8(size == 8 ? 0xC0 : 0xC1); \
2548 			bemit_mod_am(ext, node); \
2552 		bemit8(size == 8 ? 0xD2 : 0xD3); \
2553 		bemit_mod_am(ext, node); \
/** Emit a double-precision left shift (shld): immediate count uses the
 * imm8 form, otherwise the cl-count form.
 * NOTE(review): the two-byte 0x0F-prefixed opcode emissions and braces are
 * elided in this extract. */
2563 static void bemit_shld(const ir_node *node)
2565 	const arch_register_t *in    = arch_get_irn_register_in(node, n_ia32_ShlD_val_low);
2566 	const arch_register_t *out   = arch_get_irn_register_out(node, pn_ia32_ShlD_res);
2567 	ir_node               *count = get_irn_n(node, n_ia32_ShlD_count);
2569 	if (is_ia32_Immediate(count)) {
2571 		bemit_modrr(out, in);
2572 		bemit8(get_ia32_immediate_attr_const(count)->offset);
2575 		bemit_modrr(out, in);
/** Emit a double-precision right shift (shrd); structure parallels
 * bemit_shld above.
 * NOTE(review): the two-byte 0x0F-prefixed opcode emissions and braces are
 * elided in this extract. */
2581 static void bemit_shrd(const ir_node *node)
2583 	const arch_register_t *in    = arch_get_irn_register_in(node, n_ia32_ShrD_val_low);
2584 	const arch_register_t *out   = arch_get_irn_register_out(node, pn_ia32_ShrD_res);
2585 	ir_node               *count = get_irn_n(node, n_ia32_ShrD_count);
2587 	if (is_ia32_Immediate(count)) {
2588 		bemit_modrr(out, in);
2589 		bemit8(get_ia32_immediate_attr_const(count)->offset);
2591 		bemit_modrr(out, in);
2596 * binary emitter for setcc.
/** Binary emitter for setcc. For float compares whose condition involves
 * the parity flag, a setp/setnp into the high byte is emitted and merged
 * into the result with orb/andb (cf. the unordered-compare table at the
 * top of this file); otherwise a plain setcc of the low byte.
 * NOTE(review): the 0x0F opcode prefixes, the setp/setnp opcode bytes and
 * various braces are elided in this extract. */
2598 static void bemit_setcc(const ir_node *node)
2600 	const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);
2602 	ia32_condition_code_t cc = get_ia32_condcode(node);
2603 	cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
2604 	if (cc & ia32_cc_float_parity_cases) {
2605 		if (cc & ia32_cc_negated) {
2608 			bemit8(0x90 | pnc2cc(cc));
2609 			bemit_modrm8(REG_LOW, dreg);
2614 			bemit_modrm8(REG_HIGH, dreg);
2616 			/* orb %>dreg, %<dreg */
2618 			bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
2622 			bemit8(0x90 | pnc2cc(cc));
2623 			bemit_modrm8(REG_LOW, dreg);
2628 			bemit_modrm8(REG_HIGH, dreg);
2630 			/* andb %>dreg, %<dreg */
2632 			bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
2637 		bemit8(0x90 | pnc2cc(cc));
2638 		bemit_modrm8(REG_LOW, dreg);
/** Binary emitter for cmovcc: ensure the false value already sits in the
 * output register (mov or swap roles + negate the condition), then emit
 * the 0x0F 0x4x conditional move of the true value.
 * NOTE(review): the 0x0F prefix emission, some braces and the
 * ins_permuted handling lines are elided in this extract. */
2644 static void bemit_cmovcc(const ir_node *node)
2645 	const ia32_attr_t     *attr         = get_ia32_attr_const(node);
2645 	int                    ins_permuted = attr->data.ins_permuted;
2646 	const arch_register_t *out          = arch_get_irn_register_out(node, pn_ia32_res);
2647 	ia32_condition_code_t  cc           = get_ia32_condcode(node);
2648 	const arch_register_t *in_true;
2649 	const arch_register_t *in_false;
2651 	cc = determine_final_cc(node, n_ia32_CMovcc_eflags, cc);
2653 	in_true  = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
2654 	in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));
2656 	/* should be same constraint fulfilled? */
2657 	if (out == in_false) {
2658 		/* yes -> nothing to do */
2659 	} else if (out == in_true) {
2660 		assert(get_ia32_op_type(node) == ia32_Normal);
2661 		ins_permuted = !ins_permuted;
2665 		bemit8(0x8B); // mov %in_false, %out
2666 		bemit_modrr(in_false, out);
2670 		cc = ia32_negate_condition_code(cc);
2672 	if (cc & ia32_cc_float_parity_cases)
2673 		panic("cmov can't handle parity float cases");
2676 	bemit8(0x40 | pnc2cc(cc));
2677 	if (get_ia32_op_type(node) == ia32_Normal) {
2678 		bemit_modrr(in_true, out);
2680 		bemit_mod_am(reg_gp_map[out->index], node);
/** Binary emitter for cmp: immediate right operand selects between the
 * sign-extended imm8 form (0x83 /7), the short eax form and the generic
 * 0x81 /7 form (with 16bit operand handling); otherwise a plain 0x3B-style
 * reg/mem compare.
 * NOTE(review): many lines (size declarations, the 16bit prefix emission,
 * eax opcode bytes, "break"s and braces) are elided in this extract. */
2686 static void bemit_cmp(const ir_node *node)
2686 	unsigned  ls_size = get_mode_size_bits(get_ia32_ls_mode(node));
2692 	right = get_irn_n(node, n_ia32_binary_right);
2693 	if (is_ia32_Immediate(right)) {
2694 		/* Use in-reg, because some instructions (cmp, test) have no out-reg. */
2695 		const ir_node               *op   = get_irn_n(node, n_ia32_binary_right);
2696 		const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
2699 		if (attr->symconst != NULL) {
2702 			/* check for sign extension */
2703 			size = get_signed_imm_size(attr->offset);
2708 			bemit8(0x81 | SIGNEXT_IMM);
2709 			/* cmp has this special mode */
2710 			if (get_ia32_op_type(node) == ia32_AddrModeS) {
2711 				bemit_mod_am(7, node);
2713 				const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2714 				bemit_modru(reg, 7);
2716 			bemit8((unsigned char)attr->offset);
2720 			/* check for eax variant: this variant is shorter for 32bit immediates only */
2721 			if (get_ia32_op_type(node) == ia32_AddrModeS) {
2723 				bemit_mod_am(7, node);
2725 				const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
2726 				if (reg->index == REG_GP_EAX) {
2730 					bemit_modru(reg, 7);
2733 			if (ls_size == 16) {
2734 				bemit16(attr->offset);
2736 				bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
2740 			panic("invalid imm size?!?");
2742 		const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
2744 		if (get_ia32_op_type(node) == ia32_Normal) {
2745 			const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
2746 			bemit_modrr(op2, out);
2748 			bemit_mod_am(reg_gp_map[out->index], node);
/** Binary emitter for the 8bit cmp variant: imm8 compare (short al form or
 * 0x80 /7), or a reg/mem byte compare.
 * NOTE(review): the opcode bytes (0x3C/0x80/0x38-style) and braces are
 * elided in this extract. */
2753 static void bemit_cmp8bit(const ir_node *node)
2755 	ir_node *right = get_irn_n(node, n_ia32_binary_right);
2756 	if (is_ia32_Immediate(right)) {
2757 		if (get_ia32_op_type(node) == ia32_Normal) {
2758 			const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
2759 			if (out->index == REG_GP_EAX) {
2763 				bemit_modru(out, 7);
2767 			bemit_mod_am(7, node);
2769 		bemit8(get_ia32_immediate_attr_const(right)->offset);
2771 		const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
2773 		if (get_ia32_op_type(node) == ia32_Normal) {
2774 			const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Cmp_right);
2775 			bemit_modrr(out, in);
2777 			bemit_mod_am(reg_gp_map[out->index], node);
/** Binary emitter for the 8bit test variant; structure parallels
 * bemit_cmp8bit with /0 extension.
 * NOTE(review): the opcode bytes (0xA8/0xF6/0x84-style) and braces are
 * elided in this extract. */
2784 static void bemit_test8bit(const ir_node *node)
2784 	ir_node *right = get_irn_n(node, n_ia32_Test8Bit_right);
2785 	if (is_ia32_Immediate(right)) {
2786 		if (get_ia32_op_type(node) == ia32_Normal) {
2787 			const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
2788 			if (out->index == REG_GP_EAX) {
2792 				bemit_modru(out, 0);
2796 			bemit_mod_am(0, node);
2798 		bemit8(get_ia32_immediate_attr_const(right)->offset);
2800 		const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
2802 		if (get_ia32_op_type(node) == ia32_Normal) {
2803 			const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Test8Bit_right);
2804 			bemit_modrr(out, in);
2806 			bemit_mod_am(reg_gp_map[out->index], node);
/** Binary emitter for imul: imm8 form (0x6B), imm32 form (0x69) — both via
 * bemit_unop_reg with the left input — or the two-byte 0x0F 0xAF reg form.
 * NOTE(review): the immediate emission lines, the 0x0F prefix and braces
 * are elided in this extract. */
2813 static void bemit_imul(const ir_node *node)
2813 	ir_node *right = get_irn_n(node, n_ia32_IMul_right);
2814 	/* Do we need the immediate form? */
2815 	if (is_ia32_Immediate(right)) {
2816 		int imm = get_ia32_immediate_attr_const(right)->offset;
2817 		if (get_signed_imm_size(imm) == 1) {
2818 			bemit_unop_reg(node, 0x6B, n_ia32_IMul_left);
2821 			bemit_unop_reg(node, 0x69, n_ia32_IMul_left);
2826 		bemit_unop_reg(node, 0xAF, n_ia32_IMul_right);
/** Emit DEC reg: single-byte opcode 0x48 + register number. */
2830 static void bemit_dec(const ir_node *node)
2832 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Dec_res);
2833 bemit8(0x48 + reg_gp_map[out->index]);
/** Emit INC reg: single-byte opcode 0x40 + register number. */
2836 static void bemit_inc(const ir_node *node)
2838 const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Inc_res);
2839 bemit8(0x40 + reg_gp_map[out->index]);
/**
 * Generate an emitter for a unary operation on a memory operand.
 * `code` is the opcode byte, `ext` the ModRM reg-field extension selecting
 * the operation within the opcode group (e.g. /2 = NOT, /3 = NEG in the
 * 0xF6 group; /0 = INC, /1 = DEC in the 0xFE group).
 * NOTE(review): bemit_unop_mem presumably adjusts the opcode for operand
 * size — its definition is outside this view; confirm.
 */
2842 #define UNOPMEM(op, code, ext) \
2843 static void bemit_##op(const ir_node *node) \
2845 bemit_unop_mem(node, code, ext); \
2848 UNOPMEM(notmem, 0xF6, 2)
2849 UNOPMEM(negmem, 0xF6, 3)
2850 UNOPMEM(incmem, 0xFE, 0)
2851 UNOPMEM(decmem, 0xFE, 1)
/**
 * Emit a load of the TLS base: a %gs-segment-prefixed MOV from address 0.
 * EAX uses the short MOV EAX, moffs32 form (0xA1); other registers use
 * MOV r32, r/m32 (0x8B) with a disp32-only ModRM (rm = 0x05).
 */
2853 static void bemit_ldtls(const ir_node *node)
2855 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2857 bemit8(0x65); // gs:
2858 if (out->index == REG_GP_EAX) {
2859 bemit8(0xA1); // movl 0, %eax
2861 bemit8(0x8B); // movl 0, %reg
2862 bemit8(MOD_IND | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x05));
/**
 * Emit LEA: the 0x8D opcode byte is elided from this view; the visible part
 * emits the addressing-mode (ModRM/SIB/displacement) bytes for the node.
 */
2870 static void bemit_lea(const ir_node *node)
2872 const arch_register_t *out = arch_get_irn_register_out(node, 0);
2874 bemit_mod_am(reg_gp_map[out->index], node);
2877 /* helper function for bemit_minus64bit */
/* MOV r32, r32 (0x8B /r) */
2878 static void bemit_helper_mov(const arch_register_t *src, const arch_register_t *dst)
2880 bemit8(0x8B); // movl %src, %dst
2881 bemit_modrr(src, dst);
2884 /* helper function for bemit_minus64bit */
/* NEG r32: 0xF7 /3 */
2885 static void bemit_helper_neg(const arch_register_t *reg)
2887 bemit8(0xF7); // negl %reg
2888 bemit_modru(reg, 3);
2891 /* helper function for bemit_minus64bit */
/* SBB r32, imm8: 0x83 /3 — the trailing zero immediate byte is elided here */
2892 static void bemit_helper_sbb0(const arch_register_t *reg)
2894 bemit8(0x83); // sbbl $0, %reg
2895 bemit_modru(reg, 3);
2899 /* helper function for bemit_minus64bit */
/* SBB r32, r/m32: 0x1B /r */
2900 static void bemit_helper_sbb(const arch_register_t *src, const arch_register_t *dst)
2902 bemit8(0x1B); // sbbl %src, %dst
2903 bemit_modrr(src, dst);
2906 /* helper function for bemit_minus64bit */
/* XCHG: 0x90+reg is the one-byte xchg-with-EAX form; otherwise 0x87 /r */
2907 static void bemit_helper_xchg(const arch_register_t *src, const arch_register_t *dst)
2909 if (src->index == REG_GP_EAX) {
2910 bemit8(0x90 + reg_gp_map[dst->index]); // xchgl %eax, %dst
2911 } else if (dst->index == REG_GP_EAX) {
2912 bemit8(0x90 + reg_gp_map[src->index]); // xchgl %src, %eax
2914 bemit8(0x87); // xchgl %src, %dst
2915 bemit_modrr(src, dst);
2919 /* helper function for bemit_minus64bit */
/* Clear a register via XOR reg, reg (0x33 /r) */
2920 static void bemit_helper_zero(const arch_register_t *reg)
2922 bemit8(0x33); // xorl %reg, %reg
2923 bemit_modrr(reg, reg);
/**
 * Emit the negation of a 64-bit value held in a low/high register pair.
 * First moves the inputs into the output registers, resolving all possible
 * register-overlap cases (including a swap); then emits one of two negation
 * sequences: neg hi / neg lo / sbb $0, hi — or, when the high input is
 * still live, zero hi / neg lo / sbb in_hi, hi.
 * NOTE(review): the control flow joining the copy cases to the two negation
 * sequences is partially elided from this view.
 */
2926 static void bemit_minus64bit(const ir_node *node)
2928 const arch_register_t *in_lo = arch_get_irn_register_in(node, 0);
2929 const arch_register_t *in_hi = arch_get_irn_register_in(node, 1);
2930 const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
2931 const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);
2933 if (out_lo == in_lo) {
2934 if (out_hi != in_hi) {
2935 /* a -> a, b -> d */
2938 /* a -> a, b -> b */
2941 } else if (out_lo == in_hi) {
2942 if (out_hi == in_lo) {
2943 /* a -> b, b -> a */
2944 bemit_helper_xchg(in_lo, in_hi);
2947 /* a -> b, b -> d */
2948 bemit_helper_mov(in_hi, out_hi);
2949 bemit_helper_mov(in_lo, out_lo);
2953 if (out_hi == in_lo) {
2954 /* a -> c, b -> a */
2955 bemit_helper_mov(in_lo, out_lo);
2957 } else if (out_hi == in_hi) {
2958 /* a -> c, b -> b */
2959 bemit_helper_mov(in_lo, out_lo);
2962 /* a -> c, b -> d */
2963 bemit_helper_mov(in_lo, out_lo);
/* standard sequence: -x == (not hi, not lo) + 1, done via neg/sbb */
2969 bemit_helper_neg( out_hi);
2970 bemit_helper_neg( out_lo);
2971 bemit_helper_sbb0(out_hi);
/* alternative sequence when the original high word is still available */
2975 bemit_helper_zero(out_hi);
2976 bemit_helper_neg( out_lo);
2977 bemit_helper_sbb( in_hi, out_hi);
2981 * Emit a single opcode.
/* Generates a trivial emitter that writes exactly one opcode byte.
 * (The bemit8(code) line of the macro body is elided from this view.)
 * The commented-out instantiations document opcodes that are intentionally
 * not emitted by this backend. */
2983 #define EMIT_SINGLEOP(op, code) \
2984 static void bemit_ ## op(const ir_node *node) { \
2989 //EMIT_SINGLEOP(daa, 0x27)
2990 //EMIT_SINGLEOP(das, 0x2F)
2991 //EMIT_SINGLEOP(aaa, 0x37)
2992 //EMIT_SINGLEOP(aas, 0x3F)
2993 //EMIT_SINGLEOP(nop, 0x90)
2994 EMIT_SINGLEOP(cwtl, 0x98)
2995 EMIT_SINGLEOP(cltd, 0x99)
2996 //EMIT_SINGLEOP(fwait, 0x9B)
2997 EMIT_SINGLEOP(sahf, 0x9E)
2998 //EMIT_SINGLEOP(popf, 0x9D)
2999 EMIT_SINGLEOP(leave, 0xC9)
3000 EMIT_SINGLEOP(int3, 0xCC)
3001 //EMIT_SINGLEOP(iret, 0xCF)
3002 //EMIT_SINGLEOP(xlat, 0xD7)
3003 //EMIT_SINGLEOP(lock, 0xF0)
3004 EMIT_SINGLEOP(rep, 0xF3)
3005 //EMIT_SINGLEOP(halt, 0xF4)
3006 EMIT_SINGLEOP(cmc, 0xF5)
3007 EMIT_SINGLEOP(stc, 0xF9)
3008 //EMIT_SINGLEOP(cli, 0xFA)
3009 //EMIT_SINGLEOP(sti, 0xFB)
3010 //EMIT_SINGLEOP(std, 0xFD)
3013 * Emits a MOV out, [MEM].
/* EAX loads from an absolute (no base, no index) address can use the
 * one-byte-shorter MOV EAX, moffs32 form; everything else goes through the
 * generic MOV r32, r/m32 path (its 0x8B opcode byte is elided here). */
3015 static void bemit_load(const ir_node *node)
3017 const arch_register_t *out = arch_get_irn_register_out(node, 0);
3019 if (out->index == REG_GP_EAX) {
3020 ir_node *base = get_irn_n(node, n_ia32_base);
3021 int has_base = !is_ia32_NoReg_GP(base);
3022 ir_node *idx = get_irn_n(node, n_ia32_index);
3023 int has_index = !is_ia32_NoReg_GP(idx);
3024 if (!has_base && !has_index) {
3025 ir_entity *ent = get_ia32_am_sc(node);
3026 int offs = get_ia32_am_offs_int(node);
3027 /* load from constant address to EAX can be encoded
/* emit the absolute address (entity + offset) as the moffs operand */
3030 bemit_entity(ent, 0, offs, false);
3035 bemit_mod_am(reg_gp_map[out->index], node);
3039 * Emits a MOV [mem], in.
/* Three immediate paths by store size (8/16/32 bit; the C6/C7 opcode and
 * 0x66 operand-size-prefix bytes are elided here), plus an EAX special case
 * mirroring bemit_load: a store of EAX to an absolute address uses the
 * short MOV moffs, EAX form. ModRM reg-field 0 selects MOV in the C6/C7
 * immediate group. */
3041 static void bemit_store(const ir_node *node)
3043 const ir_node *value = get_irn_n(node, n_ia32_Store_val);
3044 unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));
3046 if (is_ia32_Immediate(value)) {
/* 8-bit immediate store */
3049 bemit_mod_am(0, node);
3050 bemit8(get_ia32_immediate_attr_const(value)->offset);
3051 } else if (size == 16) {
/* 16-bit immediate store */
3054 bemit_mod_am(0, node);
3055 bemit16(get_ia32_immediate_attr_const(value)->offset);
/* 32-bit immediate store; may carry an entity relocation */
3058 bemit_mod_am(0, node);
3059 bemit_immediate(value, false);
3062 const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Store_val);
3064 if (in->index == REG_GP_EAX) {
3065 ir_node *base = get_irn_n(node, n_ia32_base);
3066 int has_base = !is_ia32_NoReg_GP(base);
3067 ir_node *idx = get_irn_n(node, n_ia32_index);
3068 int has_index = !is_ia32_NoReg_GP(idx);
3069 if (!has_base && !has_index) {
3070 ir_entity *ent = get_ia32_am_sc(node);
3071 int offs = get_ia32_am_offs_int(node);
3072 /* store to constant address from EAX can be encoded as
3073 * 0xA2/0xA3 [offset]*/
3081 bemit_entity(ent, 0, offs, false);
/* generic MOV r/m, r path */
3093 bemit_mod_am(reg_gp_map[in->index], node);
/**
 * Emit MOVZX/MOVSX (integer widening conversion).
 * Base opcode (0x0F escape elided here) is 0xB6 (movzx r32, r/m8);
 * |= 0x08 selects the sign-extending variant (0xBE/0xBF),
 * |= 0x01 selects the 16-bit source variant (0xB7/0xBF).
 */
3097 static void bemit_conv_i2i(const ir_node *node)
3099 ir_mode *smaller_mode = get_ia32_ls_mode(node);
3108 if (mode_is_signed(smaller_mode)) opcode |= 0x08;
3109 if (get_mode_size_bits(smaller_mode) == 16) opcode |= 0x01;
3110 bemit_unop_reg(node, opcode, n_ia32_Conv_I2I_val);
/**
 * Emit PUSH.
 * Immediate: short 0x6A imm8 form when the value fits a signed byte, else
 * the 32-bit immediate form (0x68 opcode byte elided here).
 * Memory operand: 0xFF /6. Register: one-byte 0x50 + reg.
 */
3116 static void bemit_push(const ir_node *node)
3118 const ir_node *value = get_irn_n(node, n_ia32_Push_val);
3120 if (is_ia32_Immediate(value)) {
3121 const ia32_immediate_attr_t *attr
3122 = get_ia32_immediate_attr_const(value);
3123 unsigned size = get_signed_imm_size(attr->offset);
/* push imm8 */
3129 bemit8((unsigned char)attr->offset);
/* push imm32 (may carry an entity relocation) */
3134 bemit_immediate(value, false);
/* NoReg value => push from a memory operand: /6 = PUSH in the 0xFF group */
3137 } else if (is_ia32_NoReg_GP(value)) {
3139 bemit_mod_am(6, node);
3141 const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_Push_val);
3142 bemit8(0x50 + reg_gp_map[reg->index]);
/** Emit POP reg: single-byte opcode 0x58 + register number. */
3149 static void bemit_pop(const ir_node *node)
3151 const arch_register_t *reg = arch_get_irn_register_out(node, pn_ia32_Pop_res);
3152 bemit8(0x58 + reg_gp_map[reg->index]);
/**
 * Emit POP to a memory operand: ModRM reg-field 0 selects POP in the 0x8F
 * group (the 0x8F opcode byte itself is elided from this view).
 */
3155 static void bemit_popmem(const ir_node *node)
3158 bemit_mod_am(0, node);
/**
 * Emit CALL: a direct relative call for an immediate target (the 0xE8
 * opcode byte is elided here; the immediate is emitted PC-relative), or an
 * indirect call through register/memory via 0xFF /2.
 */
3161 static void bemit_call(const ir_node *node)
3163 ir_node *proc = get_irn_n(node, n_ia32_Call_addr);
3165 if (is_ia32_Immediate(proc)) {
/* relative=true: emit as a PC-relative displacement */
3167 bemit_immediate(proc, true);
3169 bemit_unop(node, 0xFF, 2, n_ia32_Call_addr);
/** Emit an unconditional JMP to a block (opcode byte elided here),
 *  followed by the relative destination. */
3173 static void bemit_jmp(const ir_node *dest_block)
3176 bemit_jmp_destination(dest_block);
/** Emit a Jmp node; omitted entirely when the target is the fallthrough
 *  successor. */
3179 static void bemit_jump(const ir_node *node)
3181 if (can_be_fallthrough(node))
3184 bemit_jmp(get_cfop_target_block(node));
/** Emit a conditional jump for condition code `pnc` (Jcc opcode bytes
 *  derived from cc are elided from this view). */
3187 static void bemit_jcc(int pnc, const ir_node *dest_block)
3189 unsigned char cc = pnc2cc(pnc);
3192 bemit_jmp_destination(dest_block);
/** Emit JP/JNP (jump on parity), selected by `odd`; used for the
 *  unordered cases of floating point compares. */
3195 static void bemit_jp(bool odd, const ir_node *dest_block)
3199 bemit_jmp_destination(dest_block);
/**
 * Emit the conditional-jump pair for an ia32_Jcc node.
 * Negates the condition to let the true Proj fall through when possible,
 * emits extra JP/JNP handling for float compares whose condition involves
 * the parity flag (unordered results set PF, see the file header table),
 * and finally emits an unconditional JMP for the false Proj unless that
 * one is a fallthrough.
 */
3202 static void bemit_ia32_jcc(const ir_node *node)
3204 ia32_condition_code_t cc = get_ia32_condcode(node);
3205 const ir_node *proj_true;
3206 const ir_node *proj_false;
3207 const ir_node *dest_true;
3208 const ir_node *dest_false;
3210 cc = determine_final_cc(node, 0, cc);
3212 /* get both Projs */
3213 proj_true = get_proj(node, pn_ia32_Jcc_true);
3214 assert(proj_true && "Jcc without true Proj");
3216 proj_false = get_proj(node, pn_ia32_Jcc_false);
3217 assert(proj_false && "Jcc without false Proj");
3219 if (can_be_fallthrough(proj_true)) {
3220 /* exchange both proj's so the second one can be omitted */
3221 const ir_node *t = proj_true;
3223 proj_true = proj_false;
3225 cc = ia32_negate_condition_code(cc);
3228 dest_true = get_cfop_target_block(proj_true);
3229 dest_false = get_cfop_target_block(proj_false);
3231 if (cc & ia32_cc_float_parity_cases) {
3232 /* Some floating point comparisons require a test of the parity flag,
3233 * which indicates that the result is unordered */
3234 if (cc & ia32_cc_negated) {
/* negated condition: unordered goes to the true target */
3235 bemit_jp(false, dest_true);
3237 /* we need a local label if the false proj is a fallthrough
3238 * as the falseblock might have no label emitted then */
3239 if (can_be_fallthrough(proj_false)) {
/* skip over the following jcc (short jp with fixed +6 displacement) */
3241 bemit8(0x06); // jp + 6
3243 bemit_jp(false, dest_false);
3247 bemit_jcc(cc, dest_true);
3249 /* the second Proj might be a fallthrough */
3250 if (can_be_fallthrough(proj_false)) {
3251 /* it's a fallthrough */
3253 bemit_jmp(dest_false);
/**
 * Emit an indirect jump through a switch jump table, then emit the table
 * itself via be_emit_jump_table.
 * NOTE(review): the ModRM reg-field 0x05 encodes FF /5 (JMP FAR m16:32);
 * a near indirect jump — which the comment "jmp *tbl.label(,%in,4)"
 * describes — is FF /4. Verify this value against upstream; it looks like
 * it should be 0x04.
 */
3257 static void bemit_switchjmp(const ir_node *node)
3259 ir_entity *jump_table = get_ia32_am_sc(node);
3260 const ir_switch_table *table = get_ia32_switch_table(node);
3262 bemit8(0xFF); // jmp *tbl.label(,%in,4)
3263 bemit_mod_am(0x05, node);
3265 be_emit_jump_table(node, table, jump_table, get_cfop_target_block);
/**
 * Emit RET, using the RET imm16 form (pops `pop` additional bytes from the
 * stack, e.g. for stdcall-style callee cleanup) when requested; the opcode
 * and immediate bytes are elided from this view.
 */
3271 static void bemit_return(const ir_node *node)
3273 unsigned pop = be_Return_get_pop(node);
3274 if (pop > 0 || be_Return_get_emit_pop(node)) {
/* RET imm16 can only encode a 16-bit pop amount */
3276 assert(pop <= 0xffff);
/**
 * Emit a SubSP: subtract from %esp (the SUB encoding is elided from this
 * view), then copy the new stack pointer into the second result register
 * via MOV r32, r/m32 with rm = 0x04 — presumably %esp via reg_gp_map;
 * TODO confirm the register numbering against reg_gp_map's definition.
 */
3283 static void bemit_subsp(const ir_node *node)
3285 const arch_register_t *out;
3288 /* mov %esp, %out */
3290 out = arch_get_irn_register_out(node, 1);
3291 bemit8(MOD_REG | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x04));
/**
 * Emit a stack pointer adjustment (be_IncSP) as ADD/SUB reg, imm.
 * Chooses the sign-extended imm8 form (0x83) when the offset fits a byte,
 * else the imm32 form (0x81). `ext` — the ModRM reg-field selecting
 * ADD vs. SUB — is computed in lines elided from this view.
 */
3294 static void bemit_incsp(const ir_node *node)
3297 const arch_register_t *reg;
3301 offs = be_get_IncSP_offset(node);
3312 size = get_signed_imm_size(offs);
3313 bemit8(size == 1 ? 0x83 : 0x81);
3315 reg = arch_get_irn_register_out(node, 0);
3316 bemit_modru(reg, ext);
/**
 * Emit an inline memory copy (CopyB with known size) as a MOVS sequence:
 * MOVSB for the byte remainder, MOVSW (0x66-prefixed MOVS, prefix elided
 * here) for a 2-byte remainder, and REP MOVSL for the bulk (REP prefix and
 * size dispatch elided from this view).
 */
3325 static void bemit_copybi(const ir_node *node)
3327 unsigned size = get_ia32_copyb_size(node);
3329 bemit8(0xA4); // movsb
3332 bemit8(0xA5); // movsw
3336 bemit8(0xA5); // movsl
/**
 * Emit an x87 binary operation.
 * Register form: `code` is the ModRM reg-field for the "st(0) op st(i)"
 * direction, `code_to` for the reversed "st(i) op st(0)" direction; which
 * direction is used depends on where the result register sits (the D8/DC
 * opcode bytes are elided from this view).
 * Memory form: operand size selects the 32-bit vs. 64-bit opcode
 * (selection bytes elided), then `code` goes into the ModRM reg-field.
 */
3340 static void bemit_fbinop(const ir_node *node, unsigned code, unsigned code_to)
3342 if (get_ia32_op_type(node) == ia32_Normal) {
3343 const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
3344 const arch_register_t *in1 = x87_attr->x87[0];
3345 const arch_register_t *in = x87_attr->x87[1];
3346 const arch_register_t *out = x87_attr->x87[2];
3350 } else if (out == in) {
/* result lands in st(0): op st(0), st(i) */
3354 if (out->index == 0) {
3356 bemit8(MOD_REG | ENC_REG(code) | ENC_RM(in->index));
/* result lands in st(i): reversed-direction encoding */
3359 bemit8(MOD_REG | ENC_REG(code_to) | ENC_RM(out->index));
3362 if (get_mode_size_bits(get_ia32_ls_mode(node)) == 32) {
3367 bemit_mod_am(code, node);
/**
 * Emit an x87 binary-op-and-pop (e.g. faddp/fsubp): `code` is the second
 * opcode byte's base, with the destination stack slot added (the leading
 * 0xDE opcode byte is elided from this view).
 */
3371 static void bemit_fbinopp(const ir_node *node, unsigned const code)
3373 const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
3374 const arch_register_t *out = x87_attr->x87[2];
3376 bemit8(code + out->index);
/* Emit FABS (opcode bytes elided from this view). */
3379 static void bemit_fabs(const ir_node *node)
/* FADD: reg-field 0 in both directions */
3387 static void bemit_fadd(const ir_node *node)
3389 bemit_fbinop(node, 0, 0);
/* FADDP: DE C0+i */
3392 static void bemit_faddp(const ir_node *node)
3394 bemit_fbinopp(node, 0xC0);
/* Emit FCHS (opcode bytes elided from this view). */
3397 static void bemit_fchs(const ir_node *node)
/* FDIV: /6 forward, /7 reversed */
3405 static void bemit_fdiv(const ir_node *node)
3407 bemit_fbinop(node, 6, 7);
/* FDIVP: DE F8+i */
3410 static void bemit_fdivp(const ir_node *node)
3412 bemit_fbinopp(node, 0xF8);
/* FDIVR: /7 forward, /6 reversed */
3415 static void bemit_fdivr(const ir_node *node)
3417 bemit_fbinop(node, 7, 6);
/* FDIVRP: DE F0+i */
3420 static void bemit_fdivrp(const ir_node *node)
3422 bemit_fbinopp(node, 0xF0);
/**
 * Emit FILD (load integer onto the x87 stack), dispatched on operand size:
 * 16-bit: DF /0, 32-bit: DB /0, 64-bit: DF /5.
 */
3425 static void bemit_fild(const ir_node *node)
3427 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3429 bemit8(0xDF); // filds
3430 bemit_mod_am(0, node);
3434 bemit8(0xDB); // fildl
3435 bemit_mod_am(0, node);
3439 bemit8(0xDF); // fildll
3440 bemit_mod_am(5, node);
3444 panic("invalid mode size");
/**
 * Emit FIST (store integer without popping), dispatched on operand size:
 * 16-bit: DF /2, 32-bit: DB /2. No 64-bit form exists (fistp only).
 */
3448 static void bemit_fist(const ir_node *node)
3450 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3452 bemit8(0xDF); // fists
3456 bemit8(0xDB); // fistl
3460 panic("invalid mode size");
3462 bemit_mod_am(2, node);
/**
 * Emit FISTP (store integer and pop), dispatched on operand size:
 * 16-bit: DF /3, 32-bit: DB /3, 64-bit: DF /7.
 */
3465 static void bemit_fistp(const ir_node *node)
3467 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3469 bemit8(0xDF); // fistps
3470 bemit_mod_am(3, node);
3474 bemit8(0xDB); // fistpl
3475 bemit_mod_am(3, node);
3479 bemit8(0xDF); // fistpll
3480 bemit_mod_am(7, node);
3484 panic("invalid mode size");
/**
 * Emit FLD (load float onto the x87 stack), dispatched on operand size:
 * 32-bit: D9 /0, 64-bit: DD /0, 80-bit extended: DB /5.
 */
3488 static void bemit_fld(const ir_node *node)
3490 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3492 bemit8(0xD9); // flds
3493 bemit_mod_am(0, node);
3497 bemit8(0xDD); // fldl
3498 bemit_mod_am(0, node);
3503 bemit8(0xDB); // fldt
3504 bemit_mod_am(5, node);
3508 panic("invalid mode size");
/* FLD1 (push +1.0): second opcode byte E8; leading D9 elided here. */
3512 static void bemit_fld1(const ir_node *node)
3516 bemit8(0xE8); // fld1
/* FLDCW (load x87 control word from memory): D9 /5 */
3519 static void bemit_fldcw(const ir_node *node)
3521 bemit8(0xD9); // fldcw
3522 bemit_mod_am(5, node);
/* FLDZ (push +0.0): second opcode byte EE; leading D9 elided here. */
3525 static void bemit_fldz(const ir_node *node)
3529 bemit8(0xEE); // fldz
/* FMUL: reg-field 1 in both directions */
3532 static void bemit_fmul(const ir_node *node)
3534 bemit_fbinop(node, 1, 1);
/* FMULP: DE C8+i */
3537 static void bemit_fmulp(const ir_node *node)
3539 bemit_fbinopp(node, 0xC8);
/* Pop an x87 stack slot: ffreep-style encoding, D8+i second byte
 * (leading opcode byte elided from this view). */
3542 static void bemit_fpop(const ir_node *node)
3544 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3546 bemit8(0xD8 + attr->x87[0]->index);
/* Duplicate st(i) onto the stack: FLD st(i), C0+i second byte
 * (leading D9 elided from this view). */
3549 static void bemit_fpush(const ir_node *node)
3551 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3553 bemit8(0xC0 + attr->x87[0]->index);
/* Same encoding as fpush; separate node kind for the x87 simulator. */
3556 static void bemit_fpushcopy(const ir_node *node)
3558 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3560 bemit8(0xC0 + attr->x87[0]->index);
/**
 * Emit FST (store float without popping), dispatched on operand size:
 * 32-bit: D9 /2, 64-bit: DD /2. No 80-bit form exists (fstp only).
 */
3563 static void bemit_fst(const ir_node *node)
3565 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3567 bemit8(0xD9); // fsts
3571 bemit8(0xDD); // fstl
3575 panic("invalid mode size");
3577 bemit_mod_am(2, node);
/**
 * Emit FSTP (store float and pop), dispatched on operand size:
 * 32-bit: D9 /3, 64-bit: DD /3, 80-bit extended: DB /7.
 */
3580 static void bemit_fstp(const ir_node *node)
3582 switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
3584 bemit8(0xD9); // fstps
3585 bemit_mod_am(3, node);
3589 bemit8(0xDD); // fstpl
3590 bemit_mod_am(3, node);
3595 bemit8(0xDB); // fstpt
3596 bemit_mod_am(7, node);
3600 panic("invalid mode size");
/* FSUB: /4 forward, /5 reversed */
3604 static void bemit_fsub(const ir_node *node)
3606 bemit_fbinop(node, 4, 5);
/* FSUBP: DE E8+i */
3609 static void bemit_fsubp(const ir_node *node)
3611 bemit_fbinopp(node, 0xE8);
/* FSUBR: /5 forward, /4 reversed */
3614 static void bemit_fsubr(const ir_node *node)
3616 bemit_fbinop(node, 5, 4);
/* FSUBRP: DE E0+i */
3619 static void bemit_fsubrp(const ir_node *node)
3621 bemit_fbinopp(node, 0xE0);
/* FNSTCW (store x87 control word to memory, no wait): D9 /7 */
3624 static void bemit_fnstcw(const ir_node *node)
3626 bemit8(0xD9); // fnstcw
3627 bemit_mod_am(7, node);
/* FNSTSW %ax (copy x87 status word to AX): DF E0; the E0 byte is elided
 * from this view. */
3630 static void bemit_fnstsw(void)
3632 bemit8(0xDF); // fnstsw %ax
/* FTST (compare st(0) against 0.0) followed by fnstsw; second opcode byte
 * of ftst is elided from this view. */
3636 static void bemit_ftstfnstsw(const ir_node *node)
3640 bemit8(0xD9); // ftst
/* FUCOMI st, st(i) (unordered compare setting EFLAGS directly): DB E8+i */
3645 static void bemit_fucomi(const ir_node *node)
3647 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3648 bemit8(0xDB); // fucomi
3649 bemit8(0xE8 + attr->x87[1]->index);
/* FUCOMIP (fucomi + pop): DF E8+i */
3652 static void bemit_fucomip(const ir_node *node)
3654 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3655 bemit8(0xDF); // fucomip
3656 bemit8(0xE8 + attr->x87[1]->index);
/* FUCOM st(i) (compare into the x87 status word): DD E0+i,
 * followed by an fnstsw (elided from this view). */
3659 static void bemit_fucomfnstsw(const ir_node *node)
3661 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3662 bemit8(0xDD); // fucom
3663 bemit8(0xE0 + attr->x87[1]->index);
/* FUCOMP st(i) (fucom + pop): DD E8+i, then fnstsw (elided). */
3667 static void bemit_fucompfnstsw(const ir_node *node)
3669 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3670 bemit8(0xDD); // fucomp
3671 bemit8(0xE8 + attr->x87[1]->index);
/* FUCOMPP (compare and pop twice): DA E9 (second byte elided),
 * then fnstsw (elided). */
3675 static void bemit_fucomppfnstsw(const ir_node *node)
3679 bemit8(0xDA); // fucompp
/* FXCH st(i) (swap with st(0)): D9 C8+i (leading D9 elided here). */
3684 static void bemit_fxch(const ir_node *node)
3686 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
3688 bemit8(0xC8 + attr->x87[0]->index);
3692 * The type of a emitter function.
3694 typedef void (*emit_func) (const ir_node *);
3697 * Set a node emitter. Make it a bit more type safe.
/* Stores the emitter in the op's generic function pointer slot, which
 * ia32_emit_node later dispatches through. */
3699 static void register_emitter(ir_op *op, emit_func func)
3701 op->ops.generic = (op_func) func;
/**
 * Register all binary (machine-code) emitter callbacks: one entry per
 * backend node opcode, plus a trailing set of nodes that deliberately emit
 * nothing. Any opcode not registered here will have a NULL generic
 * function and cannot be binary-emitted.
 */
3704 static void ia32_register_binary_emitters(void)
3706 /* first clear the generic function pointer for all ops */
3707 ir_clear_opcodes_generic_func();
3709 /* benode emitter */
3710 register_emitter(op_be_Copy, bemit_copy);
3711 register_emitter(op_be_CopyKeep, bemit_copy);
3712 register_emitter(op_be_IncSP, bemit_incsp);
3713 register_emitter(op_be_Perm, bemit_perm);
3714 register_emitter(op_be_Return, bemit_return);
3715 register_emitter(op_ia32_Adc, bemit_adc);
3716 register_emitter(op_ia32_Add, bemit_add);
3717 register_emitter(op_ia32_AddMem, bemit_addmem);
3718 register_emitter(op_ia32_AddMem8Bit, bemit_addmem8bit);
3719 register_emitter(op_ia32_And, bemit_and);
3720 register_emitter(op_ia32_AndMem, bemit_andmem);
3721 register_emitter(op_ia32_AndMem8Bit, bemit_andmem8bit);
3722 register_emitter(op_ia32_Breakpoint, bemit_int3);
3723 register_emitter(op_ia32_CMovcc, bemit_cmovcc);
3724 register_emitter(op_ia32_Call, bemit_call);
3725 register_emitter(op_ia32_Cltd, bemit_cltd);
3726 register_emitter(op_ia32_Cmc, bemit_cmc);
3727 register_emitter(op_ia32_Cmp, bemit_cmp);
3728 register_emitter(op_ia32_Cmp8Bit, bemit_cmp8bit);
3729 register_emitter(op_ia32_Const, bemit_mov_const);
3730 register_emitter(op_ia32_Conv_I2I, bemit_conv_i2i);
3731 register_emitter(op_ia32_Conv_I2I8Bit, bemit_conv_i2i);
3732 register_emitter(op_ia32_CopyB_i, bemit_copybi);
3733 register_emitter(op_ia32_Cwtl, bemit_cwtl);
3734 register_emitter(op_ia32_Dec, bemit_dec);
3735 register_emitter(op_ia32_DecMem, bemit_decmem);
3736 register_emitter(op_ia32_Div, bemit_div);
3737 register_emitter(op_ia32_FldCW, bemit_fldcw);
3738 register_emitter(op_ia32_FnstCW, bemit_fnstcw);
3739 register_emitter(op_ia32_FtstFnstsw, bemit_ftstfnstsw);
3740 register_emitter(op_ia32_FucomFnstsw, bemit_fucomfnstsw);
3741 register_emitter(op_ia32_Fucomi, bemit_fucomi);
3742 register_emitter(op_ia32_FucompFnstsw, bemit_fucompfnstsw);
3743 register_emitter(op_ia32_Fucompi, bemit_fucomip);
3744 register_emitter(op_ia32_FucomppFnstsw, bemit_fucomppfnstsw);
3745 register_emitter(op_ia32_IDiv, bemit_idiv);
3746 register_emitter(op_ia32_IJmp, bemit_ijmp);
3747 register_emitter(op_ia32_IMul, bemit_imul);
3748 register_emitter(op_ia32_IMul1OP, bemit_imul1op);
3749 register_emitter(op_ia32_Inc, bemit_inc);
3750 register_emitter(op_ia32_IncMem, bemit_incmem);
3751 register_emitter(op_ia32_Jcc, bemit_ia32_jcc);
3752 register_emitter(op_ia32_Jmp, bemit_jump);
3753 register_emitter(op_ia32_LdTls, bemit_ldtls);
3754 register_emitter(op_ia32_Lea, bemit_lea);
3755 register_emitter(op_ia32_Leave, bemit_leave);
3756 register_emitter(op_ia32_Load, bemit_load);
3757 register_emitter(op_ia32_Minus64Bit, bemit_minus64bit);
3758 register_emitter(op_ia32_Mul, bemit_mul);
3759 register_emitter(op_ia32_Neg, bemit_neg);
3760 register_emitter(op_ia32_NegMem, bemit_negmem);
3761 register_emitter(op_ia32_Not, bemit_not);
3762 register_emitter(op_ia32_NotMem, bemit_notmem);
3763 register_emitter(op_ia32_Or, bemit_or);
3764 register_emitter(op_ia32_OrMem, bemit_ormem);
3765 register_emitter(op_ia32_OrMem8Bit, bemit_ormem8bit);
3766 register_emitter(op_ia32_Pop, bemit_pop);
3767 register_emitter(op_ia32_PopEbp, bemit_pop);
3768 register_emitter(op_ia32_PopMem, bemit_popmem);
3769 register_emitter(op_ia32_Push, bemit_push);
3770 register_emitter(op_ia32_RepPrefix, bemit_rep);
3771 register_emitter(op_ia32_Rol, bemit_rol);
3772 register_emitter(op_ia32_RolMem, bemit_rolmem);
3773 register_emitter(op_ia32_Ror, bemit_ror);
3774 register_emitter(op_ia32_RorMem, bemit_rormem);
3775 register_emitter(op_ia32_Sahf, bemit_sahf);
3776 register_emitter(op_ia32_Sar, bemit_sar);
3777 register_emitter(op_ia32_SarMem, bemit_sarmem);
3778 register_emitter(op_ia32_Sbb, bemit_sbb);
3779 register_emitter(op_ia32_Setcc, bemit_setcc);
3780 register_emitter(op_ia32_Shl, bemit_shl);
3781 register_emitter(op_ia32_ShlD, bemit_shld);
3782 register_emitter(op_ia32_ShlMem, bemit_shlmem);
3783 register_emitter(op_ia32_Shr, bemit_shr);
3784 register_emitter(op_ia32_ShrD, bemit_shrd);
3785 register_emitter(op_ia32_ShrMem, bemit_shrmem);
3786 register_emitter(op_ia32_Stc, bemit_stc);
3787 register_emitter(op_ia32_Store, bemit_store);
3788 register_emitter(op_ia32_Store8Bit, bemit_store);
3789 register_emitter(op_ia32_Sub, bemit_sub);
3790 register_emitter(op_ia32_SubMem, bemit_submem);
3791 register_emitter(op_ia32_SubMem8Bit, bemit_submem8bit);
3792 register_emitter(op_ia32_SubSP, bemit_subsp);
3793 register_emitter(op_ia32_SwitchJmp, bemit_switchjmp);
3794 register_emitter(op_ia32_Test, bemit_test);
3795 register_emitter(op_ia32_Test8Bit, bemit_test8bit);
3796 register_emitter(op_ia32_Xor, bemit_xor);
3797 register_emitter(op_ia32_Xor0, bemit_xor0);
3798 register_emitter(op_ia32_XorMem, bemit_xormem);
3799 register_emitter(op_ia32_XorMem8Bit, bemit_xormem8bit);
3800 register_emitter(op_ia32_fabs, bemit_fabs);
3801 register_emitter(op_ia32_fadd, bemit_fadd);
3802 register_emitter(op_ia32_faddp, bemit_faddp);
3803 register_emitter(op_ia32_fchs, bemit_fchs);
3804 register_emitter(op_ia32_fdiv, bemit_fdiv);
3805 register_emitter(op_ia32_fdivp, bemit_fdivp);
3806 register_emitter(op_ia32_fdivr, bemit_fdivr);
3807 register_emitter(op_ia32_fdivrp, bemit_fdivrp);
3808 register_emitter(op_ia32_fild, bemit_fild);
3809 register_emitter(op_ia32_fist, bemit_fist);
3810 register_emitter(op_ia32_fistp, bemit_fistp);
3811 register_emitter(op_ia32_fld, bemit_fld);
3812 register_emitter(op_ia32_fld1, bemit_fld1);
3813 register_emitter(op_ia32_fldz, bemit_fldz);
3814 register_emitter(op_ia32_fmul, bemit_fmul);
3815 register_emitter(op_ia32_fmulp, bemit_fmulp);
3816 register_emitter(op_ia32_fpop, bemit_fpop);
3817 register_emitter(op_ia32_fpush, bemit_fpush);
3818 register_emitter(op_ia32_fpushCopy, bemit_fpushcopy);
3819 register_emitter(op_ia32_fst, bemit_fst);
3820 register_emitter(op_ia32_fstp, bemit_fstp);
3821 register_emitter(op_ia32_fsub, bemit_fsub);
3822 register_emitter(op_ia32_fsubp, bemit_fsubp);
3823 register_emitter(op_ia32_fsubr, bemit_fsubr);
3824 register_emitter(op_ia32_fsubrp, bemit_fsubrp);
3825 register_emitter(op_ia32_fxch, bemit_fxch);
3827 /* ignore the following nodes */
3828 register_emitter(op_ia32_ProduceVal, emit_Nothing);
3829 register_emitter(op_be_Keep, emit_Nothing);
3830 register_emitter(op_be_Start, emit_Nothing);
3831 register_emitter(op_Phi, emit_Nothing);
3832 register_emitter(op_Start, emit_Nothing);
/**
 * Emit one scheduled block: block header (label/alignment) first, then
 * every node in schedule order.
 */
3835 static void gen_binary_block(ir_node *block)
3839 ia32_emit_block_header(block);
3841 /* emit the contents of the block */
3842 sched_foreach(block, node) {
3843 ia32_emit_node(node);
/**
 * Entry point: binary-emit one complete function (ir_graph).
 * Registers the emitter callbacks, emits the function prolog, walks the
 * blocks once to create labels, links each scheduled block to its
 * predecessor in schedule order (used by get_prev_block_sched /
 * fallthrough detection), emits all blocks, then the epilog.
 * Reserves and releases the IRN_LINK resource around the link usage.
 */
3847 void ia32_gen_binary_routine(ir_graph *irg)
3849 ir_entity *entity = get_irg_entity(irg);
3850 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
3851 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
3852 ir_node **blk_sched = irg_data->blk_sched;
3854 parameter_dbg_info_t *infos;
3856 isa = (ia32_isa_t*) arch_env;
3858 ia32_register_binary_emitters();
3860 infos = construct_parameter_infos(irg);
3861 be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment,
3865 /* we use links to point to target blocks */
3866 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
3867 irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);
3869 /* initialize next block links */
3870 n = ARR_LEN(blk_sched);
3871 for (i = 0; i < n; ++i) {
3872 ir_node *block = blk_sched[i];
3873 ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
/* NOTE(review): despite the comment above, the link points to the
 * PREVIOUS block in the schedule — matching get_prev_block_sched. */
3875 set_irn_link(block, prev);
3878 for (i = 0; i < n; ++i) {
3879 ir_node *block = blk_sched[i];
3880 gen_binary_block(block);
3883 be_gas_emit_function_epilog(entity);
3885 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
3889 void ia32_init_emitter(void)
3891 lc_opt_entry_t *be_grp;
3892 lc_opt_entry_t *ia32_grp;
3894 be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
3895 ia32_grp = lc_opt_get_grp(be_grp, "ia32");
3897 lc_opt_add_table(ia32_grp, ia32_emitter_options);
3901 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.emitter");