2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the ia32 node emitter.
23 * @author Christian Wuerdig, Matthias Braun
39 #include "iredges_t.h"
43 #include "raw_bitset.h"
47 #include "../besched.h"
48 #include "../benode.h"
50 #include "../be_dbgout.h"
51 #include "../beemitter.h"
52 #include "../begnuas.h"
54 #include "../be_dbgout.h"
56 #include "ia32_emitter.h"
57 #include "gen_ia32_emitter.h"
58 #include "gen_ia32_regalloc_if.h"
59 #include "ia32_nodes_attr.h"
60 #include "ia32_new_nodes.h"
61 #include "ia32_map_regs.h"
62 #include "ia32_architecture.h"
63 #include "bearch_ia32_t.h"
65 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
67 #define BLOCK_PREFIX ".L"
69 #define SNPRINTF_BUF_LEN 128
71 static const ia32_isa_t *isa;
72 static ia32_code_gen_t *cg;
73 static char pic_base_label[128];
74 static ir_label_t exc_label_id;
75 static int mark_spill_reload = 0;
78 /** Return the next block in Block schedule */
79 static ir_node *get_prev_block_sched(const ir_node *block)
81 return get_irn_link(block);
84 /** Checks if the current block is a fall-through target. */
85 static int is_fallthrough(const ir_node *cfgpred)
89 if (!is_Proj(cfgpred))
91 pred = get_Proj_pred(cfgpred);
92 if (is_ia32_SwitchJmp(pred))
99 * returns non-zero if the given block needs a label
100 * because of being a jump-target (and not a fall-through)
102 static int block_needs_label(const ir_node *block)
105 int n_cfgpreds = get_Block_n_cfgpreds(block);
107 if (has_Block_entity(block))
110 if (n_cfgpreds == 0) {
112 } else if (n_cfgpreds == 1) {
113 ir_node *cfgpred = get_Block_cfgpred(block, 0);
114 ir_node *cfgpred_block = get_nodes_block(cfgpred);
116 if (get_prev_block_sched(block) == cfgpred_block
117 && is_fallthrough(cfgpred)) {
126 * Returns the register at in position pos.
128 static const arch_register_t *get_in_reg(const ir_node *irn, int pos)
131 const arch_register_t *reg = NULL;
133 assert(get_irn_arity(irn) > pos && "Invalid IN position");
135 /* The out register of the operator at position pos is the
136 in register we need. */
137 op = get_irn_n(irn, pos);
139 reg = arch_get_irn_register(op);
141 assert(reg && "no in register found");
143 if (reg == &ia32_gp_regs[REG_GP_NOREG])
144 panic("trying to emit noreg for %+F input %d", irn, pos);
146 /* in case of unknown register: just return a valid register */
147 if (reg == &ia32_gp_regs[REG_GP_UKNWN]) {
148 const arch_register_req_t *req = arch_get_register_req(irn, pos);
150 if (arch_register_req_is(req, limited)) {
151 /* in case of limited requirements: get the first allowed register */
152 unsigned idx = rbitset_next(req->limited, 0, 1);
153 reg = arch_register_for_index(req->cls, idx);
155 /* otherwise get first register in class */
156 reg = arch_register_for_index(req->cls, 0);
164 * Returns the register at out position pos.
166 static const arch_register_t *get_out_reg(const ir_node *irn, int pos)
169 const arch_register_t *reg = NULL;
171 /* 1st case: irn is not of mode_T, so it has only */
172 /* one OUT register -> good */
173 /* 2nd case: irn is of mode_T -> collect all Projs and ask the */
174 /* Proj with the corresponding projnum for the register */
176 if (get_irn_mode(irn) != mode_T) {
178 reg = arch_get_irn_register(irn);
179 } else if (is_ia32_irn(irn)) {
180 reg = arch_irn_get_register(irn, pos);
182 const ir_edge_t *edge;
184 foreach_out_edge(irn, edge) {
185 proj = get_edge_src_irn(edge);
186 assert(is_Proj(proj) && "non-Proj from mode_T node");
187 if (get_Proj_proj(proj) == pos) {
188 reg = arch_get_irn_register(proj);
194 assert(reg && "no out register found");
/**
 * Add a number to a prefix. This number will not be used a second time.
 *
 * @param buf     destination buffer
 * @param buflen  size of the destination buffer
 * @param prefix  label prefix to append the counter to
 * @return buf, containing "<prefix><counter>" (truncated to buflen)
 */
static char *get_unique_label(char *buf, size_t buflen, const char *prefix)
{
	static unsigned long id = 0;
	snprintf(buf, buflen, "%s%lu", prefix, ++id);
	return buf;
}
208 /*************************************************************
210 * (_) | | / _| | | | |
211 * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
212 * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
213 * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
214 * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
217 *************************************************************/
220 * Emit the name of the 8bit low register
222 static void emit_8bit_register(const arch_register_t *reg)
224 const char *reg_name = arch_register_get_name(reg);
227 be_emit_char(reg_name[1]);
232 * Emit the name of the 8bit high register
234 static void emit_8bit_register_high(const arch_register_t *reg)
236 const char *reg_name = arch_register_get_name(reg);
239 be_emit_char(reg_name[1]);
243 static void emit_16bit_register(const arch_register_t *reg)
245 const char *reg_name = ia32_get_mapped_reg_name(isa->regs_16bit, reg);
248 be_emit_string(reg_name);
252 * emit a register, possible shortened by a mode
254 * @param reg the register
255 * @param mode the mode of the register or NULL for full register
257 static void emit_register(const arch_register_t *reg, const ir_mode *mode)
259 const char *reg_name;
262 int size = get_mode_size_bits(mode);
264 case 8: emit_8bit_register(reg); return;
265 case 16: emit_16bit_register(reg); return;
267 assert(mode_is_float(mode) || size == 32);
270 reg_name = arch_register_get_name(reg);
273 be_emit_string(reg_name);
276 void ia32_emit_source_register(const ir_node *node, int pos)
278 const arch_register_t *reg = get_in_reg(node, pos);
280 emit_register(reg, NULL);
283 static void ia32_emit_entity(ir_entity *entity, int no_pic_adjust)
285 set_entity_backend_marked(entity, 1);
286 be_gas_emit_entity(entity);
288 if (get_entity_owner(entity) == get_tls_type()) {
289 if (get_entity_visibility(entity) == visibility_external_allocated) {
290 be_emit_cstring("@INDNTPOFF");
292 be_emit_cstring("@NTPOFF");
296 if (do_pic && !no_pic_adjust) {
298 be_emit_string(pic_base_label);
302 static void emit_ia32_Immediate_no_prefix(const ir_node *node)
304 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
306 if (attr->symconst != NULL) {
309 ia32_emit_entity(attr->symconst, attr->no_pic_adjust);
311 if (attr->symconst == NULL || attr->offset != 0) {
312 if (attr->symconst != NULL) {
313 be_emit_irprintf("%+d", attr->offset);
315 be_emit_irprintf("0x%X", attr->offset);
320 static void emit_ia32_Immediate(const ir_node *node)
323 emit_ia32_Immediate_no_prefix(node);
326 void ia32_emit_8bit_source_register_or_immediate(const ir_node *node, int pos)
328 const arch_register_t *reg;
329 const ir_node *in = get_irn_n(node, pos);
330 if (is_ia32_Immediate(in)) {
331 emit_ia32_Immediate(in);
335 reg = get_in_reg(node, pos);
336 emit_8bit_register(reg);
339 void ia32_emit_8bit_high_source_register(const ir_node *node, int pos)
341 const arch_register_t *reg = get_in_reg(node, pos);
342 emit_8bit_register_high(reg);
345 void ia32_emit_16bit_source_register_or_immediate(const ir_node *node, int pos)
347 const arch_register_t *reg;
348 const ir_node *in = get_irn_n(node, pos);
349 if (is_ia32_Immediate(in)) {
350 emit_ia32_Immediate(in);
354 reg = get_in_reg(node, pos);
355 emit_16bit_register(reg);
358 void ia32_emit_dest_register(const ir_node *node, int pos)
360 const arch_register_t *reg = get_out_reg(node, pos);
362 emit_register(reg, NULL);
365 void ia32_emit_dest_register_size(const ir_node *node, int pos)
367 const arch_register_t *reg = get_out_reg(node, pos);
369 emit_register(reg, get_ia32_ls_mode(node));
372 void ia32_emit_8bit_dest_register(const ir_node *node, int pos)
374 const arch_register_t *reg = get_out_reg(node, pos);
376 emit_register(reg, mode_Bu);
379 void ia32_emit_x87_register(const ir_node *node, int pos)
381 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
385 be_emit_string(attr->x87[pos]->name);
388 static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
390 assert(mode_is_int(mode) || mode_is_reference(mode));
391 switch (get_mode_size_bits(mode)) {
392 case 8: be_emit_char('b'); return;
393 case 16: be_emit_char('w'); return;
394 case 32: be_emit_char('l'); return;
395 /* gas docu says q is the suffix but gcc, objdump and icc use ll
397 case 64: be_emit_cstring("ll"); return;
399 panic("Can't output mode_suffix for %+F", mode);
402 void ia32_emit_mode_suffix(const ir_node *node)
404 ir_mode *mode = get_ia32_ls_mode(node);
408 ia32_emit_mode_suffix_mode(mode);
411 void ia32_emit_x87_mode_suffix(const ir_node *node)
415 /* we only need to emit the mode on address mode */
416 if (get_ia32_op_type(node) == ia32_Normal)
419 mode = get_ia32_ls_mode(node);
420 assert(mode != NULL);
422 if (mode_is_float(mode)) {
423 switch (get_mode_size_bits(mode)) {
424 case 32: be_emit_char('s'); return;
425 case 64: be_emit_char('l'); return;
427 case 96: be_emit_char('t'); return;
430 assert(mode_is_int(mode));
431 switch (get_mode_size_bits(mode)) {
432 case 16: be_emit_char('s'); return;
433 case 32: be_emit_char('l'); return;
434 /* gas docu says q is the suffix but gcc, objdump and icc use ll
436 case 64: be_emit_cstring("ll"); return;
439 panic("Can't output mode_suffix for %+F", mode);
442 static char get_xmm_mode_suffix(ir_mode *mode)
444 assert(mode_is_float(mode));
445 switch(get_mode_size_bits(mode)) {
448 default: panic("Invalid XMM mode");
452 void ia32_emit_xmm_mode_suffix(const ir_node *node)
454 ir_mode *mode = get_ia32_ls_mode(node);
455 assert(mode != NULL);
457 be_emit_char(get_xmm_mode_suffix(mode));
460 void ia32_emit_xmm_mode_suffix_s(const ir_node *node)
462 ir_mode *mode = get_ia32_ls_mode(node);
463 assert(mode != NULL);
464 be_emit_char(get_xmm_mode_suffix(mode));
467 void ia32_emit_extend_suffix(const ir_node *node)
469 ir_mode *mode = get_ia32_ls_mode(node);
470 if (get_mode_size_bits(mode) == 32)
472 be_emit_char(mode_is_signed(mode) ? 's' : 'z');
473 ia32_emit_mode_suffix_mode(mode);
476 void ia32_emit_source_register_or_immediate(const ir_node *node, int pos)
478 ir_node *in = get_irn_n(node, pos);
479 if (is_ia32_Immediate(in)) {
480 emit_ia32_Immediate(in);
482 const ir_mode *mode = get_ia32_ls_mode(node);
483 const arch_register_t *reg = get_in_reg(node, pos);
484 emit_register(reg, mode);
489 * Returns the target block for a control flow node.
491 static ir_node *get_cfop_target_block(const ir_node *irn)
493 assert(get_irn_mode(irn) == mode_X);
494 return get_irn_link(irn);
498 * Emits a block label for the given block.
500 static void ia32_emit_block_name(const ir_node *block)
502 if (has_Block_entity(block)) {
503 ir_entity *entity = get_Block_entity(block);
504 be_gas_emit_entity(entity);
506 be_emit_cstring(BLOCK_PREFIX);
507 be_emit_irprintf("%ld", get_irn_node_nr(block));
512 * Emits the target label for a control flow node.
514 static void ia32_emit_cfop_target(const ir_node *node)
516 ir_node *block = get_cfop_target_block(node);
517 ia32_emit_block_name(block);
/**
 * Positive condition-code suffixes for signed compares, indexed by
 * pn_Cmp (False, Eq, Lt, Le, Gt, Ge, Lg, Leg).
 */
static const char *const cmp2condition_s[] = {
	NULL, /* always false */
	"e",  /* == */
	"l",  /* <  */
	"le", /* <= */
	"g",  /* >  */
	"ge", /* >= */
	"ne", /* != */
	NULL  /* always true */
};
/**
 * Positive condition-code suffixes for unsigned compares, indexed by
 * pn_Cmp (False, Eq, Lt, Le, Gt, Ge, Lg, Leg).
 */
static const char *const cmp2condition_u[] = {
	NULL, /* always false */
	"e",  /* == */
	"b",  /* <  */
	"be", /* <= */
	"a",  /* >  */
	"ae", /* >= */
	"ne", /* != */
	NULL  /* always true */
};
549 * Emit the suffix for a compare instruction.
551 static void ia32_emit_cmp_suffix(int pnc)
555 if (pnc == ia32_pn_Cmp_parity) {
560 if (pnc & ia32_pn_Cmp_float || pnc & ia32_pn_Cmp_unsigned) {
561 str = cmp2condition_u[pnc & 7];
563 str = cmp2condition_s[pnc & 7];
/** Format-string modifier flags for ia32_emitf(). */
typedef enum ia32_emit_mod_t {
	EMIT_RESPECT_LS   = 1U << 0, /**< '#': size register by the node's ls mode */
	EMIT_ALTERNATE_AM = 1U << 1, /**< '*': no '$' on immediates, '*' on AM */
	EMIT_LONG         = 1U << 2, /**< 'l': %lu / %ld take long arguments */
	EMIT_HIGH_REG     = 1U << 3, /**< '>': output high 8bit register (ah, bh) */
	EMIT_LOW_REG      = 1U << 4  /**< '<': output low 8bit register (al, bl) */
} ia32_emit_mod_t;
578 * Emits address mode.
580 void ia32_emit_am(const ir_node *node)
582 ir_entity *ent = get_ia32_am_sc(node);
583 int offs = get_ia32_am_offs_int(node);
584 ir_node *base = get_irn_n(node, n_ia32_base);
585 int has_base = !is_ia32_NoReg_GP(base);
586 ir_node *index = get_irn_n(node, n_ia32_index);
587 int has_index = !is_ia32_NoReg_GP(index);
589 /* just to be sure... */
590 assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);
594 const ia32_attr_t *attr = get_ia32_attr_const(node);
595 if (is_ia32_am_sc_sign(node))
597 ia32_emit_entity(ent, attr->data.am_sc_no_pic_adjust);
600 /* also handle special case if nothing is set */
601 if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
603 be_emit_irprintf("%+d", offs);
605 be_emit_irprintf("%d", offs);
609 if (has_base || has_index) {
614 const arch_register_t *reg = get_in_reg(node, n_ia32_base);
615 emit_register(reg, NULL);
618 /* emit index + scale */
620 const arch_register_t *reg = get_in_reg(node, n_ia32_index);
623 emit_register(reg, NULL);
625 scale = get_ia32_am_scale(node);
627 be_emit_irprintf(",%d", 1 << scale);
635 * fmt parameter output
636 * ---- ---------------------- ---------------------------------------------
638 * %AM <node> address mode of the node
639 * %AR const arch_register_t* address mode of the node or register
640 * %ASx <node> address mode of the node or source register x
641 * %Dx <node> destination register x
642 * %I <node> immediate of the node
643 * %L <node> control flow target of the node
644 * %M <node> mode suffix of the node
645 * %P int condition code
646 * %R const arch_register_t* register
647 * %Sx <node> source register x
648 * %s const char* string
649 * %u unsigned int unsigned int
650 * %d signed int signed int
653 * # modifier for %ASx, %D, %R, and %S uses ls mode of node to alter register width
654 * * modifier does not prefix immediates with $, but AM with *
655 * l modifier for %lu and %ld
656 * > modifier to output high 8bit register (ah, bh)
657 * < modifier to output low 8bit register (al, bl)
659 static void ia32_emitf(const ir_node *node, const char *fmt, ...)
665 const char *start = fmt;
666 ia32_emit_mod_t mod = 0;
668 while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
671 be_emit_string_len(start, fmt - start);
675 be_emit_finish_line_gas(node);
688 case '*': mod |= EMIT_ALTERNATE_AM; break;
689 case '#': mod |= EMIT_RESPECT_LS; break;
690 case 'l': mod |= EMIT_LONG; break;
691 case '>': mod |= EMIT_HIGH_REG; break;
692 case '<': mod |= EMIT_LOW_REG; break;
709 if (mod & EMIT_ALTERNATE_AM)
715 if (get_ia32_op_type(node) == ia32_AddrModeS) {
718 const arch_register_t *reg = va_arg(ap, const arch_register_t*);
719 if (mod & EMIT_ALTERNATE_AM)
721 emit_register(reg, NULL);
727 if (get_ia32_op_type(node) == ia32_AddrModeS) {
731 assert(get_ia32_op_type(node) == ia32_Normal);
736 default: goto unknown;
743 const arch_register_t *reg;
745 if (*fmt < '0' || '9' <= *fmt)
749 reg = get_out_reg(node, pos);
750 emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);
755 if (!(mod & EMIT_ALTERNATE_AM))
757 emit_ia32_Immediate_no_prefix(node);
761 ia32_emit_cfop_target(node);
765 ia32_emit_mode_suffix_mode(get_ia32_ls_mode(node));
770 int pnc = va_arg(ap, int);
771 ia32_emit_cmp_suffix(pnc);
776 const arch_register_t *reg = va_arg(ap, const arch_register_t*);
777 if (mod & EMIT_HIGH_REG) {
778 emit_8bit_register_high(reg);
779 } else if (mod & EMIT_LOW_REG) {
780 emit_8bit_register(reg);
782 emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);
792 if (*fmt < '0' || '9' <= *fmt)
796 in = get_irn_n(node, pos);
797 if (is_ia32_Immediate(in)) {
798 if (!(mod & EMIT_ALTERNATE_AM))
800 emit_ia32_Immediate_no_prefix(in);
802 const arch_register_t *reg;
804 if (mod & EMIT_ALTERNATE_AM)
806 reg = get_in_reg(node, pos);
807 emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);
813 const char *str = va_arg(ap, const char*);
819 if (mod & EMIT_LONG) {
820 unsigned long num = va_arg(ap, unsigned long);
821 be_emit_irprintf("%lu", num);
823 unsigned num = va_arg(ap, unsigned);
824 be_emit_irprintf("%u", num);
829 if (mod & EMIT_LONG) {
830 long num = va_arg(ap, long);
831 be_emit_irprintf("%ld", num);
833 int num = va_arg(ap, int);
834 be_emit_irprintf("%d", num);
840 panic("unknown format conversion in ia32_emitf()");
848 * Emits registers and/or address mode of a binary operation.
850 void ia32_emit_binop(const ir_node *node)
852 if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
853 ia32_emitf(node, "%#S4, %#AS3");
855 ia32_emitf(node, "%#AS4, %#S3");
860 * Emits registers and/or address mode of a binary operation.
862 void ia32_emit_x87_binop(const ir_node *node)
864 switch(get_ia32_op_type(node)) {
867 const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
868 const arch_register_t *in1 = x87_attr->x87[0];
869 const arch_register_t *in = x87_attr->x87[1];
870 const arch_register_t *out = x87_attr->x87[2];
874 } else if (out == in) {
879 be_emit_string(arch_register_get_name(in));
880 be_emit_cstring(", %");
881 be_emit_string(arch_register_get_name(out));
889 assert(0 && "unsupported op type");
894 * Emits registers and/or address mode of a unary operation.
896 void ia32_emit_unop(const ir_node *node, int pos)
900 ia32_emitf(node, fmt);
903 static void emit_ia32_IMul(const ir_node *node)
905 ir_node *left = get_irn_n(node, n_ia32_IMul_left);
906 const arch_register_t *out_reg = get_out_reg(node, pn_ia32_IMul_res);
908 /* do we need the 3-address form? */
909 if (is_ia32_NoReg_GP(left) ||
910 get_in_reg(node, n_ia32_IMul_left) != out_reg) {
911 ia32_emitf(node, "\timul%M %#S4, %#AS3, %#D0\n");
913 ia32_emitf(node, "\timul%M %#AS4, %#S3\n");
918 * walks up a tree of copies/perms/spills/reloads to find the original value
919 * that is moved around
921 static ir_node *find_original_value(ir_node *node)
923 if (irn_visited(node))
926 mark_irn_visited(node);
927 if (be_is_Copy(node)) {
928 return find_original_value(be_get_Copy_op(node));
929 } else if (be_is_CopyKeep(node)) {
930 return find_original_value(be_get_CopyKeep_op(node));
931 } else if (is_Proj(node)) {
932 ir_node *pred = get_Proj_pred(node);
933 if (be_is_Perm(pred)) {
934 return find_original_value(get_irn_n(pred, get_Proj_proj(node)));
935 } else if (be_is_MemPerm(pred)) {
936 return find_original_value(get_irn_n(pred, get_Proj_proj(node) + 1));
937 } else if (is_ia32_Load(pred)) {
938 return find_original_value(get_irn_n(pred, n_ia32_Load_mem));
942 } else if (is_ia32_Store(node)) {
943 return find_original_value(get_irn_n(node, n_ia32_Store_val));
944 } else if (is_Phi(node)) {
946 arity = get_irn_arity(node);
947 for (i = 0; i < arity; ++i) {
948 ir_node *in = get_irn_n(node, i);
949 ir_node *res = find_original_value(in);
960 static int determine_final_pnc(const ir_node *node, int flags_pos, int pnc)
962 ir_node *flags = get_irn_n(node, flags_pos);
963 const ia32_attr_t *flags_attr;
964 flags = skip_Proj(flags);
966 if (is_ia32_Sahf(flags)) {
967 ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
968 if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
969 || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
970 inc_irg_visited(current_ir_graph);
971 cmp = find_original_value(cmp);
973 assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
974 || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));
977 flags_attr = get_ia32_attr_const(cmp);
978 if (flags_attr->data.ins_permuted)
979 pnc = get_mirrored_pnc(pnc);
980 pnc |= ia32_pn_Cmp_float;
981 } else if (is_ia32_Ucomi(flags) || is_ia32_Fucomi(flags)
982 || is_ia32_Fucompi(flags)) {
983 flags_attr = get_ia32_attr_const(flags);
985 if (flags_attr->data.ins_permuted)
986 pnc = get_mirrored_pnc(pnc);
987 pnc |= ia32_pn_Cmp_float;
989 flags_attr = get_ia32_attr_const(flags);
991 if (flags_attr->data.ins_permuted)
992 pnc = get_mirrored_pnc(pnc);
993 if (flags_attr->data.cmp_unsigned)
994 pnc |= ia32_pn_Cmp_unsigned;
1000 static pn_Cmp ia32_get_negated_pnc(pn_Cmp pnc)
1002 ir_mode *mode = pnc & ia32_pn_Cmp_float ? mode_F : mode_Iu;
1003 return get_negated_pnc(pnc, mode);
1006 void ia32_emit_cmp_suffix_node(const ir_node *node,
1009 const ia32_attr_t *attr = get_ia32_attr_const(node);
1011 pn_Cmp pnc = get_ia32_condcode(node);
1013 pnc = determine_final_pnc(node, flags_pos, pnc);
1014 if (attr->data.ins_permuted)
1015 pnc = ia32_get_negated_pnc(pnc);
1017 ia32_emit_cmp_suffix(pnc);
1021 * Emits an exception label for a given node.
1023 static void ia32_emit_exc_label(const ir_node *node)
1025 be_emit_string(be_gas_insn_label_prefix());
1026 be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
1030 * Returns the Proj with projection number proj and NOT mode_M
1032 static ir_node *get_proj(const ir_node *node, long proj)
1034 const ir_edge_t *edge;
1037 assert(get_irn_mode(node) == mode_T && "expected mode_T node");
1039 foreach_out_edge(node, edge) {
1040 src = get_edge_src_irn(edge);
1042 assert(is_Proj(src) && "Proj expected");
1043 if (get_irn_mode(src) == mode_M)
1046 if (get_Proj_proj(src) == proj)
1052 static int can_be_fallthrough(const ir_node *node)
1054 ir_node *target_block = get_cfop_target_block(node);
1055 ir_node *block = get_nodes_block(node);
1056 return get_prev_block_sched(target_block) == block;
1060 * Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
1062 static void emit_ia32_Jcc(const ir_node *node)
1064 int need_parity_label = 0;
1065 const ir_node *proj_true;
1066 const ir_node *proj_false;
1067 const ir_node *block;
1068 pn_Cmp pnc = get_ia32_condcode(node);
1070 pnc = determine_final_pnc(node, 0, pnc);
1072 /* get both Projs */
1073 proj_true = get_proj(node, pn_ia32_Jcc_true);
1074 assert(proj_true && "Jcc without true Proj");
1076 proj_false = get_proj(node, pn_ia32_Jcc_false);
1077 assert(proj_false && "Jcc without false Proj");
1079 block = get_nodes_block(node);
1081 if (can_be_fallthrough(proj_true)) {
1082 /* exchange both proj's so the second one can be omitted */
1083 const ir_node *t = proj_true;
1085 proj_true = proj_false;
1087 pnc = ia32_get_negated_pnc(pnc);
1090 if (pnc & ia32_pn_Cmp_float) {
1091 /* Some floating point comparisons require a test of the parity flag,
1092 * which indicates that the result is unordered */
1093 switch (pnc & 0x0f) {
1095 ia32_emitf(proj_true, "\tjp %L\n");
1100 ia32_emitf(proj_true, "\tjnp %L\n");
1106 /* we need a local label if the false proj is a fallthrough
1107 * as the falseblock might have no label emitted then */
1108 if (can_be_fallthrough(proj_false)) {
1109 need_parity_label = 1;
1110 ia32_emitf(proj_false, "\tjp 1f\n");
1112 ia32_emitf(proj_false, "\tjp %L\n");
1119 ia32_emitf(proj_true, "\tjp %L\n");
1127 ia32_emitf(proj_true, "\tj%P %L\n", pnc);
1130 if (need_parity_label) {
1131 ia32_emitf(NULL, "1:\n");
1134 /* the second Proj might be a fallthrough */
1135 if (can_be_fallthrough(proj_false)) {
1136 ia32_emitf(proj_false, "\t/* fallthrough to %L */\n");
1138 ia32_emitf(proj_false, "\tjmp %L\n");
1143 * Emits an ia32 Setcc. This is mostly easy but some floating point compares
1146 static void emit_ia32_Setcc(const ir_node *node)
1148 const arch_register_t *dreg = get_out_reg(node, pn_ia32_Setcc_res);
1150 pn_Cmp pnc = get_ia32_condcode(node);
1151 pnc = determine_final_pnc(node, n_ia32_Setcc_eflags, pnc);
1152 if (pnc & ia32_pn_Cmp_float) {
1153 switch (pnc & 0x0f) {
1155 ia32_emitf(node, "\tsetp %#R\n", dreg);
1159 ia32_emitf(node, "\tsetnp %#R\n", dreg);
1165 ia32_emitf(node, "\tset%P %<R\n", pnc, dreg);
1166 ia32_emitf(node, "\tsetnp %>R\n", dreg);
1167 ia32_emitf(node, "\tandb %>R, %<R\n", dreg, dreg);
1173 ia32_emitf(node, "\tset%P %<R\n", pnc, dreg);
1174 ia32_emitf(node, "\tsetp %>R\n", dreg);
1175 ia32_emitf(node, "\torb %>R, %<R\n", dreg, dreg);
1182 ia32_emitf(node, "\tset%P %#R\n", pnc, dreg);
1185 static void emit_ia32_CMovcc(const ir_node *node)
1187 const ia32_attr_t *attr = get_ia32_attr_const(node);
1188 const arch_register_t *out = arch_irn_get_register(node, pn_ia32_res);
1189 pn_Cmp pnc = get_ia32_condcode(node);
1190 const arch_register_t *in_true;
1191 const arch_register_t *in_false;
1193 pnc = determine_final_pnc(node, n_ia32_CMovcc_eflags, pnc);
1194 /* although you can't set ins_permuted in the constructor it might still
1195 be set by memory operand folding */
1196 if (attr->data.ins_permuted)
1197 pnc = ia32_get_negated_pnc(pnc);
1199 in_true = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
1200 in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));
1202 /* should be same constraint fullfilled? */
1203 if (out == in_false) {
1204 /* yes -> nothing to do */
1205 } else if (out == in_true) {
1206 const arch_register_t *tmp;
1208 assert(get_ia32_op_type(node) == ia32_Normal);
1210 pnc = ia32_get_negated_pnc(pnc);
1217 ia32_emitf(node, "\tmovl %R, %R\n", in_false, out);
1220 /* TODO: handling of Nans isn't correct yet */
1221 if (pnc & ia32_pn_Cmp_float) {
1222 switch (pnc & 0x0f) {
1231 panic("CMov with floatingpoint compare/parity not supported yet");
1235 ia32_emitf(node, "\tcmov%P %#AR, %#R\n", pnc, in_true, out);
1238 /*********************************************************
1241 * ___ _ __ ___ _| |_ _ _ _ _ __ ___ _ __ ___
1242 * / _ \ '_ ` _ \| | __| | | | | | '_ ` _ \| '_ \/ __|
1243 * | __/ | | | | | | |_ | | |_| | | | | | | |_) \__ \
1244 * \___|_| |_| |_|_|\__| | |\__,_|_| |_| |_| .__/|___/
1247 *********************************************************/
1249 /* jump table entry (target and corresponding number) */
1250 typedef struct _branch_t {
1255 /* jump table for switch generation */
1256 typedef struct _jmp_tbl_t {
1257 ir_node *defProj; /**< default target */
1258 long min_value; /**< smallest switch case */
1259 long max_value; /**< largest switch case */
1260 long num_branches; /**< number of jumps */
1261 char label[SNPRINTF_BUF_LEN]; /**< label of the jump table */
1262 branch_t *branches; /**< jump array */
1266 * Compare two variables of type branch_t. Used to sort all switch cases
1268 static int ia32_cmp_branch_t(const void *a, const void *b)
1270 branch_t *b1 = (branch_t *)a;
1271 branch_t *b2 = (branch_t *)b;
1273 if (b1->value <= b2->value)
1279 static void generate_jump_table(jmp_tbl_t *tbl, const ir_node *node)
1285 const ir_edge_t *edge;
1287 /* fill the table structure */
1288 get_unique_label(tbl->label, SNPRINTF_BUF_LEN, ".TBL_");
1289 tbl->defProj = NULL;
1290 tbl->num_branches = get_irn_n_edges(node) - 1;
1291 tbl->branches = XMALLOCNZ(branch_t, tbl->num_branches);
1292 tbl->min_value = LONG_MAX;
1293 tbl->max_value = LONG_MIN;
1295 default_pn = get_ia32_condcode(node);
1297 /* go over all proj's and collect them */
1298 foreach_out_edge(node, edge) {
1299 proj = get_edge_src_irn(edge);
1300 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
1302 pnc = get_Proj_proj(proj);
1304 /* check for default proj */
1305 if (pnc == default_pn) {
1306 assert(tbl->defProj == NULL && "found two default Projs at SwitchJmp");
1307 tbl->defProj = proj;
1309 tbl->min_value = pnc < tbl->min_value ? pnc : tbl->min_value;
1310 tbl->max_value = pnc > tbl->max_value ? pnc : tbl->max_value;
1312 /* create branch entry */
1313 tbl->branches[i].target = proj;
1314 tbl->branches[i].value = pnc;
1319 assert(i == tbl->num_branches);
1321 /* sort the branches by their number */
1322 qsort(tbl->branches, tbl->num_branches, sizeof(tbl->branches[0]), ia32_cmp_branch_t);
1326 * Emits code for a SwitchJmp (creates a jump table if
1327 * possible otherwise a cmp-jmp cascade). Port from
1330 static void emit_ia32_SwitchJmp(const ir_node *node)
1332 unsigned long interval;
1336 /* fill the table structure */
1337 generate_jump_table(&tbl, node);
1339 /* two-complement's magic make this work without overflow */
1340 interval = tbl.max_value - tbl.min_value;
1342 /* emit the table */
1343 ia32_emitf(node, "\tcmpl $%u, %S0\n", interval);
1344 ia32_emitf(tbl.defProj, "\tja %L\n");
1346 if (tbl.num_branches > 1) {
1348 ia32_emitf(node, "\tjmp *%s(,%S0,4)\n", tbl.label);
1350 be_gas_emit_switch_section(GAS_SECTION_RODATA);
1351 ia32_emitf(NULL, "\t.align 4\n");
1352 ia32_emitf(NULL, "%s:\n", tbl.label);
1354 last_value = tbl.branches[0].value;
1355 for (i = 0; i != tbl.num_branches; ++i) {
1356 while (last_value != tbl.branches[i].value) {
1357 ia32_emitf(tbl.defProj, ".long %L\n");
1360 ia32_emitf(tbl.branches[i].target, ".long %L\n");
1363 be_gas_emit_switch_section(GAS_SECTION_TEXT);
1365 /* one jump is enough */
1366 ia32_emitf(tbl.branches[0].target, "\tjmp %L\n");
1373 * Emits code for a unconditional jump.
1375 static void emit_ia32_Jmp(const ir_node *node)
1379 /* for now, the code works for scheduled and non-schedules blocks */
1380 block = get_nodes_block(node);
1382 /* we have a block schedule */
1383 if (can_be_fallthrough(node)) {
1384 ia32_emitf(node, "\t/* fallthrough to %L */\n");
1386 ia32_emitf(node, "\tjmp %L\n");
1391 * Emit an inline assembler operand.
1393 * @param node the ia32_ASM node
1394 * @param s points to the operand (a %c)
1396 * @return pointer to the first char in s NOT in the current operand
1398 static const char* emit_asm_operand(const ir_node *node, const char *s)
1400 const ia32_attr_t *ia32_attr = get_ia32_attr_const(node);
1401 const ia32_asm_attr_t *attr = CONST_CAST_IA32_ATTR(ia32_asm_attr_t,
1403 const arch_register_t *reg;
1404 const ia32_asm_reg_t *asm_regs = attr->register_map;
1405 const ia32_asm_reg_t *asm_reg;
1406 const char *reg_name;
1415 /* parse modifiers */
1418 ir_fprintf(stderr, "Warning: asm text (%+F) ends with %%\n", node);
1443 "Warning: asm text (%+F) contains unknown modifier '%c' for asm op\n",
1450 sscanf(s, "%d%n", &num, &p);
1452 ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",
1459 if (num < 0 || ARR_LEN(asm_regs) <= num) {
1461 "Error: Custom assembler references invalid input/output (%+F)\n",
1465 asm_reg = & asm_regs[num];
1466 assert(asm_reg->valid);
1469 if (asm_reg->use_input == 0) {
1470 reg = get_out_reg(node, asm_reg->inout_pos);
1472 ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
1474 /* might be an immediate value */
1475 if (is_ia32_Immediate(pred)) {
1476 emit_ia32_Immediate(pred);
1479 reg = get_in_reg(node, asm_reg->inout_pos);
1483 "Warning: no register assigned for %d asm op (%+F)\n",
1488 if (asm_reg->memory) {
1493 if (modifier != 0) {
1497 reg_name = ia32_get_mapped_reg_name(isa->regs_8bit, reg);
1500 reg_name = ia32_get_mapped_reg_name(isa->regs_8bit_high, reg);
1503 reg_name = ia32_get_mapped_reg_name(isa->regs_16bit, reg);
1506 panic("Invalid asm op modifier");
1508 be_emit_string(reg_name);
1510 emit_register(reg, asm_reg->mode);
1513 if (asm_reg->memory) {
1521 * Emits code for an ASM pseudo op.
1523 static void emit_ia32_Asm(const ir_node *node)
1525 const void *gen_attr = get_irn_generic_attr_const(node);
1526 const ia32_asm_attr_t *attr
1527 = CONST_CAST_IA32_ATTR(ia32_asm_attr_t, gen_attr);
1528 ident *asm_text = attr->asm_text;
1529 const char *s = get_id_str(asm_text);
1531 ia32_emitf(node, "#APP\t\n");
1538 s = emit_asm_operand(node, s);
1544 ia32_emitf(NULL, "\n#NO_APP\n");
1547 /**********************************
1550 * | | ___ _ __ _ _| |_) |
1551 * | | / _ \| '_ \| | | | _ <
1552 * | |___| (_) | |_) | |_| | |_) |
1553 * \_____\___/| .__/ \__, |____/
1556 **********************************/
1559 * Emit movsb/w instructions to make mov count divideable by 4
1561 static void emit_CopyB_prolog(unsigned size)
1564 ia32_emitf(NULL, "\tmovsb\n");
1566 ia32_emitf(NULL, "\tmovsw\n");
1570 * Emit rep movsd instruction for memcopy.
1572 static void emit_ia32_CopyB(const ir_node *node)
1574 unsigned size = get_ia32_copyb_size(node);
1576 emit_CopyB_prolog(size);
1577 ia32_emitf(node, "\trep movsd\n");
1581 * Emits unrolled memcopy.
/* Emits an unrolled memcopy for a small compile-time-known size: one movsd
 * per dword (the emitting loop over size/4 is elided in this listing). */
1583 static void emit_ia32_CopyB_i(const ir_node *node)
1585 	unsigned size = get_ia32_copyb_size(node);
1587 	emit_CopyB_prolog(size);
1591 		ia32_emitf(NULL, "\tmovsd\n");
1597 /***************************
1601 * | | / _ \| '_ \ \ / /
1602 * | |___| (_) | | | \ V /
1603 * \_____\___/|_| |_|\_/
1605 ***************************/
1608 * Emit code for conversions (I, FP), (FP, I) and (FP, FP).
/* Common helper for SSE conversions: picks the 32bit (conv_f) or 64bit
 * (conv_d) suffix based on the node's load/store mode and emits
 * "cvt<suffix> src, dst". */
1610 static void emit_ia32_Conv_with_FP(const ir_node *node, const char* conv_f,
1613 	ir_mode *ls_mode = get_ia32_ls_mode(node);
1614 	int ls_bits = get_mode_size_bits(ls_mode);
1615 	const char *conv = ls_bits == 32 ? conv_f : conv_d;
1617 	ia32_emitf(node, "\tcvt%s %AS3, %D0\n", conv);
1620 static void emit_ia32_Conv_I2FP(const ir_node *node)
1622 emit_ia32_Conv_with_FP(node, "si2ss", "si2sd");
1625 static void emit_ia32_Conv_FP2I(const ir_node *node)
1627 emit_ia32_Conv_with_FP(node, "ss2si", "sd2si");
1630 static void emit_ia32_Conv_FP2FP(const ir_node *node)
1632 emit_ia32_Conv_with_FP(node, "sd2ss", "ss2sd");
1636 * Emits code for an Int conversion.
/* Emits an integer widening conversion: movsx ("movs") for signed and
 * movzx ("movz") for unsigned smaller modes; %Ml adds the source size
 * suffix, %#AS3 the source operand. */
1638 static void emit_ia32_Conv_I2I(const ir_node *node)
1640 	ir_mode *smaller_mode = get_ia32_ls_mode(node);
1641 	int signed_mode = mode_is_signed(smaller_mode);
1642 	const char *sign_suffix;
1644 	assert(!mode_is_float(smaller_mode));
1646 	sign_suffix = signed_mode ? "s" : "z";
1647 	ia32_emitf(node, "\tmov%s%Ml %#AS3, %D0\n", sign_suffix);
/* Emits a call instruction; the %* modifier makes address-mode targets get
 * the AT&T "*" prefix instead of an immediate "$" prefix. */
1653 static void emit_ia32_Call(const ir_node *node)
1655 	/* Special case: Call must not have its immediates prefixed by $, instead
1656 	 * address mode is prefixed by *. */
1657 	ia32_emitf(node, "\tcall %*AS3\n");
1661 /*******************************************
1664 * | |__ ___ _ __ ___ __| | ___ ___
1665 * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __|
1666 * | |_) | __/ | | | (_) | (_| | __/\__ \
1667 * |_.__/ \___|_| |_|\___/ \__,_|\___||___/
1669 *******************************************/
1672 * Emits code to increase stack pointer.
/* Emits code to adjust the stack pointer: subl for a positive offset
 * (stack grows down), addl for a negative one.
 * NOTE(review): the sign test between the two emits (and the presumable
 * early return for offs == 0) is elided in this sampled listing. */
1674 static void emit_be_IncSP(const ir_node *node)
1676 	int offs = be_get_IncSP_offset(node);
1682 		ia32_emitf(node, "\tsubl $%u, %D0\n", offs);
1684 		ia32_emitf(node, "\taddl $%u, %D0\n", -offs);
1688 static inline bool is_unknown_reg(const arch_register_t *reg)
1690 if(reg == &ia32_gp_regs[REG_GP_UKNWN]
1691 || reg == &ia32_xmm_regs[REG_XMM_UKNWN]
1692 || reg == &ia32_vfp_regs[REG_VFP_UKNWN])
1699 * Emits code for Copy/CopyKeep.
/* Common emitter for be_Copy/be_CopyKeep: movsd for mode_E (SSE double),
 * movl otherwise. Copies from "unknown" registers and within the virtual
 * fp class emit nothing (the early-return bodies are elided in this
 * sampled listing). */
1701 static void Copy_emitter(const ir_node *node, const ir_node *op)
1703 	const arch_register_t *in = arch_get_irn_register(op);
1704 	const arch_register_t *out = arch_get_irn_register(node);
1709 	if (is_unknown_reg(in))
1711 	/* copies of vf nodes aren't real... */
1712 	if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
1715 	if (get_irn_mode(node) == mode_E) {
1716 		ia32_emitf(node, "\tmovsd %R, %R\n", in, out);
1718 		ia32_emitf(node, "\tmovl %R, %R\n", in, out);
1722 static void emit_be_Copy(const ir_node *node)
1724 Copy_emitter(node, be_get_Copy_op(node));
1727 static void emit_be_CopyKeep(const ir_node *node)
1729 Copy_emitter(node, be_get_CopyKeep_op(node));
1733 * Emits code for exchange.
/* Emits a register exchange for a be_Perm: xchg for gp registers, the
 * three-xorpd swap trick for xmm registers. The vfp and st cases exist
 * but their bodies are elided in this sampled listing. */
1735 static void emit_be_Perm(const ir_node *node)
1737 	const arch_register_t *in0, *in1;
1738 	const arch_register_class_t *cls0, *cls1;
1740 	in0 = arch_get_irn_register(get_irn_n(node, 0));
1741 	in1 = arch_get_irn_register(get_irn_n(node, 1));
1743 	cls0 = arch_register_get_class(in0);
1744 	cls1 = arch_register_get_class(in1);
1746 	assert(cls0 == cls1 && "Register class mismatch at Perm");
1748 	if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
1749 		ia32_emitf(node, "\txchg %R, %R\n", in1, in0);
1750 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
		/* xor-swap: a^=b; b^=a; a^=b */
1751 		ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
1752 		ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
1753 		ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
1754 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
1756 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
1759 		panic("unexpected register class in be_Perm (%+F)", node);
1764 * Emits code for Constant loading.
1766 static void emit_ia32_Const(const ir_node *node)
1768 ia32_emitf(node, "\tmovl %I, %D0\n");
1772 * Emits code to load the TLS base
1774 static void emit_ia32_LdTls(const ir_node *node)
1776 ia32_emitf(node, "\tmovl %%gs:0, %D0\n");
1779 /* helper function for emit_ia32_Minus64Bit */
1780 static void emit_mov(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1782 ia32_emitf(node, "\tmovl %R, %R\n", src, dst);
1785 /* helper function for emit_ia32_Minus64Bit */
1786 static void emit_neg(const ir_node* node, const arch_register_t *reg)
1788 ia32_emitf(node, "\tnegl %R\n", reg);
1791 /* helper function for emit_ia32_Minus64Bit */
1792 static void emit_sbb0(const ir_node* node, const arch_register_t *reg)
1794 ia32_emitf(node, "\tsbbl $0, %R\n", reg);
1797 /* helper function for emit_ia32_Minus64Bit */
1798 static void emit_sbb(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1800 ia32_emitf(node, "\tsbbl %R, %R\n", src, dst);
1803 /* helper function for emit_ia32_Minus64Bit */
1804 static void emit_xchg(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
1806 ia32_emitf(node, "\txchgl %R, %R\n", src, dst);
1809 /* helper function for emit_ia32_Minus64Bit */
1810 static void emit_zero(const ir_node* node, const arch_register_t *reg)
1812 ia32_emitf(node, "\txorl %R, %R\n", reg, reg);
/* Emits a 64bit negate on a register pair (lo, hi). The first half shuffles
 * the input pair into the output pair for every aliasing combination
 * (annotated a/b = inputs, c/d = outputs); the second half performs the
 * actual negate: either neg hi / neg lo / sbb $0, hi — or the zero/neg/sbb
 * variant. NOTE(review): several else-branches and the dispatch between the
 * two negate sequences are elided in this sampled listing. */
1815 static void emit_ia32_Minus64Bit(const ir_node *node)
1817 	const arch_register_t *in_lo = get_in_reg(node, 0);
1818 	const arch_register_t *in_hi = get_in_reg(node, 1);
1819 	const arch_register_t *out_lo = get_out_reg(node, 0);
1820 	const arch_register_t *out_hi = get_out_reg(node, 1);
1822 	if (out_lo == in_lo) {
1823 		if (out_hi != in_hi) {
1824 			/* a -> a, b -> d */
1827 			/* a -> a, b -> b */
1830 	} else if (out_lo == in_hi) {
1831 		if (out_hi == in_lo) {
1832 			/* a -> b, b -> a */
1833 			emit_xchg(node, in_lo, in_hi);
1836 			/* a -> b, b -> d */
1837 			emit_mov(node, in_hi, out_hi);
1838 			emit_mov(node, in_lo, out_lo);
1842 		if (out_hi == in_lo) {
1843 			/* a -> c, b -> a */
1844 			emit_mov(node, in_lo, out_lo);
1846 		} else if (out_hi == in_hi) {
1847 			/* a -> c, b -> b */
1848 			emit_mov(node, in_lo, out_lo);
1851 			/* a -> c, b -> d */
1852 			emit_mov(node, in_lo, out_lo);
1858 	emit_neg( node, out_hi);
1859 	emit_neg( node, out_lo);
1860 	emit_sbb0(node, out_hi);
1864 	emit_zero(node, out_hi);
1865 	emit_neg( node, out_lo);
1866 	emit_sbb( node, in_hi, out_hi);
1869 static void emit_ia32_GetEIP(const ir_node *node)
1871 ia32_emitf(node, "\tcall %s\n", pic_base_label);
1872 ia32_emitf(NULL, "%s:\n", pic_base_label);
1873 ia32_emitf(node, "\tpopl %D0\n");
/* Emits a loop that follows attr->count frame-pointer links: load start,
 * load counter, then repeatedly dereference until the counter hits zero.
 * The loop label is derived from the node number. */
1876 static void emit_ia32_ClimbFrame(const ir_node *node)
1878 	const ia32_climbframe_attr_t *attr = get_ia32_climbframe_attr_const(node);
1880 	ia32_emitf(node, "\tmovl %S0, %D0\n");
1881 	ia32_emitf(node, "\tmovl $%u, %S1\n", attr->count);
1882 	ia32_emitf(NULL, BLOCK_PREFIX "%ld:\n", get_irn_node_nr(node));
1883 	ia32_emitf(node, "\tmovl (%D0), %D0\n");
1884 	ia32_emitf(node, "\tdec %S1\n");
1885 	ia32_emitf(node, "\tjnz " BLOCK_PREFIX "%ld\n", get_irn_node_nr(node));
/* Emits the function return: "ret $pop" when the callee must pop arguments
 * (e.g. stdcall-style), plain "ret" otherwise (else-branch brace elided in
 * this sampled listing). */
1888 static void emit_be_Return(const ir_node *node)
1890 	unsigned pop = be_Return_get_pop(node);
1892 	if (pop > 0 || be_Return_get_emit_pop(node)) {
1893 		ia32_emitf(node, "\tret $%u\n", pop);
1895 		ia32_emitf(node, "\tret\n");
1899 static void emit_Nothing(const ir_node *node)
1905 /***********************************************************************************
1908 * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
1909 * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
1910 * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
1911 * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
1913 ***********************************************************************************/
1916 * Enters the emitter functions for handled nodes into the generic
1917 * pointer of an opcode.
/* Fills the generic-function pointer of every opcode with its emitter:
 * clears all pointers, registers the spec-generated emitters, then the
 * hand-written ones via the IA32_EMIT*/ /*BE_EMIT macro family.
 * NOTE(review): many EMIT/IGN registrations are elided in this sampled
 * listing. */
1919 static void ia32_register_emitters(void)
1921 #define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
1922 #define IA32_EMIT(a)    IA32_EMIT2(a,a)
1923 #define EMIT(a)         op_##a->ops.generic = (op_func)emit_##a
1924 #define IGN(a)          op_##a->ops.generic = (op_func)emit_Nothing
1925 #define BE_EMIT(a)      op_be_##a->ops.generic = (op_func)emit_be_##a
1926 #define BE_IGN(a)       op_be_##a->ops.generic = (op_func)emit_Nothing
1928 	/* first clear the generic function pointer for all ops */
1929 	clear_irp_opcodes_generic_func();
1931 	/* register all emitter functions defined in spec */
1932 	ia32_register_spec_emitters();
1934 	/* other ia32 emitter functions */
1935 	IA32_EMIT2(Conv_I2I8Bit, Conv_I2I);
1940 	IA32_EMIT(Conv_FP2FP);
1941 	IA32_EMIT(Conv_FP2I);
1942 	IA32_EMIT(Conv_I2FP);
1943 	IA32_EMIT(Conv_I2I);
1951 	IA32_EMIT(Minus64Bit);
1952 	IA32_EMIT(SwitchJmp);
1953 	IA32_EMIT(ClimbFrame);
1956 /* benode emitter */
1977 typedef void (*emit_func_ptr) (const ir_node *);
1980 * Assign and emit an exception label if the current instruction can fail.
/* Assigns a fresh exception-label id to a faulting instruction and emits
 * the label plus an explanatory comment naming the handler block. */
1982 static void ia32_assign_exc_label(ir_node *node)
1984 	/* assign a new ID to the instruction */
1985 	set_ia32_exc_label_id(node, ++exc_label_id);
1987 	ia32_emit_exc_label(node);
1989 	be_emit_pad_comment();
1990 	be_emit_cstring("/* exception to Block ");
1991 	ia32_emit_cfop_target(node);
1992 	be_emit_cstring(" */\n");
1993 	be_emit_write_line();
1997 * Emits code for a node.
/* Emits code for one node: handles the exception label, optional
 * spill/reload/remat marker instructions (harmless xchg reg,reg used as
 * debugger-visible tags), then dispatches to the registered emitter; a
 * missing handler is reported to stderr. */
1999 static void ia32_emit_node(ir_node *node)
2001 	ir_op *op = get_irn_op(node);
2003 	DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
2005 	if (is_ia32_irn(node)) {
2006 		if (get_ia32_exc_label(node)) {
2007 			/* emit the exception label of this instruction */
2008 			ia32_assign_exc_label(node);
2010 		if (mark_spill_reload) {
2011 			if (is_ia32_is_spill(node)) {
2012 				ia32_emitf(NULL, "\txchg %ebx, %ebx        /* spill mark */\n");
2014 			if (is_ia32_is_reload(node)) {
2015 				ia32_emitf(NULL, "\txchg %edx, %edx        /* reload mark */\n");
2017 			if (is_ia32_is_remat(node)) {
2018 				ia32_emitf(NULL, "\txchg %ecx, %ecx        /* remat mark */\n");
2022 	if (op->ops.generic) {
2023 		emit_func_ptr func = (emit_func_ptr) op->ops.generic;
2025 		be_dbg_set_dbg_info(get_irn_dbg_info(node));
2030 		ir_fprintf(stderr, "Error: No emit handler for node %+F (%+G, graph %+F)\n", node, node, current_ir_graph);
2036 * Emits gas alignment directives
2038 static void ia32_emit_alignment(unsigned align, unsigned skip)
2040 ia32_emitf(NULL, "\t.p2align %u,,%u\n", align, skip);
2044 * Emits gas alignment directives for Labels depended on cpu architecture.
2046 static void ia32_emit_align_label(void)
2048 unsigned align = ia32_cg_config.label_alignment;
2049 unsigned maximum_skip = ia32_cg_config.label_alignment_max_skip;
2050 ia32_emit_alignment(align, maximum_skip);
2054 * Test whether a block should be aligned.
2055 * For cpus in the P4/Athlon class it is useful to align jump labels to
2056 * 16 bytes. However we should only do that if the alignment nops before the
2057 * label aren't executed more often than we have jumps to the label.
/* Heuristic: align a block's label only when the jump frequency into it is
 * high compared to the fall-through frequency (alignment nops are executed
 * on the fall-through path). Sums pred frequencies into prev_freq
 * (fall-through pred) vs jmp_freq (all others), then compares the ratio
 * against the configured factor. NOTE(review): early returns after the
 * NULL/factor/DELTA checks are elided in this sampled listing. */
2059 static int should_align_block(const ir_node *block)
2061 	static const double DELTA = .0001;
2062 	ir_exec_freq *exec_freq   = cg->birg->exec_freq;
2063 	ir_node      *prev        = get_prev_block_sched(block);
2065 	double        prev_freq   = 0;  /**< execfreq of the fallthrough block */
2066 	double        jmp_freq    = 0;  /**< execfreq of all non-fallthrough blocks */
2069 	if (exec_freq == NULL)
2071 	if (ia32_cg_config.label_alignment_factor <= 0)
2074 	block_freq = get_block_execfreq(exec_freq, block);
2075 	if (block_freq < DELTA)
2078 	n_cfgpreds = get_Block_n_cfgpreds(block);
2079 	for(i = 0; i < n_cfgpreds; ++i) {
2080 		const ir_node *pred      = get_Block_cfgpred_block(block, i);
2081 		double         pred_freq = get_block_execfreq(exec_freq, pred);
2084 			prev_freq += pred_freq;
2086 			jmp_freq  += pred_freq;
2090 	if (prev_freq < DELTA && !(jmp_freq < DELTA))
2093 	jmp_freq /= prev_freq;
2095 	return jmp_freq > ia32_cg_config.label_alignment_factor;
2099 * Emit the block header for a block.
2101 * @param block the block
2102 * @param prev_block the previous block
/* Emits the header for a basic block: optional alignment (when the block is
 * hot or has no fall-through predecessor), the block label, and a trailing
 * comment listing predecessor block numbers and the execution frequency.
 * The end block gets no header. NOTE(review): several branches/locals are
 * elided in this sampled listing. */
2104 static void ia32_emit_block_header(ir_node *block)
2106 	ir_graph     *irg = current_ir_graph;
2107 	int           need_label = block_needs_label(block);
2109 	ir_exec_freq *exec_freq = cg->birg->exec_freq;
2111 	if (block == get_irg_end_block(irg))
2114 	if (ia32_cg_config.label_alignment > 0) {
2115 		/* align the current block if:
2116 		 * a) if should be aligned due to its execution frequency
2117 		 * b) there is no fall-through here
2119 		if (should_align_block(block)) {
2120 			ia32_emit_align_label();
2122 			/* if the predecessor block has no fall-through,
2123 			   we can always align the label. */
2125 			int has_fallthrough = 0;
2127 			for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
2128 				ir_node *cfg_pred = get_Block_cfgpred(block, i);
2129 				if (can_be_fallthrough(cfg_pred)) {
2130 					has_fallthrough = 1;
2135 			if (!has_fallthrough)
2136 				ia32_emit_align_label();
2141 		ia32_emit_block_name(block);
2144 		be_emit_pad_comment();
2145 		be_emit_cstring("   /* ");
2147 		be_emit_cstring("\t/* ");
2148 		ia32_emit_block_name(block);
2149 		be_emit_cstring(": ");
2152 	be_emit_cstring("preds:");
2154 	/* emit list of pred blocks in comment */
2155 	arity = get_irn_arity(block);
2157 		be_emit_cstring(" none");
2159 		for (i = 0; i < arity; ++i) {
2160 			ir_node *predblock = get_Block_cfgpred_block(block, i);
2161 			be_emit_irprintf(" %d", get_irn_node_nr(predblock));
2164 	if (exec_freq != NULL) {
2165 		be_emit_irprintf(", freq: %f",
2166 		                 get_block_execfreq(exec_freq, block));
2168 	be_emit_cstring(" */\n");
2169 	be_emit_write_line();
2173 * Walks over the nodes in a block connected by scheduling edges
2174 * and emits code for each node.
/* Emits one block: header first, then every node in schedule order. */
2176 static void ia32_gen_block(ir_node *block)
2180 	ia32_emit_block_header(block);
2182 	/* emit the contents of the block */
2183 	be_dbg_set_dbg_info(get_irn_dbg_info(block));
2184 	sched_foreach(block, node) {
2185 		ia32_emit_node(node);
/* One row of the per-routine exception table, pairing a faulting
 * instruction with its handler block. */
2189 typedef struct exc_entry {
2190 	ir_node *exc_instr;  /** The instruction that can issue an exception. */
2191 	ir_node *block;      /** The block to call then. */
2196 * Sets labels for control flow nodes (jump target).
2197 * Links control predecessors to there destination blocks.
/* Block walker: links every control-flow predecessor to its destination
 * block (jump targets resolve via the link field) and collects faulting
 * ia32 instructions into the exception list passed via @p data. */
2199 static void ia32_gen_labels(ir_node *block, void *data)
2201 	exc_entry **exc_list = data;
2205 	for (n = get_Block_n_cfgpreds(block) - 1; n >= 0; --n) {
2206 		pred = get_Block_cfgpred(block, n);
2207 		set_irn_link(pred, block);
2209 		pred = skip_Proj(pred);
2210 		if (is_ia32_irn(pred) && get_ia32_exc_label(pred)) {
2215 			ARR_APP1(exc_entry, *exc_list, e);
2216 			set_irn_link(pred, block);
2222 * Compare two exception_entries.
2224 static int cmp_exc_entry(const void *a, const void *b)
2226 const exc_entry *ea = a;
2227 const exc_entry *eb = b;
2229 if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
2235 * Main driver. Emits the code for one routine.
/* Main driver: emits the code for one routine. Registers the emitters,
 * emits prolog, walks blocks to set labels and collect faulting
 * instructions, links each block to its schedule predecessor, emits every
 * block in schedule order, then the epilog and the sorted exception table.
 * NOTE(review): local declarations and some statements are elided in this
 * sampled listing. */
2237 void ia32_gen_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
2239 	ir_entity *entity     = get_irg_entity(irg);
2240 	exc_entry *exc_list   = NEW_ARR_F(exc_entry, 0);
2245 	do_pic       = cg->birg->main_env->options->pic;
2247 	be_gas_elf_type_char = '@';
2249 	ia32_register_emitters();
2251 	get_unique_label(pic_base_label, sizeof(pic_base_label), ".PIC_BASE");
2253 	be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
2254 	be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
2256 	/* we use links to point to target blocks */
2257 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
2258 	irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
2260 	/* initialize next block links */
2261 	n = ARR_LEN(cg->blk_sched);
2262 	for (i = 0; i < n; ++i) {
2263 		ir_node *block = cg->blk_sched[i];
2264 		ir_node *prev  = i > 0 ? cg->blk_sched[i-1] : NULL;
2266 		set_irn_link(block, prev);
2269 	for (i = 0; i < n; ++i) {
2270 		ir_node *block = cg->blk_sched[i];
2272 		ia32_gen_block(block);
2275 	be_gas_emit_function_epilog(entity);
2276 	be_dbg_method_end();
2278 	be_emit_write_line();
2280 	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
2282 	/* Sort the exception table using the exception label id's.
2283 	   Those are ascending with ascending addresses. */
2284 	qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
2288 	for (i = 0; i < ARR_LEN(exc_list); ++i) {
2289 		be_emit_cstring("\t.long ");
2290 		ia32_emit_exc_label(exc_list[i].exc_instr);
2292 		be_emit_cstring("\t.long ");
2293 		ia32_emit_block_name(exc_list[i].block);
2297 	DEL_ARR_F(exc_list);
2300 static const lc_opt_table_entry_t ia32_emitter_options[] = {
2301 LC_OPT_ENT_BOOL("mark_spill_reload", "mark spills and reloads with ud opcodes", &mark_spill_reload),
2305 /* ==== Experimental binary emitter ==== */
2307 static unsigned char reg_gp_map[N_ia32_gp_REGS];
2308 //static unsigned char reg_mmx_map[N_ia32_mmx_REGS];
2309 //static unsigned char reg_sse_map[N_ia32_xmm_REGS];
2310 static unsigned char pnc_map_signed[8];
2311 static unsigned char pnc_map_unsigned[8];
/* Fills the static lookup tables for the binary emitter: gp register ->
 * x86 register number, and pn_Cmp -> condition-code nibble for signed and
 * unsigned/float compares (values match the Jcc/SETcc cc encodings). */
2313 static void build_reg_map(void)
2315 	reg_gp_map[REG_EAX] = 0x0;
2316 	reg_gp_map[REG_ECX] = 0x1;
2317 	reg_gp_map[REG_EDX] = 0x2;
2318 	reg_gp_map[REG_EBX] = 0x3;
2319 	reg_gp_map[REG_ESP] = 0x4;
2320 	reg_gp_map[REG_EBP] = 0x5;
2321 	reg_gp_map[REG_ESI] = 0x6;
2322 	reg_gp_map[REG_EDI] = 0x7;
2324 	pnc_map_signed[pn_Cmp_Eq]    = 0x04;
2325 	pnc_map_signed[pn_Cmp_Lt]    = 0x0C;
2326 	pnc_map_signed[pn_Cmp_Le]    = 0x0E;
2327 	pnc_map_signed[pn_Cmp_Gt]    = 0x0F;
2328 	pnc_map_signed[pn_Cmp_Ge]    = 0x0D;
2329 	pnc_map_signed[pn_Cmp_Lg]    = 0x05;
2331 	pnc_map_unsigned[pn_Cmp_Eq]    = 0x04;
2332 	pnc_map_unsigned[pn_Cmp_Lt]    = 0x02;
2333 	pnc_map_unsigned[pn_Cmp_Le]    = 0x06;
2334 	pnc_map_unsigned[pn_Cmp_Gt]    = 0x07;
2335 	pnc_map_unsigned[pn_Cmp_Ge]    = 0x03;
2336 	pnc_map_unsigned[pn_Cmp_Lg]    = 0x05;
2339 /** Returns the encoding for a pnc field. */
2340 static unsigned char pnc2cc(int pnc)
2343 if (pnc == ia32_pn_Cmp_parity) {
2345 } else if (pnc & ia32_pn_Cmp_float || pnc & ia32_pn_Cmp_unsigned) {
2346 cc = pnc_map_unsigned[pnc & 0x07];
2348 cc = pnc_map_signed[pnc & 0x07];
2354 /** Sign extension bit values for binops */
2356 UNSIGNED_IMM = 0, /**< unsigned immediate */
2357 SIGNEXT_IMM = 2, /**< sign extended immediate */
2360 /** The mod encoding of the ModR/M */
2362 MOD_IND = 0x00, /**< [reg1] */
2363 MOD_IND_BYTE_OFS = 0x40, /**< [reg1 + byte ofs] */
2364 MOD_IND_WORD_OFS = 0x80, /**< [reg1 + word ofs] */
2365 MOD_REG = 0xC0 /**< reg1 */
2368 /** create R/M encoding for ModR/M */
2369 #define ENC_RM(x) (x)
2370 /** create REG encoding for ModR/M */
2371 #define ENC_REG(x) ((x) << 3)
2373 /** create encoding for a SIB byte */
2374 #define ENC_SIB(scale, index, base) ((scale) << 6 | (index) << 3 | (base))
2376 /* Node: The following routines are supposed to append bytes, words, dwords
2377 to the output stream.
2378 Currently the implementation is stupid in that it still creates output
2379 for an "assembler" in the form of .byte, .long
2380 We will change this when enough infrastructure is there to create complete
2381 machine code in memory/object files */
/** Append one byte to the output stream (currently via a .byte directive;
 *  see the note above about real machine-code output). */
static void bemit8(const unsigned char byte)
{
	be_emit_irprintf("\t.byte 0x%x\n", byte);
	be_emit_write_line();
}
/** Append a 16bit word to the output stream (as a .word directive). */
static void bemit16(const unsigned short u16)
{
	be_emit_irprintf("\t.word 0x%x\n", u16);
	be_emit_write_line();
}
/** Append a 32bit dword to the output stream (as a .long directive). */
static void bemit32(const unsigned u32)
{
	be_emit_irprintf("\t.long 0x%x\n", u32);
	be_emit_write_line();
}
2402 * Emit address of an entity. If @p is_relative is true then a relative
2403 * offset from behind the address to the entity is created.
/* Emits the 32bit address of @p entity (plus @p offset) as a .long,
 * with TLS relocation suffixes (@INDNTPOFF/@NTPOFF) where needed and
 * "-." appended for relative addressing. A NULL entity emits just the
 * numeric offset. NOTE(review): several branches (NULL-entity path,
 * sign handling, is_relative parameter line) are elided in this sampled
 * listing. */
2405 static void bemit_entity(ir_entity *entity, bool entity_sign, int offset,
2408 	if (entity == NULL) {
2413 	/* the final version should remember the position in the bytestream
2414 	   and patch it with the correct address at linktime... */
2415 	be_emit_cstring("\t.long ");
2418 	set_entity_backend_marked(entity, 1);
2419 	be_gas_emit_entity(entity);
2421 	if (get_entity_owner(entity) == get_tls_type()) {
2422 		if (get_entity_visibility(entity) == visibility_external_allocated) {
2423 			be_emit_cstring("@INDNTPOFF");
2425 			be_emit_cstring("@NTPOFF");
2430 		be_emit_cstring("-.");
2435 		be_emit_irprintf("%+d", offset);
2438 	be_emit_write_line();
2441 static void bemit_jmp_destination(const ir_node *dest_block)
2443 be_emit_cstring("\t.long ");
2444 ia32_emit_block_name(dest_block);
2445 be_emit_cstring(" - . - 4\n");
2446 be_emit_write_line();
2449 /* end emit routines, all emitters following here should only use the functions
2452 typedef enum reg_modifier {
2457 /** Create a ModR/M byte for src1,src2 registers */
2458 static void bemit_modrr(const arch_register_t *src1,
2459 const arch_register_t *src2)
2461 unsigned char modrm = MOD_REG;
2462 modrm |= ENC_RM(reg_gp_map[src1->index]);
2463 modrm |= ENC_REG(reg_gp_map[src2->index]);
2467 /** Create a ModR/M8 byte for src1,src2 registers */
2468 static void bemit_modrr8(reg_modifier_t high_part1, const arch_register_t *src1,
2469 reg_modifier_t high_part2, const arch_register_t *src2)
2471 unsigned char modrm = MOD_REG;
2472 modrm |= ENC_RM(reg_gp_map[src1->index] + (high_part1 == REG_HIGH ? 4 : 0));
2473 modrm |= ENC_REG(reg_gp_map[src2->index] + (high_part2 == REG_HIGH ? 4 : 0));
2477 /** Create a ModR/M byte for one register and extension */
2478 static void bemit_modru(const arch_register_t *reg, unsigned ext)
2480 unsigned char modrm = MOD_REG;
2482 modrm |= ENC_RM(reg_gp_map[reg->index]);
2483 modrm |= ENC_REG(ext);
2487 /** Create a ModR/M8 byte for one register */
2488 static void bemit_modrm8(reg_modifier_t high_part, const arch_register_t *reg)
2490 unsigned char modrm = MOD_REG;
2491 assert(reg_gp_map[reg->index] < 4);
2492 modrm |= ENC_RM(reg_gp_map[reg->index] + (high_part == REG_HIGH ? 4 : 0));
2498 * Calculate the size of an signed immediate in bytes.
2500 * @param offset an offset
/**
 * Calculate the size in bytes needed to encode @p offset as a signed
 * immediate: 1 for [-128,127], 2 for [-32768,32767], otherwise 4.
 * As shown the function had no return statements (UB for a non-void
 * function) — restored the 1/2/4 results.
 */
static unsigned get_signed_imm_size(int offset)
{
	if (-128 <= offset && offset < 128) {
		return 1;
	} else if (-32768 <= offset && offset < 32768) {
		return 2;
	} else {
		return 4;
	}
}
2514 * Emit an address mode.
2516 * @param reg content of the reg field: either a register index or an opcode extension
2517 * @param node the node
/* Emits a complete x86 address-mode encoding: ModR/M byte, optional SIB
 * byte, and an 8/32bit displacement. Handles the special cases of no base
 * register (EBP encoding + MOD_IND forces a 32bit disp), ESP as base
 * (forces a SIB byte), and EBP as SIB base with zero offset (forces an
 * 8bit disp). @p reg goes into the reg/opcode-extension field.
 * NOTE(review): several statements (emitoffs assignments, sib emission,
 * the entity check in the mod selection) are elided in this sampled
 * listing. */
2519 static void bemit_mod_am(unsigned reg, const ir_node *node)
2521 	ir_entity *ent       = get_ia32_am_sc(node);
2522 	int        offs      = get_ia32_am_offs_int(node);
2523 	ir_node   *base      = get_irn_n(node, n_ia32_base);
2524 	int        has_base  = !is_ia32_NoReg_GP(base);
2525 	ir_node   *index     = get_irn_n(node, n_ia32_index);
2526 	int        has_index = !is_ia32_NoReg_GP(index);
2529 	unsigned   emitoffs  = 0;
2530 	bool       emitsib   = false;
2533 	/* set the mod part depending on displacement */
2535 		modrm |= MOD_IND_WORD_OFS;
2537 	} else if (offs == 0) {
2540 	} else if (-128 <= offs && offs < 128) {
2541 		modrm |= MOD_IND_BYTE_OFS;
2544 		modrm |= MOD_IND_WORD_OFS;
2549 		const arch_register_t *base_reg = arch_get_irn_register(base);
2550 		base_enc = reg_gp_map[base_reg->index];
2552 		/* Use the EBP encoding + MOD_IND if NO base register. There is
2553 		 * always a 32bit offset present in this case. */
2559 	/* Determine if we need a SIB byte. */
2561 		const arch_register_t *reg_index = arch_get_irn_register(index);
2562 		int                    scale     = get_ia32_am_scale(node);
2564 		/* R/M set to ESP means SIB in 32bit mode. */
2565 		modrm |= ENC_RM(0x04);
2566 		sib = ENC_SIB(scale, reg_gp_map[reg_index->index], base_enc);
2568 	} else if (base_enc == 0x04) {
2569 		/* for the above reason we are forced to emit a SIB when base is ESP.
2570 		 * Only the base is used, index must be ESP too, which means no index.
2572 		modrm |= ENC_RM(0x04);
2573 		sib = ENC_SIB(0, 0x04, 0x04);
2576 		modrm |= ENC_RM(base_enc);
2579 	/* We are forced to emit an 8bit offset as EBP base without offset is a
2580 	 * special case for SIB without base register. */
2581 	if (base_enc == 0x05 && emitoffs == 0) {
2582 		modrm |= MOD_IND_BYTE_OFS;
2586 	modrm |= ENC_REG(reg);
2592 	/* emit displacement */
2593 	if (emitoffs == 8) {
2594 		bemit8((unsigned) offs);
2595 	} else if (emitoffs == 32) {
2596 		bemit_entity(ent, is_ia32_am_sc_sign(node), offs, false);
2601 * Emit a binop with a immediate operand.
2603 * @param node the node to emit
2604 * @param opcode_eax the opcode for the op eax, imm variant
2605 * @param opcode the opcode for the reg, imm variant
2606 * @param ruval the opcode extension for opcode
/* Emits a binop whose right operand is an immediate, choosing among three
 * encodings: the sign-extended 8bit form (opcode | SIGNEXT_IMM), the short
 * "op eax, imm32" form, and the generic reg/mem, imm32 form. Symconst
 * immediates always use 32bit encoding. NOTE(review): the size switch and
 * several else-branches are elided in this sampled listing. */
2608 static void bemit_binop_with_imm(
2609 	const ir_node *node,
2610 	unsigned char opcode_ax,
2611 	unsigned char opcode, unsigned char ruval)
2613 	/* Use in-reg, because some instructions (cmp, test) have no out-reg. */
2614 	const ir_node               *op   = get_irn_n(node, n_ia32_binary_right);
2615 	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);
2618 	/* Some instructions (test) have no short form with 32bit value + 8bit
2620 	if (attr->symconst != NULL || opcode & SIGNEXT_IMM) {
2623 		/* check for sign extension */
2624 		size = get_signed_imm_size(attr->offset);
2629 		bemit8(opcode | SIGNEXT_IMM);
2630 		/* cmp has this special mode */
2631 		if (get_ia32_op_type(node) == ia32_AddrModeS) {
2632 			bemit_mod_am(ruval, node);
2634 			const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
2635 			bemit_modru(reg, ruval);
2637 		bemit8((unsigned char)attr->offset);
2641 		/* check for eax variant: this variant is shorter for 32bit immediates only */
2642 		if (get_ia32_op_type(node) == ia32_AddrModeS) {
2644 			bemit_mod_am(ruval, node);
2646 			const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
2647 			if (reg->index == REG_EAX) {
2651 				bemit_modru(reg, ruval);
2654 		bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
2657 		panic("invalid imm size?!?");
/* Emits the reg,reg / reg,mem form of a binop: opcode byte (emission line
 * elided in this listing) followed by ModR/M (register right operand) or
 * a full address mode. */
2663 static void bemit_binop_2(const ir_node *node, unsigned code)
2665 	const arch_register_t *out = get_in_reg(node, n_ia32_binary_left);
2667 	if (get_ia32_op_type(node) == ia32_Normal) {
2668 		const arch_register_t *op2 = get_in_reg(node, n_ia32_binary_right);
2669 		bemit_modrr(op2, out);
2671 		bemit_mod_am(reg_gp_map[out->index], node);
/* Dispatches a binop to the immediate encoder (opcodes[1..3] = eax-form,
 * generic opcode, extension) or the two-operand encoder (opcodes[0]). */
2678 static void bemit_binop(const ir_node *node, const unsigned char opcodes[4])
2680 	ir_node *right = get_irn_n(node, n_ia32_binary_right);
2681 	if (is_ia32_Immediate(right)) {
2682 		bemit_binop_with_imm(node, opcodes[1], opcodes[2], opcodes[3]);
2684 		bemit_binop_2(node, opcodes[0]);
/* Emits a unop with opcode @p code and /ext opcode extension: register
 * operand via ModR/M, otherwise a full address mode (the bemit8(code)
 * line is elided in this sampled listing). */
2691 static void bemit_unop(const ir_node *node, unsigned char code, unsigned char ext, int input)
2694 	if (get_ia32_op_type(node) == ia32_Normal) {
2695 		const arch_register_t *in = get_in_reg(node, input);
2696 		bemit_modru(in, ext);
2698 		bemit_mod_am(ext, node);
2702 static void bemit_unop_reg(const ir_node *node, unsigned char code, int input)
2704 const arch_register_t *out = get_out_reg(node, 0);
2705 bemit_unop(node, code, reg_gp_map[out->index], input);
/* Emits a memory-operand unop; x86 convention: the 8bit opcode is code,
 * the 16/32bit opcode is code+1 (a 16bit case presumably emits an operand
 * size prefix on an elided line — confirm in full file). */
2708 static void bemit_unop_mem(const ir_node *node, unsigned char code, unsigned char ext)
2710 	unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));
2713 	bemit8(size == 8 ? code : code + 1);
2714 	bemit_mod_am(ext, node);
2717 static void bemit_immediate(const ir_node *node, bool relative)
2719 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
2720 bemit_entity(attr->symconst, attr->sc_sign, attr->offset, relative);
/* Binary emitter for a register copy: no-op for identical/unknown/vfp
 * registers, otherwise a mov encoded as opcode + ModR/M (the mode_E branch
 * body and the opcode byte emission are elided in this sampled listing). */
2723 static void bemit_copy(const ir_node *copy)
2725 	const arch_register_t *in  = get_in_reg(copy, 0);
2726 	const arch_register_t *out = get_out_reg(copy, 0);
2728 	if (in == out || is_unknown_reg(in))
2730 	/* copies of vf nodes aren't real... */
2731 	if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
2734 	if (get_irn_mode(copy) == mode_E) {
2737 		assert(arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_gp]);
2739 		bemit_modrr(in, out);
/* Binary emitter for be_Perm: gp registers use the short "xchg eax,reg"
 * opcode (0x90+reg) when eax is involved, otherwise the two-byte xchg via
 * ModR/M (its opcode emission is elided here); xmm/vfp/st perms are not
 * implemented yet. */
2743 static void bemit_perm(const ir_node *node)
2745 	const arch_register_t       *in0  = arch_get_irn_register(get_irn_n(node, 0));
2746 	const arch_register_t       *in1  = arch_get_irn_register(get_irn_n(node, 1));
2747 	const arch_register_class_t *cls0 = arch_register_get_class(in0);
2749 	assert(cls0 == arch_register_get_class(in1) && "Register class mismatch at Perm");
2751 	if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
2752 		if (in0->index == REG_EAX) {
2753 			bemit8(0x90 + reg_gp_map[in1->index]);
2754 		} else if (in1->index == REG_EAX) {
2755 			bemit8(0x90 + reg_gp_map[in0->index]);
2758 			bemit_modrr(in0, in1);
2760 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
2761 		panic("unimplemented"); // TODO implement
2762 		//ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
2763 		//ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
2764 		//ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
2765 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
2767 	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
2770 		panic("unexpected register class in be_Perm (%+F)", node);
2774 static void bemit_xor0(const ir_node *node)
2776 const arch_register_t *out = get_out_reg(node, 0);
2778 bemit_modrr(out, out);
2781 static void bemit_mov_const(const ir_node *node)
2783 const arch_register_t *out = get_out_reg(node, 0);
2784 bemit8(0xB8 + reg_gp_map[out->index]);
2785 bemit_immediate(node, false);
2789 * Creates a function for a Binop with 3 possible encodings.
/* BINOP(op, reg-form, eax-imm-form, imm-form, /ext): generates bemit_<op>
 * dispatching through bemit_binop with the four opcode bytes. */
2791 #define BINOP(op, op0, op1, op2, op2_ext)                                 \
2792 static void bemit_ ## op(const ir_node *node) {                           \
2793 	static const unsigned char op ## _codes[] = {op0, op1, op2, op2_ext}; \
2794 	bemit_binop(node, op ## _codes);                                      \
2797 /*    insn  def  eax,imm imm */
2798 BINOP(add,  0x03, 0x05, 0x81, 0)
2799 BINOP(or,   0x0B, 0x0D, 0x81, 1)
2800 BINOP(adc,  0x13, 0x15, 0x81, 2)
2801 BINOP(sbb,  0x1B, 0x1D, 0x81, 3)
2802 BINOP(and,  0x23, 0x25, 0x81, 4)
2803 BINOP(sub,  0x2B, 0x2D, 0x81, 5)
2804 BINOP(xor,  0x33, 0x35, 0x81, 6)
2805 BINOP(test, 0x85, 0xA9, 0xF7, 0)
/* BINOPMEM(op, ext): generates bemit_<op> and bemit_<op>8bit for binops
 * with a memory destination — immediate right operands pick the 8bit
 * sign-extended or 32bit form, register operands use "ext<<3|1" as opcode
 * with the register in the reg field. NOTE(review): several opcode-byte
 * emissions and braces are elided in this sampled listing. */
2807 #define BINOPMEM(op, ext) \
2808 static void bemit_##op(const ir_node *node) \
2811 	unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2814 	val = get_irn_n(node, n_ia32_unary_op); \
2815 	if (is_ia32_Immediate(val)) { \
2816 		const ia32_immediate_attr_t *attr   = get_ia32_immediate_attr_const(val); \
2817 		int                          offset = attr->offset; \
2818 		if (attr->symconst == NULL && get_signed_imm_size(offset) == 1) { \
2820 			bemit_mod_am(ext, node); \
2824 			bemit_mod_am(ext, node); \
2828 				bemit_entity(attr->symconst, attr->sc_sign, offset, false); \
2832 		bemit8(ext << 3 | 1); \
2833 		bemit_mod_am(reg_gp_map[get_out_reg(val, 0)->index], node); \
2837 static void bemit_##op##8bit(const ir_node *node) \
2839 	ir_node *val = get_irn_n(node, n_ia32_unary_op); \
2840 	if (is_ia32_Immediate(val)) { \
2842 		bemit_mod_am(ext, node); \
2843 		bemit8(get_ia32_immediate_attr_const(val)->offset); \
2846 		bemit_mod_am(reg_gp_map[get_out_reg(val, 0)->index], node); \
2858 * Creates a function for an Unop with code /ext encoding.
/* UNOP(op, code, ext, input): generates bemit_<op> forwarding to
 * bemit_unop; all the 0xF7 group share the opcode and differ in /ext. */
2860 #define UNOP(op, code, ext, input)              \
2861 static void bemit_ ## op(const ir_node *node) { \
2862 	bemit_unop(node, code, ext, input);         \
2865 UNOP(not,     0xF7, 2, n_ia32_Not_val)
2866 UNOP(neg,     0xF7, 3, n_ia32_Neg_val)
2867 UNOP(mul,     0xF7, 4, n_ia32_Mul_right)
2868 UNOP(imul1op, 0xF7, 5, n_ia32_IMul1OP_right)
2869 UNOP(div,     0xF7, 6, n_ia32_Div_divisor)
2870 UNOP(idiv,    0xF7, 7, n_ia32_IDiv_divisor)
2872 /* TODO: am support for IJmp */
2873 UNOP(ijmp,    0xFF, 4, n_ia32_IJmp_target)
/* SHIFT(op, ext): generates bemit_<op> (register destination) and
 * bemit_<op>mem (memory destination). Encodings: 0xD0/0xD1 for shift-by-1,
 * 0xC0/0xC1 for shift-by-imm8, 0xD2/0xD3 for shift-by-cl; the 8bit vs
 * 16/32bit opcode is selected by the ls mode size. NOTE(review): the
 * register-form opcode bytes and the imm8 emission are elided in this
 * sampled listing. */
2875 #define SHIFT(op, ext) \
2876 static void bemit_##op(const ir_node *node) \
2878 	const arch_register_t *out   = get_out_reg(node, 0); \
2879 	ir_node               *count = get_irn_n(node, 1); \
2880 	if (is_ia32_Immediate(count)) { \
2881 		int offset = get_ia32_immediate_attr_const(count)->offset; \
2882 		if (offset == 1) { \
2884 			bemit_modru(out, ext); \
2887 			bemit_modru(out, ext); \
2892 		bemit_modru(out, ext); \
2896 static void bemit_##op##mem(const ir_node *node) \
2899 	unsigned size = get_mode_size_bits(get_ia32_ls_mode(node)); \
2902 	count = get_irn_n(node, 1); \
2903 	if (is_ia32_Immediate(count)) { \
2904 		int offset = get_ia32_immediate_attr_const(count)->offset; \
2905 		if (offset == 1) { \
2906 			bemit8(size == 8 ? 0xD0 : 0xD1); \
2907 			bemit_mod_am(ext, node); \
2909 			bemit8(size == 8 ? 0xC0 : 0xC1); \
2910 			bemit_mod_am(ext, node); \
2914 		bemit8(size == 8 ? 0xD2 : 0xD3); \
2915 		bemit_mod_am(ext, node); \
/* Binary emitter for shld (double-precision shift left): immediate-count
 * and cl-count forms, each ModR/M-encoded with dest in r/m and the low
 * input in reg (the 0x0F-prefixed opcode bytes are elided in this
 * sampled listing). */
2925 static void bemit_shld(const ir_node *node)
2927 	const arch_register_t *in  = get_in_reg(node, n_ia32_ShlD_val_low);
2928 	const arch_register_t *out = get_out_reg(node, pn_ia32_ShlD_res);
2929 	ir_node *count = get_irn_n(node, n_ia32_ShlD_count);
2931 	if (is_ia32_Immediate(count)) {
2933 		bemit_modrr(out, in);
2934 		bemit8(get_ia32_immediate_attr_const(count)->offset);
2937 		bemit_modrr(out, in);
/**
 * Binary emitter for ShrD (double-precision shift right).
 * Count is either an immediate byte or implicitly %cl.
 */
static void bemit_shrd(const ir_node *node)
	const arch_register_t *in = get_in_reg(node, n_ia32_ShrD_val_low);
	const arch_register_t *out = get_out_reg(node, pn_ia32_ShrD_res);
	ir_node *count = get_irn_n(node, n_ia32_ShrD_count);
	if (is_ia32_Immediate(count)) {
		/* shrd $imm8, %in, %out */
		bemit_modrr(out, in);
		bemit8(get_ia32_immediate_attr_const(count)->offset);
		/* shrd %cl, %in, %out */
		bemit_modrr(out, in);
/**
 * Binary emitter for Setcc.
 * For float compares some conditions cannot be expressed with a single
 * setcc; they are built from two setcc results on the low/high byte
 * registers combined with andb/orb.
 */
static void bemit_setcc(const ir_node *node)
	const arch_register_t *dreg = get_out_reg(node, pn_ia32_Setcc_res);

	pn_Cmp pnc = get_ia32_condcode(node);
	pnc = determine_final_pnc(node, n_ia32_Setcc_eflags, pnc);
	if (pnc & ia32_pn_Cmp_float) {
		switch (pnc & 0x0f) {
		/* directly encodable float condition */
		bemit_modrm8(REG_LOW, dreg);
		bemit_modrm8(REG_LOW, dreg);
	/* setcc %dreg (0x0F 0x90+cc) */
	bemit8(0x90 | pnc2cc(pnc));
	bemit_modrm8(REG_LOW, dreg);
	/* second setcc into the high byte register */
	bemit_modrm8(REG_HIGH, dreg);
	/* andb %>dreg, %<dreg */
	bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
	bemit8(0x90 | pnc2cc(pnc));
	bemit_modrm8(REG_LOW, dreg);
	bemit_modrm8(REG_HIGH, dreg);
	/* orb %>dreg, %<dreg */
	bemit_modrr8(REG_LOW, dreg, REG_HIGH, dreg);
	/* non-float case: plain setcc */
	bemit8(0x90 | pnc2cc(pnc));
	bemit_modrm8(REG_LOW, dreg);
/**
 * Binary emitter for CMovcc.
 * The out register must end up equal to in_false before the cmov; if the
 * register allocator assigned out == in_true instead, the condition is
 * negated rather than inserting an extra mov.
 */
static void bemit_cmovcc(const ir_node *node)
	const ia32_attr_t *attr = get_ia32_attr_const(node);
	int ins_permuted = attr->data.ins_permuted;
	const arch_register_t *out = arch_irn_get_register(node, pn_ia32_res);
	pn_Cmp pnc = get_ia32_condcode(node);
	const arch_register_t *in_true;
	const arch_register_t *in_false;

	pnc = determine_final_pnc(node, n_ia32_CMovcc_eflags, pnc);

	in_true = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_true));
	in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMovcc_val_false));

	/* is the should-be-same constraint fulfilled? */
	if (out == in_false) {
		/* yes -> nothing to do */
	} else if (out == in_true) {
		/* out carries the true value: negate the condition instead of moving */
		assert(get_ia32_op_type(node) == ia32_Normal);
		ins_permuted = !ins_permuted;
		/* out is a third register: copy the false value into it first */
		bemit8(0x8B); // mov %in_false, %out
		bemit_modrr(in_false, out);

	pnc = ia32_get_negated_pnc(pnc);

	/* TODO: handling of Nans isn't correct yet */

	/* cmovcc: 0x0F 0x40+cc */
	bemit8(0x40 | pnc2cc(pnc));
	if (get_ia32_op_type(node) == ia32_Normal) {
		bemit_modrr(in_true, out);
	/* address-mode source */
	bemit_mod_am(reg_gp_map[out->index], node);
/**
 * Binary emitter for Cmp (16/32 bit).
 * Chooses between the sign-extended imm8 form (0x83), the full-immediate
 * form (0x81, with the short EAX variant where applicable) and the
 * register/address-mode form.
 */
static void bemit_cmp(const ir_node *node)
	unsigned ls_size = get_mode_size_bits(get_ia32_ls_mode(node));

	right = get_irn_n(node, n_ia32_binary_right);
	if (is_ia32_Immediate(right)) {
		/* Use in-reg, because some instructions (cmp, test) have no out-reg. */
		/* NOTE(review): `op` re-fetches the same input as `right` above */
		const ir_node *op = get_irn_n(node, n_ia32_binary_right);
		const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(op);

		if (attr->symconst != NULL) {
		/* check for sign extension */
		size = get_signed_imm_size(attr->offset);
		/* imm8 fits: use the sign-extended encoding */
		bemit8(0x81 | SIGNEXT_IMM);
		/* cmp has this special mode */
		if (get_ia32_op_type(node) == ia32_AddrModeS) {
			bemit_mod_am(7, node);
			const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
			bemit_modru(reg, 7);
		bemit8((unsigned char)attr->offset);
		/* check for eax variant: this variant is shorter for 32bit immediates only */
		if (get_ia32_op_type(node) == ia32_AddrModeS) {
			bemit_mod_am(7, node);
			const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
			if (reg->index == REG_EAX) {
			bemit_modru(reg, 7);
		if (ls_size == 16) {
			bemit16(attr->offset);
		bemit_entity(attr->symconst, attr->sc_sign, attr->offset, false);
		panic("invalid imm size?!?");
		/* non-immediate right operand */
		const arch_register_t *out = get_in_reg(node, n_ia32_binary_left);
		if (get_ia32_op_type(node) == ia32_Normal) {
			const arch_register_t *op2 = get_in_reg(node, n_ia32_binary_right);
			bemit_modrr(op2, out);
		bemit_mod_am(reg_gp_map[out->index], node);
/**
 * Binary emitter for 8-bit Cmp, with the short EAX (AL,imm8) variant.
 */
static void bemit_cmp8bit(const ir_node *node)
	ir_node *right = get_irn_n(node, n_ia32_binary_right);
	if (is_ia32_Immediate(right)) {
		if (get_ia32_op_type(node) == ia32_Normal) {
			const arch_register_t *out = get_in_reg(node, n_ia32_Cmp_left);
			if (out->index == REG_EAX) {
				/* short cmpb $imm8, %al form */
			bemit_modru(out, 7);
			/* address-mode destination, /7 = cmp */
			bemit_mod_am(7, node);
		bemit8(get_ia32_immediate_attr_const(right)->offset);
		/* register/memory operand */
		const arch_register_t *out = get_in_reg(node, n_ia32_Cmp_left);
		if (get_ia32_op_type(node) == ia32_Normal) {
			const arch_register_t *in = get_in_reg(node, n_ia32_Cmp_right);
			bemit_modrr(out, in);
		bemit_mod_am(reg_gp_map[out->index], node);
/**
 * Binary emitter for 8-bit Test, with the short EAX (AL,imm8) variant.
 */
static void bemit_test8bit(const ir_node *node)
	ir_node *right = get_irn_n(node, n_ia32_Test8Bit_right);
	if (is_ia32_Immediate(right)) {
		if (get_ia32_op_type(node) == ia32_Normal) {
			const arch_register_t *out = get_in_reg(node, n_ia32_Test8Bit_left);
			if (out->index == REG_EAX) {
				/* short testb $imm8, %al form */
			bemit_modru(out, 0);
			/* address-mode destination, /0 = test */
			bemit_mod_am(0, node);
		bemit8(get_ia32_immediate_attr_const(right)->offset);
		/* register/memory operand */
		const arch_register_t *out = get_in_reg(node, n_ia32_Test8Bit_left);
		if (get_ia32_op_type(node) == ia32_Normal) {
			const arch_register_t *in = get_in_reg(node, n_ia32_Test8Bit_right);
			bemit_modrr(out, in);
		bemit_mod_am(reg_gp_map[out->index], node);
/**
 * Binary emitter for IMul: 0x6B (reg,rm,imm8), 0x69 (reg,rm,imm32)
 * or the two-operand 0x0F 0xAF form for non-immediate operands.
 */
static void bemit_imul(const ir_node *node)
	ir_node *right = get_irn_n(node, n_ia32_IMul_right);
	/* Do we need the immediate form? */
	if (is_ia32_Immediate(right)) {
		int imm = get_ia32_immediate_attr_const(right)->offset;
		if (get_signed_imm_size(imm) == 1) {
			/* imm fits into a sign-extended byte */
			bemit_unop_reg(node, 0x6B, n_ia32_IMul_left);
			bemit_unop_reg(node, 0x69, n_ia32_IMul_left);
		/* imul %right, %left (0x0F 0xAF) */
		bemit_unop_reg(node, 0xAF, n_ia32_IMul_right);
/** Binary emitter for Dec: short one-byte form 0x48+reg. */
static void bemit_dec(const ir_node *node)
	const arch_register_t *out = get_out_reg(node, pn_ia32_Dec_res);
	bemit8(0x48 + reg_gp_map[out->index]);

/** Binary emitter for Inc: short one-byte form 0x40+reg. */
static void bemit_inc(const ir_node *node)
	const arch_register_t *out = get_out_reg(node, pn_ia32_Inc_res);
	bemit8(0x40 + reg_gp_map[out->index]);
/**
 * Creates an emitter function for a unary operation with a memory
 * destination (code = opcode byte, ext = /ext opcode extension).
 */
#define UNOPMEM(op, code, ext) \
static void bemit_##op(const ir_node *node) \
	bemit_unop_mem(node, code, ext); \

UNOPMEM(notmem, 0xF6, 2)
UNOPMEM(negmem, 0xF6, 3)
UNOPMEM(incmem, 0xFE, 0)
UNOPMEM(decmem, 0xFE, 1)
/**
 * Binary emitter for LdTls: load from the %gs segment (TLS base),
 * using the short 0xA1 form when the destination is %eax.
 */
static void bemit_ldtls(const ir_node *node)
	const arch_register_t *out = get_out_reg(node, 0);

	bemit8(0x65); // gs:
	if (out->index == REG_EAX) {
		bemit8(0xA1); // movl 0, %eax
		bemit8(0x8B); // movl 0, %reg
		bemit8(MOD_IND | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x05));
/** Emit a Lea (load effective address). */
static void bemit_lea(const ir_node *node)
	const arch_register_t *out = get_out_reg(node, 0);
	bemit_mod_am(reg_gp_map[out->index], node);
/* helper function for bemit_minus64bit */
static void bemit_helper_mov(const arch_register_t *src, const arch_register_t *dst)
	bemit8(0x8B); // movl %src, %dst
	bemit_modrr(src, dst);

/* helper function for bemit_minus64bit */
static void bemit_helper_neg(const arch_register_t *reg)
	bemit8(0xF7); // negl %reg
	bemit_modru(reg, 3);

/* helper function for bemit_minus64bit */
static void bemit_helper_sbb0(const arch_register_t *reg)
	bemit8(0x83); // sbbl $0, %reg
	bemit_modru(reg, 3);

/* helper function for bemit_minus64bit */
static void bemit_helper_sbb(const arch_register_t *src, const arch_register_t *dst)
	bemit8(0x1B); // sbbl %src, %dst
	bemit_modrr(src, dst);

/* helper function for bemit_minus64bit */
static void bemit_helper_xchg(const arch_register_t *src, const arch_register_t *dst)
	/* use the one-byte xchg-with-eax form when possible */
	if (src->index == REG_EAX) {
		bemit8(0x90 + reg_gp_map[dst->index]); // xchgl %eax, %dst
	} else if (dst->index == REG_EAX) {
		bemit8(0x90 + reg_gp_map[src->index]); // xchgl %src, %eax
		bemit8(0x87); // xchgl %src, %dst
		bemit_modrr(src, dst);

/* helper function for bemit_minus64bit */
static void bemit_helper_zero(const arch_register_t *reg)
	bemit8(0x33); // xorl %reg, %reg
	bemit_modrr(reg, reg);
/**
 * Binary emitter for Minus64Bit (64-bit negation over a lo/hi register
 * pair). First moves the inputs into the output registers, handling all
 * possible register-assignment overlaps (the a/b -> a/b/c/d comments
 * name in_lo/in_hi -> out_lo/out_hi cases), then emits the neg/sbb
 * sequence.
 */
static void bemit_minus64bit(const ir_node *node)
	const arch_register_t *in_lo = get_in_reg(node, 0);
	const arch_register_t *in_hi = get_in_reg(node, 1);
	const arch_register_t *out_lo = get_out_reg(node, 0);
	const arch_register_t *out_hi = get_out_reg(node, 1);

	if (out_lo == in_lo) {
		if (out_hi != in_hi) {
			/* a -> a, b -> d */
			/* a -> a, b -> b */
	} else if (out_lo == in_hi) {
		if (out_hi == in_lo) {
			/* a -> b, b -> a */
			bemit_helper_xchg(in_lo, in_hi);
			/* a -> b, b -> d */
			bemit_helper_mov(in_hi, out_hi);
			bemit_helper_mov(in_lo, out_lo);
		if (out_hi == in_lo) {
			/* a -> c, b -> a */
			bemit_helper_mov(in_lo, out_lo);
		} else if (out_hi == in_hi) {
			/* a -> c, b -> b */
			bemit_helper_mov(in_lo, out_lo);
			/* a -> c, b -> d */
			bemit_helper_mov(in_lo, out_lo);

	/* negate: negl hi; negl lo; sbbl $0, hi */
	bemit_helper_neg( out_hi);
	bemit_helper_neg( out_lo);
	bemit_helper_sbb0(out_hi);

	/* alternative sequence: xor hi; negl lo; sbbl in_hi, hi */
	bemit_helper_zero(out_hi);
	bemit_helper_neg( out_lo);
	bemit_helper_sbb( in_hi, out_hi);
/**
 * Emit a single opcode (one-byte instruction with no operands).
 */
#define EMIT_SINGLEOP(op, code) \
static void bemit_ ## op(const ir_node *node) { \

//EMIT_SINGLEOP(daa, 0x27)
//EMIT_SINGLEOP(das, 0x2F)
//EMIT_SINGLEOP(aaa, 0x37)
//EMIT_SINGLEOP(aas, 0x3F)
//EMIT_SINGLEOP(nop, 0x90)
EMIT_SINGLEOP(cwtl, 0x98)
EMIT_SINGLEOP(cltd, 0x99)
//EMIT_SINGLEOP(fwait, 0x9B)
EMIT_SINGLEOP(sahf, 0x9E)
//EMIT_SINGLEOP(popf, 0x9D)
EMIT_SINGLEOP(leave, 0xC9)
EMIT_SINGLEOP(int3, 0xCC)
//EMIT_SINGLEOP(iret, 0xCF)
//EMIT_SINGLEOP(xlat, 0xD7)
//EMIT_SINGLEOP(lock, 0xF0)
EMIT_SINGLEOP(rep, 0xF3)
//EMIT_SINGLEOP(halt, 0xF4)
EMIT_SINGLEOP(cmc, 0xF5)
EMIT_SINGLEOP(stc, 0xF9)
//EMIT_SINGLEOP(cli, 0xFA)
//EMIT_SINGLEOP(sti, 0xFB)
//EMIT_SINGLEOP(std, 0xFD)
/**
 * Emits a MOV out, [MEM].
 * Uses the short moffs encoding when loading from a constant address
 * into %eax (no base and no index register).
 */
static void bemit_load(const ir_node *node)
	const arch_register_t *out = get_out_reg(node, 0);

	if (out->index == REG_EAX) {
		ir_node *base = get_irn_n(node, n_ia32_base);
		int has_base = !is_ia32_NoReg_GP(base);
		ir_node *index = get_irn_n(node, n_ia32_index);
		int has_index = !is_ia32_NoReg_GP(index);
		if (!has_base && !has_index) {
			ir_entity *ent = get_ia32_am_sc(node);
			int offs = get_ia32_am_offs_int(node);
			/* load from constant address to EAX can be encoded
			bemit_entity(ent, 0, offs, false);

	/* generic mov with address mode */
	bemit_mod_am(reg_gp_map[out->index], node);
/**
 * Emits a MOV [mem], in.
 * Immediate stores use size-dependent encodings (8/16/32 bit); register
 * stores from %eax to a constant address use the short 0xA2/0xA3 moffs
 * encoding.
 */
static void bemit_store(const ir_node *node)
	const ir_node *value = get_irn_n(node, n_ia32_Store_val);
	unsigned size = get_mode_size_bits(get_ia32_ls_mode(node));

	if (is_ia32_Immediate(value)) {
		/* 8-bit immediate store */
		bemit_mod_am(0, node);
		bemit8(get_ia32_immediate_attr_const(value)->offset);
	} else if (size == 16) {
		/* 16-bit immediate store */
		bemit_mod_am(0, node);
		bemit16(get_ia32_immediate_attr_const(value)->offset);
		/* 32-bit immediate store */
		bemit_mod_am(0, node);
		bemit_immediate(value, false);
		/* register store */
		const arch_register_t *in = get_in_reg(node, n_ia32_Store_val);

		if (in->index == REG_EAX) {
			ir_node *base = get_irn_n(node, n_ia32_base);
			int has_base = !is_ia32_NoReg_GP(base);
			ir_node *index = get_irn_n(node, n_ia32_index);
			int has_index = !is_ia32_NoReg_GP(index);
			if (!has_base && !has_index) {
				ir_entity *ent = get_ia32_am_sc(node);
				int offs = get_ia32_am_offs_int(node);
				/* store to constant address from EAX can be encoded as
				 * 0xA2/0xA3 [offset]*/
				bemit_entity(ent, 0, offs, false);

		/* generic mov with address mode */
		bemit_mod_am(reg_gp_map[in->index], node);
/**
 * Binary emitter for Conv_I2I (integer widening conversion).
 * Builds the movzx/movsx opcode byte: +0x08 for signed (movsx),
 * +0x01 for a 16-bit source.
 */
static void bemit_conv_i2i(const ir_node *node)
	ir_mode *smaller_mode = get_ia32_ls_mode(node);

	if (mode_is_signed(smaller_mode)) opcode |= 0x08;
	if (get_mode_size_bits(smaller_mode) == 16) opcode |= 0x01;
	bemit_unop_reg(node, opcode, n_ia32_Conv_I2I_val);
/**
 * Binary emitter for Push: immediate (imm8 or imm32 form), memory
 * operand (0xFF /6) or the short 0x50+reg register form.
 */
static void bemit_push(const ir_node *node)
	const ir_node *value = get_irn_n(node, n_ia32_Push_val);

	if (is_ia32_Immediate(value)) {
		const ia32_immediate_attr_t *attr
			= get_ia32_immediate_attr_const(value);
		unsigned size = get_signed_imm_size(attr->offset);
		/* pushl $imm8 (sign extended) */
		bemit8((unsigned char)attr->offset);
		/* pushl $imm32 */
		bemit_immediate(value, false);
	} else if (is_ia32_NoReg_GP(value)) {
		/* push from memory: /6 */
		bemit_mod_am(6, node);
		/* push register: short form */
		const arch_register_t *reg = get_in_reg(node, n_ia32_Push_val);
		bemit8(0x50 + reg_gp_map[reg->index]);
/** Binary emitter for Pop into a register: short form 0x58+reg. */
static void bemit_pop(const ir_node *node)
	const arch_register_t *reg = get_out_reg(node, pn_ia32_Pop_res);
	bemit8(0x58 + reg_gp_map[reg->index]);

/** Binary emitter for Pop with a memory destination. */
static void bemit_popmem(const ir_node *node)
	bemit_mod_am(0, node);
/**
 * Binary emitter for Call: direct call with relative immediate, or
 * indirect call (0xFF /2) through register/memory.
 */
static void bemit_call(const ir_node *node)
	ir_node *proc = get_irn_n(node, n_ia32_Call_addr);

	if (is_ia32_Immediate(proc)) {
		/* call rel32 — immediate is emitted pc-relative */
		bemit_immediate(proc, true);
		bemit_unop(node, 0xFF, 2, n_ia32_Call_addr);
/** Emit an unconditional jump to dest_block. */
static void bemit_jmp(const ir_node *dest_block)
	bemit_jmp_destination(dest_block);

/** Emit a Jmp node; omitted entirely if the target is the fallthrough block. */
static void bemit_jump(const ir_node *node)
	if (can_be_fallthrough(node))

	bemit_jmp(get_cfop_target_block(node));

/** Emit a conditional jump (jcc) for condition code pnc to dest_block. */
static void bemit_jcc(int pnc, const ir_node *dest_block)
	unsigned char cc = pnc2cc(pnc);
	bemit_jmp_destination(dest_block);

/** Emit a jp (odd) or jnp (!odd) parity jump to dest_block. */
static void bemit_jp(bool odd, const ir_node *dest_block)
	bemit_jmp_destination(dest_block);
/**
 * Binary emitter for Jcc.
 * Swaps the two Proj targets when the true Proj is the fallthrough (so
 * one jump can be omitted) and inserts the extra parity jumps required
 * by unordered float comparisons.
 */
static void bemit_ia32_jcc(const ir_node *node)
	int pnc = get_ia32_condcode(node);
	const ir_node *proj_true;
	const ir_node *proj_false;
	const ir_node *dest_true;
	const ir_node *dest_false;
	const ir_node *block;

	pnc = determine_final_pnc(node, 0, pnc);

	/* get both Projs */
	proj_true = get_proj(node, pn_ia32_Jcc_true);
	assert(proj_true && "Jcc without true Proj");

	proj_false = get_proj(node, pn_ia32_Jcc_false);
	assert(proj_false && "Jcc without false Proj");

	block = get_nodes_block(node);

	if (can_be_fallthrough(proj_true)) {
		/* exchange both proj's so the second one can be omitted */
		const ir_node *t = proj_true;

		proj_true = proj_false;
		pnc = ia32_get_negated_pnc(pnc);

	dest_true = get_cfop_target_block(proj_true);
	dest_false = get_cfop_target_block(proj_false);

	if (pnc & ia32_pn_Cmp_float) {
		/* Some floating point comparisons require a test of the parity flag,
		 * which indicates that the result is unordered */
		bemit_jp(false, dest_true);
		bemit_jp(true, dest_true);

		/* we need a local label if the false proj is a fallthrough
		 * as the falseblock might have no label emitted then */
		if (can_be_fallthrough(proj_false)) {
			bemit8(0x06); // jp + 6
			bemit_jp(false, dest_false);
		bemit_jp(false, dest_true);
		/* integer condition: a single jcc suffices */
		bemit_jcc(pnc, dest_true);

	/* the second Proj might be a fallthrough */
	if (can_be_fallthrough(proj_false)) {
		/* it's a fallthrough */
		bemit_jmp(dest_false);
/**
 * Binary emitter for SwitchJmp: bounds-check the selector, then an
 * indirect jump through a table of .long entries emitted into the
 * read-only data section.
 */
static void bemit_switchjmp(const ir_node *node)
	unsigned long interval;
	const arch_register_t *in;

	/* fill the table structure */
	generate_jump_table(&tbl, node);

	/* two's complement magic makes this work without overflow */
	interval = tbl.max_value - tbl.min_value;

	in = get_in_reg(node, 0);
	/* emit the table */
	if (get_signed_imm_size(interval) == 1) {
		bemit8(0x83); // cmpl $imm8, %in
		bemit8(0x81); // cmpl $imm32, %in

	bemit8(0x0F); // ja tbl.defProj
	ia32_emitf(tbl.defProj, ".long %L - . - 4\n");

	if (tbl.num_branches > 1) {
		bemit8(0xFF); // jmp *tbl.label(,%in,4)
		bemit8(MOD_IND | ENC_REG(4) | ENC_RM(0x04));
		bemit8(ENC_SIB(2, reg_gp_map[in->index], 0x05));
		be_emit_irprintf("\t.long %s\n", tbl.label);

		/* the jump table itself goes to rodata */
		be_gas_emit_switch_section(GAS_SECTION_RODATA);
		be_emit_cstring(".align 4\n");
		be_emit_irprintf("%s:\n", tbl.label);

		last_value = tbl.branches[0].value;
		for (i = 0; i != tbl.num_branches; ++i) {
			/* fill holes in the case range with the default target */
			while (last_value != tbl.branches[i].value) {
				ia32_emitf(tbl.defProj, ".long %L\n");
			ia32_emitf(tbl.branches[i].target, ".long %L\n");

		be_gas_emit_switch_section(GAS_SECTION_TEXT);
		/* one jump is enough */
		panic("switch only has one case");
		//ia32_emitf(tbl.branches[0].target, "\tjmp %L\n");

	be_emit_write_line();
/**
 * Binary emitter for Return: plain ret, or ret $imm16 when the callee
 * must pop arguments from the stack.
 */
static void bemit_return(const ir_node *node)
	unsigned pop = be_Return_get_pop(node);
	if (pop > 0 || be_Return_get_emit_pop(node)) {
		/* ret imm16 — the pop amount must fit into 16 bits */
		assert(pop <= 0xffff);
/**
 * Binary emitter for SubSP: adjusts %esp and copies the new stack
 * pointer into the second result register.
 */
static void bemit_subsp(const ir_node *node)
	const arch_register_t *out;
	/* mov %esp, %out */
	out = get_out_reg(node, 1);
	bemit8(MOD_REG | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x04));

/**
 * Binary emitter for IncSP: add/sub an immediate to the stack pointer,
 * choosing the imm8 (0x83) or imm32 (0x81) encoding by offset size.
 */
static void bemit_incsp(const ir_node *node)
	const arch_register_t *reg;

	offs = be_get_IncSP_offset(node);
	size = get_signed_imm_size(offs);
	bemit8(size == 1 ? 0x83 : 0x81);
	reg = get_out_reg(node, 0);
	bemit_modru(reg, ext);
/**
 * Binary emitter for CopyB_i (memory copy with constant size), lowered
 * to movsb/movsw/movsl string instructions.
 */
static void bemit_copybi(const ir_node *node)
	unsigned size = get_ia32_copyb_size(node);
	bemit8(0xA4); // movsb
	bemit8(0xA5); // movsw
	bemit8(0xA5); // movsl
/**
 * Binary emitter helper for x87 binary operations.
 * code is the /ext for the st(0)-destination direction, code_to for the
 * st(i)-destination direction; the address-mode case picks the 32/64-bit
 * memory opcode by the load/store mode size.
 */
static void bemit_fbinop(const ir_node *node, unsigned code, unsigned code_to)
	if (get_ia32_op_type(node) == ia32_Normal) {
		const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
		const arch_register_t *in1 = x87_attr->x87[0];
		const arch_register_t *in = x87_attr->x87[1];
		const arch_register_t *out = x87_attr->x87[2];

		} else if (out == in) {

		if (out->index == 0) {
			/* st(0) is the destination */
			bemit8(MOD_REG | ENC_REG(code) | ENC_RM(in->index));
			/* st(i) is the destination */
			bemit8(MOD_REG | ENC_REG(code_to) | ENC_RM(out->index));
		/* memory operand: 32-bit vs 64-bit opcode */
		if (get_mode_size_bits(get_ia32_ls_mode(node)) == 32) {
		bemit_mod_am(code, node);

/**
 * Binary emitter helper for x87 binary operations with a register pop
 * (faddp/fmulp/…): code is the base opcode, st(i) index is added.
 */
static void bemit_fbinopp(const ir_node *node, unsigned const code)
	const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
	const arch_register_t *out = x87_attr->x87[2];
	bemit8(code + out->index);
/** Emit fabs (x87 absolute value). */
static void bemit_fabs(const ir_node *node)

/** Emit fadd: /0 in both directions. */
static void bemit_fadd(const ir_node *node)
	bemit_fbinop(node, 0, 0);

/** Emit faddp st(0), st(i). */
static void bemit_faddp(const ir_node *node)
	bemit_fbinopp(node, 0xC0);

/** Emit fchs (x87 sign change). */
static void bemit_fchs(const ir_node *node)

/** Emit fdiv: /6, reversed direction /7. */
static void bemit_fdiv(const ir_node *node)
	bemit_fbinop(node, 6, 7);

/** Emit fdivp st(0), st(i). */
static void bemit_fdivp(const ir_node *node)
	bemit_fbinopp(node, 0xF8);

/** Emit fdivr: /7, reversed direction /6. */
static void bemit_fdivr(const ir_node *node)
	bemit_fbinop(node, 7, 6);

/** Emit fdivrp st(0), st(i). */
static void bemit_fdivrp(const ir_node *node)
	bemit_fbinopp(node, 0xF0);

/** Emit fild (integer load) for 16/32/64-bit sources. */
static void bemit_fild(const ir_node *node)
	switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
		bemit8(0xDF); // filds
		bemit_mod_am(0, node);

		bemit8(0xDB); // fildl
		bemit_mod_am(0, node);

		bemit8(0xDF); // fildll
		bemit_mod_am(5, node);

		panic("invalid mode size");
/** Emit fist (integer store without pop) for 16/32-bit targets. */
static void bemit_fist(const ir_node *node)
	switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
		bemit8(0xDF); // fists

		bemit8(0xDB); // fistl

		panic("invalid mode size");
	bemit_mod_am(2, node);

/** Emit fistp (integer store with pop) for 16/32/64-bit targets. */
static void bemit_fistp(const ir_node *node)
	switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
		bemit8(0xDF); // fistps
		bemit_mod_am(3, node);

		bemit8(0xDB); // fistpl
		bemit_mod_am(3, node);

		bemit8(0xDF); // fistpll
		bemit_mod_am(7, node);

		panic("invalid mode size");

/** Emit fld (float load) for 32/64/80-bit sources. */
static void bemit_fld(const ir_node *node)
	switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
		bemit8(0xD9); // flds
		bemit_mod_am(0, node);

		bemit8(0xDD); // fldl
		bemit_mod_am(0, node);

		bemit8(0xDB); // fldt
		bemit_mod_am(5, node);

		panic("invalid mode size");
/** Emit fld1 (push constant 1.0). */
static void bemit_fld1(const ir_node *node)
	bemit8(0xE8); // fld1

/** Emit fldcw (load x87 control word from memory). */
static void bemit_fldcw(const ir_node *node)
	bemit8(0xD9); // fldcw
	bemit_mod_am(5, node);

/** Emit fldz (push constant 0.0). */
static void bemit_fldz(const ir_node *node)
	bemit8(0xEE); // fldz

/** Emit fmul: /1 in both directions. */
static void bemit_fmul(const ir_node *node)
	bemit_fbinop(node, 1, 1);

/** Emit fmulp st(0), st(i). */
static void bemit_fmulp(const ir_node *node)
	bemit_fbinopp(node, 0xC8);
/** Emit an x87 pop (ffreep-style) of the register in x87[0]. */
static void bemit_fpop(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xD8 + attr->x87[0]->index);

/** Emit an x87 push (fld st(i)) of the register in x87[0]. */
static void bemit_fpush(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xC0 + attr->x87[0]->index);

/** Emit an x87 push-copy (fld st(i)); same encoding as fpush. */
static void bemit_fpushcopy(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xC0 + attr->x87[0]->index);
/** Emit fst (float store without pop) for 32/64-bit targets. */
static void bemit_fst(const ir_node *node)
	switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
		bemit8(0xD9); // fsts

		bemit8(0xDD); // fstl

		panic("invalid mode size");
	bemit_mod_am(2, node);

/** Emit fstp (float store with pop) for 32/64/80-bit targets. */
static void bemit_fstp(const ir_node *node)
	switch (get_mode_size_bits(get_ia32_ls_mode(node))) {
		bemit8(0xD9); // fstps
		bemit_mod_am(3, node);

		bemit8(0xDD); // fstpl
		bemit_mod_am(3, node);

		bemit8(0xDB); // fstpt
		bemit_mod_am(7, node);

		panic("invalid mode size");

/** Emit fsub: /4, reversed direction /5. */
static void bemit_fsub(const ir_node *node)
	bemit_fbinop(node, 4, 5);

/** Emit fsubp st(0), st(i). */
static void bemit_fsubp(const ir_node *node)
	bemit_fbinopp(node, 0xE8);

/** Emit fsubr: /5, reversed direction /4. */
static void bemit_fsubr(const ir_node *node)
	bemit_fbinop(node, 5, 4);

/** Emit fsubrp st(0), st(i). */
static void bemit_fsubrp(const ir_node *node)
	bemit_fbinopp(node, 0xE0);
/** Emit fnstcw (store x87 control word to memory). */
static void bemit_fnstcw(const ir_node *node)
	bemit8(0xD9); // fnstcw
	bemit_mod_am(7, node);

/** Emit fnstsw %ax (store x87 status word). */
static void bemit_fnstsw(void)
	bemit8(0xDF); // fnstsw %ax

/** Emit ftst followed by fnstsw %ax (compare st(0) with 0.0). */
static void bemit_ftstfnstsw(const ir_node *node)
	bemit8(0xD9); // ftst
/** Emit fucomi st(0), st(i) (unordered compare setting EFLAGS). */
static void bemit_fucomi(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xDB); // fucomi
	bemit8(0xE8 + attr->x87[1]->index);

/** Emit fucomip st(0), st(i) (compare, set EFLAGS, pop). */
static void bemit_fucomip(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xDF); // fucomip
	bemit8(0xE8 + attr->x87[1]->index);

/** Emit fucom st(i) followed by fnstsw %ax. */
static void bemit_fucomfnstsw(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xDD); // fucom
	bemit8(0xE0 + attr->x87[1]->index);

/** Emit fucomp st(i) followed by fnstsw %ax. */
static void bemit_fucompfnstsw(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xDD); // fucomp
	bemit8(0xE8 + attr->x87[1]->index);

/** Emit fucompp followed by fnstsw %ax (compare and pop twice). */
static void bemit_fucomppfnstsw(const ir_node *node)
	bemit8(0xDA); // fucompp

/** Emit fxch st(i) (exchange with st(0)). */
static void bemit_fxch(const ir_node *node)
	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
	bemit8(0xC8 + attr->x87[0]->index);
/**
 * The type of an emitter function.
 */
typedef void (*emit_func) (const ir_node *);

/**
 * Set a node emitter. Make it a bit more type safe.
 */
static void register_emitter(ir_op *op, emit_func func)
	op->ops.generic = (op_func) func;
/**
 * Registers the binary (machine code) emitter function for every node
 * opcode in the generic function pointer of its ir_op.
 */
static void ia32_register_binary_emitters(void)
	/* first clear the generic function pointer for all ops */
	clear_irp_opcodes_generic_func();

	/* benode emitter */
	register_emitter(op_be_Copy, bemit_copy);
	register_emitter(op_be_CopyKeep, bemit_copy);
	register_emitter(op_be_IncSP, bemit_incsp);
	register_emitter(op_be_Perm, bemit_perm);
	register_emitter(op_be_Return, bemit_return);
	register_emitter(op_ia32_Adc, bemit_adc);
	register_emitter(op_ia32_Add, bemit_add);
	register_emitter(op_ia32_AddMem, bemit_addmem);
	register_emitter(op_ia32_AddMem8Bit, bemit_addmem8bit);
	register_emitter(op_ia32_And, bemit_and);
	register_emitter(op_ia32_AndMem, bemit_andmem);
	register_emitter(op_ia32_AndMem8Bit, bemit_andmem8bit);
	register_emitter(op_ia32_Breakpoint, bemit_int3);
	register_emitter(op_ia32_CMovcc, bemit_cmovcc);
	register_emitter(op_ia32_Call, bemit_call);
	register_emitter(op_ia32_Cltd, bemit_cltd);
	register_emitter(op_ia32_Cmc, bemit_cmc);
	register_emitter(op_ia32_Cmp, bemit_cmp);
	register_emitter(op_ia32_Cmp8Bit, bemit_cmp8bit);
	register_emitter(op_ia32_Const, bemit_mov_const);
	register_emitter(op_ia32_Conv_I2I, bemit_conv_i2i);
	register_emitter(op_ia32_Conv_I2I8Bit, bemit_conv_i2i);
	register_emitter(op_ia32_CopyB_i, bemit_copybi);
	register_emitter(op_ia32_Cwtl, bemit_cwtl);
	register_emitter(op_ia32_Dec, bemit_dec);
	register_emitter(op_ia32_DecMem, bemit_decmem);
	register_emitter(op_ia32_Div, bemit_div);
	register_emitter(op_ia32_FldCW, bemit_fldcw);
	register_emitter(op_ia32_FnstCW, bemit_fnstcw);
	register_emitter(op_ia32_FtstFnstsw, bemit_ftstfnstsw);
	register_emitter(op_ia32_FucomFnstsw, bemit_fucomfnstsw);
	register_emitter(op_ia32_Fucomi, bemit_fucomi);
	register_emitter(op_ia32_FucompFnstsw, bemit_fucompfnstsw);
	register_emitter(op_ia32_Fucompi, bemit_fucomip);
	register_emitter(op_ia32_FucomppFnstsw, bemit_fucomppfnstsw);
	register_emitter(op_ia32_IDiv, bemit_idiv);
	register_emitter(op_ia32_IJmp, bemit_ijmp);
	register_emitter(op_ia32_IMul, bemit_imul);
	register_emitter(op_ia32_IMul1OP, bemit_imul1op);
	register_emitter(op_ia32_Inc, bemit_inc);
	register_emitter(op_ia32_IncMem, bemit_incmem);
	register_emitter(op_ia32_Jcc, bemit_ia32_jcc);
	register_emitter(op_ia32_Jmp, bemit_jump);
	register_emitter(op_ia32_LdTls, bemit_ldtls);
	register_emitter(op_ia32_Lea, bemit_lea);
	register_emitter(op_ia32_Leave, bemit_leave);
	register_emitter(op_ia32_Load, bemit_load);
	register_emitter(op_ia32_Minus64Bit, bemit_minus64bit);
	register_emitter(op_ia32_Mul, bemit_mul);
	register_emitter(op_ia32_Neg, bemit_neg);
	register_emitter(op_ia32_NegMem, bemit_negmem);
	register_emitter(op_ia32_Not, bemit_not);
	register_emitter(op_ia32_NotMem, bemit_notmem);
	register_emitter(op_ia32_Or, bemit_or);
	register_emitter(op_ia32_OrMem, bemit_ormem);
	register_emitter(op_ia32_OrMem8Bit, bemit_ormem8bit);
	register_emitter(op_ia32_Pop, bemit_pop);
	register_emitter(op_ia32_PopEbp, bemit_pop);
	register_emitter(op_ia32_PopMem, bemit_popmem);
	register_emitter(op_ia32_Push, bemit_push);
	register_emitter(op_ia32_RepPrefix, bemit_rep);
	register_emitter(op_ia32_Rol, bemit_rol);
	register_emitter(op_ia32_RolMem, bemit_rolmem);
	register_emitter(op_ia32_Ror, bemit_ror);
	register_emitter(op_ia32_RorMem, bemit_rormem);
	register_emitter(op_ia32_Sahf, bemit_sahf);
	register_emitter(op_ia32_Sar, bemit_sar);
	register_emitter(op_ia32_SarMem, bemit_sarmem);
	register_emitter(op_ia32_Sbb, bemit_sbb);
	register_emitter(op_ia32_Setcc, bemit_setcc);
	register_emitter(op_ia32_Shl, bemit_shl);
	register_emitter(op_ia32_ShlD, bemit_shld);
	register_emitter(op_ia32_ShlMem, bemit_shlmem);
	register_emitter(op_ia32_Shr, bemit_shr);
	register_emitter(op_ia32_ShrD, bemit_shrd);
	register_emitter(op_ia32_ShrMem, bemit_shrmem);
	register_emitter(op_ia32_Stc, bemit_stc);
	register_emitter(op_ia32_Store, bemit_store);
	register_emitter(op_ia32_Store8Bit, bemit_store);
	register_emitter(op_ia32_Sub, bemit_sub);
	register_emitter(op_ia32_SubMem, bemit_submem);
	register_emitter(op_ia32_SubMem8Bit, bemit_submem8bit);
	register_emitter(op_ia32_SubSP, bemit_subsp);
	register_emitter(op_ia32_SwitchJmp, bemit_switchjmp);
	register_emitter(op_ia32_Test, bemit_test);
	register_emitter(op_ia32_Test8Bit, bemit_test8bit);
	register_emitter(op_ia32_Xor, bemit_xor);
	register_emitter(op_ia32_Xor0, bemit_xor0);
	register_emitter(op_ia32_XorMem, bemit_xormem);
	register_emitter(op_ia32_XorMem8Bit, bemit_xormem8bit);
	register_emitter(op_ia32_fabs, bemit_fabs);
	register_emitter(op_ia32_fadd, bemit_fadd);
	register_emitter(op_ia32_faddp, bemit_faddp);
	register_emitter(op_ia32_fchs, bemit_fchs);
	register_emitter(op_ia32_fdiv, bemit_fdiv);
	register_emitter(op_ia32_fdivp, bemit_fdivp);
	register_emitter(op_ia32_fdivr, bemit_fdivr);
	register_emitter(op_ia32_fdivrp, bemit_fdivrp);
	register_emitter(op_ia32_fild, bemit_fild);
	register_emitter(op_ia32_fist, bemit_fist);
	register_emitter(op_ia32_fistp, bemit_fistp);
	register_emitter(op_ia32_fld, bemit_fld);
	register_emitter(op_ia32_fld1, bemit_fld1);
	register_emitter(op_ia32_fldz, bemit_fldz);
	register_emitter(op_ia32_fmul, bemit_fmul);
	register_emitter(op_ia32_fmulp, bemit_fmulp);
	register_emitter(op_ia32_fpop, bemit_fpop);
	register_emitter(op_ia32_fpush, bemit_fpush);
	register_emitter(op_ia32_fpushCopy, bemit_fpushcopy);
	register_emitter(op_ia32_fst, bemit_fst);
	register_emitter(op_ia32_fstp, bemit_fstp);
	register_emitter(op_ia32_fsub, bemit_fsub);
	register_emitter(op_ia32_fsubp, bemit_fsubp);
	register_emitter(op_ia32_fsubr, bemit_fsubr);
	register_emitter(op_ia32_fsubrp, bemit_fsubrp);
	register_emitter(op_ia32_fxch, bemit_fxch);

	/* ignore the following nodes */
	register_emitter(op_ia32_ProduceVal, emit_Nothing);
	register_emitter(op_be_Barrier, emit_Nothing);
	register_emitter(op_be_Keep, emit_Nothing);
	register_emitter(op_be_Start, emit_Nothing);
	register_emitter(op_Phi, emit_Nothing);
	register_emitter(op_Start, emit_Nothing);
/** Emits machine code for one block: header (label/alignment) then all
 *  scheduled nodes in order. */
static void gen_binary_block(ir_node *block)
	ia32_emit_block_header(block);

	/* emit the contents of the block */
	sched_foreach(block, node) {
		ia32_emit_node(node);
/**
 * Emits the complete machine code for a routine (binary emitter entry
 * point): registers the binary emitters, emits the function prolog,
 * links each block to its schedule predecessor (used for fallthrough
 * detection), emits all blocks in schedule order, then the epilog.
 */
void ia32_gen_binary_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
	ir_entity *entity = get_irg_entity(irg);

	ia32_register_binary_emitters();

	be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);

	/* we use links to point to target blocks */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);

	/* initialize next block links */
	n = ARR_LEN(cg->blk_sched);
	for (i = 0; i < n; ++i) {
		ir_node *block = cg->blk_sched[i];
		ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;

		set_irn_link(block, prev);

	for (i = 0; i < n; ++i) {
		ir_node *block = cg->blk_sched[i];
		gen_binary_block(block);

	be_gas_emit_function_epilog(entity);
	be_dbg_method_end();

	be_emit_write_line();

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
4349 void ia32_init_emitter(void)
4351 lc_opt_entry_t *be_grp;
4352 lc_opt_entry_t *ia32_grp;
4354 be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
4355 ia32_grp = lc_opt_get_grp(be_grp, "ia32");
4357 lc_opt_add_table(ia32_grp, ia32_emitter_options);
4361 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.emitter");