2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the ia32 node emitter.
23 * @author Christian Wuerdig, Matthias Braun
39 #include "iredges_t.h"
42 #include "raw_bitset.h"
45 #include "../besched_t.h"
46 #include "../benode_t.h"
48 #include "../be_dbgout.h"
49 #include "../beemitter.h"
50 #include "../begnuas.h"
51 #include "../beirg_t.h"
52 #include "../be_dbgout.h"
54 #include "ia32_emitter.h"
55 #include "gen_ia32_emitter.h"
56 #include "gen_ia32_regalloc_if.h"
57 #include "ia32_nodes_attr.h"
58 #include "ia32_new_nodes.h"
59 #include "ia32_map_regs.h"
60 #include "ia32_architecture.h"
61 #include "bearch_ia32_t.h"
63 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Prefix for locally generated (non-exported) basic-block labels. */
65 #define BLOCK_PREFIX ".L"
/* Size of the scratch buffers used with snprintf in this file. */
67 #define SNPRINTF_BUF_LEN 128
/* Backend state shared by all emitter routines below; set up elsewhere
 * (initialization code not visible in this listing). */
69 static const ia32_isa_t *isa;
70 static ia32_code_gen_t *cg;
/* Label of the PIC base — emitted after symbols when PIC is enabled. */
71 static char pic_base_label[128];
72 static ir_label_t exc_label_id;
/* Non-zero: annotate spill/reload instructions in the assembler output. */
73 static int mark_spill_reload = 0;
76 /** Return the next block in Block schedule */
77 static ir_node *get_prev_block_sched(const ir_node *block)
/* The block schedule is threaded through the irn link field.
 * NOTE(review): the comment says "next" but the name says "prev" —
 * presumably the link points at the schedule predecessor; confirm. */
79 	return get_irn_link(block);
82 /** Checks if the current block is a fall-through target. */
83 static int is_fallthrough(const ir_node *cfgpred)
/* Only a Proj of a jump can fall through; return paths are elided here. */
87 	if (!is_Proj(cfgpred))
89 	pred = get_Proj_pred(cfgpred);
/* SwitchJmp targets are reached via the jump table and always need labels. */
90 	if (is_ia32_SwitchJmp(pred))
97 * returns non-zero if the given block needs a label
98 * because of being a jump-target (and not a fall-through)
100 static int block_needs_label(const ir_node *block)
103 	int n_cfgpreds = get_Block_n_cfgpreds(block);
/* No predecessors / exactly one predecessor are special-cased; the
 * multi-predecessor case (always needs a label) is elided in this listing. */
105 	if (n_cfgpreds == 0) {
107 	} else if (n_cfgpreds == 1) {
108 		ir_node *cfgpred = get_Block_cfgpred(block, 0);
109 		ir_node *cfgpred_block = get_nodes_block(cfgpred);
/* A single predecessor that immediately precedes us in the block schedule
 * and that can fall through makes the label unnecessary. */
111 		if (get_prev_block_sched(block) == cfgpred_block
112 		    && is_fallthrough(cfgpred)) {
121 * Returns the register at in position pos.
123 static const arch_register_t *get_in_reg(const ir_node *irn, int pos)
126 	const arch_register_t *reg = NULL;
128 	assert(get_irn_arity(irn) > pos && "Invalid IN position");
130 	/* The out register of the operator at position pos is the
131 	   in register we need. */
132 	op = get_irn_n(irn, pos);
134 	reg = arch_get_irn_register(op);
136 	assert(reg && "no in register found");
/* Emitting the NoReg pseudo register would produce invalid assembly. */
138 	if (reg == &ia32_gp_regs[REG_GP_NOREG])
139 		panic("trying to emit noreg for %+F input %d", irn, pos);
141 	/* in case of unknown register: just return a valid register */
142 	if (reg == &ia32_gp_regs[REG_GP_UKNWN]) {
143 		const arch_register_req_t *req = arch_get_register_req(irn, pos);
145 		if (arch_register_req_is(req, limited)) {
146 			/* in case of limited requirements: get the first allowed register */
147 			unsigned idx = rbitset_next(req->limited, 0, 1);
148 			reg = arch_register_for_index(req->cls, idx);
150 			/* otherwise get first register in class */
151 			reg = arch_register_for_index(req->cls, 0);
159 * Returns the register at out position pos.
161 static const arch_register_t *get_out_reg(const ir_node *irn, int pos)
164 	const arch_register_t *reg = NULL;
166 	/* 1st case: irn is not of mode_T, so it has only                 */
167 	/* one OUT register -> good                                       */
168 	/* 2nd case: irn is of mode_T -> collect all Projs and ask the    */
169 	/* Proj with the corresponding projnum for the register           */
171 	if (get_irn_mode(irn) != mode_T) {
173 		reg = arch_get_irn_register(irn);
/* ia32 nodes store their out registers directly in the attributes. */
174 	} else if (is_ia32_irn(irn)) {
175 		reg = arch_irn_get_register(irn, pos);
177 		const ir_edge_t *edge;
/* Generic mode_T node: search the Proj with the requested proj number. */
179 		foreach_out_edge(irn, edge) {
180 			proj = get_edge_src_irn(edge);
181 			assert(is_Proj(proj) && "non-Proj from mode_T node");
182 			if (get_Proj_proj(proj) == pos) {
183 				reg = arch_get_irn_register(proj);
189 	assert(reg && "no out register found");
194 * Add a number to a prefix. This number will not be used a second time.
196 static char *get_unique_label(char *buf, size_t buflen, const char *prefix)
/* Monotonic counter: every call yields a fresh label suffix. */
198 	static unsigned long id = 0;
199 	snprintf(buf, buflen, "%s%lu", prefix, ++id);
203 /*************************************************************
205 * (_) | | / _| | | | |
206 * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
207 * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
208 * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
209 * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
212 *************************************************************/
215 * Emit the name of the 8bit low register
217 static void emit_8bit_register(const arch_register_t *reg)
219 	const char *reg_name = arch_register_get_name(reg);
/* NOTE(review): only reg_name[1] is emitted here; the surrounding
 * characters ('%' prefix, 'l'/'h' suffix) are on elided lines. */
222 	be_emit_char(reg_name[1]);
227 * Emit the name of the 8bit high register
229 static void emit_8bit_register_high(const arch_register_t *reg)
231 	const char *reg_name = arch_register_get_name(reg);
234 	be_emit_char(reg_name[1]);
/* Emit the name of the 16bit register (looked up via the isa mapping). */
238 static void emit_16bit_register(const arch_register_t *reg)
240 	const char *reg_name = ia32_get_mapped_reg_name(isa->regs_16bit, reg);
243 	be_emit_string(reg_name);
247 * emit a register, possible shortened by a mode
249 * @param reg   the register
250 * @param mode  the mode of the register or NULL for full register
252 static void emit_register(const arch_register_t *reg, const ir_mode *mode)
254 	const char *reg_name;
/* With a mode: dispatch on its bit size to the narrow-register emitters. */
257 		int size = get_mode_size_bits(mode);
259 		case  8: emit_8bit_register(reg);  return;
260 		case 16: emit_16bit_register(reg); return;
/* Anything else must be a float mode or a full 32-bit register. */
262 		assert(mode_is_float(mode) || size == 32);
265 	reg_name = arch_register_get_name(reg);
268 	be_emit_string(reg_name);
/* Emit the full-width register of input operand pos of node. */
271 void ia32_emit_source_register(const ir_node *node, int pos)
273 	const arch_register_t *reg = get_in_reg(node, pos);
275 	emit_register(reg, NULL);
/* Emit the linker identifier of an entity, plus TLS / PIC decoration.
 * @param no_pic_adjust  non-zero suppresses the PIC base-label suffix */
278 static void ia32_emit_entity(ir_entity *entity, int no_pic_adjust)
282 	set_entity_backend_marked(entity, 1);
283 	id = get_entity_ld_ident(entity);
/* Thread-local entities need an @INDNTPOFF / @NTPOFF relocation suffix. */
286 	if (get_entity_owner(entity) == get_tls_type()) {
287 		if (get_entity_visibility(entity) == visibility_external_allocated) {
288 			be_emit_cstring("@INDNTPOFF");
290 			be_emit_cstring("@NTPOFF");
/* Position independent code: reference the symbol relative to the PIC base. */
294 	if (do_pic && !no_pic_adjust) {
296 		be_emit_string(pic_base_label);
/* Emit an Immediate node's value without the AT&T '$' prefix:
 * symconst entity (if any) followed by a non-zero numeric offset. */
300 static void emit_ia32_Immediate_no_prefix(const ir_node *node)
302 	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
304 	if (attr->symconst != NULL) {
307 		ia32_emit_entity(attr->symconst, attr->no_pic_adjust);
/* Pure number, or a symconst with a non-zero addend. */
309 	if (attr->symconst == NULL || attr->offset != 0) {
310 		if (attr->symconst != NULL) {
/* Addend after a symbol is always emitted with an explicit sign. */
311 			be_emit_irprintf("%+d", attr->offset);
313 			be_emit_irprintf("0x%X", attr->offset);
/* Emit an Immediate node including the '$' prefix (prefix line elided). */
318 static void emit_ia32_Immediate(const ir_node *node)
321 	emit_ia32_Immediate_no_prefix(node);
/* Emit input pos as 8-bit register, or as immediate if it is one. */
324 void ia32_emit_8bit_source_register_or_immediate(const ir_node *node, int pos)
326 	const arch_register_t *reg;
327 	const ir_node        *in = get_irn_n(node, pos);
328 	if (is_ia32_Immediate(in)) {
329 		emit_ia32_Immediate(in);
333 	reg = get_in_reg(node, pos);
334 	emit_8bit_register(reg);
/* Emit the high 8-bit part (ah/bh/ch/dh) of input pos. */
337 void ia32_emit_8bit_high_source_register(const ir_node *node, int pos)
339 	const arch_register_t *reg = get_in_reg(node, pos);
340 	emit_8bit_register_high(reg);
/* Emit input pos as 16-bit register, or as immediate if it is one. */
343 void ia32_emit_16bit_source_register_or_immediate(const ir_node *node, int pos)
345 	const arch_register_t *reg;
346 	const ir_node        *in = get_irn_n(node, pos);
347 	if (is_ia32_Immediate(in)) {
348 		emit_ia32_Immediate(in);
352 	reg = get_in_reg(node, pos);
353 	emit_16bit_register(reg);
/* Emit the full-width destination register at out position pos. */
356 void ia32_emit_dest_register(const ir_node *node, int pos)
358 	const arch_register_t *reg = get_out_reg(node, pos);
360 	emit_register(reg, NULL);
/* Emit the destination register sized by the node's load/store mode. */
363 void ia32_emit_dest_register_size(const ir_node *node, int pos)
365 	const arch_register_t *reg = get_out_reg(node, pos);
367 	emit_register(reg, get_ia32_ls_mode(node));
/* Emit the destination register as its 8-bit variant. */
370 void ia32_emit_8bit_dest_register(const ir_node *node, int pos)
372 	const arch_register_t *reg = get_out_reg(node, pos);
374 	emit_register(reg, mode_Bu);
/* Emit the x87 stack register recorded at attribute slot pos. */
377 void ia32_emit_x87_register(const ir_node *node, int pos)
379 	const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
383 	be_emit_string(attr->x87[pos]->name);
/* Emit the gas operand-size suffix (b/w/l/ll) for an integer mode. */
386 static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
388 	assert(mode_is_int(mode) || mode_is_reference(mode));
389 	switch (get_mode_size_bits(mode)) {
390 		case 8:  be_emit_char('b');     return;
391 		case 16: be_emit_char('w');     return;
392 		case 32: be_emit_char('l');     return;
393 		/* gas docu says q is the suffix but gcc, objdump and icc use ll
395 		case 64: be_emit_cstring("ll"); return;
397 	panic("Can't output mode_suffix for %+F", mode);
/* Emit the size suffix derived from the node's load/store mode. */
400 void ia32_emit_mode_suffix(const ir_node *node)
402 	ir_mode *mode = get_ia32_ls_mode(node);
406 	ia32_emit_mode_suffix_mode(mode);
/* Emit the x87 instruction suffix; only needed for memory operands. */
409 void ia32_emit_x87_mode_suffix(const ir_node *node)
413 	/* we only need to emit the mode on address mode */
414 	if (get_ia32_op_type(node) == ia32_Normal)
417 	mode = get_ia32_ls_mode(node);
418 	assert(mode != NULL);
/* Float memory operands: s = single, l = double, t = extended (80/96 bit). */
420 	if (mode_is_float(mode)) {
421 		switch (get_mode_size_bits(mode)) {
422 			case 32: be_emit_char('s'); return;
423 			case 64: be_emit_char('l'); return;
425 			case 96: be_emit_char('t'); return;
/* Integer memory operands for fild/fist-style instructions. */
428 		assert(mode_is_int(mode));
429 		switch (get_mode_size_bits(mode)) {
430 			case 16: be_emit_char('s');     return;
431 			case 32: be_emit_char('l');     return;
432 			/* gas docu says q is the suffix but gcc, objdump and icc use ll
434 			case 64: be_emit_cstring("ll"); return;
437 	panic("Can't output mode_suffix for %+F", mode);
/* Return the SSE suffix character for a float mode ('s'/'d' cases elided). */
440 static char get_xmm_mode_suffix(ir_mode *mode)
442 	assert(mode_is_float(mode));
443 	switch(get_mode_size_bits(mode)) {
446 	default: panic("Invalid XMM mode");
/* Emit the SSE suffix for the node's load/store mode. */
450 void ia32_emit_xmm_mode_suffix(const ir_node *node)
452 	ir_mode *mode = get_ia32_ls_mode(node);
453 	assert(mode != NULL);
455 	be_emit_char(get_xmm_mode_suffix(mode));
/* NOTE(review): identical body to ia32_emit_xmm_mode_suffix in this
 * listing — the distinguishing 's' prefix emission appears elided. */
458 void ia32_emit_xmm_mode_suffix_s(const ir_node *node)
460 	ir_mode *mode = get_ia32_ls_mode(node);
461 	assert(mode != NULL);
462 	be_emit_char(get_xmm_mode_suffix(mode));
/* Emit the movsx/movzx style extension suffix: sign char + size suffix. */
465 void ia32_emit_extend_suffix(const ir_node *node)
467 	ir_mode *mode = get_ia32_ls_mode(node);
468 	if (get_mode_size_bits(mode) == 32)
470 	be_emit_char(mode_is_signed(mode) ? 's' : 'z');
471 	ia32_emit_mode_suffix_mode(mode);
/* Emit input pos as immediate if it is one, otherwise as a register
 * sized by the node's load/store mode. */
474 void ia32_emit_source_register_or_immediate(const ir_node *node, int pos)
476 	ir_node *in = get_irn_n(node, pos);
477 	if (is_ia32_Immediate(in)) {
478 		emit_ia32_Immediate(in);
480 		const ir_mode         *mode = get_ia32_ls_mode(node);
481 		const arch_register_t *reg  = get_in_reg(node, pos);
482 		emit_register(reg, mode);
487 * Returns the target block for a control flow node.
489 static ir_node *get_cfop_target_block(const ir_node *irn)
491 	assert(get_irn_mode(irn) == mode_X);
/* The target block was stored in the link field by earlier preparation. */
492 	return get_irn_link(irn);
496 * Emits a block label for the given block.
498 static void ia32_emit_block_name(const ir_node *block)
/* User-visible (debug) labels use the gas block-label prefix; all other
 * blocks get a compiler-local BLOCK_PREFIX label from the node number. */
500 	if (has_Block_label(block)) {
501 		be_emit_string(be_gas_block_label_prefix());
502 		be_emit_irprintf("%lu", get_Block_label(block));
504 		be_emit_cstring(BLOCK_PREFIX);
505 		be_emit_irprintf("%ld", get_irn_node_nr(block));
510 * Emits the target label for a control flow node.
512 static void ia32_emit_cfop_target(const ir_node *node)
514 	ir_node *block = get_cfop_target_block(node);
515 	ia32_emit_block_name(block);
519 * positive conditions for signed compares
/* Indexed by the low 3 bits of a pn_Cmp value; entries elided here. */
521 static const char *const cmp2condition_s[] = {
522 	NULL, /* always false */
529 	NULL  /* always true */
533 * positive conditions for unsigned compares
535 static const char *const cmp2condition_u[] = {
536 	NULL, /* always false */
543 	NULL  /* always true */
547 * Emit the suffix for a compare instruction.
549 static void ia32_emit_cmp_suffix(int pnc)
/* Parity is special-cased; float and unsigned compares share a table. */
553 	if (pnc == ia32_pn_Cmp_parity) {
557 	if (pnc & ia32_pn_Cmp_float || pnc & ia32_pn_Cmp_unsigned) {
558 		str = cmp2condition_u[pnc & 7];
560 		str = cmp2condition_s[pnc & 7];
/* Modifier flags consumed by ia32_emitf's '#' and '*' format modifiers. */
566 typedef enum ia32_emit_mod_t {
567 	EMIT_RESPECT_LS   = 1U << 0,
568 	EMIT_ALTERNATE_AM = 1U << 1,
573 * Emits address mode.
575 void ia32_emit_am(const ir_node *node)
577 	ir_entity *ent       = get_ia32_am_sc(node);
578 	int        offs      = get_ia32_am_offs_int(node);
579 	ir_node   *base      = get_irn_n(node, n_ia32_base);
580 	int        has_base  = !is_ia32_NoReg_GP(base);
581 	ir_node   *index     = get_irn_n(node, n_ia32_index);
582 	int        has_index = !is_ia32_NoReg_GP(index);
584 	/* just to be sure... */
585 	assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);
/* Symbolic displacement: entity name, possibly negated. */
589 		const ia32_attr_t *attr = get_ia32_attr_const(node);
590 		if (is_ia32_am_sc_sign(node))
592 		ia32_emit_entity(ent, attr->data.am_sc_no_pic_adjust);
595 	/* also handle special case if nothing is set */
596 	if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
/* After an entity the offset needs an explicit sign (it is an addend). */
598 			be_emit_irprintf("%+d", offs);
600 			be_emit_irprintf("%d", offs);
/* Register part: "(base,index,scale)" in AT&T syntax. */
604 	if (has_base || has_index) {
609 			const arch_register_t *reg = get_in_reg(node, n_ia32_base);
610 			emit_register(reg, NULL);
613 		/* emit index + scale */
615 			const arch_register_t *reg = get_in_reg(node, n_ia32_index);
618 			emit_register(reg, NULL);
620 			scale = get_ia32_am_scale(node);
622 				be_emit_irprintf(",%d", 1 << scale);
630 * fmt parameter output
631 * ---- ---------------------- ---------------------------------------------
633 * %AM <node> address mode of the node
634 * %AR const arch_register_t* address mode of the node or register
635 * %ASx <node> address mode of the node or source register x
636 * %Dx <node> destination register x
637 * %I <node> immediate of the node
638 * %L <node> control flow target of the node
639 * %M <node> mode suffix of the node
640 * %P int condition code
641 * %R const arch_register_t* register
642 * %Sx <node> source register x
643 * %s const char* string
644 * %u unsigned int unsigned int
645 * %d signed int signed int
648 * # modifier for %ASx, %D and %S uses ls mode of node to alter register width
649 * * modifier does not prefix immediates with $, but AM with *
650 * l modifier for %lu and %ld
/* printf-style emitter for a single assembler instruction; the format
 * conventions (%S, %D, %AM, modifiers #, *, l, ...) are documented in the
 * comment block above. Large parts of the dispatch switch are elided in
 * this listing. */
652 static void ia32_emitf(const ir_node *node, const char *fmt, ...)
658 		const char *start = fmt;
659 		ia32_emit_mod_t mod = 0;
/* Copy plain text up to the next conversion, newline or end of format. */
661 		while (*fmt != '%' && *fmt != '\n'  && *fmt != '\0')
664 			be_emit_string_len(start, fmt - start);
668 			be_emit_finish_line_gas(node);
/* Parse modifier characters preceding the conversion letter. */
680 				mod |= EMIT_ALTERNATE_AM;
685 				mod |= EMIT_RESPECT_LS;
702 			if (mod & EMIT_ALTERNATE_AM)
/* %AR: address mode of the node, or a register argument. */
709 				const arch_register_t *reg = va_arg(ap, const arch_register_t*);
710 				if (mod & EMIT_ALTERNATE_AM)
712 				if (get_ia32_op_type(node) == ia32_AddrModeS) {
715 					emit_register(reg, NULL);
/* %ASx: address mode of the node, or source register x. */
721 				if (get_ia32_op_type(node) == ia32_AddrModeS) {
722 					if (mod & EMIT_ALTERNATE_AM)
727 					assert(get_ia32_op_type(node) == ia32_Normal);
732 			default: goto unknown;
/* %Dx: destination register x, optionally narrowed by the ls mode. */
739 			const arch_register_t *reg;
741 			if (*fmt < '0' || '9' <= *fmt)
745 			reg = get_out_reg(node, pos);
746 			emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);
/* %I: the node's immediate; '$' prefix unless the '*' modifier was given. */
751 			if (!(mod & EMIT_ALTERNATE_AM))
753 			emit_ia32_Immediate_no_prefix(node);
/* %L: control-flow target label of the node. */
757 			ia32_emit_cfop_target(node);
/* %M: size suffix from the node's ls mode. */
761 			ia32_emit_mode_suffix_mode(get_ia32_ls_mode(node));
/* %P: condition-code suffix from an int argument. */
766 			int pnc = va_arg(ap, int);
767 			ia32_emit_cmp_suffix(pnc);
/* %R: a register passed directly as argument. */
772 			const arch_register_t *reg = va_arg(ap, const arch_register_t*);
773 			emit_register(reg, NULL);
/* %Sx: source operand x — immediate or register. */
782 			if (*fmt < '0' || '9' <= *fmt)
786 			in = get_irn_n(node, pos);
787 			if (is_ia32_Immediate(in)) {
788 				if (!(mod & EMIT_ALTERNATE_AM))
790 				emit_ia32_Immediate_no_prefix(in);
792 				const arch_register_t *reg;
794 				if (mod & EMIT_ALTERNATE_AM)
796 				reg = get_in_reg(node, pos);
797 				emit_register(reg, mod & EMIT_RESPECT_LS ? get_ia32_ls_mode(node) : NULL);
/* %s, %u/%lu, %d/%ld: plain string and integer conversions. */
803 			const char *str = va_arg(ap, const char*);
809 			if (mod & EMIT_LONG) {
810 				unsigned long num = va_arg(ap, unsigned long);
811 				be_emit_irprintf("%lu", num);
813 				unsigned num = va_arg(ap, unsigned);
814 				be_emit_irprintf("%u", num);
819 			if (mod & EMIT_LONG) {
820 				long num = va_arg(ap, long);
821 				be_emit_irprintf("%ld", num);
823 				int num = va_arg(ap, int);
824 				be_emit_irprintf("%d", num);
830 			panic("unknown format conversion in ia32_emitf()");
838 * Emits registers and/or address mode of a binary operation.
840 void ia32_emit_binop(const ir_node *node)
/* An immediate right operand forces the "imm, reg-or-AM" operand order. */
842 	if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
843 		ia32_emitf(node, "%#S4, %#AS3");
845 		ia32_emitf(node, "%#AS4, %#S3");
850 * Emits registers and/or address mode of a binary operation.
852 void ia32_emit_x87_binop(const ir_node *node)
/* Operand order depends on which x87 stack slot holds the result. */
854 	switch(get_ia32_op_type(node)) {
857 			const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
858 			const arch_register_t *in1      = x87_attr->x87[0];
859 			const arch_register_t *in       = x87_attr->x87[1];
860 			const arch_register_t *out      = x87_attr->x87[2];
864 			} else if (out == in) {
869 			be_emit_string(arch_register_get_name(in));
870 			be_emit_cstring(", %");
871 			be_emit_string(arch_register_get_name(out));
879 			assert(0 && "unsupported op type");
884 * Emits registers and/or address mode of a unary operation.
886 void ia32_emit_unop(const ir_node *node, int pos)
/* The format string selection for the given pos is elided in this listing. */
890 	ia32_emitf(node, fmt);
/* Emit an integer multiply; uses the 3-operand imul form when the result
 * register differs from the left input (or the left input is NoReg). */
893 static void emit_ia32_IMul(const ir_node *node)
895 	ir_node               *left    = get_irn_n(node, n_ia32_IMul_left);
896 	const arch_register_t *out_reg = get_out_reg(node, pn_ia32_IMul_res);
898 	/* do we need the 3-address form? */
899 	if (is_ia32_NoReg_GP(left) ||
900 			get_in_reg(node, n_ia32_IMul_left) != out_reg) {
901 		ia32_emitf(node, "\timul%M %#S4, %#AS3, %#D0\n");
903 		ia32_emitf(node, "\timul%M %#AS4, %#S3\n");
908 * walks up a tree of copies/perms/spills/reloads to find the original value
909 * that is moved around
911 static ir_node *find_original_value(ir_node *node)
/* Visited marker breaks cycles (e.g. through Phis); requires the caller
 * to have bumped the graph's visited counter beforehand. */
913 	if (irn_visited(node))
916 	mark_irn_visited(node);
917 	if (be_is_Copy(node)) {
918 		return find_original_value(be_get_Copy_op(node));
919 	} else if (be_is_CopyKeep(node)) {
920 		return find_original_value(be_get_CopyKeep_op(node));
921 	} else if (is_Proj(node)) {
922 		ir_node *pred = get_Proj_pred(node);
923 		if (be_is_Perm(pred)) {
924 			return find_original_value(get_irn_n(pred, get_Proj_proj(node)));
925 		} else if (be_is_MemPerm(pred)) {
/* MemPerm inputs are shifted by one relative to the Proj numbers. */
926 			return find_original_value(get_irn_n(pred, get_Proj_proj(node) + 1));
927 		} else if (is_ia32_Load(pred)) {
/* A reload: continue the search through the memory dependency. */
928 			return find_original_value(get_irn_n(pred, n_ia32_Load_mem));
932 	} else if (is_ia32_Store(node)) {
933 		return find_original_value(get_irn_n(node, n_ia32_Store_val));
934 	} else if (is_Phi(node)) {
/* Try every Phi input until one yields an original value. */
936 		arity = get_irn_arity(node);
937 		for (i = 0; i < arity; ++i) {
938 			ir_node *in  = get_irn_n(node, i);
939 			ir_node *res = find_original_value(in);
/* Compute the effective condition code for a node reading flags at input
 * flags_pos: folds in float/unsigned compare classification and mirrors
 * the condition when the compare's inputs were permuted. */
950 static int determine_final_pnc(const ir_node *node, int flags_pos,
953 	ir_node           *flags = get_irn_n(node, flags_pos);
954 	const ia32_attr_t *flags_attr;
955 	flags = skip_Proj(flags);
/* Flags produced via sahf: trace back to the Fucom/Ftst that made them. */
957 	if (is_ia32_Sahf(flags)) {
958 		ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
959 		if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
960 				|| is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
961 			inc_irg_visited(current_ir_graph);
962 			cmp = find_original_value(cmp);
964 			assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
965 			       || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));
968 		flags_attr = get_ia32_attr_const(cmp);
969 		if (flags_attr->data.ins_permuted)
970 			pnc = get_mirrored_pnc(pnc);
971 		pnc |= ia32_pn_Cmp_float;
/* SSE/x87 compares that set eflags directly. */
972 	} else if (is_ia32_Ucomi(flags) || is_ia32_Fucomi(flags)
973 			|| is_ia32_Fucompi(flags)) {
974 		flags_attr = get_ia32_attr_const(flags);
976 		if (flags_attr->data.ins_permuted)
977 			pnc = get_mirrored_pnc(pnc);
978 		pnc |= ia32_pn_Cmp_float;
/* Ordinary integer compare: honour permutation and unsignedness. */
980 		flags_attr = get_ia32_attr_const(flags);
982 		if (flags_attr->data.ins_permuted)
983 			pnc = get_mirrored_pnc(pnc);
984 		if (flags_attr->data.cmp_unsigned)
985 			pnc |= ia32_pn_Cmp_unsigned;
/* Negate a condition code, choosing float or integer negation rules. */
991 static pn_Cmp ia32_get_negated_pnc(pn_Cmp pnc)
993 	ir_mode *mode = pnc & ia32_pn_Cmp_float ? mode_F : mode_Iu;
994 	return get_negated_pnc(pnc, mode);
/* Emit the condition-code suffix for node, resolving the final pnc from
 * the flags input and negating it if the node's inputs were permuted. */
997 void ia32_emit_cmp_suffix_node(const ir_node *node,
1000 	const ia32_attr_t *attr = get_ia32_attr_const(node);
1002 	pn_Cmp pnc = get_ia32_condcode(node);
1004 	pnc = determine_final_pnc(node, flags_pos, pnc);
1005 	if (attr->data.ins_permuted)
1006 		pnc = ia32_get_negated_pnc(pnc);
1008 	ia32_emit_cmp_suffix(pnc);
1012 * Emits an exception label for a given node.
1014 static void ia32_emit_exc_label(const ir_node *node)
1016 	be_emit_string(be_gas_insn_label_prefix());
1017 	be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
1021 * Returns the Proj with projection number proj and NOT mode_M
1023 static ir_node *get_proj(const ir_node *node, long proj)
1025 	const ir_edge_t *edge;
1028 	assert(get_irn_mode(node) == mode_T && "expected mode_T node");
1030 	foreach_out_edge(node, edge) {
1031 		src = get_edge_src_irn(edge);
1033 		assert(is_Proj(src) && "Proj expected");
/* Skip memory Projs — only data/control Projs are interesting here. */
1034 		if (get_irn_mode(src) == mode_M)
1037 		if (get_Proj_proj(src) == proj)
/* True iff node's jump target directly follows node's block in the
 * block schedule, i.e. the jump could be omitted. */
1043 static int can_be_fallthrough(const ir_node *node)
1045 	ir_node *target_block = get_cfop_target_block(node);
1046 	ir_node *block        = get_nodes_block(node);
1047 	return get_prev_block_sched(target_block) == block;
1051 * Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
1053 static void emit_ia32_Jcc(const ir_node *node)
1055 	int            need_parity_label = 0;
1056 	const ir_node *proj_true;
1057 	const ir_node *proj_false;
1058 	const ir_node *block;
1059 	pn_Cmp         pnc = get_ia32_condcode(node);
1061 	pnc = determine_final_pnc(node, 0, pnc);
1063 	/* get both Projs */
1064 	proj_true = get_proj(node, pn_ia32_Jcc_true);
1065 	assert(proj_true && "Jcc without true Proj");
1067 	proj_false = get_proj(node, pn_ia32_Jcc_false);
1068 	assert(proj_false && "Jcc without false Proj");
1070 	block = get_nodes_block(node);
/* If the true target is the fall-through block, swap the projs and
 * negate the condition so only one jump is needed. */
1072 	if (can_be_fallthrough(proj_true)) {
1073 		/* exchange both proj's so the second one can be omitted */
1074 		const ir_node *t = proj_true;
1076 		proj_true  = proj_false;
1078 		pnc        = ia32_get_negated_pnc(pnc);
1081 	if (pnc & ia32_pn_Cmp_float) {
1082 		/* Some floating point comparisons require a test of the parity flag,
1083 		 * which indicates that the result is unordered */
1086 			ia32_emitf(proj_true, "\tjp %L\n");
1091 			ia32_emitf(proj_true, "\tjnp %L\n");
1097 			/* we need a local label if the false proj is a fallthrough
1098 			 * as the falseblock might have no label emitted then */
1099 			if (can_be_fallthrough(proj_false)) {
1100 				need_parity_label = 1;
/* "1f" is a gas local forward label, resolved at the "1:" below. */
1101 				ia32_emitf(proj_false, "\tjp 1f\n");
1103 				ia32_emitf(proj_false, "\tjp %L\n");
1110 			ia32_emitf(proj_true, "\tjp %L\n");
/* Main conditional jump with the computed condition-code suffix. */
1118 	ia32_emitf(proj_true, "\tj%P %L\n", pnc);
1121 	if (need_parity_label) {
1122 		ia32_emitf(NULL, "1:\n");
1125 	/* the second Proj might be a fallthrough */
1126 	if (can_be_fallthrough(proj_false)) {
1127 		ia32_emitf(proj_false, "\t/* fallthrough to %L */\n");
1129 		ia32_emitf(proj_false, "\tjmp %L\n");
/* Emit a conditional move. The out register must equal one of the inputs
 * (2-address form); if it matches the true input the operands and the
 * condition are flipped so a single cmovcc suffices. */
1133 static void emit_ia32_CMov(const ir_node *node)
1135 	const ia32_attr_t     *attr         = get_ia32_attr_const(node);
1136 	int                    ins_permuted = attr->data.ins_permuted;
1137 	const arch_register_t *out          = arch_irn_get_register(node, pn_ia32_res);
1138 	pn_Cmp                 pnc          = get_ia32_condcode(node);
1139 	const arch_register_t *in_true;
1140 	const arch_register_t *in_false;
1142 	pnc = determine_final_pnc(node, n_ia32_CMov_eflags, pnc);
1144 	in_true  = arch_get_irn_register(get_irn_n(node, n_ia32_CMov_val_true));
1145 	in_false = arch_get_irn_register(get_irn_n(node, n_ia32_CMov_val_false));
1147 	/* should be same constraint fullfilled? */
1148 	if (out == in_false) {
1149 		/* yes -> nothing to do */
1150 	} else if (out == in_true) {
1151 		const arch_register_t *tmp;
1153 		assert(get_ia32_op_type(node) == ia32_Normal);
/* Swap true/false roles; the swap of the register pointers is elided. */
1155 		ins_permuted = !ins_permuted;
/* Neither input in the out register: move the false value there first. */
1162 		ia32_emitf(node, "\tmovl %R, %R\n", in_false, out);
1166 		pnc = ia32_get_negated_pnc(pnc);
1168 	/* TODO: handling of Nans isn't correct yet */
1170 	ia32_emitf(node, "\tcmov%P %#AR, %#R\n", pnc, in_true, out);
1173 /*********************************************************
1176 * ___ _ __ ___ _| |_ _ _ _ _ __ ___ _ __ ___
1177 * / _ \ '_ ` _ \| | __| | | | | | '_ ` _ \| '_ \/ __|
1178 * | __/ | | | | | | |_ | | |_| | | | | | | |_) \__ \
1179 * \___|_| |_| |_|_|\__| | |\__,_|_| |_| |_| .__/|___/
1182 *********************************************************/
1184 /* jump table entry (target and corresponding number) */
1185 typedef struct _branch_t {
1190 /* jump table for switch generation */
1191 typedef struct _jmp_tbl_t {
1192 	ir_node  *defProj;      /**< default target */
1193 	long      min_value;    /**< smallest switch case */
1194 	long      max_value;    /**< largest switch case */
1195 	long      num_branches; /**< number of jumps */
1196 	char     *label;        /**< label of the jump table */
1197 	branch_t *branches;     /**< jump array */
1201 * Compare two variables of type branch_t. Used to sort all switch cases
1203 static int ia32_cmp_branch_t(const void *a, const void *b)
1205 	branch_t *b1 = (branch_t *)a;
1206 	branch_t *b2 = (branch_t *)b;
/* qsort comparator: ascending by case value (equal treated as "less"). */
1208 	if (b1->value <= b2->value)
1215 * Emits code for a SwitchJmp (creates a jump table if
1216 * possible otherwise a cmp-jmp cascade). Port from
1219 static void emit_ia32_SwitchJmp(const ir_node *node)
1221 	unsigned long interval;
1227 	const ir_edge_t *edge;
1229 	/* fill the table structure */
1230 	tbl.label        = XMALLOCN(char, SNPRINTF_BUF_LEN);
1231 	tbl.label        = get_unique_label(tbl.label, SNPRINTF_BUF_LEN, ".TBL_");
/* One out edge is the default Proj, hence "- 1" branches. */
1233 	tbl.num_branches = get_irn_n_edges(node) - 1;
1234 	tbl.branches     = XMALLOCNZ(branch_t, tbl.num_branches);
1235 	tbl.min_value    = INT_MAX;
1236 	tbl.max_value    = INT_MIN;
1238 	default_pn = get_ia32_condcode(node);
1240 	/* go over all proj's and collect them */
1241 	foreach_out_edge(node, edge) {
1242 		proj = get_edge_src_irn(edge);
1243 		assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
1245 		pnc = get_Proj_proj(proj);
1247 		/* check for default proj */
1248 		if (pnc == default_pn) {
1249 			assert(tbl.defProj == NULL && "found two default Projs at SwitchJmp");
/* Track the case-value range for the bounds check below. */
1252 			tbl.min_value = pnc < tbl.min_value ? pnc : tbl.min_value;
1253 			tbl.max_value = pnc > tbl.max_value ? pnc : tbl.max_value;
1255 			/* create branch entry */
1256 			tbl.branches[i].target = proj;
1257 			tbl.branches[i].value  = pnc;
1262 	assert(i == tbl.num_branches);
1264 	/* sort the branches by their number */
1265 	qsort(tbl.branches, tbl.num_branches, sizeof(tbl.branches[0]), ia32_cmp_branch_t);
1267 	/* two-complement's magic make this work without overflow */
1268 	interval = tbl.max_value - tbl.min_value;
1270 	/* emit the table */
/* Unsigned compare + ja covers both below-min and above-max at once. */
1271 	ia32_emitf(node,        "\tcmpl $%u, %S0\n", interval);
1272 	ia32_emitf(tbl.defProj, "\tja %L\n");
1274 	if (tbl.num_branches > 1) {
/* Indirect jump through the table, scaled by 4 bytes per entry. */
1276 		ia32_emitf(node, "\tjmp *%s(,%S0,4)\n", tbl.label);
1278 		be_gas_emit_switch_section(GAS_SECTION_RODATA);
1279 		ia32_emitf(NULL, "\t.align 4\n");
1280 		ia32_emitf(NULL, "%s:\n", tbl.label);
1282 		last_value = tbl.branches[0].value;
1283 		for (i = 0; i != tbl.num_branches; ++i) {
/* Fill holes in the case range with the default target. */
1284 			while (last_value != tbl.branches[i].value) {
1285 				ia32_emitf(tbl.defProj, ".long %L\n");
1288 			ia32_emitf(tbl.branches[i].target, ".long %L\n");
1291 		be_gas_emit_switch_section(GAS_SECTION_TEXT);
1293 		/* one jump is enough */
1294 		ia32_emitf(tbl.branches[0].target, "\tjmp %L\n");
1304 * Emits code for a unconditional jump.
1306 static void emit_Jmp(const ir_node *node)
1310 	/* for now, the code works for scheduled and non-schedules blocks */
1311 	block = get_nodes_block(node);
1313 	/* we have a block schedule */
/* Omit the jmp entirely when the target directly follows in the schedule. */
1314 	if (can_be_fallthrough(node)) {
1315 		ia32_emitf(node, "\t/* fallthrough to %L */\n");
1317 		ia32_emitf(node, "\tjmp %L\n");
1322 * Emit an inline assembler operand.
1324 * @param node  the ia32_ASM node
1325 * @param s     points to the operand (a %c)
1327 * @return  pointer to the first char in s NOT in the current operand
1329 static const char* emit_asm_operand(const ir_node *node, const char *s)
1331 	const ia32_attr_t     *ia32_attr = get_ia32_attr_const(node);
1332 	const ia32_asm_attr_t *attr      = CONST_CAST_IA32_ATTR(ia32_asm_attr_t,
1334 	const arch_register_t *reg;
1335 	const ia32_asm_reg_t  *asm_regs = attr->register_map;
1336 	const ia32_asm_reg_t  *asm_reg;
1337 	const char            *reg_name;
1346 	/* parse modifiers */
/* A lone '%' at the end of the asm text is malformed — warn and bail. */
1349 			ir_fprintf(stderr, "Warning: asm text (%+F) ends with %%\n", node);
1374 				        "Warning: asm text (%+F) contains unknown modifier '%c' for asm op\n",
/* Parse the operand number following the modifiers. */
1381 	sscanf(s, "%d%n", &num, &p);
1383 		ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",
/* Bounds-check the operand index against the register map. */
1390 	if (num < 0 || ARR_LEN(asm_regs) <= num) {
1392 		          "Error: Custom assembler references invalid input/output (%+F)\n",
1396 	asm_reg = & asm_regs[num];
1397 	assert(asm_reg->valid);
/* Resolve the operand to an output or input register (or an immediate). */
1400 	if (asm_reg->use_input == 0) {
1401 		reg = get_out_reg(node, asm_reg->inout_pos);
1403 		ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
1405 		/* might be an immediate value */
1406 		if (is_ia32_Immediate(pred)) {
1407 			emit_ia32_Immediate(pred);
1410 		reg = get_in_reg(node, asm_reg->inout_pos);
1414 		          "Warning: no register assigned for %d asm op (%+F)\n",
1419 	if (asm_reg->memory) {
/* Size modifiers (b/h/w) select a narrower register name. */
1424 	if (modifier != 0) {
1428 			reg_name = ia32_get_mapped_reg_name(isa->regs_8bit, reg);
1431 			reg_name = ia32_get_mapped_reg_name(isa->regs_8bit_high, reg);
1434 			reg_name = ia32_get_mapped_reg_name(isa->regs_16bit, reg);
1437 			panic("Invalid asm op modifier");
1439 		be_emit_string(reg_name);
1441 		emit_register(reg, asm_reg->mode);
1444 	if (asm_reg->memory) {
1452 * Emits code for an ASM pseudo op.
1454 static void emit_ia32_Asm(const ir_node *node)
1456 	const void            *gen_attr = get_irn_generic_attr_const(node);
1457 	const ia32_asm_attr_t *attr
1458 		= CONST_CAST_IA32_ATTR(ia32_asm_attr_t, gen_attr);
1459 	ident                 *asm_text = attr->asm_text;
1460 	const char            *s        = get_id_str(asm_text);
/* Bracket the user text with #APP/#NO_APP like gcc does for inline asm. */
1462 	ia32_emitf(node, "#APP\t\n");
/* '%' conversions inside the text are expanded operand by operand. */
1469 			s = emit_asm_operand(node, s);
1475 	ia32_emitf(NULL, "\n#NO_APP\n");
1478 /**********************************
1481 * | | ___ _ __ _ _| |_) |
1482 * | | / _ \| '_ \| | | | _ <
1483 * | |___| (_) | |_) | |_| | |_) |
1484 * \_____\___/| .__/ \__, |____/
1487 **********************************/
1490 * Emit movsb/w instructions to make mov count divideable by 4
1492 static void emit_CopyB_prolog(unsigned size)
/* Handle the 1- and 2-byte remainders so the bulk copy can use movsd. */
1495 		ia32_emitf(NULL, "\tmovsb\n");
1497 		ia32_emitf(NULL, "\tmovsw\n");
1501 * Emit rep movsd instruction for memcopy.
1503 static void emit_ia32_CopyB(const ir_node *node)
1505 	unsigned size = get_ia32_copyb_size(node);
1507 	emit_CopyB_prolog(size);
1508 	ia32_emitf(node, "\trep movsd\n");
1512 * Emits unrolled memcopy.
1514 static void emit_ia32_CopyB_i(const ir_node *node)
1516 	unsigned size = get_ia32_copyb_size(node);
1518 	emit_CopyB_prolog(size);
/* Known small size: emit individual movsd instructions (loop elided). */
1522 		ia32_emitf(NULL, "\tmovsd\n");
1528 /***************************
1532 * | | / _ \| '_ \ \ / /
1533 * | |___| (_) | | | \ V /
1534 * \_____\___/|_| |_|\_/
1536 ***************************/
1539 * Emit code for conversions (I, FP), (FP, I) and (FP, FP).
1541 static void emit_ia32_Conv_with_FP(const ir_node *node, const char* conv_f,
1544 	ir_mode            *ls_mode = get_ia32_ls_mode(node);
1545 	int                 ls_bits = get_mode_size_bits(ls_mode);
/* 32-bit ls mode selects the single-precision variant of the cvt op. */
1546 	const char         *conv    = ls_bits == 32 ? conv_f : conv_d;
1548 	ia32_emitf(node, "\tcvt%s %AS3, %D0\n", conv);
/* int -> float */
1551 static void emit_ia32_Conv_I2FP(const ir_node *node)
1553 	emit_ia32_Conv_with_FP(node, "si2ss", "si2sd");
/* float -> int */
1556 static void emit_ia32_Conv_FP2I(const ir_node *node)
1558 	emit_ia32_Conv_with_FP(node, "ss2si", "sd2si");
/* float -> float (precision change) */
1561 static void emit_ia32_Conv_FP2FP(const ir_node *node)
1563 	emit_ia32_Conv_with_FP(node, "sd2ss", "ss2sd");
1567 * Emits code for an Int conversion.
1569 static void emit_ia32_Conv_I2I(const ir_node *node)
1571 	ir_mode    *smaller_mode = get_ia32_ls_mode(node);
1572 	int         signed_mode  = mode_is_signed(smaller_mode);
1573 	const char *sign_suffix;
1575 	assert(!mode_is_float(smaller_mode));
/* movsx (signed) vs movzx (unsigned) widening move. */
1577 	sign_suffix = signed_mode ? "s" : "z";
1578 	ia32_emitf(node, "\tmov%s%Ml %#AS3, %D0\n", sign_suffix);
/* Emit a call instruction. */
1584 static void emit_ia32_Call(const ir_node *node)
1586 	/* Special case: Call must not have its immediates prefixed by $, instead
1587 	 * address mode is prefixed by *. */
1588 	ia32_emitf(node, "\tcall %*AS3\n");
1592 /*******************************************
1595 * | |__ ___ _ __ ___ __| | ___ ___
1596 * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __|
1597 * | |_) | __/ | | | (_) | (_| | __/\__ \
1598 * |_.__/ \___|_| |_|\___/ \__,_|\___||___/
1600 *******************************************/
1603 * Emits code to increase stack pointer.
/* Emit code adjusting the stack pointer: subl for a positive offset
 * (stack grows downwards), addl of the negated value for a negative
 * one.  The offs==0 early-out and the if/else around the two emits
 * are on elided lines -- only one of the two is executed. */
1605 static void emit_be_IncSP(const ir_node *node)
1607 int offs = be_get_IncSP_offset(node);
1613 ia32_emitf(node, "\tsubl $%u, %D0\n", offs);
1615 ia32_emitf(node, "\taddl $%u, %D0\n", -offs);
1620 * Emits code for Copy/CopyKeep.
/* Emit a register-to-register copy for Copy/CopyKeep nodes.
 * Early-outs (bodies elided): unknown input registers and virtual
 * floating point (vfp) copies produce no code.  Otherwise mode_E
 * values are copied with movsd, everything else with movl.
 * Presumably a src==dst check is also elided -- TODO confirm. */
1622 static void Copy_emitter(const ir_node *node, const ir_node *op)
1624 const arch_register_t *in = arch_get_irn_register(op);
1625 const arch_register_t *out = arch_get_irn_register(node);
1630 if (is_unknown_reg(in))
1632 /* copies of vf nodes aren't real... */
1633 if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
1636 if (get_irn_mode(node) == mode_E) {
1637 ia32_emitf(node, "\tmovsd %R, %R\n", in, out);
1639 ia32_emitf(node, "\tmovl %R, %R\n", in, out);
/* Emit code for a be_Copy node (delegates to Copy_emitter). */
1643 static void emit_be_Copy(const ir_node *node)
1645 Copy_emitter(node, be_get_Copy_op(node));
/* Emit code for a be_CopyKeep node (delegates to Copy_emitter). */
1648 static void emit_be_CopyKeep(const ir_node *node)
1650 Copy_emitter(node, be_get_CopyKeep_op(node));
1654 * Emits code for exchange.
/* Emit code exchanging the contents of two registers (be_Perm).
 * gp registers: single xchg.  xmm registers: three-step xorpd swap
 * (a^=b, b^=a, a^=b), avoiding a scratch register.  The vfp and st
 * branch bodies are elided in this listing; unknown classes panic. */
1656 static void emit_be_Perm(const ir_node *node)
1658 const arch_register_t *in0, *in1;
1659 const arch_register_class_t *cls0, *cls1;
1661 in0 = arch_get_irn_register(get_irn_n(node, 0));
1662 in1 = arch_get_irn_register(get_irn_n(node, 1));
1664 cls0 = arch_register_get_class(in0);
1665 cls1 = arch_register_get_class(in1);
1667 assert(cls0 == cls1 && "Register class mismatch at Perm");
1669 if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
1670 ia32_emitf(node, "\txchg %R, %R\n", in1, in0);
1671 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
1672 ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
1673 ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
1674 ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
1675 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
1677 } else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
1680 panic("unexpected register class in be_Perm (%+F)", node);
1685 * Emits code for Constant loading.
/* Emit code loading a constant: movl of the node's immediate (%I)
 * into the destination register. */
1687 static void emit_ia32_Const(const ir_node *node)
1689 ia32_emitf(node, "\tmovl %I, %D0\n");
1693 * Emits code to load the TLS base
/* Emit code loading the TLS base pointer from %gs:0 (linux/x86 ABI). */
1695 static void emit_ia32_LdTls(const ir_node *node)
1697 ia32_emitf(node, "\tmovl %%gs:0, %D0\n");
1700 /* helper function for emit_ia32_Minus64Bit */
1701 static void emit_mov(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
/* movl %src, %dst */
1703 ia32_emitf(node, "\tmovl %R, %R\n", src, dst);
1706 /* helper function for emit_ia32_Minus64Bit */
1707 static void emit_neg(const ir_node* node, const arch_register_t *reg)
/* negl %reg (two's-complement negate) */
1709 ia32_emitf(node, "\tnegl %R\n", reg);
1712 /* helper function for emit_ia32_Minus64Bit */
1713 static void emit_sbb0(const ir_node* node, const arch_register_t *reg)
/* sbbl $0, %reg -- subtract only the borrow from the low-word negate */
1715 ia32_emitf(node, "\tsbbl $0, %R\n", reg);
1718 /* helper function for emit_ia32_Minus64Bit */
1719 static void emit_sbb(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
/* sbbl %src, %dst (subtract with borrow) */
1721 ia32_emitf(node, "\tsbbl %R, %R\n", src, dst);
1724 /* helper function for emit_ia32_Minus64Bit */
1725 static void emit_xchg(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
/* xchgl %src, %dst (swap two registers without a scratch register) */
1727 ia32_emitf(node, "\txchgl %R, %R\n", src, dst);
1730 /* helper function for emit_ia32_Minus64Bit */
1731 static void emit_zero(const ir_node* node, const arch_register_t *reg)
/* xorl %reg, %reg -- idiomatic zeroing of a register */
1733 ia32_emitf(node, "\txorl %R, %R\n", reg, reg);
/* Emit code negating a 64-bit value held in a lo/hi register pair.
 *
 * Phase 1 (the big if-cascade): move in_lo/in_hi into out_lo/out_hi,
 * dispatching on which input/output registers coincide so that no
 * value is clobbered before it is read.  Several mov calls, closing
 * braces and goto/fall-through lines are elided in this listing.
 *
 * Phase 2 (the tail): the actual negation; the visible lines show the
 * two classic sequences
 *   negl hi; negl lo; sbbl $0, hi        (inputs already in outputs)
 *   xorl hi,hi; negl lo; sbbl in_hi, hi  (hi still lives in in_hi)
 * -- which one runs is decided by elided control flow; TODO confirm. */
1736 static void emit_ia32_Minus64Bit(const ir_node *node)
1738 const arch_register_t *in_lo = get_in_reg(node, 0);
1739 const arch_register_t *in_hi = get_in_reg(node, 1);
1740 const arch_register_t *out_lo = get_out_reg(node, 0);
1741 const arch_register_t *out_hi = get_out_reg(node, 1);
1743 if (out_lo == in_lo) {
1744 if (out_hi != in_hi) {
1745 /* a -> a, b -> d */
1748 /* a -> a, b -> b */
1751 } else if (out_lo == in_hi) {
1752 if (out_hi == in_lo) {
1753 /* a -> b, b -> a */
1754 emit_xchg(node, in_lo, in_hi);
1757 /* a -> b, b -> d */
1758 emit_mov(node, in_hi, out_hi);
1759 emit_mov(node, in_lo, out_lo);
1763 if (out_hi == in_lo) {
1764 /* a -> c, b -> a */
1765 emit_mov(node, in_lo, out_lo);
1767 } else if (out_hi == in_hi) {
1768 /* a -> c, b -> b */
1769 emit_mov(node, in_lo, out_lo);
1772 /* a -> c, b -> d */
1773 emit_mov(node, in_lo, out_lo);
1779 emit_neg( node, out_hi);
1780 emit_neg( node, out_lo);
1781 emit_sbb0(node, out_hi);
1785 emit_zero(node, out_hi);
1786 emit_neg( node, out_lo);
1787 emit_sbb( node, in_hi, out_hi);
/* Emit the classic call/pop sequence to obtain the current EIP,
 * used as the PIC base (x86 has no direct "mov %eip, reg"). */
1790 static void emit_ia32_GetEIP(const ir_node *node)
1792 ia32_emitf(node, "\tcall %s\n", pic_base_label);
1793 ia32_emitf(NULL, "%s:\n", pic_base_label);
1794 ia32_emitf(node, "\tpopl %D0\n");
/* Emit a loop climbing up attr->count stack frames: start from the
 * frame pointer in %S0, then repeatedly dereference it (each frame
 * presumably stores its parent's frame pointer at offset 0 -- TODO
 * confirm) until the counter in %S1 reaches zero. */
1797 static void emit_ia32_ClimbFrame(const ir_node *node)
1799 const ia32_climbframe_attr_t *attr = get_ia32_climbframe_attr_const(node);
1801 ia32_emitf(node, "\tmovl %S0, %D0\n");
1802 ia32_emitf(node, "\tmovl $%u, %S1\n", attr->count);
1803 ia32_emitf(NULL, BLOCK_PREFIX "%ld:\n", get_irn_node_nr(node));
1804 ia32_emitf(node, "\tmovl (%D0), %D0\n");
1805 ia32_emitf(node, "\tdec %S1\n");
1806 ia32_emitf(node, "\tjnz " BLOCK_PREFIX "%ld\n", get_irn_node_nr(node));
/* Emit a function return: "ret $pop" when the callee must pop
 * arguments (or an explicit pop was requested), plain "ret" otherwise
 * (the else line between the two emits is elided). */
1809 static void emit_be_Return(const ir_node *node)
1811 unsigned pop = be_Return_get_pop(node);
1813 if (pop > 0 || be_Return_get_emit_pop(node)) {
1814 ia32_emitf(node, "\tret $%u\n", pop);
1816 ia32_emitf(node, "\tret\n");
/* Emitter that emits no code; registered via the IGN/BE_IGN macros
 * below for nodes that need no assembly output. */
1820 static void emit_Nothing(const ir_node *node)
1826 /***********************************************************************************
1829 * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
1830 * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
1831 * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
1832 * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
1834 ***********************************************************************************/
1837 * Enters the emitter functions for handled nodes into the generic
1838 * pointer of an opcode.
1840 static void ia32_register_emitters(void)
/* Convenience macros storing an emit function into op->ops.generic:
 * IA32_EMIT2 maps ia32 op 'a' to emitter 'emit_ia32_b', IA32_EMIT is
 * the common a==b case, EMIT/BE_EMIT handle plain and backend ops,
 * IGN/BE_IGN register the no-op emitter. */
1842 #define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
1843 #define IA32_EMIT(a) IA32_EMIT2(a,a)
1844 #define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
1845 #define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
1846 #define BE_EMIT(a) op_be_##a->ops.generic = (op_func)emit_be_##a
1847 #define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
1849 /* first clear the generic function pointer for all ops */
1850 clear_irp_opcodes_generic_func();
1852 /* register all emitter functions defined in spec */
1853 ia32_register_spec_emitters();
1855 /* other ia32 emitter functions */
1856 IA32_EMIT2(Conv_I2I8Bit, Conv_I2I);
/* (further IA32_EMIT/EMIT/BE_EMIT registrations elided in this listing) */
1861 IA32_EMIT(Conv_FP2FP);
1862 IA32_EMIT(Conv_FP2I);
1863 IA32_EMIT(Conv_I2FP);
1864 IA32_EMIT(Conv_I2I);
1871 IA32_EMIT(Minus64Bit);
1872 IA32_EMIT(SwitchJmp);
1873 IA32_EMIT(ClimbFrame);
1875 /* benode emitter */
/* Signature of the per-opcode emit handlers stored in op->ops.generic. */
1898 typedef void (*emit_func_ptr) (const ir_node *);
1901 * Assign and emit an exception label if the current instruction can fail.
/* Assign a fresh exception label id to @p node and emit the label,
 * followed by a comment naming the handler block it branches to. */
1903 static void ia32_assign_exc_label(ir_node *node)
1905 /* assign a new ID to the instruction */
1906 set_ia32_exc_label_id(node, ++exc_label_id);
/* emit the label itself, then a padded "/ * exception to ... * /" comment */
1908 ia32_emit_exc_label(node);
1910 be_emit_pad_comment();
1911 be_emit_cstring("/* exception to Block ");
1912 ia32_emit_cfop_target(node);
1913 be_emit_cstring(" */\n");
1914 be_emit_write_line();
1918 * Emits code for a node.
/* Emit code for a single scheduled node: optionally emit its
 * exception label and spill/reload/remat debug marks, then dispatch
 * to the emitter registered in op->ops.generic (several closing
 * braces and the error path's surroundings are elided here). */
1920 static void ia32_emit_node(ir_node *node)
1922 ir_op *op = get_irn_op(node);
1924 DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
1926 if (is_ia32_irn(node)) {
1927 if (get_ia32_exc_label(node)) {
1928 /* emit the exception label of this instruction */
1929 ia32_assign_exc_label(node);
/* the xchg reg,reg no-ops below serve only as markers a debugger or
 * simulator can spot; enabled by the mark_spill_reload option */
1931 if (mark_spill_reload) {
1932 if (is_ia32_is_spill(node)) {
1933 ia32_emitf(NULL, "\txchg %ebx, %ebx /* spill mark */\n");
1935 if (is_ia32_is_reload(node)) {
1936 ia32_emitf(NULL, "\txchg %edx, %edx /* reload mark */\n");
1938 if (is_ia32_is_remat(node)) {
1939 ia32_emitf(NULL, "\txchg %ecx, %ecx /* remat mark */\n");
1943 if (op->ops.generic) {
1944 emit_func_ptr func = (emit_func_ptr) op->ops.generic;
/* propagate source-level debug info before calling the emitter */
1946 be_dbg_set_dbg_info(get_irn_dbg_info(node));
1951 ir_fprintf(stderr, "Error: No emit handler for node %+F (%+G, graph %+F)\n", node, node, current_ir_graph);
1957 * Emits gas alignment directives
/* Emit a gas ".p2align align,,skip" directive (align is a power-of-2
 * exponent; skip limits the number of padding bytes). */
1959 static void ia32_emit_alignment(unsigned align, unsigned skip)
1961 ia32_emitf(NULL, "\t.p2align %u,,%u\n", align, skip);
1965 * Emits gas alignment directives for Labels depended on cpu architecture.
/* Emit the label alignment configured for the target cpu
 * (ia32_cg_config holds architecture-dependent values). */
1967 static void ia32_emit_align_label(void)
1969 unsigned align = ia32_cg_config.label_alignment;
1970 unsigned maximum_skip = ia32_cg_config.label_alignment_max_skip;
1971 ia32_emit_alignment(align, maximum_skip);
1975 * Test whether a block should be aligned.
1976 * For cpus in the P4/Athlon class it is useful to align jump labels to
1977 * 16 bytes. However we should only do that if the alignment nops before the
1978 * label aren't executed more often than we have jumps to the label.
/* Decide whether @p block is worth aligning, by comparing the
 * execution frequency reaching it via jumps against the frequency of
 * the fall-through edge.  Early-return bodies, loop-variable
 * declarations and the fallthrough test inside the loop are elided in
 * this listing. */
1982 static int should_align_block(const ir_node *block)
1983 static const double DELTA = .0001;
1984 ir_exec_freq *exec_freq = cg->birg->exec_freq;
1985 ir_node *prev = get_prev_block_sched(block);
1986 double prev_freq = 0; /**< execfreq of the fallthrough block */
1987 double jmp_freq = 0; /**< execfreq of all non-fallthrough blocks */
/* bail out when no frequency info exists or alignment is disabled */
1990 if (exec_freq == NULL)
1992 if (ia32_cg_config.label_alignment_factor <= 0)
1995 block_freq = get_block_execfreq(exec_freq, block);
1996 if (block_freq < DELTA)
/* accumulate pred frequencies, split into fallthrough vs. jump edges
 * (the test distinguishing the two cases is elided) */
1999 n_cfgpreds = get_Block_n_cfgpreds(block);
2000 for(i = 0; i < n_cfgpreds; ++i) {
2001 const ir_node *pred = get_Block_cfgpred_block(block, i);
2002 double pred_freq = get_block_execfreq(exec_freq, pred);
2005 prev_freq += pred_freq;
2007 jmp_freq += pred_freq;
/* no measurable fallthrough but measurable jumps: align */
2011 if (prev_freq < DELTA && !(jmp_freq < DELTA))
2014 jmp_freq /= prev_freq;
2016 return jmp_freq > ia32_cg_config.label_alignment_factor;
2020 * Emit the block header for a block.
2022 * @param block the block
2023 * @param prev_block the previous block
/* Emit the header for @p block: optional alignment, the block label
 * if it is a jump target, and a comment listing predecessor blocks
 * and (if available) the execution frequency.  Various braces,
 * declarations and else-branches are elided in this listing. */
2025 static void ia32_emit_block_header(ir_node *block)
2027 ir_graph *irg = current_ir_graph;
2028 int need_label = block_needs_label(block);
2030 ir_exec_freq *exec_freq = cg->birg->exec_freq;
/* the artificial end block gets no code at all */
2032 if (block == get_irg_end_block(irg))
2035 if (ia32_cg_config.label_alignment > 0) {
2036 /* align the current block if:
2037 * a) if should be aligned due to its execution frequency
2038 * b) there is no fall-through here
2040 if (should_align_block(block)) {
2041 ia32_emit_align_label();
2043 /* if the predecessor block has no fall-through,
2044 we can always align the label. */
2046 int has_fallthrough = 0;
2048 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
2049 ir_node *cfg_pred = get_Block_cfgpred(block, i);
2050 if (can_be_fallthrough(cfg_pred)) {
2051 has_fallthrough = 1;
2056 if (!has_fallthrough)
2057 ia32_emit_align_label();
/* emit the ".LBB:" label, or only a comment naming the block when no
 * label is needed (the else line is elided) */
2061 if (need_label || has_Block_label(block)) {
2062 ia32_emit_block_name(block);
2065 be_emit_pad_comment();
2066 be_emit_cstring(" /* ");
2068 be_emit_cstring("\t/* ");
2069 ia32_emit_block_name(block);
2070 be_emit_cstring(": ");
2073 be_emit_cstring("preds:");
2075 /* emit list of pred blocks in comment */
2076 arity = get_irn_arity(block);
2078 be_emit_cstring(" none");
2080 for (i = 0; i < arity; ++i) {
2081 ir_node *predblock = get_Block_cfgpred_block(block, i);
2082 be_emit_irprintf(" %d", get_irn_node_nr(predblock));
2085 if (exec_freq != NULL) {
2086 be_emit_irprintf(", freq: %f",
2087 get_block_execfreq(exec_freq, block));
2089 be_emit_cstring(" */\n");
2090 be_emit_write_line();
2094 * Walks over the nodes in a block connected by scheduling edges
2095 * and emits code for each node.
/* Emit one basic block: its header, then every node in schedule
 * order. */
2097 static void ia32_gen_block(ir_node *block)
2101 ia32_emit_block_header(block);
2103 /* emit the contents of the block */
2104 be_dbg_set_dbg_info(get_irn_dbg_info(block));
2105 sched_foreach(block, node) {
2106 ia32_emit_node(node);
/* One row of the exception table: a potentially-trapping instruction
 * and the handler block control flow continues in. */
2110 typedef struct exc_entry {
2111 ir_node *exc_instr; /**< The instruction that can issue an exception. */
2112 ir_node *block; /**< The block to call then. */
2117 * Sets labels for control flow nodes (jump target).
2118 * Links control predecessors to there destination blocks.
/* Block walker: link every control-flow predecessor to its target
 * block (used later as jump-target labels), and collect all ia32
 * instructions carrying an exception label into *exc_list.  The
 * declarations of n/pred/e and the filling of e are elided here. */
2120 static void ia32_gen_labels(ir_node *block, void *data)
2122 exc_entry **exc_list = data;
2126 for (n = get_Block_n_cfgpreds(block) - 1; n >= 0; --n) {
2127 pred = get_Block_cfgpred(block, n);
2128 set_irn_link(pred, block);
2130 pred = skip_Proj(pred);
2131 if (is_ia32_irn(pred) && get_ia32_exc_label(pred)) {
2136 ARR_APP1(exc_entry, *exc_list, e);
/* also link the trapping instruction itself to its target block */
2137 set_irn_link(pred, block);
2143 * Compare two exception_entries.
/* qsort comparator ordering exc_entries by ascending exception label
 * id (the return statements are elided in this listing). */
2145 static int cmp_exc_entry(const void *a, const void *b)
2147 const exc_entry *ea = a;
2148 const exc_entry *eb = b;
2150 if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
2156 * Main driver. Emits the code for one routine.
/* Main driver: emit the assembly for one routine.
 * Registers the emitters, emits prolog/epilog via begnuas, walks the
 * blocks in schedule order, and finally writes the sorted exception
 * table.  Local declarations and some loop bodies are elided in this
 * listing. */
2158 void ia32_gen_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
2160 ir_entity *entity = get_irg_entity(irg);
2161 exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
2166 do_pic = cg->birg->main_env->options->pic;
2168 ia32_register_emitters();
2170 get_unique_label(pic_base_label, sizeof(pic_base_label), ".PIC_BASE");
2172 be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
2173 be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
2175 /* we use links to point to target blocks */
2176 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
2177 irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
2179 /* initialize next block links */
2180 n = ARR_LEN(cg->blk_sched);
2181 for (i = 0; i < n; ++i) {
2182 ir_node *block = cg->blk_sched[i];
2183 ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
2185 set_irn_link(block, prev);
/* second pass: emit the blocks in schedule order */
2188 for (i = 0; i < n; ++i) {
2189 ir_node *block = cg->blk_sched[i];
2191 ia32_gen_block(block);
2194 be_gas_emit_function_epilog(entity);
2195 be_dbg_method_end();
2197 be_emit_write_line();
2199 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
2201 /* Sort the exception table using the exception label id's.
2202 Those are ascending with ascending addresses. */
2203 qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
/* emit instruction-address / handler-address pairs */
2207 for (i = 0; i < ARR_LEN(exc_list); ++i) {
2208 be_emit_cstring("\t.long ");
2209 ia32_emit_exc_label(exc_list[i].exc_instr);
2211 be_emit_cstring("\t.long ");
2212 ia32_emit_block_name(exc_list[i].block);
2216 DEL_ARR_F(exc_list);
/* Command line options of the ia32 emitter (registered below under
 * the "be.ia32" option group; terminator entry elided). */
2219 static const lc_opt_table_entry_t ia32_emitter_options[] = {
2220 LC_OPT_ENT_BOOL("mark_spill_reload", "mark spills and reloads with ud opcodes", &mark_spill_reload),
2224 void ia32_init_emitter(void)
2226 lc_opt_entry_t *be_grp;
2227 lc_opt_entry_t *ia32_grp;
2229 be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
2230 ia32_grp = lc_opt_get_grp(be_grp, "ia32");
2232 lc_opt_add_table(ia32_grp, ia32_emitter_options);
2234 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.emitter");