2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the ia32 node emitter.
23 * @author Christian Wuerdig, Matthias Braun
41 #include "iredges_t.h"
44 #include "raw_bitset.h"
47 #include "../besched_t.h"
48 #include "../benode_t.h"
50 #include "../be_dbgout.h"
51 #include "../beemitter.h"
52 #include "../begnuas.h"
53 #include "../beirg_t.h"
54 #include "../be_dbgout.h"
56 #include "ia32_emitter.h"
57 #include "gen_ia32_emitter.h"
58 #include "gen_ia32_regalloc_if.h"
59 #include "ia32_nodes_attr.h"
60 #include "ia32_new_nodes.h"
61 #include "ia32_map_regs.h"
62 #include "ia32_architecture.h"
63 #include "bearch_ia32_t.h"
/* NOTE(review): this file is a line-sampled extraction; the stray leading
 * integers on each line are original line numbers fused into the text, and
 * many lines (braces, returns) were dropped. Comments below describe what
 * the visible fragments show. */

/* Debug module handle (active only in debug builds). */
65 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Prefix used for locally generated block labels. */
67 #define BLOCK_PREFIX ".L"

/* Size of scratch buffers filled via snprintf (e.g. jump-table labels). */
69 #define SNPRINTF_BUF_LEN 128

/* Backend state shared by all emitter routines in this file. */
71 static const arch_env_t *arch_env;
72 static const ia32_isa_t *isa;
73 static ia32_code_gen_t *cg;

/* Label name of the PIC base symbol (emitted for position-independent code). */
75 static char pic_base_label[128];
/* Id used when emitting exception labels -- TODO confirm, uses not all visible. */
76 static ir_label_t exc_label_id;
/* When non-zero, spill/reload instructions get marked in the assembly output. */
77 static int mark_spill_reload = 0;
79 /** Return the next block in Block schedule */
80 static ir_node *get_prev_block_sched(const ir_node *block)
82 return get_irn_link(block);
85 static int is_fallthrough(const ir_node *cfgpred)
89 if (!is_Proj(cfgpred))
91 pred = get_Proj_pred(cfgpred);
92 if (is_ia32_SwitchJmp(pred))
/*
 * Decides whether a block needs its own label: a block with exactly one CFG
 * predecessor that is scheduled directly behind that predecessor's block and
 * is reached by fallthrough needs none.
 * NOTE(review): the extraction dropped the return statements and closing
 * braces of this function; do not treat the fragment as complete.
 */
98 static int block_needs_label(const ir_node *block)
101 int n_cfgpreds = get_Block_n_cfgpreds(block);
103 if (n_cfgpreds == 0) {
105 } else if (n_cfgpreds == 1) {
106 ir_node *cfgpred = get_Block_cfgpred(block, 0);
107 ir_node *cfgpred_block = get_nodes_block(cfgpred);
109 if (get_prev_block_sched(block) == cfgpred_block
110 && is_fallthrough(cfgpred)) {
/*
 * Returns the register assigned to input position pos of irn.
 * Panics when the NOREG placeholder is found; for the UNKNOWN placeholder
 * any register satisfying the input's requirements is returned instead.
 * NOTE(review): several lines (braces, final return) were dropped by the
 * extraction.
 */
119 * Returns the register at in position pos.
121 static const arch_register_t *get_in_reg(const ir_node *irn, int pos)
124 const arch_register_t *reg = NULL;
126 assert(get_irn_arity(irn) > pos && "Invalid IN position");
128 /* The out register of the operator at position pos is the
129 in register we need. */
130 op = get_irn_n(irn, pos);
132 reg = arch_get_irn_register(arch_env, op);
134 assert(reg && "no in register found");
136 if (reg == &ia32_gp_regs[REG_GP_NOREG])
137 panic("trying to emit noreg for %+F input %d", irn, pos);
139 /* in case of unknown register: just return a valid register */
140 if (reg == &ia32_gp_regs[REG_GP_UKNWN]) {
141 const arch_register_req_t *req;
143 /* ask for the requirements */
144 req = arch_get_register_req(arch_env, irn, pos);
146 if (arch_register_req_is(req, limited)) {
147 /* in case of limited requirements: get the first allowed register */
148 unsigned idx = rbitset_next(req->limited, 0, 1);
149 reg = arch_register_for_index(req->cls, idx);
151 /* otherwise get first register in class */
152 reg = arch_register_for_index(req->cls, 0);
/*
 * Returns the register assigned to output position pos of irn.
 * Non-mode_T nodes have a single result register; mode_T nodes are resolved
 * either via the ia32 attribute or by searching the Proj with matching projnum.
 * NOTE(review): declaration of 'proj' and closing braces dropped by extraction.
 */
160 * Returns the register at out position pos.
162 static const arch_register_t *get_out_reg(const ir_node *irn, int pos)
165 const arch_register_t *reg = NULL;
167 /* 1st case: irn is not of mode_T, so it has only */
168 /* one OUT register -> good */
169 /* 2nd case: irn is of mode_T -> collect all Projs and ask the */
170 /* Proj with the corresponding projnum for the register */
172 if (get_irn_mode(irn) != mode_T) {
174 reg = arch_get_irn_register(arch_env, irn);
175 } else if (is_ia32_irn(irn)) {
176 reg = get_ia32_out_reg(irn, pos);
178 const ir_edge_t *edge;
180 foreach_out_edge(irn, edge) {
181 proj = get_edge_src_irn(edge);
182 assert(is_Proj(proj) && "non-Proj from mode_T node");
183 if (get_Proj_proj(proj) == pos) {
184 reg = arch_get_irn_register(arch_env, proj);
190 assert(reg && "no out register found");
/**
 * Appends a unique number to a prefix. The number is taken from a
 * monotonically increasing counter and therefore never reused.
 *
 * @param buf     destination buffer
 * @param buflen  size of the destination buffer (result is truncated to fit)
 * @param prefix  label prefix to prepend
 * @return buf, containing "<prefix><id>"
 */
static char *get_unique_label(char *buf, size_t buflen, const char *prefix)
{
	static unsigned long id = 0;
	snprintf(buf, buflen, "%s%lu", prefix, ++id);
	return buf;
}
204 /*************************************************************
206 * (_) | | / _| | | | |
207 * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
208 * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
209 * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
210 * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
213 *************************************************************/
/*
 * Emits the 8-bit form of a GP register name.
 * NOTE(review): only reg_name[1] is emitted in the visible fragment; the
 * surrounding '%'/'l'-emitting lines appear to have been dropped.
 */
215 static void emit_8bit_register(const arch_register_t *reg)
217 const char *reg_name = arch_register_get_name(reg);
220 be_emit_char(reg_name[1]);
/* Emits the 16-bit name of a GP register, via the isa's 16-bit name map. */
224 static void emit_16bit_register(const arch_register_t *reg)
226 const char *reg_name = ia32_get_mapped_reg_name(isa->regs_16bit, reg);
229 be_emit_string(reg_name);
/*
 * Emits a register name; if a mode is given, the 8/16-bit sub-register name
 * is used for 8/16-bit modes, otherwise the full (32-bit) name.
 * NOTE(review): the NULL-mode check, switch header and braces were dropped
 * by the extraction.
 */
232 static void emit_register(const arch_register_t *reg, const ir_mode *mode)
234 const char *reg_name;
237 int size = get_mode_size_bits(mode);
239 case 8: emit_8bit_register(reg); return;
240 case 16: emit_16bit_register(reg); return;
242 assert(mode_is_float(mode) || size == 32);
245 reg_name = arch_register_get_name(reg);
248 be_emit_string(reg_name);
251 void ia32_emit_source_register(const ir_node *node, int pos)
253 const arch_register_t *reg = get_in_reg(node, pos);
255 emit_register(reg, NULL);
/* Forward declaration; the definition is further below in this file. */
258 static void emit_ia32_Immediate(const ir_node *node);

/*
 * Emits either the immediate value (if input pos is an ia32 Immediate node)
 * or the 8-bit register name of the input.
 * NOTE(review): the early 'return' after the immediate branch and closing
 * braces were dropped by the extraction.
 */
260 void ia32_emit_8bit_source_register_or_immediate(const ir_node *node, int pos)
262 const arch_register_t *reg;
263 ir_node *in = get_irn_n(node, pos);
264 if (is_ia32_Immediate(in)) {
265 emit_ia32_Immediate(in);
269 reg = get_in_reg(node, pos);
270 emit_8bit_register(reg);
273 void ia32_emit_dest_register(const ir_node *node, int pos)
275 const arch_register_t *reg = get_out_reg(node, pos);
277 emit_register(reg, NULL);
280 void ia32_emit_8bit_dest_register(const ir_node *node, int pos)
282 const arch_register_t *reg = get_out_reg(node, pos);
284 emit_register(reg, mode_Bu);
/*
 * Emits the name of the virtual x87 register recorded at slot pos of the
 * node's x87 attribute.
 * NOTE(review): the '%' prefix/assert lines appear dropped by the extraction.
 */
287 void ia32_emit_x87_register(const ir_node *node, int pos)
289 const ia32_x87_attr_t *attr = get_ia32_x87_attr_const(node);
293 be_emit_string(attr->x87[pos]->name);
/*
 * Emits the gas (AT&T) size suffix for an integer/reference mode:
 * b/w/l for 8/16/32 bit, "ll" for 64 bit; panics otherwise.
 */
296 static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
298 assert(mode_is_int(mode) || mode_is_reference(mode));
299 switch (get_mode_size_bits(mode)) {
300 case 8: be_emit_char('b'); return;
301 case 16: be_emit_char('w'); return;
302 case 32: be_emit_char('l'); return;
303 /* gas docu says q is the suffix but gcc, objdump and icc use ll
305 case 64: be_emit_cstring("ll"); return;
307 panic("Can't output mode_suffix for %+F", mode);
/* Emits the size suffix for the node's load/store mode. */
310 void ia32_emit_mode_suffix(const ir_node *node)
312 ir_mode *mode = get_ia32_ls_mode(node);
316 ia32_emit_mode_suffix_mode(mode);
/*
 * Emits the x87 operand-size suffix, but only for address-mode operations
 * (register-form x87 ops need no suffix). Float modes map to s/l/t,
 * integer modes to s/l/"ll".
 * NOTE(review): early return, braces and the panic-on-default paths were
 * partially dropped by the extraction.
 */
319 void ia32_emit_x87_mode_suffix(const ir_node *node)
323 /* we only need to emit the mode on address mode */
324 if (get_ia32_op_type(node) == ia32_Normal)
327 mode = get_ia32_ls_mode(node);
328 assert(mode != NULL);
330 if (mode_is_float(mode)) {
331 switch (get_mode_size_bits(mode)) {
332 case 32: be_emit_char('s'); return;
333 case 64: be_emit_char('l'); return;
335 case 96: be_emit_char('t'); return;
338 assert(mode_is_int(mode));
339 switch (get_mode_size_bits(mode)) {
340 case 16: be_emit_char('s'); return;
341 case 32: be_emit_char('l'); return;
342 /* gas docu says q is the suffix but gcc, objdump and icc use ll
344 case 64: be_emit_cstring("ll"); return;
347 panic("Can't output mode_suffix for %+F", mode);
/*
 * Returns the SSE (xmm) suffix character for a float mode.
 * NOTE(review): the 32/64-bit case lines were dropped by the extraction;
 * only the assert and the default panic are visible.
 */
350 static char get_xmm_mode_suffix(ir_mode *mode)
352 assert(mode_is_float(mode));
353 switch(get_mode_size_bits(mode)) {
356 default: panic("Invalid XMM mode");
/*
 * Emits the xmm suffix for the node's load/store mode.
 * NOTE(review): original line 364 is missing here; presumably an extra
 * character emit -- compare with ia32_emit_xmm_mode_suffix_s below.
 */
360 void ia32_emit_xmm_mode_suffix(const ir_node *node)
362 ir_mode *mode = get_ia32_ls_mode(node);
363 assert(mode != NULL);
365 be_emit_char(get_xmm_mode_suffix(mode));
/* Emits only the single xmm suffix character for the node's ls mode. */
368 void ia32_emit_xmm_mode_suffix_s(const ir_node *node)
370 ir_mode *mode = get_ia32_ls_mode(node);
371 assert(mode != NULL);
372 be_emit_char(get_xmm_mode_suffix(mode));
/*
 * Emits 's' (sign-extend) or 'z' (zero-extend) for sub-32-bit modes;
 * 32-bit modes need no extension (the early return's body line was
 * dropped by the extraction).
 */
375 void ia32_emit_extend_suffix(const ir_mode *mode)
377 if (get_mode_size_bits(mode) == 32)
379 be_emit_char(mode_is_signed(mode) ? 's' : 'z');
/*
 * Emits either the immediate value (if input pos is an ia32 Immediate) or
 * the register name of the input, sized by the node's ls mode.
 * NOTE(review): the 'else' line and closing braces were dropped.
 */
382 void ia32_emit_source_register_or_immediate(const ir_node *node, int pos)
384 ir_node *in = get_irn_n(node, pos);
385 if (is_ia32_Immediate(in)) {
386 emit_ia32_Immediate(in);
388 const ir_mode *mode = get_ia32_ls_mode(node);
389 const arch_register_t *reg = get_in_reg(node, pos);
390 emit_register(reg, mode);
395 * Returns the target block for a control flow node.
397 static ir_node *get_cfop_target_block(const ir_node *irn)
399 assert(get_irn_mode(irn) == mode_X);
400 return get_irn_link(irn);
404 * Emits a block label for the given block.
406 static void ia32_emit_block_name(const ir_node *block)
408 if (has_Block_label(block)) {
409 be_emit_string(be_gas_block_label_prefix());
410 be_emit_irprintf("%lu", get_Block_label(block));
412 be_emit_cstring(BLOCK_PREFIX);
413 be_emit_irprintf("%ld", get_irn_node_nr(block));
418 * Emits the target label for a control flow node.
420 static void ia32_emit_cfop_target(const ir_node *node)
422 ir_node *block = get_cfop_target_block(node);
423 ia32_emit_block_name(block);
/*
 * Mapping from firm pn_Cmp codes to x86 condition-code suffixes.
 * NOTE(review): the struct's field declarations (name, num) were dropped by
 * the extraction; their layout is implied by the initializers below.
 */
427 * coding of conditions
429 struct cmp2conditon_t {
435 * positive conditions for signed compares
437 static const struct cmp2conditon_t cmp2condition_s[] = {
438 { NULL, pn_Cmp_False }, /* always false */
439 { "e", pn_Cmp_Eq }, /* == */
440 { "l", pn_Cmp_Lt }, /* < */
441 { "le", pn_Cmp_Le }, /* <= */
442 { "g", pn_Cmp_Gt }, /* > */
443 { "ge", pn_Cmp_Ge }, /* >= */
444 { "ne", pn_Cmp_Lg }, /* != */
445 { NULL, pn_Cmp_Leg}, /* always true */
449 * positive conditions for unsigned compares
451 static const struct cmp2conditon_t cmp2condition_u[] = {
452 { NULL, pn_Cmp_False }, /* always false */
453 { "e", pn_Cmp_Eq }, /* == */
454 { "b", pn_Cmp_Lt }, /* < */
455 { "be", pn_Cmp_Le }, /* <= */
456 { "a", pn_Cmp_Gt }, /* > */
457 { "ae", pn_Cmp_Ge }, /* >= */
458 { "ne", pn_Cmp_Lg }, /* != */
459 { NULL, pn_Cmp_Leg }, /* always true */
/*
 * Emits the condition-code suffix for a (possibly flag-augmented) pnc:
 * unsigned table for float/unsigned compares, signed table otherwise.
 * NOTE(review): the lines masking the flag bits out of pnc and the final
 * be_emit_string(str) were dropped by the extraction -- indexing with the
 * raw pnc would be wrong without them.
 */
462 static void ia32_emit_cmp_suffix(int pnc)
466 if ((pnc & ia32_pn_Cmp_float) || (pnc & ia32_pn_Cmp_unsigned)) {
468 assert(cmp2condition_u[pnc].num == pnc);
469 str = cmp2condition_u[pnc].name;
472 assert(cmp2condition_s[pnc].num == pnc);
473 str = cmp2condition_s[pnc].name;
/*
 * printf-style emitter for a single assembly line. The format directives
 * listed below dispatch to the specialized emit helpers in this file.
 * NOTE(review): this function is heavily fragmented in the extraction --
 * the va_start/va_end calls, the dispatch switch skeleton and many braces
 * are missing; the visible lines only show the per-directive actions.
 */
480 * fmt parameter output
481 * ---- ---------------------- ---------------------------------------------
483 * %AM <node> address mode of the node
484 * %AR const arch_register_t* address mode of the node or register
485 * %ASx <node> address mode of the node or source register x
486 * %Dx <node> destination register x
487 * %I <node> immediate of the node
488 * %L <node> control flow target of the node
489 * %M <node> mode suffix of the node
490 * %P int condition code
491 * %R const arch_register_t* register
492 * %Sx <node> source register x
493 * %s const char* string
494 * %u unsigned int unsigned int
497 * # modifier for %ASx, %D and %S uses ls mode of node to alter register width
499 static void ia32_emitf(const ir_node *node, const char *fmt, ...)
505 const char *start = fmt;
506 const ir_mode *mode = NULL;
508 while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
511 be_emit_string_len(start, fmt - start);
515 be_emit_finish_line_gas(node);
527 mode = get_ia32_ls_mode(node);
543 const arch_register_t *reg = va_arg(ap, const arch_register_t*);
544 if (get_ia32_op_type(node) == ia32_AddrModeS) {
547 emit_register(reg, NULL);
553 if (get_ia32_op_type(node) == ia32_AddrModeS) {
557 assert(get_ia32_op_type(node) == ia32_Normal);
562 default: goto unknown;
569 const arch_register_t *reg;
571 if (*fmt < '0' || '9' <= *fmt)
575 reg = get_out_reg(node, pos);
576 emit_register(reg, mode);
581 emit_ia32_Immediate(node);
585 ia32_emit_cfop_target(node);
589 ia32_emit_mode_suffix_mode(get_ia32_ls_mode(node));
594 int pnc = va_arg(ap, int);
595 ia32_emit_cmp_suffix(pnc);
600 const arch_register_t *reg = va_arg(ap, const arch_register_t*);
601 emit_register(reg, NULL);
610 if (*fmt < '0' || '9' <= *fmt)
614 in = get_irn_n(node, pos);
615 if (is_ia32_Immediate(in)) {
616 emit_ia32_Immediate(in);
618 const arch_register_t *reg = get_in_reg(node, pos);
619 emit_register(reg, mode);
625 const char *str = va_arg(ap, const char*);
631 unsigned num = va_arg(ap, unsigned);
632 be_emit_irprintf("%u", num);
638 panic("unknown conversion");
646 * Emits registers and/or address mode of a binary operation.
648 void ia32_emit_binop(const ir_node *node)
650 if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
651 ia32_emitf(node, "%#S4, %#AS3");
653 ia32_emitf(node, "%#AS4, %#S3");
/*
 * Emits the operand list of an x87 binary operation, dispatching on the
 * node's operation type (register form vs. address mode).
 * NOTE(review): the switch cases, the branch conditions around the visible
 * if/else fragments and the address-mode case were dropped by the extraction.
 */
658 * Emits registers and/or address mode of a binary operation.
660 void ia32_emit_x87_binop(const ir_node *node)
662 switch(get_ia32_op_type(node)) {
665 const ia32_x87_attr_t *x87_attr = get_ia32_x87_attr_const(node);
666 const arch_register_t *in1 = x87_attr->x87[0];
667 const arch_register_t *in = x87_attr->x87[1];
668 const arch_register_t *out = x87_attr->x87[2];
672 } else if (out == in) {
677 be_emit_string(arch_register_get_name(in));
678 be_emit_cstring(", %");
679 be_emit_string(arch_register_get_name(out));
687 assert(0 && "unsupported op type");
/*
 * Emits the operand of a unary operation at input position pos.
 * NOTE(review): the lines building 'fmt' from pos were dropped by the
 * extraction; only the final ia32_emitf call is visible.
 */
692 * Emits registers and/or address mode of a unary operation.
694 void ia32_emit_unop(const ir_node *node, int pos)
698 ia32_emitf(node, fmt);
/*
 * Emits the linker identifier of an entity, with TLS relocation suffixes
 * (@INDNTPOFF/@NTPOFF) for thread-local entities and a PIC base adjustment
 * when position-independent code is enabled and not suppressed.
 * NOTE(review): the emit of 'id' itself, the do_pic declaration and several
 * braces were dropped by the extraction.
 */
701 static void ia32_emit_entity(ir_entity *entity, int no_pic_adjust)
705 set_entity_backend_marked(entity, 1);
706 id = get_entity_ld_ident(entity);
709 if (get_entity_owner(entity) == get_tls_type()) {
710 if (get_entity_visibility(entity) == visibility_external_allocated) {
711 be_emit_cstring("@INDNTPOFF");
713 be_emit_cstring("@NTPOFF");
717 if (!no_pic_adjust && do_pic) {
718 /* TODO: only do this when necessary */
720 be_emit_string(pic_base_label);
/*
 * Emits the address-mode operand of a node in AT&T syntax:
 * [symconst][+/-offset](base,index,scale).
 * NOTE(review): the parenthesis emits, the has_base/has_index branch
 * structure and 'scale' declaration were dropped by the extraction.
 */
725 * Emits address mode.
727 void ia32_emit_am(const ir_node *node)
729 ir_entity *ent = get_ia32_am_sc(node);
730 int offs = get_ia32_am_offs_int(node);
731 ir_node *base = get_irn_n(node, n_ia32_base);
732 int has_base = !is_ia32_NoReg_GP(base);
733 ir_node *index = get_irn_n(node, n_ia32_index);
734 int has_index = !is_ia32_NoReg_GP(index);
736 /* just to be sure... */
737 assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);
741 if (is_ia32_am_sc_sign(node))
743 ia32_emit_entity(ent, 0);
746 /* also handle special case if nothing is set */
747 if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
749 be_emit_irprintf("%+d", offs);
751 be_emit_irprintf("%d", offs);
755 if (has_base || has_index) {
760 const arch_register_t *reg = get_in_reg(node, n_ia32_base);
761 emit_register(reg, NULL);
764 /* emit index + scale */
766 const arch_register_t *reg = get_in_reg(node, n_ia32_index);
769 emit_register(reg, NULL);
771 scale = get_ia32_am_scale(node);
773 be_emit_irprintf(",%d", 1 << scale);
780 static void emit_ia32_IMul(const ir_node *node)
782 ir_node *left = get_irn_n(node, n_ia32_IMul_left);
783 const arch_register_t *out_reg = get_out_reg(node, pn_ia32_IMul_res);
785 /* do we need the 3-address form? */
786 if (is_ia32_NoReg_GP(left) ||
787 get_in_reg(node, n_ia32_IMul_left) != out_reg) {
788 ia32_emitf(node, "\timul%M %#S4, %#AS3, %#D0\n");
790 ia32_emitf(node, "\timul%M %#AS4, %#S3\n");
/*
 * Walks up through copies/perms/spills/reloads to find the node that
 * originally produced the value being moved around. Uses the visited flag
 * to guard against cycles (caller must inc_irg_visited first).
 * NOTE(review): several return-NULL paths, the Phi loop's result handling
 * and the final fallthrough were dropped by the extraction.
 */
795 * walks up a tree of copies/perms/spills/reloads to find the original value
796 * that is moved around
798 static ir_node *find_original_value(ir_node *node)
800 if (irn_visited(node))
803 mark_irn_visited(node);
804 if (be_is_Copy(node)) {
805 return find_original_value(be_get_Copy_op(node));
806 } else if (be_is_CopyKeep(node)) {
807 return find_original_value(be_get_CopyKeep_op(node));
808 } else if (is_Proj(node)) {
809 ir_node *pred = get_Proj_pred(node);
810 if (be_is_Perm(pred)) {
811 return find_original_value(get_irn_n(pred, get_Proj_proj(node)));
812 } else if (be_is_MemPerm(pred)) {
/* MemPerm inputs are shifted by one (the frame pointer input). */
813 return find_original_value(get_irn_n(pred, get_Proj_proj(node) + 1));
814 } else if (is_ia32_Load(pred)) {
815 return find_original_value(get_irn_n(pred, n_ia32_Load_mem));
819 } else if (is_ia32_Store(node)) {
820 return find_original_value(get_irn_n(node, n_ia32_Store_val));
821 } else if (is_Phi(node)) {
823 arity = get_irn_arity(node);
824 for (i = 0; i < arity; ++i) {
825 ir_node *in = get_irn_n(node, i);
826 ir_node *res = find_original_value(in);
/*
 * Computes the final condition code for a node consuming flags at input
 * flags_pos: mirrors the pnc when the flag producer had permuted inputs and
 * ORs in the float/unsigned flag bits depending on the producer kind
 * (Sahf-after-fnstsw, Ucomi-family, or plain integer compare).
 * NOTE(review): the return statement and some braces were dropped by the
 * extraction.
 */
837 static int determine_final_pnc(const ir_node *node, int flags_pos,
840 ir_node *flags = get_irn_n(node, flags_pos);
841 const ia32_attr_t *flags_attr;
842 flags = skip_Proj(flags);
844 if (is_ia32_Sahf(flags)) {
845 ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
846 if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
847 || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
/* the fnstsw producer may be hidden behind copies/spills */
848 inc_irg_visited(current_ir_graph);
849 cmp = find_original_value(cmp);
851 assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
852 || is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));
855 flags_attr = get_ia32_attr_const(cmp);
856 if (flags_attr->data.ins_permuted)
857 pnc = get_mirrored_pnc(pnc);
858 pnc |= ia32_pn_Cmp_float;
859 } else if (is_ia32_Ucomi(flags) || is_ia32_Fucomi(flags)
860 || is_ia32_Fucompi(flags)) {
861 flags_attr = get_ia32_attr_const(flags);
863 if (flags_attr->data.ins_permuted)
864 pnc = get_mirrored_pnc(pnc);
865 pnc |= ia32_pn_Cmp_float;
867 flags_attr = get_ia32_attr_const(flags);
869 if (flags_attr->data.ins_permuted)
870 pnc = get_mirrored_pnc(pnc);
871 if (flags_attr->data.cmp_unsigned)
872 pnc |= ia32_pn_Cmp_unsigned;
/*
 * Emits the condition suffix for a flag-consuming node, negating the pnc
 * when the node itself has permuted inputs.
 * NOTE(review): the second parameter (flags_pos) line and some braces were
 * dropped by the extraction.
 */
878 void ia32_emit_cmp_suffix_node(const ir_node *node,
881 const ia32_attr_t *attr = get_ia32_attr_const(node);
883 pn_Cmp pnc = get_ia32_condcode(node);
885 pnc = determine_final_pnc(node, flags_pos, pnc);
886 if (attr->data.ins_permuted) {
887 if (pnc & ia32_pn_Cmp_float) {
888 pnc = get_negated_pnc(pnc, mode_F);
890 pnc = get_negated_pnc(pnc, mode_Iu);
894 ia32_emit_cmp_suffix(pnc);
898 * Emits an exception label for a given node.
900 static void ia32_emit_exc_label(const ir_node *node)
902 be_emit_string(be_gas_insn_label_prefix());
903 be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
/*
 * Returns the Proj of node with projection number proj, skipping mode_M
 * Projs.
 * NOTE(review): the 'src' declaration, the found-return and the final
 * return NULL were dropped by the extraction.
 */
907 * Returns the Proj with projection number proj and NOT mode_M
909 static ir_node *get_proj(const ir_node *node, long proj)
911 const ir_edge_t *edge;
914 assert(get_irn_mode(node) == mode_T && "expected mode_T node");
916 foreach_out_edge(node, edge) {
917 src = get_edge_src_irn(edge);
919 assert(is_Proj(src) && "Proj expected");
920 if (get_irn_mode(src) == mode_M)
923 if (get_Proj_proj(src) == proj)
929 static int can_be_fallthrough(const ir_node *node)
931 ir_node *target_block = get_cfop_target_block(node);
932 ir_node *block = get_nodes_block(node);
933 return get_prev_block_sched(target_block) == block;
/*
 * Emits the jump sequence for a conditional jump: swaps the Projs when the
 * true target is a fallthrough, then handles the float parity-flag cases
 * (jp/jnp, possibly via a local "1:" label) before the main jcc, and
 * finally either falls through to or jumps to the false target.
 * NOTE(review): fragmented -- the pnc case dispatch, several braces and
 * the proj swap's second assignment were dropped by the extraction.
 */
937 * Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
939 static void emit_ia32_Jcc(const ir_node *node)
941 int need_parity_label = 0;
942 const ir_node *proj_true;
943 const ir_node *proj_false;
944 const ir_node *block;
945 pn_Cmp pnc = get_ia32_condcode(node);
947 pnc = determine_final_pnc(node, 0, pnc);
950 proj_true = get_proj(node, pn_ia32_Jcc_true);
951 assert(proj_true && "Jcc without true Proj");
953 proj_false = get_proj(node, pn_ia32_Jcc_false);
954 assert(proj_false && "Jcc without false Proj");
956 block = get_nodes_block(node);
958 if (can_be_fallthrough(proj_true)) {
959 /* exchange both proj's so the second one can be omitted */
960 const ir_node *t = proj_true;
962 proj_true = proj_false;
964 if (pnc & ia32_pn_Cmp_float) {
965 pnc = get_negated_pnc(pnc, mode_F);
967 pnc = get_negated_pnc(pnc, mode_Iu);
971 if (pnc & ia32_pn_Cmp_float) {
972 /* Some floating point comparisons require a test of the parity flag,
973 * which indicates that the result is unordered */
976 ia32_emitf(proj_true, "\tjp %L\n");
981 ia32_emitf(proj_true, "\tjnp %L\n");
987 /* we need a local label if the false proj is a fallthrough
988 * as the falseblock might have no label emitted then */
989 if (can_be_fallthrough(proj_false)) {
990 need_parity_label = 1;
991 ia32_emitf(proj_false, "\tjp 1f\n");
993 ia32_emitf(proj_false, "\tjp %L\n");
1000 ia32_emitf(proj_true, "\tjp %L\n");
1008 ia32_emitf(proj_true, "\tj%P %L\n", pnc);
1011 if (need_parity_label) {
1012 ia32_emitf(NULL, "1:\n");
1015 /* the second Proj might be a fallthrough */
1016 if (can_be_fallthrough(proj_false)) {
1017 ia32_emitf(proj_false, "\t/* fallthrough to %L */\n");
1019 ia32_emitf(proj_false, "\tjmp %L\n");
/*
 * Emits a conditional move: when the output register already holds the
 * false value nothing needs moving; when it holds the true value the
 * operands are swapped and the condition negated; otherwise the false
 * value is moved into the output first, then the cmov is emitted.
 * NOTE(review): the register swap via 'tmp', some braces and the negation
 * guard on ins_permuted were dropped by the extraction.
 */
1023 static void emit_ia32_CMov(const ir_node *node)
1025 const ia32_attr_t *attr = get_ia32_attr_const(node);
1026 int ins_permuted = attr->data.ins_permuted;
1027 const arch_register_t *out = arch_get_irn_register(arch_env, node);
1028 pn_Cmp pnc = get_ia32_condcode(node);
1029 const arch_register_t *in_true;
1030 const arch_register_t *in_false;
1032 pnc = determine_final_pnc(node, n_ia32_CMov_eflags, pnc);
1034 in_true = arch_get_irn_register(arch_env,
1035 get_irn_n(node, n_ia32_CMov_val_true));
1036 in_false = arch_get_irn_register(arch_env,
1037 get_irn_n(node, n_ia32_CMov_val_false));
1039 /* should be same constraint fullfilled? */
1040 if (out == in_false) {
1041 /* yes -> nothing to do */
1042 } else if (out == in_true) {
1043 const arch_register_t *tmp;
1045 assert(get_ia32_op_type(node) == ia32_Normal);
1047 ins_permuted = !ins_permuted;
1054 ia32_emitf(node, "\tmovl %R, %R\n", in_false, out);
1058 if (pnc & ia32_pn_Cmp_float) {
1059 pnc = get_negated_pnc(pnc, mode_F);
1061 pnc = get_negated_pnc(pnc, mode_Iu);
1065 /* TODO: handling of Nans isn't correct yet */
1067 ia32_emitf(node, "\tcmov%P %AR, %#R\n", pnc, in_true, out);
1070 /*********************************************************
1073 * ___ _ __ ___ _| |_ _ _ _ _ __ ___ _ __ ___
1074 * / _ \ '_ ` _ \| | __| | | | | | '_ ` _ \| '_ \/ __|
1075 * | __/ | | | | | | |_ | | |_| | | | | | | |_) \__ \
1076 * \___|_| |_| |_|_|\__| | |\__,_|_| |_| |_| .__/|___/
1079 *********************************************************/
/* Data structures used by emit_ia32_SwitchJmp to build a jump table.
 * NOTE(review): branch_t's fields (target Proj and case value) were dropped
 * by the extraction; their use is visible in emit_ia32_SwitchJmp below. */
1081 /* jump table entry (target and corresponding number) */
1082 typedef struct _branch_t {
1087 /* jump table for switch generation */
1088 typedef struct _jmp_tbl_t {
1089 ir_node *defProj; /**< default target */
1090 long min_value; /**< smallest switch case */
1091 long max_value; /**< largest switch case */
1092 long num_branches; /**< number of jumps */
1093 char *label; /**< label of the jump table */
1094 branch_t *branches; /**< jump array */
/*
 * qsort comparator ordering branch_t entries by case value (ascending).
 * NOTE(review): the return statements were dropped by the extraction.
 */
1098 * Compare two variables of type branch_t. Used to sort all switch cases
1100 static int ia32_cmp_branch_t(const void *a, const void *b)
1102 branch_t *b1 = (branch_t *)a;
1103 branch_t *b2 = (branch_t *)b;
1105 if (b1->value <= b2->value)
/*
 * Emits a SwitchJmp: collects all case Projs, sorts them, emits a range
 * check (cmpl/ja to the default target) and, for more than one case, an
 * indirect jump through a .long jump table in the rodata section (default
 * target filling the holes); a single case degenerates to one jmp.
 * NOTE(review): declarations (tbl, proj, pnc, ...), loop increments, the
 * final free of the table and several braces were dropped by the extraction.
 */
1112 * Emits code for a SwitchJmp (creates a jump table if
1113 * possible otherwise a cmp-jmp cascade). Port from
1116 static void emit_ia32_SwitchJmp(const ir_node *node)
1118 unsigned long interval;
1124 const ir_edge_t *edge;
1126 /* fill the table structure */
1127 tbl.label = XMALLOCN(char, SNPRINTF_BUF_LEN);
1128 tbl.label = get_unique_label(tbl.label, SNPRINTF_BUF_LEN, ".TBL_");
1130 tbl.num_branches = get_irn_n_edges(node) - 1;
1131 tbl.branches = XMALLOCNZ(branch_t, tbl.num_branches);
1132 tbl.min_value = INT_MAX;
1133 tbl.max_value = INT_MIN;
1135 default_pn = get_ia32_condcode(node);
1137 /* go over all proj's and collect them */
1138 foreach_out_edge(node, edge) {
1139 proj = get_edge_src_irn(edge);
1140 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
1142 pnc = get_Proj_proj(proj);
1144 /* check for default proj */
1145 if (pnc == default_pn) {
1146 assert(tbl.defProj == NULL && "found two default Projs at SwitchJmp");
1149 tbl.min_value = pnc < tbl.min_value ? pnc : tbl.min_value;
1150 tbl.max_value = pnc > tbl.max_value ? pnc : tbl.max_value;
1152 /* create branch entry */
1153 tbl.branches[i].target = proj;
1154 tbl.branches[i].value = pnc;
1159 assert(i == tbl.num_branches);
1161 /* sort the branches by their number */
1162 qsort(tbl.branches, tbl.num_branches, sizeof(tbl.branches[0]), ia32_cmp_branch_t);
1164 /* two-complement's magic make this work without overflow */
1165 interval = tbl.max_value - tbl.min_value;
1167 /* emit the table */
1168 ia32_emitf(node, "\tcmpl $%u, %S0\n", interval);
1169 ia32_emitf(tbl.defProj, "\tja %L\n");
1171 if (tbl.num_branches > 1) {
1173 ia32_emitf(node, "\tjmp *%s(,%S0,4)\n", tbl.label);
1175 be_gas_emit_switch_section(GAS_SECTION_RODATA);
1176 ia32_emitf(NULL, "\t.align 4\n");
1177 ia32_emitf(NULL, "%s:\n", tbl.label);
1179 last_value = tbl.branches[0].value;
1180 for (i = 0; i != tbl.num_branches; ++i) {
1181 while (last_value != tbl.branches[i].value) {
/* fill holes in the case range with the default target */
1182 ia32_emitf(tbl.defProj, ".long %L\n");
1185 ia32_emitf(tbl.branches[i].target, ".long %L\n");
1188 be_gas_emit_switch_section(GAS_SECTION_TEXT);
1190 /* one jump is enough */
1191 ia32_emitf(tbl.branches[0].target, "\tjmp %L\n");
1201 * Emits code for a unconditional jump.
1203 static void emit_Jmp(const ir_node *node)
1207 /* for now, the code works for scheduled and non-schedules blocks */
1208 block = get_nodes_block(node);
1210 /* we have a block schedule */
1211 if (can_be_fallthrough(node)) {
1212 ia32_emitf(node, "\t/* fallthrough to %L */\n");
1214 ia32_emitf(node, "\tjmp %L\n");
/*
 * Emits an immediate operand: the symconst entity (if any) followed by the
 * numeric offset -- signed with forced sign when combined with a symconst,
 * hexadecimal otherwise.
 * NOTE(review): the leading '$' emit and closing braces were dropped by the
 * extraction.
 */
1218 static void emit_ia32_Immediate(const ir_node *node)
1220 const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
1223 if (attr->symconst != NULL) {
1226 ia32_emit_entity(attr->symconst, 0);
1228 if (attr->symconst == NULL || attr->offset != 0) {
1229 if (attr->symconst != NULL) {
1230 be_emit_irprintf("%+d", attr->offset);
1232 be_emit_irprintf("0x%X", attr->offset);
/*
 * Emits one inline-assembler operand ("%<modifier><num>"): parses optional
 * modifiers, reads the operand number, looks up the mapped register (output
 * or input; inputs may be immediates), and emits it -- possibly in an 8/16
 * bit form selected by the modifier, or wrapped for memory operands.
 * NOTE(review): the modifier switch skeleton, several returns and braces
 * were dropped by the extraction.
 */
1238 * Emit an inline assembler operand.
1240 * @param node the ia32_ASM node
1241 * @param s points to the operand (a %c)
1243 * @return pointer to the first char in s NOT in the current operand
1245 static const char* emit_asm_operand(const ir_node *node, const char *s)
1247 const ia32_attr_t *ia32_attr = get_ia32_attr_const(node);
1248 const ia32_asm_attr_t *attr = CONST_CAST_IA32_ATTR(ia32_asm_attr_t,
1250 const arch_register_t *reg;
1251 const ia32_asm_reg_t *asm_regs = attr->register_map;
1252 const ia32_asm_reg_t *asm_reg;
1253 const char *reg_name;
1262 /* parse modifiers */
1265 ir_fprintf(stderr, "Warning: asm text (%+F) ends with %\n", node);
1289 ir_fprintf(stderr, "Warning: asm text (%+F) contains unknown modifier "
1290 "'%c' for asm op\n", node, c);
1296 sscanf(s, "%d%n", &num, &p);
1298 ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",
1305 if (num < 0 || num >= ARR_LEN(asm_regs)) {
1306 ir_fprintf(stderr, "Error: Custom assembler references invalid "
1307 "input/output (%+F)\n", node);
1310 asm_reg = & asm_regs[num];
1311 assert(asm_reg->valid);
1314 if (asm_reg->use_input == 0) {
1315 reg = get_out_reg(node, asm_reg->inout_pos);
1317 ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
1319 /* might be an immediate value */
1320 if (is_ia32_Immediate(pred)) {
1321 emit_ia32_Immediate(pred);
1324 reg = get_in_reg(node, asm_reg->inout_pos);
1327 ir_fprintf(stderr, "Warning: no register assigned for %d asm op "
1328 "(%+F)\n", num, node);
1332 if (asm_reg->memory) {
1337 if (modifier != 0) {
1341 reg_name = ia32_get_mapped_reg_name(isa->regs_8bit, reg);
1344 reg_name = ia32_get_mapped_reg_name(isa->regs_8bit_high, reg);
1347 reg_name = ia32_get_mapped_reg_name(isa->regs_16bit, reg);
1350 panic("Invalid asm op modifier");
1352 be_emit_string(reg_name);
1354 emit_register(reg, asm_reg->mode);
1357 if (asm_reg->memory) {
/*
 * Emits an inline-assembly pseudo op between #APP/#NO_APP markers, passing
 * each "%..." operand reference through emit_asm_operand.
 * NOTE(review): the character-copy loop around the operand dispatch was
 * dropped by the extraction.
 */
1365 * Emits code for an ASM pseudo op.
1367 static void emit_ia32_Asm(const ir_node *node)
1369 const void *gen_attr = get_irn_generic_attr_const(node);
1370 const ia32_asm_attr_t *attr
1371 = CONST_CAST_IA32_ATTR(ia32_asm_attr_t, gen_attr);
1372 ident *asm_text = attr->asm_text;
1373 const char *s = get_id_str(asm_text);
1375 ia32_emitf(node, "#APP\t\n");
1382 s = emit_asm_operand(node, s);
1388 ia32_emitf(NULL, "\n#NO_APP\n");
1391 /**********************************
1394 * | | ___ _ __ _ _| |_) |
1395 * | | / _ \| '_ \| | | | _ <
1396 * | |___| (_) | |_) | |_| | |_) |
1397 * \_____\___/| .__/ \__, |____/
1400 **********************************/
/*
 * Emits movsb/movsw so the remaining copy count becomes divisible by 4
 * (rep movsd then handles the rest).
 * NOTE(review): the switch on size & 3 was dropped by the extraction.
 */
1403 * Emit movsb/w instructions to make mov count divideable by 4
1405 static void emit_CopyB_prolog(unsigned size)
1408 ia32_emitf(NULL, "\tmovsb\n");
1410 ia32_emitf(NULL, "\tmovsw\n");
/* Emits a memcopy as prolog (alignment) plus rep movsd. */
1414 * Emit rep movsd instruction for memcopy.
1416 static void emit_ia32_CopyB(const ir_node *node)
1418 unsigned size = get_ia32_copyb_size(node);
1420 emit_CopyB_prolog(size);
1421 ia32_emitf(node, "\trep movsd\n");
/*
 * Emits an unrolled memcopy (one movsd per 4 bytes after the prolog).
 * NOTE(review): the size /= 4 and the loop around the movsd emit were
 * dropped by the extraction.
 */
1425 * Emits unrolled memcopy.
1427 static void emit_ia32_CopyB_i(const ir_node *node)
1429 unsigned size = get_ia32_copyb_size(node);
1431 emit_CopyB_prolog(size);
1435 ia32_emitf(NULL, "\tmovsd\n");
1441 /***************************
1445 * | | / _ \| '_ \ \ / /
1446 * | |___| (_) | | | \ V /
1447 * \_____\___/|_| |_|\_/
1449 ***************************/
/*
 * Emits a cvt* instruction for (int,FP), (FP,int) and (FP,FP) conversions,
 * selecting the cvt suffix from the conversion kind and the ls-mode width.
 * NOTE(review): the 'conv' assignments in each branch were dropped by the
 * extraction; only the branch skeleton and the final emit are visible.
 */
1452 * Emit code for conversions (I, FP), (FP, I) and (FP, FP).
1454 static void emit_ia32_Conv_with_FP(const ir_node *node)
1456 ir_mode *ls_mode = get_ia32_ls_mode(node);
1457 int ls_bits = get_mode_size_bits(ls_mode);
1460 if (is_ia32_Conv_I2FP(node)) {
1461 if (ls_bits == 32) {
1466 } else if (is_ia32_Conv_FP2I(node)) {
1467 if (ls_bits == 32) {
1473 assert(is_ia32_Conv_FP2FP(node));
1474 if (ls_bits == 32) {
1481 ia32_emitf(node, "\tcvt%s %AS3, %D0\n", conv);
1484 static void emit_ia32_Conv_I2FP(const ir_node *node)
1486 emit_ia32_Conv_with_FP(node);
1489 static void emit_ia32_Conv_FP2I(const ir_node *node)
1491 emit_ia32_Conv_with_FP(node);
1494 static void emit_ia32_Conv_FP2FP(const ir_node *node)
1496 emit_ia32_Conv_with_FP(node);
/*
 * Emits an integer widening conversion: the one-byte cwtl opcode when both
 * operand and result live in EAX and the source is signed 16-bit, otherwise
 * a movs/movz with the appropriate size suffixes.
 * NOTE(review): the signed_mode condition joining the visible '&&' chain
 * and some braces were dropped by the extraction.
 */
1500 * Emits code for an Int conversion.
1502 static void emit_ia32_Conv_I2I(const ir_node *node)
1504 ir_mode *smaller_mode = get_ia32_ls_mode(node);
1505 int smaller_bits = get_mode_size_bits(smaller_mode);
1506 int signed_mode = mode_is_signed(smaller_mode);
1508 assert(!mode_is_float(smaller_mode));
1509 assert(smaller_bits == 8 || smaller_bits == 16);
1512 smaller_bits == 16 &&
1513 &ia32_gp_regs[REG_EAX] == get_out_reg(node, 0) &&
1514 &ia32_gp_regs[REG_EAX] == arch_get_irn_register(arch_env, get_irn_n(node, n_ia32_unary_op))) {
1515 /* argument and result are both in EAX and signedness is ok: use the
1516 * smaller cwtl opcode */
1517 ia32_emitf(node, "\tcwtl\n");
1519 const char *sign_suffix = signed_mode ? "s" : "z";
1520 ia32_emitf(node, "\tmov%s%Ml %#AS3, %D0\n", sign_suffix);
1525 /*******************************************
1528 * | |__ ___ _ __ ___ __| | ___ ___
1529 * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __|
1530 * | |_) | __/ | | | (_) | (_| | __/\__ \
1531 * |_.__/ \___|_| |_|\___/ \__,_|\___||___/
1533 *******************************************/
/*
 * Emits a call: direct (entity label, no PIC adjust) when the call target
 * entity is known, otherwise indirect through the pointer input register.
 * NOTE(review): the if/else around the two target forms and the '*' for the
 * indirect form were dropped by the extraction.
 */
1536 * Emits a backend call
1538 static void emit_be_Call(const ir_node *node)
1540 ir_entity *ent = be_Call_get_entity(node);
1542 be_emit_cstring("\tcall ");
1544 ia32_emit_entity(ent, 1);
1546 const arch_register_t *reg = get_in_reg(node, be_pos_Call_ptr);
1548 emit_register(reg, NULL);
1550 be_emit_finish_line_gas(node);
/*
 * Emits a stack-pointer adjustment: subl for positive offsets, addl for
 * negative ones.
 * NOTE(review): the offs == 0 early-out and the if/else skeleton were
 * dropped by the extraction.
 */
1554 * Emits code to increase stack pointer.
1556 static void emit_be_IncSP(const ir_node *node)
1558 int offs = be_get_IncSP_offset(node);
1564 ia32_emitf(node, "\tsubl $%u, %D0\n", offs);
1566 ia32_emitf(node, "\taddl $%u, %D0\n", -offs);
/*
 * Emits the register-to-register move for Copy/CopyKeep nodes: no code for
 * self-copies/unknown registers, nothing for virtual-fp registers, movsd
 * for mode_E values and movl otherwise.
 * NOTE(review): the in == out early-out and return statements were dropped
 * by the extraction.
 */
1571 * Emits code for Copy/CopyKeep.
1573 static void Copy_emitter(const ir_node *node, const ir_node *op)
1575 const arch_register_t *in = arch_get_irn_register(arch_env, op);
1576 const arch_register_t *out = arch_get_irn_register(arch_env, node);
1581 if (is_unknown_reg(in))
1583 /* copies of vf nodes aren't real... */
1584 if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
1587 if (get_irn_mode(node) == mode_E) {
1588 ia32_emitf(node, "\tmovsd %R, %R\n", in, out);
1590 ia32_emitf(node, "\tmovl %R, %R\n", in, out);
/** Emits code for a be_Copy node: copies its operand's register. */
static void emit_be_Copy(const ir_node *node)
	Copy_emitter(node, be_get_Copy_op(node));
/** Emits code for a be_CopyKeep node: copies its operand's register. */
static void emit_be_CopyKeep(const ir_node *node)
	Copy_emitter(node, be_get_CopyKeep_op(node));
/**
 * Emits code for exchange.
 * Swaps the contents of the two input registers of a be_Perm; both inputs
 * must belong to the same register class.
 */
static void emit_be_Perm(const ir_node *node)
	const arch_register_t *in0, *in1;
	const arch_register_class_t *cls0, *cls1;
	in0 = arch_get_irn_register(arch_env, get_irn_n(node, 0));
	in1 = arch_get_irn_register(arch_env, get_irn_n(node, 1));
	cls0 = arch_register_get_class(in0);
	cls1 = arch_register_get_class(in1);
	assert(cls0 == cls1 && "Register class mismatch at Perm");
	if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
		/* gp registers: a single xchg performs the swap */
		ia32_emitf(node, "\txchg %R, %R\n", in1, in0);
	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_xmm]) {
		/* xmm registers have no exchange instruction: swap via the
		 * classic triple-xor trick (a^=b; b^=a; a^=b) */
		ia32_emitf(NULL, "\txorpd %R, %R\n", in1, in0);
		ia32_emitf(NULL, "\txorpd %R, %R\n", in0, in1);
		ia32_emitf(node, "\txorpd %R, %R\n", in1, in0);
	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_vfp]) {
		/* (body elided in this view) virtual fp registers */
	} else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
		/* (body elided in this view) x87 stack registers */
		panic("unexpected register class in be_Perm (%+F)", node);
/**
 * Emits code for Constant loading.
 * Moves the node's immediate (%I) into the result register (%D0).
 */
static void emit_ia32_Const(const ir_node *node)
	ia32_emitf(node, "\tmovl %I, %D0\n");
/**
 * Emits code to load the TLS base.
 * Reads it from %gs:0 into the result register.
 */
static void emit_ia32_LdTls(const ir_node *node)
	ia32_emitf(node, "\tmovl %%gs:0, %D0\n");
/* helper function for emit_ia32_Minus64Bit: movl src -> dst */
static void emit_mov(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
	ia32_emitf(node, "\tmovl %R, %R\n", src, dst);
/* helper function for emit_ia32_Minus64Bit: negl reg (two's complement negate) */
static void emit_neg(const ir_node* node, const arch_register_t *reg)
	ia32_emitf(node, "\tnegl %R\n", reg);
/* helper function for emit_ia32_Minus64Bit: sbbl $0, reg (subtract the borrow) */
static void emit_sbb0(const ir_node* node, const arch_register_t *reg)
	ia32_emitf(node, "\tsbbl $0, %R\n", reg);
/* helper function for emit_ia32_Minus64Bit: sbbl src, dst (subtract with borrow) */
static void emit_sbb(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
	ia32_emitf(node, "\tsbbl %R, %R\n", src, dst);
/* helper function for emit_ia32_Minus64Bit: xchgl src, dst (swap two registers) */
static void emit_xchg(const ir_node* node, const arch_register_t *src, const arch_register_t *dst)
	ia32_emitf(node, "\txchgl %R, %R\n", src, dst);
/* helper function for emit_ia32_Minus64Bit: xorl reg, reg (clear a register) */
static void emit_zero(const ir_node* node, const arch_register_t *reg)
	ia32_emitf(node, "\txorl %R, %R\n", reg, reg);
/**
 * Emits code for a 64bit negation.
 * First moves the 64bit operand (in_lo/in_hi) into the result registers,
 * choosing a move sequence that never clobbers an input before it is read,
 * then negates the 64bit value.
 * NOTE(review): the control flow joining the move cases with the two
 * negation sequences below is elided in this view.
 */
static void emit_ia32_Minus64Bit(const ir_node *node)
	const arch_register_t *in_lo = get_in_reg(node, 0);
	const arch_register_t *in_hi = get_in_reg(node, 1);
	const arch_register_t *out_lo = get_out_reg(node, 0);
	const arch_register_t *out_hi = get_out_reg(node, 1);
	/* case analysis over how inputs map onto outputs:
	 * a = in_lo, b = in_hi, c = out_lo, d = out_hi */
	if (out_lo == in_lo) {
		if (out_hi != in_hi) {
			/* a -> a, b -> d */
		/* a -> a, b -> b */
	} else if (out_lo == in_hi) {
		if (out_hi == in_lo) {
			/* a -> b, b -> a: a swap suffices */
			emit_xchg(node, in_lo, in_hi);
		/* a -> b, b -> d: move hi first so in_hi isn't overwritten */
		emit_mov(node, in_hi, out_hi);
		emit_mov(node, in_lo, out_lo);
	if (out_hi == in_lo) {
		/* a -> c, b -> a */
		emit_mov(node, in_lo, out_lo);
	} else if (out_hi == in_hi) {
		/* a -> c, b -> b */
		emit_mov(node, in_lo, out_lo);
	/* a -> c, b -> d */
	emit_mov(node, in_lo, out_lo);
	/* negate sequence 1: neg hi, neg lo, then subtract the borrow of
	 * the low negation from hi */
	emit_neg( node, out_hi);
	emit_neg( node, out_lo);
	emit_sbb0(node, out_hi);
	/* negate sequence 2 (0 - x form): zero hi, neg lo, sbb in_hi from hi */
	emit_zero(node, out_hi);
	emit_neg( node, out_lo);
	emit_sbb( node, in_hi, out_hi);
/**
 * Emits the call/pop sequence that materializes the current instruction
 * pointer (the PIC base) in the result register: call the next address,
 * then pop the pushed return address.
 */
static void emit_ia32_GetEIP(const ir_node *node)
	ia32_emitf(node, "\tcall %s\n", pic_base_label);
	ia32_emitf(NULL, "%s:\n", pic_base_label);
	ia32_emitf(node, "\tpopl %D0\n");
/**
 * Emits code for a function return. Uses "ret $pop" when the callee must
 * pop arguments off the stack (or an explicit pop was requested), plain
 * "ret" otherwise.
 */
static void emit_be_Return(const ir_node *node)
	unsigned pop = be_Return_get_pop(node);
	if (pop > 0 || be_Return_get_emit_pop(node)) {
	ia32_emitf(node, "\tret $%u\n", pop);
	ia32_emitf(node, "\tret\n");
1759 static void emit_Nothing(const ir_node *node)
1765 /***********************************************************************************
1768 * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
1769 * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
1770 * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
1771 * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
1773 ***********************************************************************************/
/**
 * Enters the emitter functions for handled nodes into the generic
 * pointer of an opcode.
 */
static void ia32_register_emitters(void)
/* helper macros: wire an opcode's generic function pointer to its emitter */
#define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
#define IA32_EMIT(a) IA32_EMIT2(a,a)
#define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
#define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
#define BE_EMIT(a) op_be_##a->ops.generic = (op_func)emit_be_##a
#define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
	/* first clear the generic function pointer for all ops */
	clear_irp_opcodes_generic_func();
	/* register all emitter functions defined in spec */
	ia32_register_spec_emitters();
	/* other ia32 emitter functions */
	IA32_EMIT2(Conv_I2I8Bit, Conv_I2I);
	IA32_EMIT(Conv_FP2FP);
	IA32_EMIT(Conv_FP2I);
	IA32_EMIT(Conv_I2FP);
	IA32_EMIT(Conv_I2I);
	IA32_EMIT(Minus64Bit);
	IA32_EMIT(SwitchJmp);
	/* benode emitter */
/** Type of a node emitter function as stored in op->ops.generic. */
typedef void (*emit_func_ptr) (const ir_node *);

/**
 * Assign and emit an exception label if the current instruction can fail.
 * Also emits a comment naming the block control flows to on an exception.
 */
static void ia32_assign_exc_label(ir_node *node)
	/* assign a new ID to the instruction */
	set_ia32_exc_label_id(node, ++exc_label_id);
	/* emit the label itself, then a comment with the handler block */
	ia32_emit_exc_label(node);
	be_emit_pad_comment();
	be_emit_cstring("/* exception to Block ");
	ia32_emit_cfop_target(node);
	be_emit_cstring(" */\n");
	be_emit_write_line();
/**
 * Emits code for a node.
 * Dispatches through the opcode's generic function pointer (set up by
 * ia32_register_emitters); ia32 nodes may additionally get an exception
 * label and spill/reload/remat markers first.
 */
static void ia32_emit_node(ir_node *node)
	ir_op *op = get_irn_op(node);
	DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
	if (is_ia32_irn(node)) {
		if (get_ia32_exc_label(node)) {
			/* emit the exception label of this instruction */
			ia32_assign_exc_label(node);
		/* optionally mark spills/reloads/remats with distinctive
		 * self-xchg no-ops so they can be spotted in a disassembly
		 * (enabled via the mark_spill_reload option) */
		if (mark_spill_reload) {
			if (is_ia32_is_spill(node)) {
				ia32_emitf(NULL, "\txchg %ebx, %ebx /* spill mark */\n");
			if (is_ia32_is_reload(node)) {
				ia32_emitf(NULL, "\txchg %edx, %edx /* reload mark */\n");
			if (is_ia32_is_remat(node)) {
				ia32_emitf(NULL, "\txchg %ecx, %ecx /* remat mark */\n");
	/* dispatch to the registered emitter, if any */
	if (op->ops.generic) {
		emit_func_ptr func = (emit_func_ptr) op->ops.generic;
		/* propagate the node's debug info before emitting its code */
		be_dbg_set_dbg_info(get_irn_dbg_info(node));
	/* no handler registered: report instead of silently dropping code */
	ir_fprintf(stderr, "Error: No emit handler for node %+F (%+G, graph %+F)\n", node, node, current_ir_graph);
/**
 * Emits gas alignment directives.
 *
 * @param align  align to 2^align bytes
 * @param skip   insert at most skip padding bytes
 */
static void ia32_emit_alignment(unsigned align, unsigned skip)
	ia32_emitf(NULL, "\t.p2align %u,,%u\n", align, skip);
/**
 * Emits gas alignment directives for labels depending on the cpu
 * architecture (taken from ia32_cg_config).
 */
static void ia32_emit_align_label(void)
	unsigned align = ia32_cg_config.label_alignment;
	unsigned maximum_skip = ia32_cg_config.label_alignment_max_skip;
	ia32_emit_alignment(align, maximum_skip);
/**
 * Test whether a block should be aligned.
 * For cpus in the P4/Athlon class it is useful to align jump labels to
 * 16 bytes. However we should only do that if the alignment nops before the
 * label aren't executed more often than we have jumps to the label.
 *
 * @return non-zero when the block label should be aligned
 */
static int should_align_block(const ir_node *block)
	static const double DELTA = .0001;
	ir_exec_freq *exec_freq = cg->birg->exec_freq;
	ir_node *prev = get_prev_block_sched(block);
	double prev_freq = 0; /**< execfreq of the fallthrough block */
	double jmp_freq = 0; /**< execfreq of all non-fallthrough blocks */
	/* without execution frequencies or with alignment disabled we cannot
	 * make a sensible decision */
	if (exec_freq == NULL)
	if (ia32_cg_config.label_alignment_factor <= 0)
	/* ignore blocks that are (almost) never executed */
	block_freq = get_block_execfreq(exec_freq, block);
	if (block_freq < DELTA)
	/* split the predecessor frequencies into fallthrough vs. jump */
	n_cfgpreds = get_Block_n_cfgpreds(block);
	for(i = 0; i < n_cfgpreds; ++i) {
		const ir_node *pred = get_Block_cfgpred_block(block, i);
		double pred_freq = get_block_execfreq(exec_freq, pred);
		prev_freq += pred_freq;
		jmp_freq += pred_freq;
	/* never falls through but is jumped to: aligning is free */
	if (prev_freq < DELTA && !(jmp_freq < DELTA))
	/* align when jumps to the label dominate fallthrough executions */
	jmp_freq /= prev_freq;
	return jmp_freq > ia32_cg_config.label_alignment_factor;
/**
 * Emit the block header for a block: optional alignment, the block label
 * (if needed) and a comment listing predecessor blocks and the execution
 * frequency.
 *
 * @param block  the block
 */
static void ia32_emit_block_header(ir_node *block)
	ir_graph *irg = current_ir_graph;
	int need_label = block_needs_label(block);
	ir_exec_freq *exec_freq = cg->birg->exec_freq;
	/* start and end block produce no code */
	if (block == get_irg_end_block(irg) || block == get_irg_start_block(irg))
	if (ia32_cg_config.label_alignment > 0) {
		/* align the current block if:
		 * a) if should be aligned due to its execution frequency
		 * b) there is no fall-through here */
		if (should_align_block(block)) {
			ia32_emit_align_label();
		/* if the predecessor block has no fall-through,
		 * we can always align the label. */
		int has_fallthrough = 0;
		for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
			ir_node *cfg_pred = get_Block_cfgpred(block, i);
			if (can_be_fallthrough(cfg_pred)) {
				has_fallthrough = 1;
		if (!has_fallthrough)
			ia32_emit_align_label();
	/* emit the block label when it is a jump target */
	if (need_label || has_Block_label(block)) {
		ia32_emit_block_name(block);
	be_emit_pad_comment();
	be_emit_cstring(" /* ");
	be_emit_cstring("\t/* ");
	ia32_emit_block_name(block);
	be_emit_cstring(": ");
	be_emit_cstring("preds:");
	/* emit list of pred blocks in comment */
	arity = get_irn_arity(block);
	for (i = 0; i < arity; ++i) {
		ir_node *predblock = get_Block_cfgpred_block(block, i);
		be_emit_irprintf(" %d", get_irn_node_nr(predblock));
	/* append the block's execution frequency when available */
	if (exec_freq != NULL) {
		be_emit_irprintf(" freq: %f",
		get_block_execfreq(exec_freq, block));
	be_emit_cstring(" */\n");
	be_emit_write_line();
/**
 * Walks over the nodes in a block connected by scheduling edges
 * and emits code for each node.
 */
static void ia32_gen_block(ir_node *block)
	ia32_emit_block_header(block);
	/* emit the contents of the block */
	be_dbg_set_dbg_info(get_irn_dbg_info(block));
	sched_foreach(block, node) {
		ia32_emit_node(node);
/** An entry of the exception table built while emitting a routine. */
typedef struct exc_entry {
	ir_node *exc_instr; /**< The instruction that can issue an exception. */
	ir_node *block;     /**< The block to call then. */
/**
 * Sets labels for control flow nodes (jump target).
 * Links control predecessors to their destination blocks and collects
 * instructions with exception labels into the exc_list.
 *
 * @param block  the current block (walker callback)
 * @param data   points to the exc_entry flex array to append to
 */
static void ia32_gen_labels(ir_node *block, void *data)
	exc_entry **exc_list = data;
	for (n = get_Block_n_cfgpreds(block) - 1; n >= 0; --n) {
		pred = get_Block_cfgpred(block, n);
		/* let the control flow predecessor point to its target block */
		set_irn_link(pred, block);
		/* record potentially trapping ia32 instructions for the
		 * exception table */
		pred = skip_Proj(pred);
		if (is_ia32_irn(pred) && get_ia32_exc_label(pred)) {
			ARR_APP1(exc_entry, *exc_list, e);
			set_irn_link(pred, block);
/**
 * Compare two exception_entries by their exception label id (qsort
 * comparator; ids ascend with ascending instruction addresses).
 */
static int cmp_exc_entry(const void *a, const void *b)
	const exc_entry *ea = a;
	const exc_entry *eb = b;
	if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
/**
 * Main driver. Emits the code for one routine.
 * Sets up the emitter state, emits prolog, all blocks in schedule order,
 * epilog and finally the sorted exception table.
 *
 * @param ia32_cg  the ia32 code generator of the routine
 * @param irg      the routine's graph
 */
void ia32_gen_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
	ir_entity *entity = get_irg_entity(irg);
	exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
	/* cache backend environment in the file-local emitter state */
	isa = (const ia32_isa_t*) cg->arch_env;
	arch_env = cg->arch_env;
	do_pic = cg->birg->main_env->options->pic;
	ia32_register_emitters();
	/* a fresh PIC base label per routine */
	get_unique_label(pic_base_label, sizeof(pic_base_label), ".PIC_BASE");
	be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
	be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
	/* we use links to point to target blocks */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
	/* initialize next block links */
	n = ARR_LEN(cg->blk_sched);
	for (i = 0; i < n; ++i) {
		ir_node *block = cg->blk_sched[i];
		ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
		set_irn_link(block, prev);
	/* emit all blocks in schedule order */
	for (i = 0; i < n; ++i) {
		ir_node *block = cg->blk_sched[i];
		ia32_gen_block(block);
	be_gas_emit_function_epilog(entity);
	be_dbg_method_end();
	be_emit_write_line();
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
	/* Sort the exception table using the exception label id's.
	   Those are ascending with ascending addresses. */
	qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
	/* emit the exception table: pairs of (instruction label, handler block) */
	for (i = 0; i < ARR_LEN(exc_list); ++i) {
		be_emit_cstring("\t.long ");
		ia32_emit_exc_label(exc_list[i].exc_instr);
		be_emit_cstring("\t.long ");
		ia32_emit_block_name(exc_list[i].block);
	DEL_ARR_F(exc_list);
/** Command line options of the ia32 emitter (registered under be.ia32). */
static const lc_opt_table_entry_t ia32_emitter_options[] = {
	LC_OPT_ENT_BOOL("mark_spill_reload", "mark spills and reloads with ud opcodes", &mark_spill_reload),
/**
 * Initializes the ia32 emitter: registers its command line options in the
 * be.ia32 option group and sets up the debug module.
 */
void ia32_init_emitter(void)
	lc_opt_entry_t *be_grp;
	lc_opt_entry_t *ia32_grp;
	be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
	ia32_grp = lc_opt_get_grp(be_grp, "ia32");
	lc_opt_add_table(ia32_grp, ia32_emitter_options);
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.emitter");