2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief emit assembler for a backend graph
23 * @author Hannes Rapp, Matthias Braun
29 #include "bitfiddle.h"
41 #include "raw_bitset.h"
46 #include "beblocksched.h"
52 #include "bepeephole.h"
54 #include "sparc_emitter.h"
55 #include "gen_sparc_emitter.h"
56 #include "sparc_nodes_attr.h"
57 #include "sparc_new_nodes.h"
58 #include "gen_sparc_regalloc_if.h"
/* NOTE(review): this extract appears to be a line-sampled copy of the SPARC
 * emitter; the leading number on each line is the original file line number
 * and some lines are missing in between. */
60 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* height information of the current graph; used to decide whether a node may
 * be moved before a branch when filling delay slots */
62 static ir_heights_t *heights;
63 static const ir_node *delay_slot_filler; /**< this node has been chosen to fill
64 the next delay slot */
66 static void sparc_emit_node(const ir_node *node);
/* true while the instruction occupying a delay slot is being emitted
 * (adds extra indentation, see sparc_emit_indent) */
67 static bool emitting_delay_slot;
70 * indent before instruction. (Adds additional indentation when emitting
73 static void sparc_emit_indent(void)
76 if (emitting_delay_slot)
/*
 * Emit an immediate operand. If no entity is attached, the (encodeable)
 * 32-bit value is printed directly; otherwise a %lo() relocation — or
 * %tle_lox10() for thread-local entities — is emitted around the entity
 * name, followed by the constant offset if non-zero.
 * NOTE(review): the entity == NULL test and closing braces are elided in
 * this extract — confirm against the full source.
 */
80 static void sparc_emit_immediate(ir_node const *const node)
82 const sparc_attr_t *attr = get_sparc_attr_const(node);
83 ir_entity *entity = attr->immediate_value_entity;
86 int32_t value = attr->immediate_value;
87 assert(sparc_is_value_imm_encodeable(value));
88 be_emit_irprintf("%d", value);
/* thread-local entities need the TLS relocation operator */
90 if (get_entity_owner(entity) == get_tls_type()) {
91 be_emit_cstring("%tle_lox10(");
93 be_emit_cstring("%lo(");
95 be_gas_emit_entity(entity);
/* append the constant part of the address, e.g. "entity+4" */
96 if (attr->immediate_value != 0) {
97 be_emit_irprintf("%+d", attr->immediate_value);
/*
 * Emit the upper 22 bits of an immediate: %hi(0xVALUE) for plain constants,
 * %hi(entity) — or %tle_hix22(entity) for thread-local entities — otherwise.
 */
103 static void sparc_emit_high_immediate(ir_node const *node)
105 const sparc_attr_t *attr = get_sparc_attr_const(node);
106 ir_entity *entity = attr->immediate_value_entity;
108 if (entity == NULL) {
109 uint32_t value = (uint32_t) attr->immediate_value;
110 be_emit_irprintf("%%hi(0x%X)", value);
112 if (get_entity_owner(entity) == get_tls_type()) {
113 be_emit_cstring("%tle_hix22(");
115 be_emit_cstring("%hi(");
117 be_gas_emit_entity(entity);
118 if (attr->immediate_value != 0) {
119 be_emit_irprintf("%+d", attr->immediate_value);
/* Emit the name of the register assigned to input operand `pos`. */
125 static void sparc_emit_source_register(ir_node const *node, int const pos)
127 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
129 be_emit_string(arch_register_get_name(reg));
/* Emit the name of the register assigned to output `pos`. */
132 static void sparc_emit_dest_register(ir_node const *const node, int const pos)
134 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
136 be_emit_string(arch_register_get_name(reg));
/*
 * Emit the offset part of a load/store address: either the register at
 * `offset_node_pos` (reg+reg addressing), a frame-entity offset, or the
 * node's immediate value/entity.
 */
142 static void sparc_emit_offset(const ir_node *node, int offset_node_pos)
144 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
146 if (attr->is_reg_reg) {
/* reg+reg addressing excludes frame entities and immediates */
147 assert(!attr->is_frame_entity);
148 assert(attr->base.immediate_value == 0);
149 assert(attr->base.immediate_value_entity == NULL);
151 sparc_emit_source_register(node, offset_node_pos);
152 } else if (attr->is_frame_entity) {
153 int32_t offset = attr->base.immediate_value;
155 assert(sparc_is_value_imm_encodeable(offset));
156 be_emit_irprintf("%+ld", offset);
158 } else if (attr->base.immediate_value != 0
159 || attr->base.immediate_value_entity != NULL) {
161 sparc_emit_immediate(node);
/*
 * Emit the load-mode suffix ("sb"/"ub", "sh"/"uh", 'd', 'q') derived from
 * size and signedness of the node's load/store mode.
 * NOTE(review): the 32-bit case is elided in this extract (presumably no
 * suffix) — confirm against the full source.
 */
168 static void sparc_emit_load_mode(ir_node const *const node)
170 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
171 ir_mode *mode = attr->load_store_mode;
172 int bits = get_mode_size_bits(mode);
173 bool is_signed = mode_is_signed(mode);
176 case 8: be_emit_string(is_signed ? "sb" : "ub"); break;
177 case 16: be_emit_string(is_signed ? "sh" : "uh"); break;
179 case 64: be_emit_char('d'); break;
180 case 128: be_emit_char('q'); break;
181 default: panic("invalid load/store mode %+F", mode);
186 * Emit store mode char
188 static void sparc_emit_store_mode(ir_node const *const node)
190 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
191 ir_mode *mode = attr->load_store_mode;
192 int bits = get_mode_size_bits(mode);
195 case 8: be_emit_char('b'); break;
196 case 16: be_emit_char('h'); break;
198 case 64: be_emit_char('d'); break;
199 case 128: be_emit_char('q'); break;
200 default: panic("invalid load/store mode %+F", mode);
/* Emit the FP precision suffix: 's' (single), 'd' (double), 'q' (quad). */
204 static void emit_fp_suffix(const ir_mode *mode)
206 assert(mode_is_float(mode));
207 switch (get_mode_size_bits(mode)) {
208 case 32: be_emit_char('s'); break;
209 case 64: be_emit_char('d'); break;
210 case 128: be_emit_char('q'); break;
211 default: panic("invalid FP mode");
/* The target block of a jump is stored in the node's link field by
 * sparc_gen_labels (see below). */
215 static ir_node *get_jump_target(const ir_node *jump)
217 return (ir_node*)get_irn_link(jump);
221 * Returns the target label for a control flow node.
223 static void sparc_emit_cfop_target(const ir_node *node)
225 ir_node *block = get_jump_target(node);
226 be_gas_emit_block_name(block);
230 * returns true if a sparc_call calls a register and not an immediate
232 static bool is_sparc_reg_call(const ir_node *node)
234 const sparc_attr_t *attr = get_sparc_attr_const(node);
235 return attr->immediate_value_entity == NULL;
/* For register calls the destination address is the last input. */
238 static int get_sparc_Call_dest_addr_pos(const ir_node *node)
240 assert(is_sparc_reg_call(node));
241 return get_irn_arity(node)-1;
/* An unconditional branch is a fallthrough when its target is the block
 * scheduled directly after the current one (block links hold the schedule). */
244 static bool ba_is_fallthrough(const ir_node *node)
246 ir_node *block = get_nodes_block(node);
247 ir_node *next_block = (ir_node*)get_irn_link(block);
248 return get_irn_link(node) == next_block;
/* Returns true if the node produces no actual assembler instruction. */
251 static bool is_no_instruction(const ir_node *node)
253 /* copies are nops if src_reg == dest_reg */
254 if (be_is_Copy(node) || be_is_CopyKeep(node)) {
255 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
256 const arch_register_t *dest_reg = arch_get_irn_register_out(node, 0);
258 if (src_reg == dest_reg)
261 if (be_is_IncSP(node) && be_get_IncSP_offset(node) == 0)
263 /* Ba is not emitted if it is a simple fallthrough */
264 if (is_sparc_Ba(node) && ba_is_fallthrough(node))
267 return be_is_Keep(node) || be_is_Start(node) || is_Phi(node);
/* Returns true if the emitted instruction has a delay slot to fill
 * (a fallthrough Ba emits nothing, hence no slot). */
270 static bool has_delay_slot(const ir_node *node)
272 if (is_sparc_Ba(node)) {
273 return !ba_is_fallthrough(node);
276 return arch_get_irn_flags(node) & sparc_arch_irn_flag_has_delay_slot;
279 /** returns true if the emitter for this sparc node can produce more than one
280 * actual sparc instruction.
281 * Usually it is a bad sign if we have to add instructions here. We should
282 * rather try to get them lowered down. So we can actually put them into
283 * delay slots and make them more accessible to the scheduler.
285 static bool emits_multiple_instructions(const ir_node *node)
287 if (has_delay_slot(node))
290 if (is_sparc_Call(node)) {
/* aggregate-return calls need an extra "unimp" marker word, see
 * emit_sparc_Call below */
291 return arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return;
294 return is_sparc_SMulh(node) || is_sparc_UMulh(node)
295 || is_sparc_SDiv(node) || is_sparc_UDiv(node)
296 || be_is_MemPerm(node) || be_is_Perm(node)
297 || is_sparc_SubSP(node);
/* Returns true if any input of `node` is assigned register `reg`. */
300 static bool uses_reg(const ir_node *node, const arch_register_t *reg)
302 int arity = get_irn_arity(node);
303 for (int i = 0; i < arity; ++i) {
304 const arch_register_t *in_reg = arch_get_irn_register_in(node, i);
/* Returns true if any output of `node` is assigned register `reg`. */
311 static bool writes_reg(const ir_node *node, const arch_register_t *reg)
313 unsigned n_outs = arch_get_irn_n_outs(node);
314 for (unsigned o = 0; o < n_outs; ++o) {
315 const arch_register_t *out_reg = arch_get_irn_register_out(node, o);
/* Returns true if `node` may be placed into the delay slot of `to`:
 * it must be movable before `to` and must not feed or clobber values
 * that `to` consumes at branch time. */
322 static bool can_move_into_delayslot(const ir_node *node, const ir_node *to)
324 if (!be_can_move_before(heights, node, to))
327 if (is_sparc_Call(to)) {
329 /* all deps are used after the delay slot, so we're fine */
330 if (!is_sparc_reg_call(to))
/* the candidate must not compute the call's destination address */
333 check = get_irn_n(to, get_sparc_Call_dest_addr_pos(to));
334 if (skip_Proj(check) == node)
337 /* the Call also destroys the value of %o7, but since this is
338 * currently marked as ignore register in the backend, it
339 * should never be used by the instruction in the delay slot. */
340 if (uses_reg(node, &sparc_registers[REG_O7]))
343 } else if (is_sparc_Return(to)) {
344 /* return uses the value of %o7, all other values are not
345 * immediately used */
346 if (writes_reg(node, &sparc_registers[REG_O7]))
350 /* the node must not use our computed values */
351 int arity = get_irn_arity(to);
352 for (int i = 0; i < arity; ++i) {
353 ir_node *in = get_irn_n(to, i);
354 if (skip_Proj(in) == node)
362 * search for an instruction that can fill the delay slot of @p node
364 static const ir_node *pick_delay_slot_for(const ir_node *node)
366 const ir_node *schedpoint = node;
368 /* currently we don't track which registers are still alive, so we can't
369 * pick any other instructions other than the one directly preceding */
370 static const unsigned PICK_DELAY_SLOT_MAX_DISTANCE = 10;
372 assert(has_delay_slot(node));
/* walk backwards in the schedule looking for a suitable single-instruction
 * candidate; give up after PICK_DELAY_SLOT_MAX_DISTANCE tries */
374 while (sched_has_prev(schedpoint)) {
375 schedpoint = sched_prev(schedpoint);
377 if (has_delay_slot(schedpoint))
380 /* skip things which don't really result in instructions */
381 if (is_no_instruction(schedpoint))
384 if (tries++ >= PICK_DELAY_SLOT_MAX_DISTANCE)
387 if (emits_multiple_instructions(schedpoint))
390 if (!can_move_into_delayslot(schedpoint, node))
393 /* found something */
/*
 * printf-like emitter for a single assembler line. Conversions visible in
 * this extract: %Dn dest register n, %E entity(+offset), %F[DMS] FP mode
 * suffix, %H high immediate, %L control-flow target label, %ML/%MS
 * load/store mode, %On offset operand n, %R register (va_arg), %Sn source
 * register n (or immediate with the SI form), %d int, %s string, %u
 * unsigned; '+' prints signed values with an explicit sign.
 * NOTE(review): the dispatch switch itself is elided in this extract.
 */
400 void sparc_emitf(ir_node const *const node, char const *fmt, ...)
406 char const *start = fmt;
/* copy literal text up to the next conversion */
408 while (*fmt != '%' && *fmt != '\0')
410 be_emit_string_len(start, fmt - start);
427 if (*fmt < '0' || '9' <= *fmt)
429 sparc_emit_dest_register(node, *fmt++ - '0');
433 sparc_attr_t const *const attr = get_sparc_attr_const(node);
434 be_gas_emit_entity(attr->immediate_value_entity);
435 if (attr->immediate_value != 0) {
436 be_emit_irprintf(plus ? "%+d" : "%d", attr->immediate_value);
444 case 'D': mode = get_sparc_fp_conv_attr_const(node)->dest_mode; break;
445 case 'M': mode = get_sparc_fp_attr_const(node)->fp_mode; break;
446 case 'S': mode = get_sparc_fp_conv_attr_const(node)->src_mode; break;
447 default: goto unknown;
449 emit_fp_suffix(mode);
454 sparc_emit_high_immediate(node);
458 sparc_emit_cfop_target(node);
463 case 'L': sparc_emit_load_mode(node); break;
464 case 'S': sparc_emit_store_mode(node); break;
465 default: goto unknown;
470 if (*fmt < '0' || '9' <= *fmt)
472 sparc_emit_offset(node, *fmt++ - '0');
476 arch_register_t const *const reg = va_arg(ap, const arch_register_t*);
478 be_emit_string(arch_register_get_name(reg));
488 if (*fmt < '0' || '9' <= *fmt)
490 unsigned const pos = *fmt++ - '0';
/* %SIn: emit the immediate instead of the register when the node is in
 * immediate form */
491 if (imm && arch_get_irn_flags(node) & (arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) {
492 sparc_emit_immediate(node);
494 sparc_emit_source_register(node, pos);
500 int const num = va_arg(ap, int);
501 be_emit_irprintf(plus ? "%+d" : "%d", num);
506 char const *const str = va_arg(ap, char const*);
512 unsigned const num = va_arg(ap, unsigned);
513 be_emit_irprintf(plus ? "%+u" : "%u", num);
519 panic("unknown format conversion in sparc_emitf()");
522 be_emit_finish_line_gas(node);
527 * Emits code for stack space management
529 static void emit_be_IncSP(const ir_node *irn)
531 int offset = be_get_IncSP_offset(irn);
536 /* SPARC stack grows downwards */
/* comma expression inside the ternary: a positive offset is negated and
 * emitted with "add", otherwise "sub" is used */
537 char const *const insn = offset > 0 ? offset = -offset, "add" : "sub";
538 sparc_emitf(irn, "%s %S0, %d, %D0", insn, offset);
542 * Emits code for stack space management.
544 static void emit_sparc_SubSP(const ir_node *irn)
546 sparc_emitf(irn, "sub %S0, %SI1, %D0");
/* the returned address skips the register-save area of the new frame */
547 sparc_emitf(irn, "add %S0, %u, %D1", SPARC_MIN_STACKSIZE);
/* Emit the instruction for the pending delay slot: either the previously
 * picked filler node or a nop. */
550 static void fill_delay_slot(void)
552 emitting_delay_slot = true;
553 if (delay_slot_filler != NULL) {
554 sparc_emit_node(delay_slot_filler);
555 delay_slot_filler = NULL;
557 sparc_emitf(NULL, "nop");
559 emitting_delay_slot = false;
/* Emit a division: write the high word (here 0) to %y, wait out the wr
 * latency, then issue the divide instruction `insn`. */
562 static void emit_sparc_Div(const ir_node *node, char const *const insn)
564 sparc_emitf(node, "wr %S0, 0, %%y");
566 /* TODO: we should specify number of delayslots in an architecture
568 unsigned wry_delay_count = 3;
569 for (unsigned i = 0; i < wry_delay_count; ++i) {
573 sparc_emitf(node, "%s %S1, %SI2, %D0", insn);
576 static void emit_sparc_SDiv(const ir_node *node)
578 emit_sparc_Div(node, "sdiv");
581 static void emit_sparc_UDiv(const ir_node *node)
583 emit_sparc_Div(node, "udiv");
/* Emit a call through a register or to an immediate entity; calls with
 * aggregate return get an "unimp <size>" marker word after the delay slot. */
586 static void emit_sparc_Call(const ir_node *node)
588 if (is_sparc_reg_call(node)) {
589 int dest_addr = get_sparc_Call_dest_addr_pos(node);
590 sparc_emitf(node, "call %R", arch_get_irn_register_in(node, dest_addr));
592 sparc_emitf(node, "call %E, 0");
597 if (arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return) {
598 sparc_emitf(NULL, "unimp 8");
/* Swap two registers without a scratch register via the xor trick. */
602 static void emit_be_Perm(const ir_node *irn)
604 sparc_emitf(irn, "xor %S1, %S0, %S0");
605 sparc_emitf(irn, "xor %S1, %S0, %S1");
606 sparc_emitf(irn, "xor %S1, %S0, %S0");
609 /* The stack pointer must always be SPARC_STACK_ALIGNMENT bytes aligned, so get
610 * the next bigger integer that's evenly divisible by it. */
611 static unsigned get_aligned_sp_change(const unsigned num_regs)
613 const unsigned bytes = num_regs * SPARC_REGISTER_SIZE;
614 return round_up2(bytes, SPARC_STACK_ALIGNMENT);
617 /* Spill register l0 or both l0 and l1, depending on n_spilled and n_to_spill.*/
618 static void memperm_emit_spill_registers(const ir_node *node, int n_spilled,
621 assert(n_spilled < n_to_spill);
623 if (n_spilled == 0) {
624 /* We always reserve stack space for two registers because during copy
625 * processing we don't know yet if we also need to handle a cycle which
626 * needs two registers. More complicated code in emit_MemPerm would
627 * prevent wasting SPARC_REGISTER_SIZE bytes of stack space but
628 * it is not worth the worse readability of emit_MemPerm. */
630 /* Keep stack pointer aligned. */
631 unsigned sp_change = get_aligned_sp_change(2);
632 sparc_emitf(node, "sub %%sp, %u, %%sp", sp_change);
634 /* Spill register l0. */
635 sparc_emitf(node, "st %%l0, [%%sp%+d]", SPARC_MIN_STACKSIZE);
638 if (n_to_spill == 2) {
639 /* Spill register l1. */
640 sparc_emitf(node, "st %%l1, [%%sp%+d]", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
644 /* Restore register l0 or both l0 and l1, depending on n_spilled. */
645 static void memperm_emit_restore_registers(const ir_node *node, int n_spilled)
647 if (n_spilled == 2) {
648 /* Restore register l1. */
649 sparc_emitf(node, "ld [%%sp%+d], %%l1", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
652 /* Restore register l0. */
653 sparc_emitf(node, "ld [%%sp%+d], %%l0", SPARC_MIN_STACKSIZE);
655 /* Restore stack pointer. */
656 unsigned sp_change = get_aligned_sp_change(2);
657 sparc_emitf(node, "add %%sp, %u, %%sp", sp_change);
660 /* Emit code to copy in_ent to out_ent. Only uses l0. */
661 static void memperm_emit_copy(const ir_node *node, ir_entity *in_ent,
664 ir_graph *irg = get_irn_irg(node);
665 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
666 int off_in = be_get_stack_entity_offset(layout, in_ent, 0);
667 int off_out = be_get_stack_entity_offset(layout, out_ent, 0);
669 /* Load from input entity. */
670 sparc_emitf(node, "ld [%%fp%+d], %%l0", off_in);
671 /* Store to output entity. */
672 sparc_emitf(node, "st %%l0, [%%fp%+d]", off_out);
675 /* Emit code to swap ent1 and ent2. Uses l0 and l1. */
676 static void memperm_emit_swap(const ir_node *node, ir_entity *ent1,
679 ir_graph *irg = get_irn_irg(node);
680 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
681 int off1 = be_get_stack_entity_offset(layout, ent1, 0);
682 int off2 = be_get_stack_entity_offset(layout, ent2, 0);
684 /* Load from first input entity. */
685 sparc_emitf(node, "ld [%%fp%+d], %%l0", off1);
686 /* Load from second input entity. */
687 sparc_emitf(node, "ld [%%fp%+d], %%l1", off2);
688 /* Store first value to second output entity. */
689 sparc_emitf(node, "st %%l0, [%%fp%+d]", off2);
690 /* Store second value to first output entity. */
691 sparc_emitf(node, "st %%l1, [%%fp%+d]", off1);
694 /* Find the index of ent in ents or return -1 if not found. */
695 static int get_index(ir_entity **ents, int n, ir_entity *ent)
697 for (int i = 0; i < n; ++i) {
706 * Emit code for a MemPerm node.
708 * Analyze MemPerm for copy chains and cyclic swaps and resolve them using
710 * This function is conceptually very similar to permute_values in
713 static void emit_be_MemPerm(const ir_node *node)
715 int memperm_arity = be_get_MemPerm_entity_arity(node);
716 /* Upper limit for the number of participating entities is twice the
717 * arity, e.g., for a simple copying MemPerm node with one input/output. */
718 int max_size = 2 * memperm_arity;
719 ir_entity **entities = ALLOCANZ(ir_entity *, max_size);
720 /* sourceof contains the input entity for each entity. If an entity is
721 * never used as an output, its entry in sourceof is a fix point. */
722 int *sourceof = ALLOCANZ(int, max_size);
723 /* n_users counts how many output entities use this entity as their input.*/
724 int *n_users = ALLOCANZ(int, max_size);
725 /* n_spilled records the number of spilled registers, either 1 or 2. */
728 /* This implementation currently only works with frame pointers. */
729 ir_graph *irg = get_irn_irg(node);
730 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
731 assert(!layout->sp_relative && "MemPerms currently do not work without frame pointers");
/* initialize sourceof to the identity permutation (all fix points) */
733 for (int i = 0; i < max_size; ++i) {
/* build the permutation graph over unique entity indices */
738 for (int i = 0; i < memperm_arity; ++i) {
739 ir_entity *out = be_get_MemPerm_out_entity(node, i);
740 ir_entity *in = be_get_MemPerm_in_entity(node, i);
742 /* Insert into entities to be able to operate on unique indices. */
743 if (get_index(entities, n, out) == -1)
745 if (get_index(entities, n, in) == -1)
748 int oidx = get_index(entities, n, out);
749 int iidx = get_index(entities, n, in);
751 sourceof[oidx] = iidx; /* Remember the source. */
752 ++n_users[iidx]; /* Increment number of users of this entity. */
755 /* First do all the copies. */
756 for (int oidx = 0; oidx < n; /* empty */) {
757 int iidx = sourceof[oidx];
759 /* Nothing to do for fix points.
760 * Also, if entities[oidx] is used as an input by another copy, we
761 * can't overwrite entities[oidx] yet.*/
762 if (iidx == oidx || n_users[oidx] > 0) {
767 /* We found the end of a 'chain', so do the copy. */
768 if (n_spilled == 0) {
769 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/1);
772 memperm_emit_copy(node, entities[iidx], entities[oidx]);
/* mark oidx as done by turning it into a fix point */
775 sourceof[oidx] = oidx;
777 assert(n_users[iidx] > 0);
778 /* Decrementing the number of users might enable us to do another
782 if (iidx < oidx && n_users[iidx] == 0) {
789 /* The rest are cycles. */
790 for (int oidx = 0; oidx < n; /* empty */) {
791 int iidx = sourceof[oidx];
793 /* Nothing to do for fix points. */
/* in a pure cycle every entity has exactly one user */
799 assert(n_users[iidx] == 1);
801 /* Swap the two values to resolve the cycle. */
803 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/2);
806 memperm_emit_swap(node, entities[iidx], entities[oidx]);
808 int tidx = sourceof[iidx];
/* iidx is now in its final position, so it becomes a fix point */
810 sourceof[iidx] = iidx;
812 /* The source of oidx is now the old source of iidx, because we swapped
813 * the two entities. */
814 sourceof[oidx] = tidx;
818 /* Only fix points should remain. */
819 for (int i = 0; i < max_size; ++i) {
820 assert(sourceof[i] == i);
824 assert(n_spilled > 0 && "Useless MemPerm node");
826 memperm_emit_restore_registers(node, n_spilled);
/* Emit a function return: "jmp %o7+8" (or +12 for compound returns, to skip
 * the caller's unimp marker word). If the delay slot holds a Restore, the
 * return address register changes — handled manually below. */
829 static void emit_sparc_Return(const ir_node *node)
831 ir_graph *irg = get_irn_irg(node);
832 ir_entity *entity = get_irg_entity(irg);
833 ir_type *type = get_entity_type(entity);
835 const char *destreg = "%o7";
837 /* hack: we don't explicitely model register changes because of the
838 * restore node. So we have to do it manually here */
839 if (delay_slot_filler != NULL &&
840 (is_sparc_Restore(delay_slot_filler)
841 || is_sparc_RestoreZero(delay_slot_filler))) {
844 char const *const offset = get_method_calling_convention(type) & cc_compound_ret ? "12" : "8";
845 sparc_emitf(node, "jmp %s+%s", destreg, offset);
/* Map an input register %i0..%i7 to the corresponding output register
 * %o0..%o7 (needed because restore renames register windows). */
849 static const arch_register_t *map_i_to_o_reg(const arch_register_t *reg)
851 unsigned idx = reg->global_index;
852 if (idx < REG_I0 || idx > REG_I7)
854 idx += REG_O0 - REG_I0;
855 assert(REG_O0 <= idx && idx <= REG_O7);
856 return &sparc_registers[idx];
859 static void emit_sparc_Restore(const ir_node *node)
861 const arch_register_t *destreg
862 = arch_get_irn_register_out(node, pn_sparc_Restore_res);
/* the result register is named from the caller's window, hence i->o */
863 sparc_emitf(node, "restore %S2, %SI3, %R", map_i_to_o_reg(destreg));
/* Emit a frame-address computation as add/sub of the immediate offset. */
866 static void emit_sparc_FrameAddr(const ir_node *node)
868 const sparc_attr_t *attr = get_sparc_attr_const(node);
869 int32_t offset = attr->immediate_value;
/* comma expression in ternary: negate positive offsets and use "sub" */
871 char const *const insn = offset > 0 ? offset = -offset, "sub" : "add";
872 assert(sparc_is_value_imm_encodeable(offset));
873 sparc_emitf(node, "%s %S0, %d, %D0", insn, offset);
/* Map an integer relation to the unsigned branch mnemonic (icc). */
876 static const char *get_icc_unsigned(ir_relation relation)
878 switch (relation & (ir_relation_less_equal_greater)) {
879 case ir_relation_false: return "bn";
880 case ir_relation_equal: return "be";
881 case ir_relation_less: return "blu";
882 case ir_relation_less_equal: return "bleu";
883 case ir_relation_greater: return "bgu";
884 case ir_relation_greater_equal: return "bgeu";
885 case ir_relation_less_greater: return "bne";
886 case ir_relation_less_equal_greater: return "ba";
887 default: panic("Cmp has unsupported relation");
/* Map an integer relation to the signed branch mnemonic (icc). */
891 static const char *get_icc_signed(ir_relation relation)
893 switch (relation & (ir_relation_less_equal_greater)) {
894 case ir_relation_false: return "bn";
895 case ir_relation_equal: return "be";
896 case ir_relation_less: return "bl";
897 case ir_relation_less_equal: return "ble";
898 case ir_relation_greater: return "bg";
899 case ir_relation_greater_equal: return "bge";
900 case ir_relation_less_greater: return "bne";
901 case ir_relation_less_equal_greater: return "ba";
902 default: panic("Cmp has unsupported relation");
/* Map a (possibly unordered) float relation to the fcc branch mnemonic. */
906 static const char *get_fcc(ir_relation relation)
909 case ir_relation_false: return "fbn";
910 case ir_relation_equal: return "fbe";
911 case ir_relation_less: return "fbl";
912 case ir_relation_less_equal: return "fble";
913 case ir_relation_greater: return "fbg";
914 case ir_relation_greater_equal: return "fbge";
915 case ir_relation_less_greater: return "fblg";
916 case ir_relation_less_equal_greater: return "fbo";
917 case ir_relation_unordered: return "fbu";
918 case ir_relation_unordered_equal: return "fbue";
919 case ir_relation_unordered_less: return "fbul";
920 case ir_relation_unordered_less_equal: return "fbule";
921 case ir_relation_unordered_greater: return "fbug";
922 case ir_relation_unordered_greater_equal: return "fbuge";
923 case ir_relation_unordered_less_greater: return "fbne";
924 case ir_relation_true: return "fba";
926 panic("invalid relation");
929 typedef const char* (*get_cc_func)(ir_relation relation);
/* Emit a conditional branch: find the true/false Projs, swap them (negating
 * the relation) when the true target is the fallthrough block, then emit
 * the conditional branch plus a "ba" (or a fallthrough comment) for the
 * false side. */
931 static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
933 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
934 ir_relation relation = attr->relation;
935 const ir_node *proj_true = NULL;
936 const ir_node *proj_false = NULL;
938 foreach_out_edge(node, edge) {
939 ir_node *proj = get_edge_src_irn(edge);
940 long nr = get_Proj_proj(proj);
941 if (nr == pn_Cond_true) {
948 /* for now, the code works for scheduled and non-schedules blocks */
949 const ir_node *block = get_nodes_block(node);
951 /* we have a block schedule */
952 const ir_node *next_block = (ir_node*)get_irn_link(block);
954 if (get_irn_link(proj_true) == next_block) {
955 /* exchange both proj's so the second one can be omitted */
956 const ir_node *t = proj_true;
958 proj_true = proj_false;
960 relation = get_negated_relation(relation);
963 /* emit the true proj */
964 sparc_emitf(proj_true, "%s %L", get_cc(relation));
967 if (get_irn_link(proj_false) == next_block) {
968 if (be_options.verbose_asm) {
969 sparc_emitf(proj_false, "/* fallthrough to %L */");
972 sparc_emitf(proj_false, "ba %L");
/* Integer conditional branch: choose signed/unsigned mnemonic table. */
977 static void emit_sparc_Bicc(const ir_node *node)
979 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
980 bool is_unsigned = attr->is_unsigned;
981 emit_sparc_branch(node, is_unsigned ? get_icc_unsigned : get_icc_signed);
/* Floating-point conditional branch; a nop is needed between the flag
 * producer and the branch when they are scheduled back to back. */
984 static void emit_sparc_fbfcc(const ir_node *node)
986 /* if the flags producing node was immediately in front of us, emit
988 ir_node *flags = get_irn_n(node, n_sparc_fbfcc_flags);
989 ir_node *prev = sched_prev(node);
990 if (is_Block(prev)) {
991 /* TODO: when the flags come from another block, then we have to do
992 * more complicated tests to see whether the flag producing node is
993 * potentially in front of us (could happen for fallthroughs) */
994 panic("TODO: fbfcc flags come from other block");
996 if (skip_Proj(flags) == prev) {
997 sparc_emitf(NULL, "nop");
999 emit_sparc_branch(node, get_fcc);
/* Unconditional branch, omitted entirely when it is a fallthrough. */
1002 static void emit_sparc_Ba(const ir_node *node)
1004 if (ba_is_fallthrough(node)) {
1005 if (be_options.verbose_asm) {
1006 sparc_emitf(node, "/* fallthrough to %L */");
1009 sparc_emitf(node, "ba %L");
/* Computed jump through a jump table. */
1014 static void emit_sparc_SwitchJmp(const ir_node *node)
1016 const sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr_const(node);
1018 sparc_emitf(node, "jmp %S0");
1021 be_emit_jump_table(node, attr->table, attr->table_entity, get_jump_target);
/* Emit a single-precision register move (fmovs). */
1024 static void emit_fmov(const ir_node *node, const arch_register_t *src_reg,
1025 const arch_register_t *dst_reg)
1027 sparc_emitf(node, "fmovs %R, %R", src_reg, dst_reg);
/* Return the FP register following `reg` in the register file; used to
 * address the parts of double/quad values. */
1030 static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
1032 unsigned idx = reg->global_index;
1033 assert(reg == &sparc_registers[idx]);
1035 assert(idx - REG_F0 < N_sparc_fp_REGS);
1036 return &sparc_registers[idx];
/* Emit a register-register copy; FP copies wider than 32 bit are split
 * into one fmovs per 32-bit part. */
1039 static void emit_be_Copy(const ir_node *node)
1041 ir_mode *mode = get_irn_mode(node);
1042 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
1043 const arch_register_t *dst_reg = arch_get_irn_register_out(node, 0);
/* same register: nothing to emit */
1045 if (src_reg == dst_reg)
1048 if (mode_is_float(mode)) {
1049 unsigned bits = get_mode_size_bits(mode);
/* number of ADDITIONAL fmovs: 1 for 64 bit, 3 for 128 bit */
1050 int n = bits > 32 ? bits > 64 ? 3 : 1 : 0;
1051 emit_fmov(node, src_reg, dst_reg);
1052 for (int i = 0; i < n; ++i) {
1053 src_reg = get_next_fp_reg(src_reg);
1054 dst_reg = get_next_fp_reg(dst_reg);
1055 emit_fmov(node, src_reg, dst_reg);
1057 } else if (mode_is_data(mode)) {
1058 sparc_emitf(node, "mov %S0, %D0");
1060 panic("invalid mode");
1064 static void emit_nothing(const ir_node *irn)
1069 typedef void (*emit_func) (const ir_node *);
/* Store an emitter function in the opcode's generic function pointer. */
1071 static inline void set_emitter(ir_op *op, emit_func sparc_emit_node)
1073 op->ops.generic = (op_func)sparc_emit_node;
1077 * Enters the emitter functions for handled nodes into the generic
1078 * pointer of an opcode.
1080 static void sparc_register_emitters(void)
1082 /* first clear the generic function pointer for all ops */
1083 ir_clear_opcodes_generic_func();
1084 /* register all emitter functions defined in spec */
1085 sparc_register_spec_emitters();
1087 /* custom emitter */
1088 set_emitter(op_be_Copy, emit_be_Copy);
1089 set_emitter(op_be_CopyKeep, emit_be_Copy);
1090 set_emitter(op_be_IncSP, emit_be_IncSP);
1091 set_emitter(op_be_MemPerm, emit_be_MemPerm);
1092 set_emitter(op_be_Perm, emit_be_Perm);
1093 set_emitter(op_sparc_Ba, emit_sparc_Ba);
1094 set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
1095 set_emitter(op_sparc_Call, emit_sparc_Call);
1096 set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
1097 set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
1098 set_emitter(op_sparc_SubSP, emit_sparc_SubSP);
1099 set_emitter(op_sparc_Restore, emit_sparc_Restore);
1100 set_emitter(op_sparc_Return, emit_sparc_Return);
1101 set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
1102 set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
1103 set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
1105 /* no need to emit anything for the following nodes */
1106 set_emitter(op_be_Keep, emit_nothing);
1107 set_emitter(op_sparc_Start, emit_nothing);
1108 set_emitter(op_Phi, emit_nothing);
1112 * Emits code for a node.
1114 static void sparc_emit_node(const ir_node *node)
1116 ir_op *op = get_irn_op(node);
1118 if (op->ops.generic) {
1119 emit_func func = (emit_func) op->ops.generic;
1120 be_dwarf_location(get_irn_dbg_info(node));
1123 panic("No emit handler for node %+F (graph %+F)\n", node,
/* Walk the schedule from `from` and return the next node that has a delay
 * slot, or NULL (the elided branch, presumably) if none remains. */
1128 static ir_node *find_next_delay_slot(ir_node *from)
1130 ir_node *schedpoint = from;
1131 while (!has_delay_slot(schedpoint)) {
1132 if (!sched_has_next(schedpoint))
1134 schedpoint = sched_next(schedpoint);
/* Returns true if a label must be emitted for `block`: it has an entity,
 * zero or several predecessors, is a SwitchJmp target, or its single
 * predecessor is not the directly preceding fallthrough. */
1139 static bool block_needs_label(const ir_node *block, const ir_node *sched_prev)
1141 if (get_Block_entity(block) != NULL)
1144 int n_cfgpreds = get_Block_n_cfgpreds(block);
1145 if (n_cfgpreds == 0) {
1147 } else if (n_cfgpreds > 1) {
1150 ir_node *cfgpred = get_Block_cfgpred(block, 0);
1151 ir_node *cfgpred_block = get_nodes_block(cfgpred);
1152 if (is_Proj(cfgpred) && is_sparc_SwitchJmp(get_Proj_pred(cfgpred)))
1154 return sched_prev != cfgpred_block || get_irn_link(cfgpred) != block;
1159 * Walks over the nodes in a block connected by scheduling edges
1160 * and emits code for each node.
1162 static void sparc_emit_block(ir_node *block, ir_node *prev)
1164 bool needs_label = block_needs_label(block, prev);
1166 be_gas_begin_block(block, needs_label);
/* pick the filler for the first delay slot in this block up front, so it
 * can be skipped at its original schedule position below */
1168 ir_node *next_delay_slot = find_next_delay_slot(sched_first(block));
1169 if (next_delay_slot != NULL)
1170 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1172 sched_foreach(block, node) {
1173 if (node == delay_slot_filler) {
1177 sparc_emit_node(node);
1179 if (node == next_delay_slot) {
1180 assert(delay_slot_filler == NULL);
1181 next_delay_slot = find_next_delay_slot(sched_next(node));
1182 if (next_delay_slot != NULL)
1183 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1189 * Emits code for function start.
1191 static void sparc_emit_func_prolog(ir_graph *irg)
1193 ir_entity *entity = get_irg_entity(irg);
1194 be_gas_emit_function_prolog(entity, 4, NULL);
1198 * Emits code for function end
1200 static void sparc_emit_func_epilog(ir_graph *irg)
1202 ir_entity *entity = get_irg_entity(irg);
1203 be_gas_emit_function_epilog(entity);
/* Block walker: let every control-flow predecessor point at its target
 * block via the link field (consumed by get_jump_target). */
1206 static void sparc_gen_labels(ir_node *block, void *env)
1210 int n = get_Block_n_cfgpreds(block);
1211 for (n--; n >= 0; n--) {
1212 ir_node *pred = get_Block_cfgpred(block, n);
1213 set_irn_link(pred, block); // link the pred of a block (which is a jmp)
/* Main entry point: emit the assembler code of one graph. */
1217 void sparc_emit_routine(ir_graph *irg)
1219 heights = heights_new(irg);
1221 /* register all emitter functions */
1222 sparc_register_emitters();
1224 /* create the block schedule. For now, we don't need it earlier. */
1225 ir_node **block_schedule = be_create_block_schedule(irg);
1227 sparc_emit_func_prolog(irg);
1228 irg_block_walk_graph(irg, sparc_gen_labels, NULL, NULL);
1230 /* inject block scheduling links & emit code of each block */
1231 size_t n = ARR_LEN(block_schedule);
1232 for (size_t i = 0; i < n; ++i) {
1233 ir_node *block = block_schedule[i];
1234 ir_node *next_block = i+1 < n ? block_schedule[i+1] : NULL;
1235 set_irn_link(block, next_block);
1238 for (size_t i = 0; i < n; ++i) {
1239 ir_node *block = block_schedule[i];
1240 ir_node *prev = i>=1 ? block_schedule[i-1] : NULL;
/* the end block contains no real code */
1241 if (block == get_irg_end_block(irg))
1243 sparc_emit_block(block, prev);
1246 /* emit function epilog */
1247 sparc_emit_func_epilog(irg);
1249 heights_free(heights);
1252 void sparc_init_emitter(void)
1254 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.emit");