/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   emit assembler for a backend graph
 * @author  Hannes Rapp, Matthias Braun
 */
30 #include "bitfiddle.h"
42 #include "raw_bitset.h"
47 #include "beblocksched.h"
50 #include "be_dbgout.h"
53 #include "bepeephole.h"
55 #include "sparc_emitter.h"
56 #include "gen_sparc_emitter.h"
57 #include "sparc_nodes_attr.h"
58 #include "sparc_new_nodes.h"
59 #include "gen_sparc_regalloc_if.h"
61 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
63 static ir_heights_t *heights;
64 static const ir_node *delay_slot_filler; /**< this node has been choosen to fill
65 the next delay slot */
67 static void sparc_emit_node(const ir_node *node);
68 static bool emitting_delay_slot;
70 void sparc_emit_indent(void)
73 if (emitting_delay_slot)
77 void sparc_emit_immediate(const ir_node *node)
79 const sparc_attr_t *attr = get_sparc_attr_const(node);
80 ir_entity *entity = attr->immediate_value_entity;
83 int32_t value = attr->immediate_value;
84 assert(sparc_is_value_imm_encodeable(value));
85 be_emit_irprintf("%d", value);
87 if (get_entity_owner(entity) == get_tls_type()) {
88 be_emit_cstring("%tle_lox10(");
90 be_emit_cstring("%lo(");
92 be_gas_emit_entity(entity);
93 if (attr->immediate_value != 0) {
94 be_emit_irprintf("%+d", attr->immediate_value);
100 void sparc_emit_high_immediate(const ir_node *node)
102 const sparc_attr_t *attr = get_sparc_attr_const(node);
103 ir_entity *entity = attr->immediate_value_entity;
105 if (entity == NULL) {
106 uint32_t value = (uint32_t) attr->immediate_value;
107 be_emit_irprintf("%%hi(0x%X)", value);
109 if (get_entity_owner(entity) == get_tls_type()) {
110 be_emit_cstring("%tle_hix22(");
112 be_emit_cstring("%hi(");
114 be_gas_emit_entity(entity);
115 if (attr->immediate_value != 0) {
116 be_emit_irprintf("%+d", attr->immediate_value);
122 void sparc_emit_source_register(const ir_node *node, int pos)
124 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
126 be_emit_string(arch_register_get_name(reg));
129 void sparc_emit_dest_register(const ir_node *node, int pos)
131 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
133 be_emit_string(arch_register_get_name(reg));
137 * Emits either a imm or register depending on arity of node
139 * @param register no (-1 if no register)
141 void sparc_emit_reg_or_imm(const ir_node *node, int pos)
143 if (arch_get_irn_flags(node) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
144 // we have a imm input
145 sparc_emit_immediate(node);
148 sparc_emit_source_register(node, pos);
155 void sparc_emit_offset(const ir_node *node, int offset_node_pos)
157 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
159 if (attr->is_reg_reg) {
160 assert(!attr->is_frame_entity);
161 assert(attr->base.immediate_value == 0);
162 assert(attr->base.immediate_value_entity == NULL);
164 sparc_emit_source_register(node, offset_node_pos);
165 } else if (attr->is_frame_entity) {
166 int32_t offset = attr->base.immediate_value;
168 assert(sparc_is_value_imm_encodeable(offset));
169 be_emit_irprintf("%+ld", offset);
171 } else if (attr->base.immediate_value != 0
172 || attr->base.immediate_value_entity != NULL) {
174 sparc_emit_immediate(node);
178 void sparc_emit_source_reg_and_offset(const ir_node *node, int regpos,
181 const arch_register_t *reg = arch_get_irn_register_in(node, regpos);
182 const sparc_load_store_attr_t *attr;
185 if (reg == &sparc_registers[REG_SP]) {
186 attr = get_sparc_load_store_attr_const(node);
187 if (!attr->is_reg_reg
188 && attr->base.immediate_value < SPARC_SAVE_AREA_SIZE) {
190 ir_fprintf(stderr, "warning: emitting stack pointer relative load/store with offset < %d\n", SPARC_SAVE_AREA_SIZE);
195 sparc_emit_source_register(node, regpos);
196 sparc_emit_offset(node, offpos);
199 void sparc_emit_float_load_store_mode(const ir_node *node)
201 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
202 ir_mode *mode = attr->load_store_mode;
203 int bits = get_mode_size_bits(mode);
205 assert(mode_is_float(mode));
209 case 64: be_emit_char('d'); return;
210 case 128: be_emit_char('q'); return;
212 panic("invalid float load/store mode %+F", mode);
216 * Emit load mode char
218 void sparc_emit_load_mode(const ir_node *node)
220 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
221 ir_mode *mode = attr->load_store_mode;
222 int bits = get_mode_size_bits(mode);
223 bool is_signed = mode_is_signed(mode);
226 be_emit_string(is_signed ? "sh" : "uh");
227 } else if (bits == 8) {
228 be_emit_string(is_signed ? "sb" : "ub");
229 } else if (bits == 64) {
237 * Emit store mode char
239 void sparc_emit_store_mode(const ir_node *node)
241 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
242 ir_mode *mode = attr->load_store_mode;
243 int bits = get_mode_size_bits(mode);
247 } else if (bits == 8) {
249 } else if (bits == 64) {
256 static void emit_fp_suffix(const ir_mode *mode)
258 unsigned bits = get_mode_size_bits(mode);
259 assert(mode_is_float(mode));
263 } else if (bits == 64) {
265 } else if (bits == 128) {
268 panic("invalid FP mode");
272 void sparc_emit_fp_conv_source(const ir_node *node)
274 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
275 emit_fp_suffix(attr->src_mode);
278 void sparc_emit_fp_conv_destination(const ir_node *node)
280 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
281 emit_fp_suffix(attr->dest_mode);
285 * emits the FP mode suffix char
287 void sparc_emit_fp_mode_suffix(const ir_node *node)
289 const sparc_fp_attr_t *attr = get_sparc_fp_attr_const(node);
290 emit_fp_suffix(attr->fp_mode);
293 static ir_node *get_jump_target(const ir_node *jump)
295 return (ir_node*)get_irn_link(jump);
299 * Returns the target label for a control flow node.
301 static void sparc_emit_cfop_target(const ir_node *node)
303 ir_node *block = get_jump_target(node);
304 be_gas_emit_block_name(block);
308 * returns true if a sparc_call calls a register and not an immediate
310 static bool is_sparc_reg_call(const ir_node *node)
312 const sparc_attr_t *attr = get_sparc_attr_const(node);
313 return attr->immediate_value_entity == NULL;
316 static int get_sparc_Call_dest_addr_pos(const ir_node *node)
318 assert(is_sparc_reg_call(node));
319 return get_irn_arity(node)-1;
322 static bool ba_is_fallthrough(const ir_node *node)
324 ir_node *block = get_nodes_block(node);
325 ir_node *next_block = (ir_node*)get_irn_link(block);
326 return get_irn_link(node) == next_block;
329 static bool is_no_instruction(const ir_node *node)
331 /* copies are nops if src_reg == dest_reg */
332 if (be_is_Copy(node) || be_is_CopyKeep(node)) {
333 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
334 const arch_register_t *dest_reg = arch_get_irn_register_out(node, 0);
336 if (src_reg == dest_reg)
339 if (be_is_IncSP(node) && be_get_IncSP_offset(node) == 0)
341 /* Ba is not emitted if it is a simple fallthrough */
342 if (is_sparc_Ba(node) && ba_is_fallthrough(node))
345 return be_is_Keep(node) || be_is_Start(node) || is_Phi(node);
348 static bool has_delay_slot(const ir_node *node)
350 if (is_sparc_Ba(node)) {
351 return !ba_is_fallthrough(node);
354 return arch_get_irn_flags(node) & sparc_arch_irn_flag_has_delay_slot;
357 /** returns true if the emitter for this sparc node can produce more than one
358 * actual sparc instruction.
359 * Usually it is a bad sign if we have to add instructions here. We should
360 * rather try to get them lowered down. So we can actually put them into
361 * delay slots and make them more accessible to the scheduler.
363 static bool emits_multiple_instructions(const ir_node *node)
365 if (has_delay_slot(node))
368 if (is_sparc_Call(node)) {
369 return arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return;
372 return is_sparc_SMulh(node) || is_sparc_UMulh(node)
373 || is_sparc_SDiv(node) || is_sparc_UDiv(node)
374 || be_is_MemPerm(node) || be_is_Perm(node);
377 static bool uses_reg(const ir_node *node, const arch_register_t *reg)
379 int arity = get_irn_arity(node);
382 for (i = 0; i < arity; ++i) {
383 const arch_register_t *in_reg = arch_get_irn_register_in(node, i);
390 static bool writes_reg(const ir_node *node, const arch_register_t *reg)
392 unsigned n_outs = arch_get_irn_n_outs(node);
394 for (o = 0; o < n_outs; ++o) {
395 const arch_register_t *out_reg = arch_get_irn_register_out(node, o);
402 static bool can_move_into_delayslot(const ir_node *node, const ir_node *to)
404 if (!be_can_move_before(node, to))
407 if (is_sparc_Call(to)) {
409 /** all deps are used after the delay slot so, we're fine */
410 if (!is_sparc_reg_call(to))
413 check = get_irn_n(to, get_sparc_Call_dest_addr_pos(to));
414 if (skip_Proj(check) == node)
417 /* the Call also destroys the value of %o7, but since this is
418 * currently marked as ignore register in the backend, it
419 * should never be used by the instruction in the delay slot. */
420 if (uses_reg(node, &sparc_registers[REG_O7]))
423 } else if (is_sparc_Return(to)) {
424 /* return uses the value of %o7, all other values are not
425 * immediately used */
426 if (writes_reg(node, &sparc_registers[REG_O7]))
430 /* the node must not use our computed values */
431 int arity = get_irn_arity(to);
433 for (i = 0; i < arity; ++i) {
434 ir_node *in = get_irn_n(to, i);
435 if (skip_Proj(in) == node)
443 * search for an instruction that can fill the delay slot of @p node
445 static const ir_node *pick_delay_slot_for(const ir_node *node)
447 const ir_node *schedpoint = node;
449 /* currently we don't track which registers are still alive, so we can't
450 * pick any other instructions other than the one directly preceding */
451 static const unsigned PICK_DELAY_SLOT_MAX_DISTANCE = 10;
453 assert(has_delay_slot(node));
455 while (sched_has_prev(schedpoint)) {
456 schedpoint = sched_prev(schedpoint);
458 if (has_delay_slot(schedpoint))
461 /* skip things which don't really result in instructions */
462 if (is_no_instruction(schedpoint))
465 if (tries++ >= PICK_DELAY_SLOT_MAX_DISTANCE)
468 if (emits_multiple_instructions(schedpoint))
471 if (!can_move_into_delayslot(schedpoint, node))
474 /* found something */
482 * Emits code for stack space management
484 static void emit_be_IncSP(const ir_node *irn)
486 int offset = be_get_IncSP_offset(irn);
491 /* SPARC stack grows downwards */
494 be_emit_cstring("sub ");
497 be_emit_cstring("add ");
500 sparc_emit_source_register(irn, 0);
501 be_emit_irprintf(", %d", -offset);
502 be_emit_cstring(", ");
503 sparc_emit_dest_register(irn, 0);
504 be_emit_finish_line_gas(irn);
508 * emits code for mulh
510 static void emit_sparc_Mulh(const ir_node *irn)
513 if (is_sparc_UMulh(irn)) {
516 assert(is_sparc_SMulh(irn));
519 be_emit_cstring("mul ");
521 sparc_emit_source_register(irn, 0);
522 be_emit_cstring(", ");
523 sparc_emit_reg_or_imm(irn, 1);
524 be_emit_cstring(", ");
525 sparc_emit_dest_register(irn, 0);
526 be_emit_finish_line_gas(irn);
528 // our result is in the y register now
529 // we just copy it to the assigned target reg
531 be_emit_cstring("mov %y, ");
532 sparc_emit_dest_register(irn, 0);
533 be_emit_finish_line_gas(irn);
536 static void fill_delay_slot(void)
538 emitting_delay_slot = true;
539 if (delay_slot_filler != NULL) {
540 sparc_emit_node(delay_slot_filler);
541 delay_slot_filler = NULL;
544 be_emit_cstring("nop\n");
545 be_emit_write_line();
547 emitting_delay_slot = false;
550 static void emit_sparc_Div(const ir_node *node, bool is_signed)
552 /* can we get the delay count of the wr instruction somewhere? */
553 unsigned wry_delay_count = 3;
557 be_emit_cstring("wr ");
558 sparc_emit_source_register(node, 0);
559 be_emit_cstring(", 0, %y");
560 be_emit_finish_line_gas(node);
562 for (i = 0; i < wry_delay_count; ++i) {
567 be_emit_irprintf("%s ", is_signed ? "sdiv" : "udiv");
568 sparc_emit_source_register(node, 1);
569 be_emit_cstring(", ");
570 sparc_emit_reg_or_imm(node, 2);
571 be_emit_cstring(", ");
572 sparc_emit_dest_register(node, 0);
573 be_emit_finish_line_gas(node);
576 static void emit_sparc_SDiv(const ir_node *node)
578 emit_sparc_Div(node, true);
581 static void emit_sparc_UDiv(const ir_node *node)
583 emit_sparc_Div(node, false);
586 static void emit_sparc_Call(const ir_node *node)
589 be_emit_cstring("call ");
590 if (is_sparc_reg_call(node)) {
591 int dest_addr = get_sparc_Call_dest_addr_pos(node);
592 sparc_emit_source_register(node, dest_addr);
594 const sparc_attr_t *attr = get_sparc_attr_const(node);
595 ir_entity *entity = attr->immediate_value_entity;
596 be_gas_emit_entity(entity);
597 if (attr->immediate_value != 0) {
598 be_emit_irprintf("%+d", attr->immediate_value);
600 be_emit_cstring(", 0");
602 be_emit_finish_line_gas(node);
606 if (arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return) {
608 be_emit_cstring("unimp 8\n");
609 be_emit_write_line();
613 static void emit_be_Perm(const ir_node *irn)
616 be_emit_cstring("xor ");
617 sparc_emit_source_register(irn, 1);
618 be_emit_cstring(", ");
619 sparc_emit_source_register(irn, 0);
620 be_emit_cstring(", ");
621 sparc_emit_source_register(irn, 0);
622 be_emit_finish_line_gas(NULL);
625 be_emit_cstring("xor ");
626 sparc_emit_source_register(irn, 1);
627 be_emit_cstring(", ");
628 sparc_emit_source_register(irn, 0);
629 be_emit_cstring(", ");
630 sparc_emit_source_register(irn, 1);
631 be_emit_finish_line_gas(NULL);
634 be_emit_cstring("xor ");
635 sparc_emit_source_register(irn, 1);
636 be_emit_cstring(", ");
637 sparc_emit_source_register(irn, 0);
638 be_emit_cstring(", ");
639 sparc_emit_source_register(irn, 0);
640 be_emit_finish_line_gas(irn);
643 /* The stack pointer must always be SPARC_STACK_ALIGNMENT bytes aligned, so get
644 * the next bigger integer that's evenly divisible by it. */
645 static unsigned get_aligned_sp_change(const unsigned num_regs)
647 const unsigned bytes = num_regs * SPARC_REGISTER_SIZE;
648 return round_up2(bytes, SPARC_STACK_ALIGNMENT);
651 /* Spill register l0 or both l0 and l1, depending on n_spilled and n_to_spill.*/
652 static void memperm_emit_spill_registers(const ir_node *node, int n_spilled,
655 assert(n_spilled < n_to_spill);
657 if (n_spilled == 0) {
658 /* We always reserve stack space for two registers because during copy
659 * processing we don't know yet if we also need to handle a cycle which
660 * needs two registers. More complicated code in emit_MemPerm would
661 * prevent wasting SPARC_REGISTER_SIZE bytes of stack space but
662 * it is not worth the worse readability of emit_MemPerm. */
664 /* Keep stack pointer aligned. */
665 unsigned sp_change = get_aligned_sp_change(2);
667 be_emit_irprintf("sub %%sp, %u, %%sp", sp_change);
668 be_emit_finish_line_gas(node);
670 /* Spill register l0. */
672 be_emit_irprintf("st %%l0, [%%sp%+d]", SPARC_MIN_STACKSIZE);
673 be_emit_finish_line_gas(node);
676 if (n_to_spill == 2) {
677 /* Spill register l1. */
679 be_emit_irprintf("st %%l1, [%%sp%+d]", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
680 be_emit_finish_line_gas(node);
684 /* Restore register l0 or both l0 and l1, depending on n_spilled. */
685 static void memperm_emit_restore_registers(const ir_node *node, int n_spilled)
689 if (n_spilled == 2) {
690 /* Restore register l1. */
692 be_emit_irprintf("ld [%%sp%+d], %%l1", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
693 be_emit_finish_line_gas(node);
696 /* Restore register l0. */
698 be_emit_irprintf("ld [%%sp%+d], %%l0", SPARC_MIN_STACKSIZE);
699 be_emit_finish_line_gas(node);
701 /* Restore stack pointer. */
702 sp_change = get_aligned_sp_change(2);
704 be_emit_irprintf("add %%sp, %u, %%sp", sp_change);
705 be_emit_finish_line_gas(node);
708 /* Emit code to copy in_ent to out_ent. Only uses l0. */
709 static void memperm_emit_copy(const ir_node *node, ir_entity *in_ent,
712 ir_graph *irg = get_irn_irg(node);
713 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
714 int off_in = be_get_stack_entity_offset(layout, in_ent, 0);
715 int off_out = be_get_stack_entity_offset(layout, out_ent, 0);
717 /* Load from input entity. */
719 be_emit_irprintf("ld [%%fp%+d], %%l0", off_in);
720 be_emit_finish_line_gas(node);
722 /* Store to output entity. */
724 be_emit_irprintf("st %%l0, [%%fp%+d]", off_out);
725 be_emit_finish_line_gas(node);
728 /* Emit code to swap ent1 and ent2. Uses l0 and l1. */
729 static void memperm_emit_swap(const ir_node *node, ir_entity *ent1,
732 ir_graph *irg = get_irn_irg(node);
733 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
734 int off1 = be_get_stack_entity_offset(layout, ent1, 0);
735 int off2 = be_get_stack_entity_offset(layout, ent2, 0);
737 /* Load from first input entity. */
739 be_emit_irprintf("ld [%%fp%+d], %%l0", off1);
740 be_emit_finish_line_gas(node);
742 /* Load from second input entity. */
744 be_emit_irprintf("ld [%%fp%+d], %%l1", off2);
745 be_emit_finish_line_gas(node);
747 /* Store first value to second output entity. */
749 be_emit_irprintf("st %%l0, [%%fp%+d]", off2);
750 be_emit_finish_line_gas(node);
752 /* Store second value to first output entity. */
754 be_emit_irprintf("st %%l1, [%%fp%+d]", off1);
755 be_emit_finish_line_gas(node);
758 /* Find the index of ent in ents or return -1 if not found. */
759 static int get_index(ir_entity **ents, int n, ir_entity *ent)
763 for (i = 0; i < n; ++i)
771 * Emit code for a MemPerm node.
773 * Analyze MemPerm for copy chains and cyclic swaps and resolve them using
775 * This function is conceptually very similar to permute_values in
778 static void emit_be_MemPerm(const ir_node *node)
780 int memperm_arity = be_get_MemPerm_entity_arity(node);
781 /* Upper limit for the number of participating entities is twice the
782 * arity, e.g., for a simple copying MemPerm node with one input/output. */
783 int max_size = 2 * memperm_arity;
784 ir_entity **entities = ALLOCANZ(ir_entity *, max_size);
785 /* sourceof contains the input entity for each entity. If an entity is
786 * never used as an output, its entry in sourceof is a fix point. */
787 int *sourceof = ALLOCANZ(int, max_size);
788 /* n_users counts how many output entities use this entity as their input.*/
789 int *n_users = ALLOCANZ(int, max_size);
790 /* n_spilled records the number of spilled registers, either 1 or 2. */
794 /* This implementation currently only works with frame pointers. */
795 ir_graph *irg = get_irn_irg(node);
796 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
797 assert(!layout->sp_relative && "MemPerms currently do not work without frame pointers");
799 for (i = 0; i < max_size; ++i) {
803 for (i = n = 0; i < memperm_arity; ++i) {
804 ir_entity *out = be_get_MemPerm_out_entity(node, i);
805 ir_entity *in = be_get_MemPerm_in_entity(node, i);
806 int oidx; /* Out index */
807 int iidx; /* In index */
809 /* Insert into entities to be able to operate on unique indices. */
810 if (get_index(entities, n, out) == -1)
812 if (get_index(entities, n, in) == -1)
815 oidx = get_index(entities, n, out);
816 iidx = get_index(entities, n, in);
818 sourceof[oidx] = iidx; /* Remember the source. */
819 ++n_users[iidx]; /* Increment number of users of this entity. */
822 /* First do all the copies. */
823 for (oidx = 0; oidx < n; /* empty */) {
824 int iidx = sourceof[oidx];
826 /* Nothing to do for fix points.
827 * Also, if entities[oidx] is used as an input by another copy, we
828 * can't overwrite entities[oidx] yet.*/
829 if (iidx == oidx || n_users[oidx] > 0) {
834 /* We found the end of a 'chain', so do the copy. */
835 if (n_spilled == 0) {
836 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/1);
839 memperm_emit_copy(node, entities[iidx], entities[oidx]);
842 sourceof[oidx] = oidx;
844 assert(n_users[iidx] > 0);
845 /* Decrementing the number of users might enable us to do another
849 if (iidx < oidx && n_users[iidx] == 0) {
856 /* The rest are cycles. */
857 for (oidx = 0; oidx < n; /* empty */) {
858 int iidx = sourceof[oidx];
861 /* Nothing to do for fix points. */
867 assert(n_users[iidx] == 1);
869 /* Swap the two values to resolve the cycle. */
871 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/2);
874 memperm_emit_swap(node, entities[iidx], entities[oidx]);
876 tidx = sourceof[iidx];
878 sourceof[iidx] = iidx;
880 /* The source of oidx is now the old source of iidx, because we swapped
881 * the two entities. */
882 sourceof[oidx] = tidx;
886 /* Only fix points should remain. */
887 for (i = 0; i < max_size; ++i) {
888 assert(sourceof[i] == i);
892 assert(n_spilled > 0 && "Useless MemPerm node");
894 memperm_emit_restore_registers(node, n_spilled);
897 static void emit_sparc_Return(const ir_node *node)
899 ir_graph *irg = get_irn_irg(node);
900 ir_entity *entity = get_irg_entity(irg);
901 ir_type *type = get_entity_type(entity);
903 const char *destreg = "%o7";
905 /* hack: we don't explicitely model register changes because of the
906 * restore node. So we have to do it manually here */
907 if (delay_slot_filler != NULL &&
908 (is_sparc_Restore(delay_slot_filler)
909 || is_sparc_RestoreZero(delay_slot_filler))) {
913 be_emit_cstring("jmp ");
914 be_emit_string(destreg);
915 if (get_method_calling_convention(type) & cc_compound_ret) {
916 be_emit_cstring("+12");
918 be_emit_cstring("+8");
920 be_emit_finish_line_gas(node);
924 static const arch_register_t *map_i_to_o_reg(const arch_register_t *reg)
926 unsigned idx = reg->global_index;
927 if (idx < REG_I0 || idx > REG_I7)
929 idx += REG_O0 - REG_I0;
930 assert(REG_O0 <= idx && idx <= REG_O7);
931 return &sparc_registers[idx];
934 static void emit_sparc_Restore(const ir_node *node)
936 const arch_register_t *destreg
937 = arch_get_irn_register_out(node, pn_sparc_Restore_res);
939 be_emit_cstring("restore ");
940 sparc_emit_source_register(node, 1);
941 be_emit_cstring(", ");
942 sparc_emit_reg_or_imm(node, 2);
943 be_emit_cstring(", ");
944 destreg = map_i_to_o_reg(destreg);
946 be_emit_string(arch_register_get_name(destreg));
947 be_emit_finish_line_gas(node);
950 static void emit_sparc_FrameAddr(const ir_node *node)
952 const sparc_attr_t *attr = get_sparc_attr_const(node);
953 int32_t offset = attr->immediate_value;
957 be_emit_cstring("add ");
958 sparc_emit_source_register(node, 0);
959 be_emit_cstring(", ");
960 assert(sparc_is_value_imm_encodeable(offset));
961 be_emit_irprintf("%ld", offset);
963 be_emit_cstring("sub ");
964 sparc_emit_source_register(node, 0);
965 be_emit_cstring(", ");
966 assert(sparc_is_value_imm_encodeable(-offset));
967 be_emit_irprintf("%ld", -offset);
970 be_emit_cstring(", ");
971 sparc_emit_dest_register(node, 0);
972 be_emit_finish_line_gas(node);
975 static const char *get_icc_unsigned(ir_relation relation)
977 switch (relation & (ir_relation_less_equal_greater)) {
978 case ir_relation_false: return "bn";
979 case ir_relation_equal: return "be";
980 case ir_relation_less: return "blu";
981 case ir_relation_less_equal: return "bleu";
982 case ir_relation_greater: return "bgu";
983 case ir_relation_greater_equal: return "bgeu";
984 case ir_relation_less_greater: return "bne";
985 case ir_relation_less_equal_greater: return "ba";
986 default: panic("Cmp has unsupported relation");
990 static const char *get_icc_signed(ir_relation relation)
992 switch (relation & (ir_relation_less_equal_greater)) {
993 case ir_relation_false: return "bn";
994 case ir_relation_equal: return "be";
995 case ir_relation_less: return "bl";
996 case ir_relation_less_equal: return "ble";
997 case ir_relation_greater: return "bg";
998 case ir_relation_greater_equal: return "bge";
999 case ir_relation_less_greater: return "bne";
1000 case ir_relation_less_equal_greater: return "ba";
1001 default: panic("Cmp has unsupported relation");
1005 static const char *get_fcc(ir_relation relation)
1008 case ir_relation_false: return "fbn";
1009 case ir_relation_equal: return "fbe";
1010 case ir_relation_less: return "fbl";
1011 case ir_relation_less_equal: return "fble";
1012 case ir_relation_greater: return "fbg";
1013 case ir_relation_greater_equal: return "fbge";
1014 case ir_relation_less_greater: return "fblg";
1015 case ir_relation_less_equal_greater: return "fbo";
1016 case ir_relation_unordered: return "fbu";
1017 case ir_relation_unordered_equal: return "fbue";
1018 case ir_relation_unordered_less: return "fbul";
1019 case ir_relation_unordered_less_equal: return "fbule";
1020 case ir_relation_unordered_greater: return "fbug";
1021 case ir_relation_unordered_greater_equal: return "fbuge";
1022 case ir_relation_unordered_less_greater: return "fbne";
1023 case ir_relation_true: return "fba";
1025 panic("invalid relation");
1028 typedef const char* (*get_cc_func)(ir_relation relation);
1030 static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
1032 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
1033 ir_relation relation = attr->relation;
1034 const ir_node *proj_true = NULL;
1035 const ir_node *proj_false = NULL;
1036 const ir_edge_t *edge;
1037 const ir_node *block;
1038 const ir_node *next_block;
1040 foreach_out_edge(node, edge) {
1041 ir_node *proj = get_edge_src_irn(edge);
1042 long nr = get_Proj_proj(proj);
1043 if (nr == pn_Cond_true) {
1050 /* for now, the code works for scheduled and non-schedules blocks */
1051 block = get_nodes_block(node);
1053 /* we have a block schedule */
1054 next_block = (ir_node*)get_irn_link(block);
1056 if (get_irn_link(proj_true) == next_block) {
1057 /* exchange both proj's so the second one can be omitted */
1058 const ir_node *t = proj_true;
1060 proj_true = proj_false;
1062 relation = get_negated_relation(relation);
1065 /* emit the true proj */
1066 sparc_emit_indent();
1067 be_emit_string(get_cc(relation));
1069 sparc_emit_cfop_target(proj_true);
1070 be_emit_finish_line_gas(proj_true);
1074 sparc_emit_indent();
1075 if (get_irn_link(proj_false) == next_block) {
1076 be_emit_cstring("/* fallthrough to ");
1077 sparc_emit_cfop_target(proj_false);
1078 be_emit_cstring(" */");
1079 be_emit_finish_line_gas(proj_false);
1081 be_emit_cstring("ba ");
1082 sparc_emit_cfop_target(proj_false);
1083 be_emit_finish_line_gas(proj_false);
1088 static void emit_sparc_Bicc(const ir_node *node)
1090 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
1091 bool is_unsigned = attr->is_unsigned;
1092 emit_sparc_branch(node, is_unsigned ? get_icc_unsigned : get_icc_signed);
1095 static void emit_sparc_fbfcc(const ir_node *node)
1097 /* if the flags producing node was immediately in front of us, emit
1099 ir_node *flags = get_irn_n(node, n_sparc_fbfcc_flags);
1100 ir_node *prev = sched_prev(node);
1101 if (is_Block(prev)) {
1102 /* TODO: when the flags come from another block, then we have to do
1103 * more complicated tests to see wether the flag producing node is
1104 * potentially in front of us (could happen for fallthroughs) */
1105 panic("TODO: fbfcc flags come from other block");
1107 if (skip_Proj(flags) == prev) {
1108 sparc_emit_indent();
1109 be_emit_cstring("nop\n");
1111 emit_sparc_branch(node, get_fcc);
1114 static void emit_sparc_Ba(const ir_node *node)
1116 sparc_emit_indent();
1117 if (ba_is_fallthrough(node)) {
1118 be_emit_cstring("/* fallthrough to ");
1119 sparc_emit_cfop_target(node);
1120 be_emit_cstring(" */");
1121 be_emit_finish_line_gas(node);
1123 be_emit_cstring("ba ");
1124 sparc_emit_cfop_target(node);
1125 be_emit_finish_line_gas(node);
1130 static void emit_sparc_SwitchJmp(const ir_node *node)
1132 const sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr_const(node);
1134 sparc_emit_indent();
1135 be_emit_cstring("jmp ");
1136 sparc_emit_source_register(node, 0);
1137 be_emit_finish_line_gas(node);
1140 emit_jump_table(node, attr->default_proj_num, attr->jump_table,
1144 static void emit_fmov(const ir_node *node, const arch_register_t *src_reg,
1145 const arch_register_t *dst_reg)
1147 sparc_emit_indent();
1148 be_emit_cstring("fmovs %");
1149 be_emit_string(arch_register_get_name(src_reg));
1150 be_emit_cstring(", %");
1151 be_emit_string(arch_register_get_name(dst_reg));
1152 be_emit_finish_line_gas(node);
1155 static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
1157 unsigned idx = reg->global_index;
1158 assert(reg == &sparc_registers[idx]);
1160 assert(idx - REG_F0 < N_sparc_fp_REGS);
1161 return &sparc_registers[idx];
1164 static void emit_be_Copy(const ir_node *node)
1166 ir_mode *mode = get_irn_mode(node);
1167 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
1168 const arch_register_t *dst_reg = arch_get_irn_register_out(node, 0);
1170 if (src_reg == dst_reg)
1173 if (mode_is_float(mode)) {
1174 unsigned bits = get_mode_size_bits(mode);
1175 int n = bits > 32 ? bits > 64 ? 3 : 1 : 0;
1177 emit_fmov(node, src_reg, dst_reg);
1178 for (i = 0; i < n; ++i) {
1179 src_reg = get_next_fp_reg(src_reg);
1180 dst_reg = get_next_fp_reg(dst_reg);
1181 emit_fmov(node, src_reg, dst_reg);
1183 } else if (mode_is_data(mode)) {
1184 sparc_emit_indent();
1185 be_emit_cstring("mov ");
1186 sparc_emit_source_register(node, 0);
1187 be_emit_cstring(", ");
1188 sparc_emit_dest_register(node, 0);
1189 be_emit_finish_line_gas(node);
1191 panic("emit_be_Copy: invalid mode");
1195 static void emit_nothing(const ir_node *irn)
1200 typedef void (*emit_func) (const ir_node *);
1202 static inline void set_emitter(ir_op *op, emit_func sparc_emit_node)
1204 op->ops.generic = (op_func)sparc_emit_node;
1208 * Enters the emitter functions for handled nodes into the generic
1209 * pointer of an opcode.
1211 static void sparc_register_emitters(void)
1213 /* first clear the generic function pointer for all ops */
1214 clear_irp_opcodes_generic_func();
1215 /* register all emitter functions defined in spec */
1216 sparc_register_spec_emitters();
1218 /* custom emitter */
1219 set_emitter(op_be_Copy, emit_be_Copy);
1220 set_emitter(op_be_CopyKeep, emit_be_Copy);
1221 set_emitter(op_be_IncSP, emit_be_IncSP);
1222 set_emitter(op_be_MemPerm, emit_be_MemPerm);
1223 set_emitter(op_be_Perm, emit_be_Perm);
1224 set_emitter(op_sparc_Ba, emit_sparc_Ba);
1225 set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
1226 set_emitter(op_sparc_Call, emit_sparc_Call);
1227 set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
1228 set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
1229 set_emitter(op_sparc_SMulh, emit_sparc_Mulh);
1230 set_emitter(op_sparc_UMulh, emit_sparc_Mulh);
1231 set_emitter(op_sparc_Restore, emit_sparc_Restore);
1232 set_emitter(op_sparc_Return, emit_sparc_Return);
1233 set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
1234 set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
1235 set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
1237 /* no need to emit anything for the following nodes */
1238 set_emitter(op_be_Keep, emit_nothing);
1239 set_emitter(op_sparc_Start, emit_nothing);
1240 set_emitter(op_Phi, emit_nothing);
1244 * Emits code for a node.
1246 static void sparc_emit_node(const ir_node *node)
1248 ir_op *op = get_irn_op(node);
1250 if (op->ops.generic) {
1251 emit_func func = (emit_func) op->ops.generic;
1252 be_dbg_set_dbg_info(get_irn_dbg_info(node));
1255 panic("No emit handler for node %+F (graph %+F)\n", node,
1260 static ir_node *find_next_delay_slot(ir_node *from)
1262 ir_node *schedpoint = from;
1263 while (!has_delay_slot(schedpoint)) {
1264 if (!sched_has_next(schedpoint))
1266 schedpoint = sched_next(schedpoint);
1271 static bool block_needs_label(const ir_node *block, const ir_node *sched_prev)
1275 if (has_Block_entity(block))
1278 n_cfgpreds = get_Block_n_cfgpreds(block);
1279 if (n_cfgpreds == 0) {
1281 } else if (n_cfgpreds > 1) {
1284 ir_node *cfgpred = get_Block_cfgpred(block, 0);
1285 ir_node *cfgpred_block = get_nodes_block(cfgpred);
1286 if (is_Proj(cfgpred) && is_sparc_SwitchJmp(get_Proj_pred(cfgpred)))
1288 return sched_prev != cfgpred_block || get_irn_link(cfgpred) != block;
1293 * Walks over the nodes in a block connected by scheduling edges
1294 * and emits code for each node.
1296 static void sparc_emit_block(ir_node *block, ir_node *prev)
1299 ir_node *next_delay_slot;
1301 assert(is_Block(block));
1303 if (block_needs_label(block, prev)) {
1304 be_gas_emit_block_name(block);
1305 be_emit_cstring(":\n");
1306 be_emit_write_line();
1309 next_delay_slot = find_next_delay_slot(sched_first(block));
1310 if (next_delay_slot != NULL)
1311 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1313 sched_foreach(block, node) {
1314 if (node == delay_slot_filler) {
1318 sparc_emit_node(node);
1320 if (node == next_delay_slot) {
1321 assert(delay_slot_filler == NULL);
1322 next_delay_slot = find_next_delay_slot(sched_next(node));
1323 if (next_delay_slot != NULL)
1324 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1330 * Emits code for function start.
1332 static void sparc_emit_func_prolog(ir_graph *irg)
1334 ir_entity *ent = get_irg_entity(irg);
1335 be_gas_emit_function_prolog(ent, 4);
1336 be_emit_write_line();
1340 * Emits code for function end
1342 static void sparc_emit_func_epilog(ir_graph *irg)
1344 ir_entity *ent = get_irg_entity(irg);
1345 const char *irg_name = get_entity_ld_name(ent);
1346 be_emit_write_line();
1347 be_emit_irprintf("\t.size %s, .-%s\n", irg_name, irg_name);
1348 be_emit_cstring("# -- End ");
1349 be_emit_string(irg_name);
1350 be_emit_cstring("\n");
1351 be_emit_write_line();
1354 static void sparc_gen_labels(ir_node *block, void *env)
1357 int n = get_Block_n_cfgpreds(block);
1360 for (n--; n >= 0; n--) {
1361 pred = get_Block_cfgpred(block, n);
1362 set_irn_link(pred, block); // link the pred of a block (which is a jmp)
1366 void sparc_emit_routine(ir_graph *irg)
1368 ir_entity *entity = get_irg_entity(irg);
1369 ir_node **block_schedule;
1373 heights = heights_new(irg);
1375 /* register all emitter functions */
1376 sparc_register_emitters();
1377 be_dbg_method_begin(entity);
1379 /* create the block schedule. For now, we don't need it earlier. */
1380 block_schedule = be_create_block_schedule(irg);
1382 sparc_emit_func_prolog(irg);
1383 irg_block_walk_graph(irg, sparc_gen_labels, NULL, NULL);
1385 /* inject block scheduling links & emit code of each block */
1386 n = ARR_LEN(block_schedule);
1387 for (i = 0; i < n; ++i) {
1388 ir_node *block = block_schedule[i];
1389 ir_node *next_block = i+1 < n ? block_schedule[i+1] : NULL;
1390 set_irn_link(block, next_block);
1393 for (i = 0; i < n; ++i) {
1394 ir_node *block = block_schedule[i];
1395 ir_node *prev = i>=1 ? block_schedule[i-1] : NULL;
1396 if (block == get_irg_end_block(irg))
1398 sparc_emit_block(block, prev);
1401 /* emit function epilog */
1402 sparc_emit_func_epilog(irg);
1404 heights_free(heights);
1407 void sparc_init_emitter(void)
1409 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.emit");