2 * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief emit assembler for a backend graph
23 * @author Hannes Rapp, Matthias Braun
30 #include "bitfiddle.h"
42 #include "raw_bitset.h"
47 #include "beblocksched.h"
50 #include "be_dbgout.h"
54 #include "sparc_emitter.h"
55 #include "gen_sparc_emitter.h"
56 #include "sparc_nodes_attr.h"
57 #include "sparc_new_nodes.h"
58 #include "gen_sparc_regalloc_if.h"
/* File-scope emitter state. */
60 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Dominance/height info used while picking delay-slot fillers. */
62 static ir_heights_t *heights;
63 static const ir_node *delay_slot_filler; /**< this node has been choosen to fill
64                                               the next delay slot */
/* Forward declaration: emitting a delay-slot filler recurses into this. */
66 static void sparc_emit_node(const ir_node *node);
/* Emits a node's immediate operand: either a plain (encodeable) signed value,
 * or an entity reference — %tle_lox10(...) for TLS entities, %lo(...) otherwise —
 * with an optional non-zero addend.
 * NOTE(review): this listing elides lines (braces/else branches of the
 * entity-vs-value dispatch); comments refer to visible lines only. */
68 void sparc_emit_immediate(const ir_node *node)
70 const sparc_attr_t *attr = get_sparc_attr_const(node);
71 ir_entity *entity = attr->immediate_value_entity;
/* Plain-value path: the value must fit the 13-bit simm field. */
74 int32_t value = attr->immediate_value;
75 assert(sparc_is_value_imm_encodeable(value));
76 be_emit_irprintf("%d", value);
/* Entity path: choose the relocation operator by the entity's owner type. */
78 if (get_entity_owner(entity) == get_tls_type()) {
79 be_emit_cstring("%tle_lox10(");
81 be_emit_cstring("%lo(");
83 be_gas_emit_entity(entity);
/* Append the addend only when non-zero. */
84 if (attr->immediate_value != 0) {
85 be_emit_irprintf("%+d", attr->immediate_value);
/* Emits the high 22 bits of an immediate for sethi: %hi(value), or for
 * entities %tle_hix22(...) (TLS) / %hi(...) with an optional addend.
 * NOTE(review): lines elided in this listing (entity/value dispatch). */
91 void sparc_emit_high_immediate(const ir_node *node)
93 const sparc_attr_t *attr = get_sparc_attr_const(node);
94 ir_entity *entity = attr->immediate_value_entity;
/* Plain value: print as unsigned hex inside %hi(). */
97 uint32_t value = (uint32_t) attr->immediate_value;
98 be_emit_irprintf("%%hi(0x%X)", value);
/* Entity: TLS entities use the %tle_hix22 relocation operator. */
100 if (get_entity_owner(entity) == get_tls_type()) {
101 be_emit_cstring("%tle_hix22(");
103 be_emit_cstring("%hi(");
105 be_gas_emit_entity(entity);
106 if (attr->immediate_value != 0) {
107 be_emit_irprintf("%+d", attr->immediate_value);
/* Emits the name of the register assigned to input operand @p pos. */
113 void sparc_emit_source_register(const ir_node *node, int pos)
115 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
117 be_emit_string(arch_register_get_name(reg));
/* Emits the name of the register assigned to output operand @p pos. */
120 void sparc_emit_dest_register(const ir_node *node, int pos)
122 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
124 be_emit_string(arch_register_get_name(reg));
128 * Emits either a imm or register depending on arity of node
130 * @param register no (-1 if no register)
/* The immediate-form flag on the node decides which operand form is used. */
132 void sparc_emit_reg_or_imm(const ir_node *node, int pos)
134 if (arch_get_irn_flags(node) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
135 // we have a imm input
136 sparc_emit_immediate(node);
/* otherwise: a regular register input at @p pos */
139 sparc_emit_source_register(node, pos);
/* Emits the offset part of a load/store address: a register (reg+reg form),
 * a frame-entity offset, or an immediate/entity offset.  Emits nothing when
 * the offset is zero without an entity.
 * NOTE(review): lines elided in this listing (e.g. frame-entity offset
 * computation between lines 157 and 159). */
146 void sparc_emit_offset(const ir_node *node, int offset_node_pos)
148 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
/* reg+reg addressing: no immediate/entity parts may be present. */
150 if (attr->is_reg_reg) {
151 assert(!attr->is_frame_entity);
152 assert(attr->base.immediate_value == 0);
153 assert(attr->base.immediate_value_entity == NULL);
155 sparc_emit_source_register(node, offset_node_pos);
156 } else if (attr->is_frame_entity) {
157 int32_t offset = attr->base.immediate_value;
159 assert(sparc_is_value_imm_encodeable(offset));
160 be_emit_irprintf("%+ld", offset);
/* immediate/entity offset (only when actually non-trivial) */
162 } else if (attr->base.immediate_value != 0
163 || attr->base.immediate_value_entity != NULL) {
165 sparc_emit_immediate(node);
/* Emits "<basereg> <offset>" for a load/store address and warns when a
 * stack-pointer-relative access falls inside the register save area. */
169 void sparc_emit_source_reg_and_offset(const ir_node *node, int regpos,
172 const arch_register_t *reg = arch_get_irn_register_in(node, regpos);
173 const sparc_load_store_attr_t *attr;
/* Sanity check: SP-relative immediate offsets below SPARC_SAVE_AREA_SIZE
 * would clobber/read the save area reserved for register windows. */
176 if (reg == &sparc_registers[REG_SP]) {
177 attr = get_sparc_load_store_attr_const(node);
178 if (!attr->is_reg_reg
179 && attr->base.immediate_value < SPARC_SAVE_AREA_SIZE) {
181 ir_fprintf(stderr, "warning: emitting stack pointer relative load/store with offset < %d\n", SPARC_SAVE_AREA_SIZE);
186 sparc_emit_source_register(node, regpos);
187 sparc_emit_offset(node, offpos);
/* Emits the size suffix for an FP load/store: 'd' (64 bit), 'q' (128 bit);
 * the 32-bit case is elided in this listing (presumably emits nothing or 's'
 * — confirm against full source).  Panics on any other float mode. */
190 void sparc_emit_float_load_store_mode(const ir_node *node)
192 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
193 ir_mode *mode = attr->load_store_mode;
194 int bits = get_mode_size_bits(mode);
196 assert(mode_is_float(mode));
200 case 64: be_emit_char('d'); return;
201 case 128: be_emit_char('q'); return;
203 panic("invalid float load/store mode %+F", mode);
207 * Emit load mode char
/* Emits the ld mnemonic suffix by operand size/signedness: sh/uh (16 bit),
 * sb/ub (8 bit); the 64-bit branch body is elided in this listing. */
209 void sparc_emit_load_mode(const ir_node *node)
211 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
212 ir_mode *mode = attr->load_store_mode;
213 int bits = get_mode_size_bits(mode);
214 bool is_signed = mode_is_signed(mode);
217 be_emit_string(is_signed ? "sh" : "uh");
218 } else if (bits == 8) {
219 be_emit_string(is_signed ? "sb" : "ub");
220 } else if (bits == 64) {
228 * Emit store mode char
/* Emits the st mnemonic suffix for the store width; the per-width emit
 * lines (8/64 bit) are elided in this listing. */
230 void sparc_emit_store_mode(const ir_node *node)
232 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
233 ir_mode *mode = attr->load_store_mode;
234 int bits = get_mode_size_bits(mode);
238 } else if (bits == 8) {
240 } else if (bits == 64) {
/* Emits the FP operation suffix char for @p mode; 32-bit ('s') branch body
 * elided in this listing, 64/128-bit branches visible.  Panics otherwise. */
247 static void emit_fp_suffix(const ir_mode *mode)
249 unsigned bits = get_mode_size_bits(mode);
250 assert(mode_is_float(mode));
254 } else if (bits == 64) {
256 } else if (bits == 128) {
259 panic("invalid FP mode");
/* Emits the suffix for the source mode of an FP conversion. */
263 void sparc_emit_fp_conv_source(const ir_node *node)
265 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
266 emit_fp_suffix(attr->src_mode);
/* Emits the suffix for the destination mode of an FP conversion. */
269 void sparc_emit_fp_conv_destination(const ir_node *node)
271 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
272 emit_fp_suffix(attr->dest_mode);
276 * emits the FP mode suffix char
278 void sparc_emit_fp_mode_suffix(const ir_node *node)
280 const sparc_fp_attr_t *attr = get_sparc_fp_attr_const(node);
281 emit_fp_suffix(attr->fp_mode);
/* The jump's target block was stashed in its link field beforehand. */
284 static ir_node *get_jump_target(const ir_node *jump)
286 return (ir_node*)get_irn_link(jump);
290 * Returns the target label for a control flow node.
292 static void sparc_emit_cfop_target(const ir_node *node)
294 ir_node *block = get_jump_target(node);
295 be_gas_emit_block_name(block);
299 * returns true if a sparc_call calls a register and not an immediate
/* No call-target entity present => the target address comes from a register. */
301 static bool is_sparc_reg_call(const ir_node *node)
303 const sparc_attr_t *attr = get_sparc_attr_const(node);
304 return attr->immediate_value_entity == NULL;
/* For register calls the destination address is the last input operand. */
307 static int get_sparc_Call_dest_addr_pos(const ir_node *node)
309 assert(is_sparc_reg_call(node));
310 return get_irn_arity(node)-1;
/* A Ba (branch always) is a fallthrough when its target equals the block
 * scheduled directly after the current one (block schedule via link field). */
313 static bool ba_is_fallthrough(const ir_node *node)
315 ir_node *block = get_nodes_block(node);
316 ir_node *next_block = (ir_node*)get_irn_link(block);
317 return get_irn_link(node) == next_block;
/* True for nodes that emit no actual machine instruction: same-register
 * copies, zero-offset IncSP, fallthrough Ba, Keep/Start/Phi. */
320 static bool is_no_instruction(const ir_node *node)
322 /* copies are nops if src_reg == dest_reg */
323 if (be_is_Copy(node) || be_is_CopyKeep(node)) {
324 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
325 const arch_register_t *dest_reg = arch_get_irn_register_out(node, 0);
327 if (src_reg == dest_reg)
/* IncSP with no offset does nothing */
330 if (be_is_IncSP(node) && be_get_IncSP_offset(node) == 0)
332 /* Ba is not emitted if it is a simple fallthrough */
333 if (is_sparc_Ba(node) && ba_is_fallthrough(node))
336 return be_is_Keep(node) || be_is_Start(node) || is_Phi(node);
/* True iff the emitted instruction has a delay slot.  A fallthrough Ba emits
 * nothing, hence no delay slot. */
339 static bool has_delay_slot(const ir_node *node)
341 if (is_sparc_Ba(node)) {
342 return !ba_is_fallthrough(node);
345 return arch_get_irn_flags(node) & sparc_arch_irn_flag_has_delay_slot;
348 /** returns true if the emitter for this sparc node can produce more than one
349  * actual sparc instruction.
350  * Usually it is a bad sign if we have to add instructions here. We should
351  * rather try to get them lowered down. So we can actually put them into
352  * delay slots and make them more accessible to the scheduler.
354 static bool emits_multiple_instructions(const ir_node *node)
/* anything with a delay slot emits at least the slot filler/nop as well */
356 if (has_delay_slot(node))
/* aggregate-return calls additionally emit "unimp <size>" after the call */
359 if (is_sparc_Call(node)) {
360 return arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return;
363 return is_sparc_SMulh(node) || is_sparc_UMulh(node)
364 || is_sparc_SDiv(node) || is_sparc_UDiv(node)
365 || be_is_MemPerm(node) || be_is_Perm(node);
/* Returns whether any input of @p node is assigned @p reg.
 * NOTE(review): the loop body comparing in_reg against reg is elided in
 * this listing. */
368 static bool uses_reg(const ir_node *node, const arch_register_t *reg)
370 int arity = get_irn_arity(node);
373 for (i = 0; i < arity; ++i) {
374 const arch_register_t *in_reg = arch_get_irn_register_in(node, i);
/* Returns whether any output of @p node is assigned @p reg.
 * NOTE(review): the comparison/return lines are elided in this listing. */
381 static bool writes_reg(const ir_node *node, const arch_register_t *reg)
383 unsigned n_outs = arch_get_irn_n_outs(node);
385 for (o = 0; o < n_outs; ++o) {
386 const arch_register_t *out_reg = arch_get_irn_register_out(node, o);
/* Checks whether @p node may be moved down into the delay slot of @p to.
 * Walks the schedule from node to to, verifying that no intermediate node
 * (a) consumes node's results or (b) clobbers registers node reads; then
 * applies special rules for Call/Return consumers (%o7, call address).
 * NOTE(review): several lines (returns, loop heads) are elided in this
 * listing; comments describe the visible checks only. */
393 static bool can_move_into_delayslot(const ir_node *node, const ir_node *to)
395 int node_arity = get_irn_arity(node);
396 ir_node *schedpoint = sched_next(node);
399 if (schedpoint != to) {
401 int arity = get_irn_arity(schedpoint);
402 unsigned n_outs = arch_get_irn_n_outs(schedpoint);
404 /* the node must not use our computed values */
405 for (i = 0; i < arity; ++i) {
406 ir_node *in = get_irn_n(schedpoint, i);
407 if (skip_Proj(in) == node)
411 /* the node must not overwrite registers of our inputs */
412 for (i = 0; i < node_arity; ++i) {
413 ir_node *in = get_irn_n(node, i);
414 const arch_register_t *reg = arch_get_irn_register(in);
415 const arch_register_req_t *in_req
416 = arch_get_irn_register_req_in(node, i);
420 for (o = 0; o < n_outs; ++o) {
421 const arch_register_t *outreg
422 = arch_get_irn_register_out(schedpoint, o);
423 const arch_register_req_t *outreq
424 = arch_get_irn_register_req_out(schedpoint, o);
/* overlap test in both directions: multi-register values (width > 1)
 * conflict when either range intersects the other */
427 if (outreg->global_index >= reg->global_index
428 && outreg->global_index
429 < (unsigned)reg->global_index + in_req->width)
431 if (reg->global_index >= outreg->global_index
433 < (unsigned)outreg->global_index + outreq->width)
/* special handling for the node owning the delay slot */
438 if (is_sparc_Call(to)) {
440 /** all deps are used after the delay slot so, we're fine */
441 if (!is_sparc_reg_call(to))
/* a register call reads its destination address before the slot runs */
444 check = get_irn_n(to, get_sparc_Call_dest_addr_pos(to));
445 if (skip_Proj(check) == node)
448 /* the Call also destroys the value of %o7, but since this is
449  * currently marked as ignore register in the backend, it
450  * should never be used by the instruction in the delay slot. */
451 if (uses_reg(node, &sparc_registers[REG_O7]))
454 } else if (is_sparc_Return(to)) {
455 /* return uses the value of %o7, all other values are not
456  * immediately used */
457 if (writes_reg(node, &sparc_registers[REG_O7]))
461 /* the node must not use our computed values */
462 int arity = get_irn_arity(to);
464 for (i = 0; i < arity; ++i) {
465 ir_node *in = get_irn_n(to, i);
466 if (skip_Proj(in) == node)
473 schedpoint = sched_next(schedpoint);
478 * search for an instruction that can fill the delay slot of @p node
/* Scans backwards in the schedule (bounded by PICK_DELAY_SLOT_MAX_DISTANCE)
 * for a single-instruction node that can legally move into the delay slot.
 * Returns NULL when none is found (NULL-return lines elided in listing). */
480 static const ir_node *pick_delay_slot_for(const ir_node *node)
482 const ir_node *schedpoint = node;
484 /* currently we don't track which registers are still alive, so we can't
485  * pick any other instructions other than the one directly preceding */
486 static const unsigned PICK_DELAY_SLOT_MAX_DISTANCE = 10;
488 assert(has_delay_slot(node));
490 while (sched_has_prev(schedpoint)) {
491 schedpoint = sched_prev(schedpoint);
/* never cross another delay-slot instruction */
493 if (has_delay_slot(schedpoint))
496 /* skip things which don't really result in instructions */
497 if (is_no_instruction(schedpoint))
500 if (tries++ >= PICK_DELAY_SLOT_MAX_DISTANCE)
/* a filler must be exactly one machine instruction */
503 if (emits_multiple_instructions(schedpoint))
506 if (!can_move_into_delayslot(schedpoint, node))
509 /* found something */
517 * Emits code for stack space management
/* Emits "sub/add %sp, |offset|, %sp".  Positive offset grows the stack
 * (downwards => sub); the no-op/ignore-flag early-outs are elided here. */
519 static void emit_be_IncSP(const ir_node *irn)
521 int offset = be_get_IncSP_offset(irn);
526 /* SPARC stack grows downwards */
528 be_emit_cstring("\tsub ");
531 be_emit_cstring("\tadd ");
534 sparc_emit_source_register(irn, 0);
/* negate: a positive IncSP offset means allocating stack space */
535 be_emit_irprintf(", %d", -offset);
536 be_emit_cstring(", ");
537 sparc_emit_dest_register(irn, 0);
538 be_emit_finish_line_gas(irn);
542 * emits code for mulh
/* Emits (s/u)mul followed by a mov from %y: the SPARC multiply leaves the
 * high 32 bits of the product in the %y register. */
544 static void emit_sparc_Mulh(const ir_node *irn)
546 be_emit_cstring("\t");
/* prefix: "u" for UMulh, "s" for SMulh (prefix-emit lines elided here) */
547 if (is_sparc_UMulh(irn)) {
550 assert(is_sparc_SMulh(irn));
553 be_emit_cstring("mul ");
555 sparc_emit_source_register(irn, 0);
556 be_emit_cstring(", ");
557 sparc_emit_reg_or_imm(irn, 1);
558 be_emit_cstring(", ");
559 sparc_emit_dest_register(irn, 0);
560 be_emit_finish_line_gas(irn);
562 // our result is in the y register now
563 // we just copy it to the assigned target reg
564 be_emit_cstring("\tmov %y, ");
565 sparc_emit_dest_register(irn, 0);
566 be_emit_finish_line_gas(irn);
/* Emits the pending delay-slot filler (and consumes it), or a nop when no
 * filler was found for this slot. */
569 static void fill_delay_slot(void)
571 if (delay_slot_filler != NULL) {
572 sparc_emit_node(delay_slot_filler);
573 delay_slot_filler = NULL;
575 be_emit_cstring("\tnop\n");
576 be_emit_write_line();
/* Emits a 64/32-bit division: writes the high word into %y via wr, waits the
 * required delay (nop-emitting loop body elided here), then sdiv/udiv. */
580 static void emit_sparc_Div(const ir_node *node, bool is_signed)
582 /* can we get the delay count of the wr instruction somewhere? */
583 unsigned wry_delay_count = 3;
586 be_emit_cstring("\twr ");
587 sparc_emit_source_register(node, 0);
588 be_emit_cstring(", 0, %y");
589 be_emit_finish_line_gas(node);
/* wr %y has a multi-cycle latency before %y may be read by the divide */
591 for (i = 0; i < wry_delay_count; ++i) {
595 be_emit_irprintf("\t%s ", is_signed ? "sdiv" : "udiv");
596 sparc_emit_source_register(node, 1);
597 be_emit_cstring(", ");
598 sparc_emit_reg_or_imm(node, 2);
599 be_emit_cstring(", ");
600 sparc_emit_dest_register(node, 0);
601 be_emit_finish_line_gas(node);
/* Signed division: thin wrapper around emit_sparc_Div. */
604 static void emit_sparc_SDiv(const ir_node *node)
606 emit_sparc_Div(node, true);
/* Unsigned division: thin wrapper around emit_sparc_Div. */
609 static void emit_sparc_UDiv(const ir_node *node)
611 emit_sparc_Div(node, false);
/* Emits a call: either "call <reg>, 0" (register call) or "call <entity>[+off]".
 * Fills the delay slot afterwards (fill_delay_slot line elided here) and, for
 * aggregate returns, emits the SPARC ABI "unimp 8" struct-return marker. */
614 static void emit_sparc_Call(const ir_node *node)
616 be_emit_cstring("\tcall ");
617 if (is_sparc_reg_call(node)) {
618 int dest_addr = get_sparc_Call_dest_addr_pos(node);
619 sparc_emit_source_register(node, dest_addr);
621 const sparc_attr_t *attr = get_sparc_attr_const(node);
622 ir_entity *entity = attr->immediate_value_entity;
623 be_gas_emit_entity(entity);
624 if (attr->immediate_value != 0) {
625 be_emit_irprintf("%+d", attr->immediate_value);
627 be_emit_cstring(", 0");
629 be_emit_finish_line_gas(node);
/* aggregate return value: caller expects an unimp after the call+slot */
633 if (arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return) {
634 be_emit_cstring("\tunimp 8\n");
635 be_emit_write_line();
/* Emits a register Perm (swap of two registers) using the three-xor trick,
 * avoiding a scratch register:
 *   r0 ^= r1; r1 ^= r0; r0 ^= r1;  (operand order as emitted below) */
639 static void emit_be_Perm(const ir_node *irn)
641 be_emit_cstring("\txor ");
642 sparc_emit_source_register(irn, 1);
643 be_emit_cstring(", ");
644 sparc_emit_source_register(irn, 0);
645 be_emit_cstring(", ");
646 sparc_emit_source_register(irn, 0);
647 be_emit_finish_line_gas(NULL);
649 be_emit_cstring("\txor ");
650 sparc_emit_source_register(irn, 1);
651 be_emit_cstring(", ");
652 sparc_emit_source_register(irn, 0);
653 be_emit_cstring(", ");
654 sparc_emit_source_register(irn, 1);
655 be_emit_finish_line_gas(NULL);
657 be_emit_cstring("\txor ");
658 sparc_emit_source_register(irn, 1);
659 be_emit_cstring(", ");
660 sparc_emit_source_register(irn, 0);
661 be_emit_cstring(", ");
662 sparc_emit_source_register(irn, 0);
663 be_emit_finish_line_gas(irn);
666 /* The stack pointer must always be SPARC_STACK_ALIGNMENT bytes aligned, so get
667  * the next bigger integer that's evenly divisible by it. */
668 static unsigned get_aligned_sp_change(const unsigned num_regs)
670 const unsigned bytes = num_regs * SPARC_REGISTER_SIZE;
671 return round_up2(bytes, SPARC_STACK_ALIGNMENT);
674 /* Spill register l0 or both l0 and l1, depending on n_spilled and n_to_spill.*/
/* On the first spill also allocates (aligned) stack space for two registers;
 * spill slots live just above the register save area (SPARC_MIN_STACKSIZE). */
675 static void memperm_emit_spill_registers(const ir_node *node, int n_spilled,
678 assert(n_spilled < n_to_spill);
680 if (n_spilled == 0) {
681 /* We always reserve stack space for two registers because during copy
682  * processing we don't know yet if we also need to handle a cycle which
683  * needs two registers. More complicated code in emit_MemPerm would
684  * prevent wasting SPARC_REGISTER_SIZE bytes of stack space but
685  * it is not worth the worse readability of emit_MemPerm. */
687 /* Keep stack pointer aligned. */
688 unsigned sp_change = get_aligned_sp_change(2);
689 be_emit_irprintf("\tsub %%sp, %u, %%sp", sp_change);
690 be_emit_finish_line_gas(node);
692 /* Spill register l0. */
693 be_emit_irprintf("\tst %%l0, [%%sp%+d]", SPARC_MIN_STACKSIZE);
694 be_emit_finish_line_gas(node);
697 if (n_to_spill == 2) {
698 /* Spill register l1. */
699 be_emit_irprintf("\tst %%l1, [%%sp%+d]", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
700 be_emit_finish_line_gas(node);
704 /* Restore register l0 or both l0 and l1, depending on n_spilled. */
/* Mirror of memperm_emit_spill_registers: reload l1 (if spilled) then l0,
 * then release the reserved stack space again. */
705 static void memperm_emit_restore_registers(const ir_node *node, int n_spilled)
709 if (n_spilled == 2) {
710 /* Restore register l1. */
711 be_emit_irprintf("\tld [%%sp%+d], %%l1", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
712 be_emit_finish_line_gas(node);
715 /* Restore register l0. */
716 be_emit_irprintf("\tld [%%sp%+d], %%l0", SPARC_MIN_STACKSIZE);
717 be_emit_finish_line_gas(node);
719 /* Restore stack pointer. */
720 sp_change = get_aligned_sp_change(2);
721 be_emit_irprintf("\tadd %%sp, %u, %%sp", sp_change);
722 be_emit_finish_line_gas(node);
725 /* Emit code to copy in_ent to out_ent. Only uses l0. */
/* Both entities are addressed frame-pointer-relative via the stack layout. */
726 static void memperm_emit_copy(const ir_node *node, ir_entity *in_ent,
729 ir_graph *irg = get_irn_irg(node);
730 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
731 int off_in = be_get_stack_entity_offset(layout, in_ent, 0);
732 int off_out = be_get_stack_entity_offset(layout, out_ent, 0);
734 /* Load from input entity. */
735 be_emit_irprintf("\tld [%%fp%+d], %%l0", off_in);
736 be_emit_finish_line_gas(node);
738 /* Store to output entity. */
739 be_emit_irprintf("\tst %%l0, [%%fp%+d]", off_out);
740 be_emit_finish_line_gas(node);
743 /* Emit code to swap ent1 and ent2. Uses l0 and l1. */
/* Load both slots, then store each value to the other slot. */
744 static void memperm_emit_swap(const ir_node *node, ir_entity *ent1,
747 ir_graph *irg = get_irn_irg(node);
748 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
749 int off1 = be_get_stack_entity_offset(layout, ent1, 0);
750 int off2 = be_get_stack_entity_offset(layout, ent2, 0);
752 /* Load from first input entity. */
753 be_emit_irprintf("\tld [%%fp%+d], %%l0", off1);
754 be_emit_finish_line_gas(node);
756 /* Load from second input entity. */
757 be_emit_irprintf("\tld [%%fp%+d], %%l1", off2);
758 be_emit_finish_line_gas(node);
760 /* Store first value to second output entity. */
761 be_emit_irprintf("\tst %%l0, [%%fp%+d]", off2);
762 be_emit_finish_line_gas(node);
764 /* Store second value to first output entity. */
765 be_emit_irprintf("\tst %%l1, [%%fp%+d]", off1);
766 be_emit_finish_line_gas(node);
769 /* Find the index of ent in ents or return -1 if not found. */
770 static int get_index(ir_entity **ents, int n, ir_entity *ent)
/* linear scan; comparison/return lines elided in this listing */
774 for (i = 0; i < n; ++i)
782 * Emit code for a MemPerm node.
784 * Analyze MemPerm for copy chains and cyclic swaps and resolve them using
786 * This function is conceptually very similar to permute_values in
789 static void emit_be_MemPerm(const ir_node *node)
791 int memperm_arity = be_get_MemPerm_entity_arity(node);
792 /* Upper limit for the number of participating entities is twice the
793  * arity, e.g., for a simple copying MemPerm node with one input/output. */
794 int max_size = 2 * memperm_arity;
795 ir_entity **entities = ALLOCANZ(ir_entity *, max_size);
796 /* sourceof contains the input entity for each entity. If an entity is
797  * never used as an output, its entry in sourceof is a fix point. */
798 int *sourceof = ALLOCANZ(int, max_size);
799 /* n_users counts how many output entities use this entity as their input.*/
800 int *n_users = ALLOCANZ(int, max_size);
801 /* n_spilled records the number of spilled registers, either 1 or 2. */
805 /* This implementation currently only works with frame pointers. */
806 ir_graph *irg = get_irn_irg(node);
807 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
808 assert(!layout->sp_relative && "MemPerms currently do not work without frame pointers");
/* initialize sourceof as the identity (every entity is a fix point) —
 * loop body elided in this listing */
810 for (i = 0; i < max_size; ++i) {
/* build the permutation: map each output entity to its input entity */
814 for (i = n = 0; i < memperm_arity; ++i) {
815 ir_entity *out = be_get_MemPerm_out_entity(node, i);
816 ir_entity *in = be_get_MemPerm_in_entity(node, i);
817 int oidx; /* Out index */
818 int iidx; /* In index */
820 /* Insert into entities to be able to operate on unique indices. */
821 if (get_index(entities, n, out) == -1)
823 if (get_index(entities, n, in) == -1)
826 oidx = get_index(entities, n, out);
827 iidx = get_index(entities, n, in);
829 sourceof[oidx] = iidx; /* Remember the source. */
830 ++n_users[iidx]; /* Increment number of users of this entity. */
833 /* First do all the copies. */
834 for (oidx = 0; oidx < n; /* empty */) {
835 int iidx = sourceof[oidx];
837 /* Nothing to do for fix points.
838  * Also, if entities[oidx] is used as an input by another copy, we
839  * can't overwrite entities[oidx] yet.*/
840 if (iidx == oidx || n_users[oidx] > 0) {
845 /* We found the end of a 'chain', so do the copy. */
846 if (n_spilled == 0) {
847 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/1);
850 memperm_emit_copy(node, entities[iidx], entities[oidx]);
/* mark oidx as done by making it a fix point */
853 sourceof[oidx] = oidx;
855 assert(n_users[iidx] > 0);
856 /* Decrementing the number of users might enable us to do another
/* copy for an earlier index; restart from there in that case */
860 if (iidx < oidx && n_users[iidx] == 0) {
867 /* The rest are cycles. */
868 for (oidx = 0; oidx < n; /* empty */) {
869 int iidx = sourceof[oidx];
872 /* Nothing to do for fix points. */
878 assert(n_users[iidx] == 1);
880 /* Swap the two values to resolve the cycle. */
882 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/2);
885 memperm_emit_swap(node, entities[iidx], entities[oidx]);
887 tidx = sourceof[iidx];
/* iidx now holds its own (old) value: it becomes a fix point */
889 sourceof[iidx] = iidx;
891 /* The source of oidx is now the old source of iidx, because we swapped
892  * the two entities. */
893 sourceof[oidx] = tidx;
897 /* Only fix points should remain. */
898 for (i = 0; i < max_size; ++i) {
899 assert(sourceof[i] == i);
903 assert(n_spilled > 0 && "Useless MemPerm node");
905 memperm_emit_restore_registers(node, n_spilled);
/* Emits the function return: "jmp %o7+8" (or +12 when the caller placed an
 * unimp struct-return marker after the call, i.e. cc_compound_ret).
 * When a Restore fills the delay slot, the return address register changes
 * (destreg reassignment lines elided in this listing); delay slot is filled
 * after the jmp (elided). */
908 static void emit_sparc_Return(const ir_node *node)
910 ir_graph *irg = get_irn_irg(node);
911 ir_entity *entity = get_irg_entity(irg);
912 ir_type *type = get_entity_type(entity);
914 const char *destreg = "%o7";
916 /* hack: we don't explicitely model register changes because of the
917  * restore node. So we have to do it manually here */
918 if (delay_slot_filler != NULL &&
919 (is_sparc_Restore(delay_slot_filler)
920 || is_sparc_RestoreZero(delay_slot_filler))) {
923 be_emit_cstring("\tjmp ");
924 be_emit_string(destreg);
/* skip the caller's "unimp <size>" marker for compound (struct) returns */
925 if (get_method_calling_convention(type) & cc_compound_ret) {
926 be_emit_cstring("+12");
928 be_emit_cstring("+8");
930 be_emit_finish_line_gas(node);
/* Maps an %i0-%i7 register to the corresponding %o0-%o7 register (after a
 * restore, the current window's in registers become the caller's outs);
 * registers outside the in-range are returned unchanged. */
934 static const arch_register_t *map_i_to_o_reg(const arch_register_t *reg)
936 unsigned idx = reg->global_index;
937 if (idx < REG_I0 || idx > REG_I7)
939 idx += REG_O0 - REG_I0;
940 assert(REG_O0 <= idx && idx <= REG_O7);
941 return &sparc_registers[idx];
/* Emits "restore <src1>, <src2/imm>, <dest>".  The result register is named
 * from the caller's window perspective, so %iN must be printed as %oN. */
944 static void emit_sparc_Restore(const ir_node *node)
946 const arch_register_t *destreg
947 = arch_get_irn_register_out(node, pn_sparc_Restore_res);
948 be_emit_cstring("\trestore ");
949 sparc_emit_source_register(node, 1);
950 be_emit_cstring(", ");
951 sparc_emit_reg_or_imm(node, 2);
952 be_emit_cstring(", ");
953 destreg = map_i_to_o_reg(destreg);
955 be_emit_string(arch_register_get_name(destreg));
956 be_emit_finish_line_gas(node);
/* Emits the address of a frame entity: add (non-negative offset) or sub
 * (negative offset, emitted as its absolute value) on the base register.
 * The offset must fit the immediate encoding in either direction. */
959 static void emit_sparc_FrameAddr(const ir_node *node)
961 const sparc_attr_t *attr = get_sparc_attr_const(node);
962 int32_t offset = attr->immediate_value;
965 be_emit_cstring("\tadd ");
966 sparc_emit_source_register(node, 0);
967 be_emit_cstring(", ");
968 assert(sparc_is_value_imm_encodeable(offset));
969 be_emit_irprintf("%ld", offset);
/* negative offset: emit as subtraction of the absolute value */
971 be_emit_cstring("\tsub ");
972 sparc_emit_source_register(node, 0);
973 be_emit_cstring(", ");
974 assert(sparc_is_value_imm_encodeable(-offset));
975 be_emit_irprintf("%ld", -offset);
978 be_emit_cstring(", ");
979 sparc_emit_dest_register(node, 0);
980 be_emit_finish_line_gas(node);
/* Maps an ir_relation to the unsigned integer-condition branch mnemonic.
 * Only the ordered part of the relation is considered. */
983 static const char *get_icc_unsigned(ir_relation relation)
985 switch (relation & (ir_relation_less_equal_greater)) {
986 case ir_relation_false: return "bn";
987 case ir_relation_equal: return "be";
988 case ir_relation_less: return "blu";
989 case ir_relation_less_equal: return "bleu";
990 case ir_relation_greater: return "bgu";
991 case ir_relation_greater_equal: return "bgeu";
992 case ir_relation_less_greater: return "bne";
993 case ir_relation_less_equal_greater: return "ba";
994 default: panic("Cmp has unsupported relation");
/* Maps an ir_relation to the signed integer-condition branch mnemonic. */
998 static const char *get_icc_signed(ir_relation relation)
1000 switch (relation & (ir_relation_less_equal_greater)) {
1001 case ir_relation_false: return "bn";
1002 case ir_relation_equal: return "be";
1003 case ir_relation_less: return "bl";
1004 case ir_relation_less_equal: return "ble";
1005 case ir_relation_greater: return "bg";
1006 case ir_relation_greater_equal: return "bge";
1007 case ir_relation_less_greater: return "bne";
1008 case ir_relation_less_equal_greater: return "ba";
1009 default: panic("Cmp has unsupported relation");
/* Maps an ir_relation to the floating-point branch mnemonic; unlike the icc
 * tables this one distinguishes unordered comparisons (NaN handling). */
1013 static const char *get_fcc(ir_relation relation)
1016 case ir_relation_false: return "fbn";
1017 case ir_relation_equal: return "fbe";
1018 case ir_relation_less: return "fbl";
1019 case ir_relation_less_equal: return "fble";
1020 case ir_relation_greater: return "fbg";
1021 case ir_relation_greater_equal: return "fbge";
1022 case ir_relation_less_greater: return "fblg";
1023 case ir_relation_less_equal_greater: return "fbo";
1024 case ir_relation_unordered: return "fbu";
1025 case ir_relation_unordered_equal: return "fbue";
1026 case ir_relation_unordered_less: return "fbul";
1027 case ir_relation_unordered_less_equal: return "fbule";
1028 case ir_relation_unordered_greater: return "fbug";
1029 case ir_relation_unordered_greater_equal: return "fbuge";
1030 case ir_relation_unordered_less_greater: return "fbne";
1031 case ir_relation_true: return "fba";
1033 panic("invalid relation");
/* Selector type for the relation->mnemonic mapping (icc signed/unsigned, fcc). */
1036 typedef const char* (*get_cc_func)(ir_relation relation);
/* Emits a two-way conditional branch: picks the true/false Proj targets,
 * inverts the condition when the true target is the fallthrough block, emits
 * the conditional branch (+ delay slot, elided here) and then either a
 * fallthrough comment or an unconditional ba for the false target. */
1038 static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
1040 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
1041 ir_relation relation = attr->relation;
1042 const ir_node *proj_true = NULL;
1043 const ir_node *proj_false = NULL;
1044 const ir_edge_t *edge;
1045 const ir_node *block;
1046 const ir_node *next_block;
/* find the true/false Projs (false-assignment lines elided in listing) */
1048 foreach_out_edge(node, edge) {
1049 ir_node *proj = get_edge_src_irn(edge);
1050 long nr = get_Proj_proj(proj);
1051 if (nr == pn_Cond_true) {
1058 /* for now, the code works for scheduled and non-schedules blocks */
1059 block = get_nodes_block(node);
1061 /* we have a block schedule */
1062 next_block = (ir_node*)get_irn_link(block);
1064 if (get_irn_link(proj_true) == next_block) {
1065 /* exchange both proj's so the second one can be omitted */
1066 const ir_node *t = proj_true;
1068 proj_true = proj_false;
/* and branch on the negated condition instead */
1070 relation = get_negated_relation(relation);
1073 /* emit the true proj */
1074 be_emit_cstring("\t");
1075 be_emit_string(get_cc(relation));
1077 sparc_emit_cfop_target(proj_true);
1078 be_emit_finish_line_gas(proj_true);
/* false side: omit the jump when it is the fallthrough block */
1082 if (get_irn_link(proj_false) == next_block) {
1083 be_emit_cstring("\t/* fallthrough to ");
1084 sparc_emit_cfop_target(proj_false);
1085 be_emit_cstring(" */");
1086 be_emit_finish_line_gas(proj_false);
1088 be_emit_cstring("\tba ");
1089 sparc_emit_cfop_target(proj_false);
1090 be_emit_finish_line_gas(proj_false);
/* Integer conditional branch: dispatches to the signed or unsigned
 * condition-code table based on the node's is_unsigned attribute. */
1095 static void emit_sparc_Bicc(const ir_node *node)
1097 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
1098 bool is_unsigned = attr->is_unsigned;
1099 emit_sparc_branch(node, is_unsigned ? get_icc_unsigned : get_icc_signed);
/* FP conditional branch.  The fcc flags need one instruction between the
 * compare and the branch, so a nop is inserted when the flag producer is
 * scheduled immediately before us. */
1102 static void emit_sparc_fbfcc(const ir_node *node)
1104 /* if the flags producing node was immediately in front of us, emit
1106 ir_node *flags = get_irn_n(node, n_sparc_fbfcc_flags);
1107 ir_node *prev = sched_prev(node);
1108 if (is_Block(prev)) {
1109 /* TODO: when the flags come from another block, then we have to do
1110  * more complicated tests to see wether the flag producing node is
1111  * potentially in front of us (could happen for fallthroughs) */
1112 panic("TODO: fbfcc flags come from other block");
1114 if (skip_Proj(flags) == prev) {
1115 be_emit_cstring("\tnop\n");
1117 emit_sparc_branch(node, get_fcc);
/* Unconditional branch: emits only a comment for fallthroughs, otherwise
 * "ba <target>" (delay-slot fill after the ba is elided in this listing). */
1120 static void emit_sparc_Ba(const ir_node *node)
1122 if (ba_is_fallthrough(node)) {
1123 be_emit_cstring("\t/* fallthrough to ");
1124 sparc_emit_cfop_target(node);
1125 be_emit_cstring(" */");
1126 be_emit_finish_line_gas(node);
1128 be_emit_cstring("\tba ");
1129 sparc_emit_cfop_target(node);
1130 be_emit_finish_line_gas(node);
/* Emits a computed jump through a jump table: "jmp <reg>" (delay slot fill
 * elided in this listing) followed by the table itself. */
1135 static void emit_sparc_SwitchJmp(const ir_node *node)
1137 const sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr_const(node);
1139 be_emit_cstring("\tjmp ");
1140 sparc_emit_source_register(node, 0);
1141 be_emit_finish_line_gas(node);
1144 emit_jump_table(node, attr->default_proj_num, attr->jump_table,
/* Emits a single-precision register move "fmovs %src, %dst"; wider FP moves
 * are built from several of these by the caller. */
1148 static void emit_fmov(const ir_node *node, const arch_register_t *src_reg,
1149 const arch_register_t *dst_reg)
1151 be_emit_cstring("\tfmovs %");
1152 be_emit_string(arch_register_get_name(src_reg));
1153 be_emit_cstring(", %");
1154 be_emit_string(arch_register_get_name(dst_reg));
1155 be_emit_finish_line_gas(node);
/* Returns the FP register following @p reg (the idx increment line is elided
 * in this listing); used to step through multi-register FP values. */
1158 static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
1160 unsigned idx = reg->global_index;
1161 assert(reg == &sparc_registers[idx]);
1163 assert(idx - REG_F0 < N_sparc_fp_REGS);
1164 return &sparc_registers[idx];
/* Emits a register-to-register copy: nothing when src == dst, a sequence of
 * fmovs for FP values (1, 2 or 4 singles for 32/64/128 bit), or "mov" for
 * integer/data values. */
1167 static void emit_be_Copy(const ir_node *node)
1169 ir_mode *mode = get_irn_mode(node);
1170 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
1171 const arch_register_t *dst_reg = arch_get_irn_register_out(node, 0);
/* same register: no instruction needed */
1173 if (src_reg == dst_reg)
1176 if (mode_is_float(mode)) {
1177 unsigned bits = get_mode_size_bits(mode);
/* n = number of ADDITIONAL fmovs: 0 (32b), 1 (64b), 3 (128b) */
1178 int n = bits > 32 ? bits > 64 ? 3 : 1 : 0;
1180 emit_fmov(node, src_reg, dst_reg);
1181 for (i = 0; i < n; ++i) {
1182 src_reg = get_next_fp_reg(src_reg);
1183 dst_reg = get_next_fp_reg(dst_reg);
1184 emit_fmov(node, src_reg, dst_reg);
1186 } else if (mode_is_data(mode)) {
1187 be_emit_cstring("\tmov ");
1188 sparc_emit_source_register(node, 0);
1189 be_emit_cstring(", ");
1190 sparc_emit_dest_register(node, 0);
1191 be_emit_finish_line_gas(node);
1193 panic("emit_be_Copy: invalid mode");
/* Emitter callback for nodes that produce no code (Keep, Start, Phi, ...). */
1197 static void emit_nothing(const ir_node *irn)
/* Signature of per-opcode emitter callbacks. */
1202 typedef void (*emit_func) (const ir_node *);
/* Registers @p sparc_emit_node as the emitter for opcode @p op via the
 * opcode's generic function pointer. */
1204 static inline void set_emitter(ir_op *op, emit_func sparc_emit_node)
1206 op->ops.generic = (op_func)sparc_emit_node;
1210 * Enters the emitter functions for handled nodes into the generic
1211 * pointer of an opcode.
1213 static void sparc_register_emitters(void)
1215 /* first clear the generic function pointer for all ops */
1216 clear_irp_opcodes_generic_func();
1217 /* register all emitter functions defined in spec */
1218 sparc_register_spec_emitters();
1220 /* custom emitter */
1221 set_emitter(op_be_Copy, emit_be_Copy);
1222 set_emitter(op_be_CopyKeep, emit_be_Copy);
1223 set_emitter(op_be_IncSP, emit_be_IncSP);
1224 set_emitter(op_be_MemPerm, emit_be_MemPerm);
1225 set_emitter(op_be_Perm, emit_be_Perm);
1226 set_emitter(op_sparc_Ba, emit_sparc_Ba);
1227 set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
1228 set_emitter(op_sparc_Call, emit_sparc_Call);
1229 set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
1230 set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
1231 set_emitter(op_sparc_SMulh, emit_sparc_Mulh);
1232 set_emitter(op_sparc_UMulh, emit_sparc_Mulh);
1233 set_emitter(op_sparc_Restore, emit_sparc_Restore);
1234 set_emitter(op_sparc_Return, emit_sparc_Return);
1235 set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
1236 set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
1237 set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
1239 /* no need to emit anything for the following nodes */
1240 set_emitter(op_be_Keep, emit_nothing);
1241 set_emitter(op_sparc_Start, emit_nothing);
1242 set_emitter(op_Phi, emit_nothing);
1246  * Emits code for a node.
/* Dispatches to the emitter registered in the opcode's generic function
 * pointer (see sparc_register_emitters) and panics for unhandled nodes.
 * NOTE(review): the actual call through `func` and the `else` line are
 * on lines missing from this extract. */
1248 static void sparc_emit_node(const ir_node *node)
1250 ir_op *op = get_irn_op(node);
/* a non-NULL generic pointer doubles as "has an emitter" flag */
1252 if (op->ops.generic) {
1253 emit_func func = (emit_func) op->ops.generic;
/* attach the node's source-level debug info to the emitted lines */
1254 be_dbg_set_dbg_info(get_irn_dbg_info(node));
1257 panic("No emit handler for node %+F (graph %+F)\n", node,
/* Walks the schedule forward from `from` and yields the next node that
 * has a delay slot; yields NULL when the schedule ends first.
 * NOTE(review): both return statements sit on lines missing from this
 * extract. */
1262 static ir_node *find_next_delay_slot(ir_node *from)
1264 ir_node *schedpoint = from;
1265 while (!has_delay_slot(schedpoint)) {
/* end of block schedule reached without a delay-slot node */
1266 if (!sched_has_next(schedpoint))
1268 schedpoint = sched_next(schedpoint);
/* Decides whether `block` must be preceded by an assembler label.
 * `sched_prev` is the block emitted directly before it (or NULL).
 * NOTE(review): several `return` lines are missing from this extract. */
1273 static bool block_needs_label(const ir_node *block, const ir_node *sched_prev)
/* address-taken blocks (those with an entity) always need a label */
1277 if (has_Block_entity(block))
1280 n_cfgpreds = get_Block_n_cfgpreds(block);
1281 if (n_cfgpreds == 0) {
1283 } else if (n_cfgpreds > 1) {
/* exactly one predecessor: a label is only needed when we cannot
 * fall through from the predecessor block */
1286 ir_node *cfgpred = get_Block_cfgpred(block, 0);
1287 ir_node *cfgpred_block = get_nodes_block(cfgpred);
/* SwitchJmp targets are referenced from a jump table, so they need
 * a label regardless of layout */
1288 if (is_Proj(cfgpred) && is_sparc_SwitchJmp(get_Proj_pred(cfgpred)))
/* fallthrough is possible only when the predecessor was emitted
 * directly before us and its jump (linked in sparc_gen_labels)
 * really targets this block */
1290 return sched_prev != cfgpred_block || get_irn_link(cfgpred) != block;
1295 * Walks over the nodes in a block connected by scheduling edges
1296 * and emits code for each node.
/* `prev` is the block emitted directly before this one (used for the
 * fallthrough/label decision). Uses the file-global delay_slot_filler:
 * a node picked to fill the next delay slot is skipped here and emitted
 * by the delay-slot owner instead. */
1298 static void sparc_emit_block(ir_node *block, ir_node *prev)
1301 ir_node *next_delay_slot;
1303 assert(is_Block(block));
1305 if (block_needs_label(block, prev)) {
1306 be_gas_emit_block_name(block);
1307 be_emit_cstring(":\n");
1308 be_emit_write_line();
/* pre-select a filler for the first delay-slot node of the block */
1311 next_delay_slot = find_next_delay_slot(sched_first(block));
1312 if (next_delay_slot != NULL)
1313 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1315 sched_foreach(block, node) {
/* NOTE(review): the body skipping the filler node (presumably a
 * `continue`) is on lines missing from this extract */
1316 if (node == delay_slot_filler) {
1320 sparc_emit_node(node);
/* just emitted the delay-slot owner: its filler has been consumed,
 * so look for the next delay-slot node and pick a new filler */
1322 if (node == next_delay_slot) {
1323 assert(delay_slot_filler == NULL);
1324 next_delay_slot = find_next_delay_slot(sched_next(node));
1325 if (next_delay_slot != NULL)
1326 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1332 * Emits code for function start.
1334 static void sparc_emit_func_prolog(ir_graph *irg)
1336 ir_entity *ent = get_irg_entity(irg);
1337 be_gas_emit_function_prolog(ent, 4);
1338 be_emit_write_line();
1342 * Emits code for function end
1344 static void sparc_emit_func_epilog(ir_graph *irg)
1346 ir_entity *ent = get_irg_entity(irg);
1347 const char *irg_name = get_entity_ld_name(ent);
1348 be_emit_write_line();
1349 be_emit_irprintf("\t.size %s, .-%s\n", irg_name, irg_name);
1350 be_emit_cstring("# -- End ");
1351 be_emit_string(irg_name);
1352 be_emit_cstring("\n");
1353 be_emit_write_line();
1356 static void sparc_gen_labels(ir_node *block, void *env)
1359 int n = get_Block_n_cfgpreds(block);
1362 for (n--; n >= 0; n--) {
1363 pred = get_Block_cfgpred(block, n);
1364 set_irn_link(pred, block); // link the pred of a block (which is a jmp)
/* Main entry point: emits the assembly for one complete function graph.
 * Sets up the heights analysis (used by delay-slot filling), registers
 * emitters, computes a block schedule and emits prolog, blocks, epilog.
 * NOTE(review): declarations of `i`/`n` and a `continue;` after the
 * end-block check are on lines missing from this extract. */
1368 void sparc_emit_routine(ir_graph *irg)
1370 ir_entity *entity = get_irg_entity(irg);
1371 ir_node **block_schedule;
/* heights are needed by the delay-slot scheduling heuristics */
1375 heights = heights_new(irg);
1377 /* register all emitter functions */
1378 sparc_register_emitters();
1379 be_dbg_method_begin(entity);
1381 /* create the block schedule. For now, we don't need it earlier. */
1382 block_schedule = be_create_block_schedule(irg);
1384 sparc_emit_func_prolog(irg);
1385 irg_block_walk_graph(irg, sparc_gen_labels, NULL, NULL);
1387 /* inject block scheduling links & emit code of each block */
1388 n = ARR_LEN(block_schedule);
/* first pass: link every block to its layout successor */
1389 for (i = 0; i < n; ++i) {
1390 ir_node *block = block_schedule[i];
1391 ir_node *next_block = i+1 < n ? block_schedule[i+1] : NULL;
1392 set_irn_link(block, next_block);
/* second pass: emit the blocks in schedule order */
1395 for (i = 0; i < n; ++i) {
1396 ir_node *block = block_schedule[i];
1397 ir_node *prev = i>=1 ? block_schedule[i-1] : NULL;
/* the end block carries no code; skip it
 * (NOTE(review): get_irg_end_block() is loop-invariant and could be
 * hoisted out of the loop) */
1398 if (block == get_irg_end_block(irg))
1400 sparc_emit_block(block, prev);
1403 /* emit function epilog */
1404 sparc_emit_func_epilog(irg);
1406 heights_free(heights);
1409 void sparc_init_emitter(void)
1411 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.emit");