2 * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief emit assembler for a backend graph
23 * @author Hannes Rapp, Matthias Braun
30 #include "bitfiddle.h"
42 #include "raw_bitset.h"
46 #include "../besched.h"
47 #include "../beblocksched.h"
49 #include "../begnuas.h"
50 #include "../be_dbgout.h"
51 #include "../benode.h"
52 #include "../bestack.h"
54 #include "sparc_emitter.h"
55 #include "gen_sparc_emitter.h"
56 #include "sparc_nodes_attr.h"
57 #include "sparc_new_nodes.h"
58 #include "gen_sparc_regalloc_if.h"
60 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
62 static ir_heights_t *heights;
static const ir_node *delay_slot_filler; /**< this node has been chosen to fill
                                              the next delay slot */
66 static void sparc_emit_node(const ir_node *node);
68 void sparc_emit_immediate(const ir_node *node)
70 const sparc_attr_t *attr = get_sparc_attr_const(node);
71 ir_entity *entity = attr->immediate_value_entity;
74 int32_t value = attr->immediate_value;
75 assert(sparc_is_value_imm_encodeable(value));
76 be_emit_irprintf("%d", value);
78 if (get_entity_owner(entity) == get_tls_type()) {
79 be_emit_cstring("%tle_lox10(");
81 be_emit_cstring("%lo(");
83 be_gas_emit_entity(entity);
84 if (attr->immediate_value != 0) {
85 be_emit_irprintf("%+d", attr->immediate_value);
91 void sparc_emit_high_immediate(const ir_node *node)
93 const sparc_attr_t *attr = get_sparc_attr_const(node);
94 ir_entity *entity = attr->immediate_value_entity;
97 uint32_t value = (uint32_t) attr->immediate_value;
98 be_emit_irprintf("%%hi(0x%X)", value);
100 if (get_entity_owner(entity) == get_tls_type()) {
101 be_emit_cstring("%tle_hix22(");
103 be_emit_cstring("%hi(");
105 be_gas_emit_entity(entity);
106 if (attr->immediate_value != 0) {
107 be_emit_irprintf("%+d", attr->immediate_value);
113 void sparc_emit_source_register(const ir_node *node, int pos)
115 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
117 be_emit_string(arch_register_get_name(reg));
120 void sparc_emit_dest_register(const ir_node *node, int pos)
122 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
124 be_emit_string(arch_register_get_name(reg));
128 * Emits either a imm or register depending on arity of node
130 * @param register no (-1 if no register)
132 void sparc_emit_reg_or_imm(const ir_node *node, int pos)
134 if (arch_get_irn_flags(node) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
135 // we have a imm input
136 sparc_emit_immediate(node);
139 sparc_emit_source_register(node, pos);
146 void sparc_emit_offset(const ir_node *node, int offset_node_pos)
148 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
150 if (attr->is_reg_reg) {
151 assert(!attr->is_frame_entity);
152 assert(attr->base.immediate_value == 0);
153 assert(attr->base.immediate_value_entity == NULL);
155 sparc_emit_source_register(node, offset_node_pos);
156 } else if (attr->is_frame_entity) {
157 int32_t offset = attr->base.immediate_value;
159 assert(sparc_is_value_imm_encodeable(offset));
160 be_emit_irprintf("%+ld", offset);
162 } else if (attr->base.immediate_value != 0
163 || attr->base.immediate_value_entity != NULL) {
165 sparc_emit_immediate(node);
169 void sparc_emit_float_load_store_mode(const ir_node *node)
171 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
172 ir_mode *mode = attr->load_store_mode;
173 int bits = get_mode_size_bits(mode);
175 assert(mode_is_float(mode));
179 case 64: be_emit_char('d'); return;
180 case 128: be_emit_char('q'); return;
182 panic("invalid flaot load/store mode %+F", mode);
186 * Emit load mode char
188 void sparc_emit_load_mode(const ir_node *node)
190 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
191 ir_mode *mode = attr->load_store_mode;
192 int bits = get_mode_size_bits(mode);
193 bool is_signed = mode_is_signed(mode);
196 be_emit_string(is_signed ? "sh" : "uh");
197 } else if (bits == 8) {
198 be_emit_string(is_signed ? "sb" : "ub");
199 } else if (bits == 64) {
207 * Emit store mode char
209 void sparc_emit_store_mode(const ir_node *node)
211 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
212 ir_mode *mode = attr->load_store_mode;
213 int bits = get_mode_size_bits(mode);
217 } else if (bits == 8) {
219 } else if (bits == 64) {
226 static void emit_fp_suffix(const ir_mode *mode)
228 unsigned bits = get_mode_size_bits(mode);
229 assert(mode_is_float(mode));
233 } else if (bits == 64) {
235 } else if (bits == 128) {
238 panic("invalid FP mode");
242 void sparc_emit_fp_conv_source(const ir_node *node)
244 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
245 emit_fp_suffix(attr->src_mode);
248 void sparc_emit_fp_conv_destination(const ir_node *node)
250 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
251 emit_fp_suffix(attr->dest_mode);
255 * emits the FP mode suffix char
257 void sparc_emit_fp_mode_suffix(const ir_node *node)
259 const sparc_fp_attr_t *attr = get_sparc_fp_attr_const(node);
260 emit_fp_suffix(attr->fp_mode);
263 static ir_node *get_jump_target(const ir_node *jump)
265 return (ir_node*)get_irn_link(jump);
269 * Returns the target label for a control flow node.
271 static void sparc_emit_cfop_target(const ir_node *node)
273 ir_node *block = get_jump_target(node);
274 be_gas_emit_block_name(block);
277 static int get_sparc_Call_dest_addr_pos(const ir_node *node)
279 return get_irn_arity(node)-1;
282 static bool ba_is_fallthrough(const ir_node *node)
284 ir_node *block = get_nodes_block(node);
285 ir_node *next_block = (ir_node*)get_irn_link(block);
286 return get_irn_link(node) == next_block;
289 static bool is_no_instruction(const ir_node *node)
291 /* copies are nops if src_reg == dest_reg */
292 if (be_is_Copy(node) || be_is_CopyKeep(node)) {
293 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
294 const arch_register_t *dest_reg = arch_get_irn_register_out(node, 0);
296 if (src_reg == dest_reg)
299 if (be_is_IncSP(node) && be_get_IncSP_offset(node) == 0)
301 /* Ba is not emitted if it is a simple fallthrough */
302 if (is_sparc_Ba(node) && ba_is_fallthrough(node))
305 return be_is_Keep(node) || be_is_Start(node) || is_Phi(node);
308 static bool has_delay_slot(const ir_node *node)
310 if (is_sparc_Ba(node) && ba_is_fallthrough(node))
313 return is_sparc_Bicc(node) || is_sparc_fbfcc(node) || is_sparc_Ba(node)
314 || is_sparc_SwitchJmp(node) || is_sparc_Call(node)
315 || is_sparc_SDiv(node) || is_sparc_UDiv(node)
316 || is_sparc_Return(node);
319 /** returns true if the emitter for this sparc node can produce more than one
320 * actual sparc instruction.
321 * Usually it is a bad sign if we have to add instructions here. We should
322 * rather try to get them lowered down. So we can actually put them into
323 * delay slots and make them more accessible to the scheduler.
325 static bool emits_multiple_instructions(const ir_node *node)
327 if (has_delay_slot(node))
330 if (is_sparc_Call(node)) {
331 return arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return;
334 return is_sparc_SMulh(node) || is_sparc_UMulh(node)
335 || is_sparc_SDiv(node) || is_sparc_UDiv(node)
336 || be_is_MemPerm(node) || be_is_Perm(node);
340 * search for an instruction that can fill the delay slot of @p node
342 static const ir_node *pick_delay_slot_for(const ir_node *node)
344 const ir_node *check = node;
345 const ir_node *schedpoint = node;
347 /* currently we don't track which registers are still alive, so we can't
348 * pick any other instructions other than the one directly preceding */
349 static const unsigned PICK_DELAY_SLOT_MAX_DISTANCE = 1;
351 assert(has_delay_slot(node));
353 if (is_sparc_Call(node)) {
354 const sparc_attr_t *attr = get_sparc_attr_const(node);
355 ir_entity *entity = attr->immediate_value_entity;
356 if (entity != NULL) {
357 check = NULL; /* pick any instruction, dependencies on Call
360 /* we only need to check the value for the call destination */
361 check = get_irn_n(node, get_sparc_Call_dest_addr_pos(node));
364 /* the Call also destroys the value of %o7, but since this is currently
365 * marked as ignore register in the backend, it should never be used by
366 * the instruction in the delay slot. */
367 } else if (is_sparc_Return(node)) {
368 /* we only have to check the jump destination value */
369 int arity = get_irn_arity(node);
373 for (i = 0; i < arity; ++i) {
374 ir_node *in = get_irn_n(node, i);
375 const arch_register_t *reg = arch_get_irn_register(in);
376 if (reg == &sparc_registers[REG_O7]) {
377 check = skip_Proj(in);
385 while (sched_has_prev(schedpoint)) {
386 schedpoint = sched_prev(schedpoint);
388 if (has_delay_slot(schedpoint))
391 /* skip things which don't really result in instructions */
392 if (is_no_instruction(schedpoint))
395 if (tries++ >= PICK_DELAY_SLOT_MAX_DISTANCE)
398 if (emits_multiple_instructions(schedpoint))
401 /* if check and schedpoint are not in the same block, give up. */
403 && get_nodes_block(check) != get_nodes_block(schedpoint))
406 /* allowed for delayslot: any instruction which is not necessary to
407 * compute an input to the branch. */
409 && heights_reachable_in_block(heights, check, schedpoint))
412 /* found something */
420 * Emits code for stack space management
422 static void emit_be_IncSP(const ir_node *irn)
424 int offset = be_get_IncSP_offset(irn);
429 /* SPARC stack grows downwards */
431 be_emit_cstring("\tsub ");
434 be_emit_cstring("\tadd ");
437 sparc_emit_source_register(irn, 0);
438 be_emit_irprintf(", %d", -offset);
439 be_emit_cstring(", ");
440 sparc_emit_dest_register(irn, 0);
441 be_emit_finish_line_gas(irn);
445 * emits code for mulh
447 static void emit_sparc_Mulh(const ir_node *irn)
449 be_emit_cstring("\t");
450 if (is_sparc_UMulh(irn)) {
453 assert(is_sparc_SMulh(irn));
456 be_emit_cstring("mul ");
458 sparc_emit_source_register(irn, 0);
459 be_emit_cstring(", ");
460 sparc_emit_reg_or_imm(irn, 1);
461 be_emit_cstring(", ");
462 sparc_emit_dest_register(irn, 0);
463 be_emit_finish_line_gas(irn);
465 // our result is in the y register now
466 // we just copy it to the assigned target reg
467 be_emit_cstring("\tmov %y, ");
468 sparc_emit_dest_register(irn, 0);
469 be_emit_finish_line_gas(irn);
472 static void fill_delay_slot(void)
474 if (delay_slot_filler != NULL) {
475 sparc_emit_node(delay_slot_filler);
476 delay_slot_filler = NULL;
478 be_emit_cstring("\tnop\n");
479 be_emit_write_line();
483 static void emit_sparc_Div(const ir_node *node, bool is_signed)
485 /* can we get the delay count of the wr instruction somewhere? */
486 unsigned wry_delay_count = 3;
489 be_emit_cstring("\twr ");
490 sparc_emit_source_register(node, 0);
491 be_emit_cstring(", 0, %y");
492 be_emit_finish_line_gas(node);
494 for (i = 0; i < wry_delay_count; ++i) {
498 be_emit_irprintf("\t%s ", is_signed ? "sdiv" : "udiv");
499 sparc_emit_source_register(node, 1);
500 be_emit_cstring(", ");
501 sparc_emit_reg_or_imm(node, 2);
502 be_emit_cstring(", ");
503 sparc_emit_dest_register(node, 0);
504 be_emit_finish_line_gas(node);
507 static void emit_sparc_SDiv(const ir_node *node)
509 emit_sparc_Div(node, true);
512 static void emit_sparc_UDiv(const ir_node *node)
514 emit_sparc_Div(node, false);
518 * Emits code for Call node
520 static void emit_sparc_Call(const ir_node *node)
522 const sparc_attr_t *attr = get_sparc_attr_const(node);
523 ir_entity *entity = attr->immediate_value_entity;
525 be_emit_cstring("\tcall ");
526 if (entity != NULL) {
527 be_gas_emit_entity(entity);
528 if (attr->immediate_value != 0) {
529 be_emit_irprintf("%+d", attr->immediate_value);
531 be_emit_cstring(", 0");
533 int dest_addr = get_sparc_Call_dest_addr_pos(node);
534 sparc_emit_source_register(node, dest_addr);
536 be_emit_finish_line_gas(node);
540 if (arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return) {
541 be_emit_cstring("\tunimp 8\n");
542 be_emit_write_line();
547 * Emit code for Perm node
549 static void emit_be_Perm(const ir_node *irn)
551 be_emit_cstring("\txor ");
552 sparc_emit_source_register(irn, 1);
553 be_emit_cstring(", ");
554 sparc_emit_source_register(irn, 0);
555 be_emit_cstring(", ");
556 sparc_emit_source_register(irn, 0);
557 be_emit_finish_line_gas(NULL);
559 be_emit_cstring("\txor ");
560 sparc_emit_source_register(irn, 1);
561 be_emit_cstring(", ");
562 sparc_emit_source_register(irn, 0);
563 be_emit_cstring(", ");
564 sparc_emit_source_register(irn, 1);
565 be_emit_finish_line_gas(NULL);
567 be_emit_cstring("\txor ");
568 sparc_emit_source_register(irn, 1);
569 be_emit_cstring(", ");
570 sparc_emit_source_register(irn, 0);
571 be_emit_cstring(", ");
572 sparc_emit_source_register(irn, 0);
573 be_emit_finish_line_gas(irn);
576 /* The stack pointer must always be SPARC_STACK_ALIGNMENT bytes aligned, so get
577 * the next bigger integer that's evenly divisible by it. */
578 static unsigned get_aligned_sp_change(const unsigned num_regs)
580 const unsigned bytes = num_regs * SPARC_REGISTER_SIZE;
581 return round_up2(bytes, SPARC_STACK_ALIGNMENT);
584 /* Spill register l0 or both l0 and l1, depending on n_spilled and n_to_spill.*/
585 static void memperm_emit_spill_registers(const ir_node *node, int n_spilled,
588 assert(n_spilled < n_to_spill);
590 if (n_spilled == 0) {
591 /* We always reserve stack space for two registers because during copy
592 * processing we don't know yet if we also need to handle a cycle which
593 * needs two registers. More complicated code in emit_MemPerm would
594 * prevent wasting SPARC_REGISTER_SIZE bytes of stack space but
595 * it is not worth the worse readability of emit_MemPerm. */
597 /* Keep stack pointer aligned. */
598 unsigned sp_change = get_aligned_sp_change(2);
599 be_emit_irprintf("\tsub %%sp, %u, %%sp", sp_change);
600 be_emit_finish_line_gas(node);
602 /* Spill register l0. */
603 be_emit_irprintf("\tst %%l0, [%%sp%+d]", SPARC_MIN_STACKSIZE);
604 be_emit_finish_line_gas(node);
607 if (n_to_spill == 2) {
608 /* Spill register l1. */
609 be_emit_irprintf("\tst %%l1, [%%sp%+d]", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
610 be_emit_finish_line_gas(node);
614 /* Restore register l0 or both l0 and l1, depending on n_spilled. */
615 static void memperm_emit_restore_registers(const ir_node *node, int n_spilled)
619 if (n_spilled == 2) {
620 /* Restore register l1. */
621 be_emit_irprintf("\tld [%%sp%+d], %%l1", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
622 be_emit_finish_line_gas(node);
625 /* Restore register l0. */
626 be_emit_irprintf("\tld [%%sp%+d], %%l0", SPARC_MIN_STACKSIZE);
627 be_emit_finish_line_gas(node);
629 /* Restore stack pointer. */
630 sp_change = get_aligned_sp_change(2);
631 be_emit_irprintf("\tadd %%sp, %u, %%sp", sp_change);
632 be_emit_finish_line_gas(node);
635 /* Emit code to copy in_ent to out_ent. Only uses l0. */
636 static void memperm_emit_copy(const ir_node *node, ir_entity *in_ent,
639 ir_graph *irg = get_irn_irg(node);
640 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
641 int off_in = be_get_stack_entity_offset(layout, in_ent, 0);
642 int off_out = be_get_stack_entity_offset(layout, out_ent, 0);
644 /* Load from input entity. */
645 be_emit_irprintf("\tld [%%fp%+d], %%l0", off_in);
646 be_emit_finish_line_gas(node);
648 /* Store to output entity. */
649 be_emit_irprintf("\tst %%l0, [%%fp%+d]", off_out);
650 be_emit_finish_line_gas(node);
653 /* Emit code to swap ent1 and ent2. Uses l0 and l1. */
654 static void memperm_emit_swap(const ir_node *node, ir_entity *ent1,
657 ir_graph *irg = get_irn_irg(node);
658 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
659 int off1 = be_get_stack_entity_offset(layout, ent1, 0);
660 int off2 = be_get_stack_entity_offset(layout, ent2, 0);
662 /* Load from first input entity. */
663 be_emit_irprintf("\tld [%%fp%+d], %%l0", off1);
664 be_emit_finish_line_gas(node);
666 /* Load from second input entity. */
667 be_emit_irprintf("\tld [%%fp%+d], %%l1", off2);
668 be_emit_finish_line_gas(node);
670 /* Store first value to second output entity. */
671 be_emit_irprintf("\tst %%l0, [%%fp%+d]", off2);
672 be_emit_finish_line_gas(node);
674 /* Store second value to first output entity. */
675 be_emit_irprintf("\tst %%l1, [%%fp%+d]", off1);
676 be_emit_finish_line_gas(node);
679 /* Find the index of ent in ents or return -1 if not found. */
680 static int get_index(ir_entity **ents, int n, ir_entity *ent)
684 for (i = 0; i < n; ++i)
692 * Emit code for a MemPerm node.
694 * Analyze MemPerm for copy chains and cyclic swaps and resolve them using
696 * This function is conceptually very similar to permute_values in
699 static void emit_be_MemPerm(const ir_node *node)
701 int memperm_arity = be_get_MemPerm_entity_arity(node);
702 /* Upper limit for the number of participating entities is twice the
703 * arity, e.g., for a simple copying MemPerm node with one input/output. */
704 int max_size = 2 * memperm_arity;
705 ir_entity **entities = ALLOCANZ(ir_entity *, max_size);
706 /* sourceof contains the input entity for each entity. If an entity is
707 * never used as an output, its entry in sourceof is a fix point. */
708 int *sourceof = ALLOCANZ(int, max_size);
709 /* n_users counts how many output entities use this entity as their input.*/
710 int *n_users = ALLOCANZ(int, max_size);
711 /* n_spilled records the number of spilled registers, either 1 or 2. */
715 for (i = 0; i < max_size; ++i) {
719 for (i = n = 0; i < memperm_arity; ++i) {
720 ir_entity *out = be_get_MemPerm_out_entity(node, i);
721 ir_entity *in = be_get_MemPerm_in_entity(node, i);
722 int oidx; /* Out index */
723 int iidx; /* In index */
725 /* Insert into entities to be able to operate on unique indices. */
726 if (get_index(entities, n, out) == -1)
728 if (get_index(entities, n, in) == -1)
731 oidx = get_index(entities, n, out);
732 iidx = get_index(entities, n, in);
734 sourceof[oidx] = iidx; /* Remember the source. */
735 ++n_users[iidx]; /* Increment number of users of this entity. */
738 /* First do all the copies. */
739 for (oidx = 0; oidx < n; /* empty */) {
740 int iidx = sourceof[oidx];
742 /* Nothing to do for fix points.
743 * Also, if entities[oidx] is used as an input by another copy, we
744 * can't overwrite entities[oidx] yet.*/
745 if (iidx == oidx || n_users[oidx] > 0) {
750 /* We found the end of a 'chain', so do the copy. */
751 if (n_spilled == 0) {
752 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/1);
755 memperm_emit_copy(node, entities[iidx], entities[oidx]);
758 sourceof[oidx] = oidx;
760 assert(n_users[iidx] > 0);
761 /* Decrementing the number of users might enable us to do another
765 if (iidx < oidx && n_users[iidx] == 0) {
772 /* The rest are cycles. */
773 for (oidx = 0; oidx < n; /* empty */) {
774 int iidx = sourceof[oidx];
777 /* Nothing to do for fix points. */
783 assert(n_users[iidx] == 1);
785 /* Swap the two values to resolve the cycle. */
787 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/2);
790 memperm_emit_swap(node, entities[iidx], entities[oidx]);
792 tidx = sourceof[iidx];
794 sourceof[iidx] = iidx;
796 /* The source of oidx is now the old source of iidx, because we swapped
797 * the two entities. */
798 sourceof[oidx] = tidx;
802 /* Only fix points should remain. */
803 for (i = 0; i < max_size; ++i) {
804 assert(sourceof[i] == i);
808 assert(n_spilled > 0 && "Useless MemPerm node");
810 memperm_emit_restore_registers(node, n_spilled);
813 static void emit_sparc_Return(const ir_node *node)
815 ir_graph *irg = get_irn_irg(node);
816 ir_entity *entity = get_irg_entity(irg);
817 ir_type *type = get_entity_type(entity);
819 const char *destreg = "%o7";
821 /* hack: we don't explicitely model register changes because of the
822 * restore node. So we have to do it manually here */
823 if (delay_slot_filler != NULL &&
824 (is_sparc_Restore(delay_slot_filler)
825 || is_sparc_RestoreZero(delay_slot_filler))) {
828 be_emit_cstring("\tjmp ");
829 be_emit_string(destreg);
830 if (get_method_calling_convention(type) & cc_compound_ret) {
831 be_emit_cstring("+12");
833 be_emit_cstring("+8");
835 be_emit_finish_line_gas(node);
839 static void emit_sparc_FrameAddr(const ir_node *node)
841 const sparc_attr_t *attr = get_sparc_attr_const(node);
842 int32_t offset = attr->immediate_value;
845 be_emit_cstring("\tadd ");
846 sparc_emit_source_register(node, 0);
847 be_emit_cstring(", ");
848 assert(sparc_is_value_imm_encodeable(offset));
849 be_emit_irprintf("%ld", offset);
851 be_emit_cstring("\tsub ");
852 sparc_emit_source_register(node, 0);
853 be_emit_cstring(", ");
854 assert(sparc_is_value_imm_encodeable(-offset));
855 be_emit_irprintf("%ld", -offset);
858 be_emit_cstring(", ");
859 sparc_emit_dest_register(node, 0);
860 be_emit_finish_line_gas(node);
863 static const char *get_icc_unsigned(ir_relation relation)
865 switch (relation & (ir_relation_less_equal_greater)) {
866 case ir_relation_false: return "bn";
867 case ir_relation_equal: return "be";
868 case ir_relation_less: return "blu";
869 case ir_relation_less_equal: return "bleu";
870 case ir_relation_greater: return "bgu";
871 case ir_relation_greater_equal: return "bgeu";
872 case ir_relation_less_greater: return "bne";
873 case ir_relation_less_equal_greater: return "ba";
874 default: panic("Cmp has unsupported relation");
878 static const char *get_icc_signed(ir_relation relation)
880 switch (relation & (ir_relation_less_equal_greater)) {
881 case ir_relation_false: return "bn";
882 case ir_relation_equal: return "be";
883 case ir_relation_less: return "bl";
884 case ir_relation_less_equal: return "ble";
885 case ir_relation_greater: return "bg";
886 case ir_relation_greater_equal: return "bge";
887 case ir_relation_less_greater: return "bne";
888 case ir_relation_less_equal_greater: return "ba";
889 default: panic("Cmp has unsupported relation");
893 static const char *get_fcc(ir_relation relation)
896 case ir_relation_false: return "fbn";
897 case ir_relation_equal: return "fbe";
898 case ir_relation_less: return "fbl";
899 case ir_relation_less_equal: return "fble";
900 case ir_relation_greater: return "fbg";
901 case ir_relation_greater_equal: return "fbge";
902 case ir_relation_less_greater: return "fblg";
903 case ir_relation_less_equal_greater: return "fbo";
904 case ir_relation_unordered: return "fbu";
905 case ir_relation_unordered_equal: return "fbue";
906 case ir_relation_unordered_less: return "fbul";
907 case ir_relation_unordered_less_equal: return "fbule";
908 case ir_relation_unordered_greater: return "fbug";
909 case ir_relation_unordered_greater_equal: return "fbuge";
910 case ir_relation_unordered_less_greater: return "fbne";
911 case ir_relation_true: return "fba";
913 panic("invalid relation");
916 typedef const char* (*get_cc_func)(ir_relation relation);
918 static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
920 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
921 ir_relation relation = attr->relation;
922 const ir_node *proj_true = NULL;
923 const ir_node *proj_false = NULL;
924 const ir_edge_t *edge;
925 const ir_node *block;
926 const ir_node *next_block;
928 foreach_out_edge(node, edge) {
929 ir_node *proj = get_edge_src_irn(edge);
930 long nr = get_Proj_proj(proj);
931 if (nr == pn_Cond_true) {
938 /* for now, the code works for scheduled and non-schedules blocks */
939 block = get_nodes_block(node);
941 /* we have a block schedule */
942 next_block = (ir_node*)get_irn_link(block);
944 if (get_irn_link(proj_true) == next_block) {
945 /* exchange both proj's so the second one can be omitted */
946 const ir_node *t = proj_true;
948 proj_true = proj_false;
950 relation = get_negated_relation(relation);
953 /* emit the true proj */
954 be_emit_cstring("\t");
955 be_emit_string(get_cc(relation));
957 sparc_emit_cfop_target(proj_true);
958 be_emit_finish_line_gas(proj_true);
962 if (get_irn_link(proj_false) == next_block) {
963 be_emit_cstring("\t/* fallthrough to ");
964 sparc_emit_cfop_target(proj_false);
965 be_emit_cstring(" */");
966 be_emit_finish_line_gas(proj_false);
968 be_emit_cstring("\tba ");
969 sparc_emit_cfop_target(proj_false);
970 be_emit_finish_line_gas(proj_false);
975 static void emit_sparc_Bicc(const ir_node *node)
977 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
978 bool is_unsigned = attr->is_unsigned;
979 emit_sparc_branch(node, is_unsigned ? get_icc_unsigned : get_icc_signed);
982 static void emit_sparc_fbfcc(const ir_node *node)
984 /* if the flags producing node was immediately in front of us, emit
986 ir_node *flags = get_irn_n(node, n_sparc_fbfcc_flags);
987 ir_node *prev = sched_prev(node);
988 if (is_Block(prev)) {
989 /* TODO: when the flags come from another block, then we have to do
990 * more complicated tests to see wether the flag producing node is
991 * potentially in front of us (could happen for fallthroughs) */
992 panic("TODO: fbfcc flags come from other block");
994 if (skip_Proj(flags) == prev) {
995 be_emit_cstring("\tnop\n");
997 emit_sparc_branch(node, get_fcc);
1000 static void emit_sparc_Ba(const ir_node *node)
1002 if (ba_is_fallthrough(node)) {
1003 be_emit_cstring("\t/* fallthrough to ");
1004 sparc_emit_cfop_target(node);
1005 be_emit_cstring(" */");
1006 be_emit_finish_line_gas(node);
1008 be_emit_cstring("\tba ");
1009 sparc_emit_cfop_target(node);
1010 be_emit_finish_line_gas(node);
1015 static void emit_sparc_SwitchJmp(const ir_node *node)
1017 const sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr_const(node);
1019 be_emit_cstring("\tjmp ");
1020 sparc_emit_source_register(node, 0);
1021 be_emit_finish_line_gas(node);
1024 emit_jump_table(node, attr->default_proj_num, attr->jump_table,
1028 static void emit_fmov(const ir_node *node, const arch_register_t *src_reg,
1029 const arch_register_t *dst_reg)
1031 be_emit_cstring("\tfmovs %");
1032 be_emit_string(arch_register_get_name(src_reg));
1033 be_emit_cstring(", %");
1034 be_emit_string(arch_register_get_name(dst_reg));
1035 be_emit_finish_line_gas(node);
1038 static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
1040 unsigned idx = reg->global_index;
1041 assert(reg == &sparc_registers[idx]);
1043 assert(idx - REG_F0 < N_sparc_fp_REGS);
1044 return &sparc_registers[idx];
1047 static void emit_be_Copy(const ir_node *node)
1049 ir_mode *mode = get_irn_mode(node);
1050 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
1051 const arch_register_t *dst_reg = arch_get_irn_register_out(node, 0);
1053 if (src_reg == dst_reg)
1056 if (mode_is_float(mode)) {
1057 unsigned bits = get_mode_size_bits(mode);
1058 int n = bits > 32 ? bits > 64 ? 3 : 1 : 0;
1060 emit_fmov(node, src_reg, dst_reg);
1061 for (i = 0; i < n; ++i) {
1062 src_reg = get_next_fp_reg(src_reg);
1063 dst_reg = get_next_fp_reg(dst_reg);
1064 emit_fmov(node, src_reg, dst_reg);
1066 } else if (mode_is_data(mode)) {
1067 be_emit_cstring("\tmov ");
1068 sparc_emit_source_register(node, 0);
1069 be_emit_cstring(", ");
1070 sparc_emit_dest_register(node, 0);
1071 be_emit_finish_line_gas(node);
1073 panic("emit_be_Copy: invalid mode");
1077 static void emit_nothing(const ir_node *irn)
1082 typedef void (*emit_func) (const ir_node *);
1084 static inline void set_emitter(ir_op *op, emit_func sparc_emit_node)
1086 op->ops.generic = (op_func)sparc_emit_node;
1090 * Enters the emitter functions for handled nodes into the generic
1091 * pointer of an opcode.
1093 static void sparc_register_emitters(void)
1095 /* first clear the generic function pointer for all ops */
1096 clear_irp_opcodes_generic_func();
1097 /* register all emitter functions defined in spec */
1098 sparc_register_spec_emitters();
1100 /* custom emitter */
1101 set_emitter(op_be_Copy, emit_be_Copy);
1102 set_emitter(op_be_CopyKeep, emit_be_Copy);
1103 set_emitter(op_be_IncSP, emit_be_IncSP);
1104 set_emitter(op_be_MemPerm, emit_be_MemPerm);
1105 set_emitter(op_be_Perm, emit_be_Perm);
1106 set_emitter(op_sparc_Ba, emit_sparc_Ba);
1107 set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
1108 set_emitter(op_sparc_Call, emit_sparc_Call);
1109 set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
1110 set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
1111 set_emitter(op_sparc_SMulh, emit_sparc_Mulh);
1112 set_emitter(op_sparc_UMulh, emit_sparc_Mulh);
1113 set_emitter(op_sparc_Return, emit_sparc_Return);
1114 set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
1115 set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
1116 set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
1118 /* no need to emit anything for the following nodes */
1119 set_emitter(op_be_Keep, emit_nothing);
1120 set_emitter(op_sparc_Start, emit_nothing);
1121 set_emitter(op_Phi, emit_nothing);
1125 * Emits code for a node.
1127 static void sparc_emit_node(const ir_node *node)
1129 ir_op *op = get_irn_op(node);
1131 if (op->ops.generic) {
1132 emit_func func = (emit_func) op->ops.generic;
1133 be_dbg_set_dbg_info(get_irn_dbg_info(node));
1136 panic("No emit handler for node %+F (graph %+F)\n", node,
1141 static ir_node *find_next_delay_slot(ir_node *from)
1143 ir_node *schedpoint = from;
1144 while (!has_delay_slot(schedpoint)) {
1145 if (!sched_has_next(schedpoint))
1147 schedpoint = sched_next(schedpoint);
1152 static bool block_needs_label(const ir_node *block, const ir_node *sched_prev)
1156 if (has_Block_entity(block))
1159 n_cfgpreds = get_Block_n_cfgpreds(block);
1160 if (n_cfgpreds == 0) {
1162 } else if (n_cfgpreds > 1) {
1165 ir_node *cfgpred = get_Block_cfgpred(block, 0);
1166 ir_node *cfgpred_block = get_nodes_block(cfgpred);
1167 if (is_Proj(cfgpred) && is_sparc_SwitchJmp(get_Proj_pred(cfgpred)))
1169 return sched_prev != cfgpred_block || get_irn_link(cfgpred) != block;
1174 * Walks over the nodes in a block connected by scheduling edges
1175 * and emits code for each node.
1177 static void sparc_emit_block(ir_node *block, ir_node *prev)
1180 ir_node *next_delay_slot;
1182 assert(is_Block(block));
1184 if (block_needs_label(block, prev)) {
1185 be_gas_emit_block_name(block);
1186 be_emit_cstring(":\n");
1187 be_emit_write_line();
1190 next_delay_slot = find_next_delay_slot(sched_first(block));
1191 if (next_delay_slot != NULL)
1192 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1194 sched_foreach(block, node) {
1195 if (node == delay_slot_filler) {
1199 sparc_emit_node(node);
1201 if (node == next_delay_slot) {
1202 assert(delay_slot_filler == NULL);
1203 next_delay_slot = find_next_delay_slot(sched_next(node));
1204 if (next_delay_slot != NULL)
1205 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1211 * Emits code for function start.
1213 static void sparc_emit_func_prolog(ir_graph *irg)
1215 ir_entity *ent = get_irg_entity(irg);
1216 be_gas_emit_function_prolog(ent, 4);
1217 be_emit_write_line();
1221 * Emits code for function end
1223 static void sparc_emit_func_epilog(ir_graph *irg)
1225 ir_entity *ent = get_irg_entity(irg);
1226 const char *irg_name = get_entity_ld_name(ent);
1227 be_emit_write_line();
1228 be_emit_irprintf("\t.size %s, .-%s\n", irg_name, irg_name);
1229 be_emit_cstring("# -- End ");
1230 be_emit_string(irg_name);
1231 be_emit_cstring("\n");
1232 be_emit_write_line();
1235 static void sparc_gen_labels(ir_node *block, void *env)
1238 int n = get_Block_n_cfgpreds(block);
1241 for (n--; n >= 0; n--) {
1242 pred = get_Block_cfgpred(block, n);
1243 set_irn_link(pred, block); // link the pred of a block (which is a jmp)
1247 void sparc_emit_routine(ir_graph *irg)
1249 ir_entity *entity = get_irg_entity(irg);
1250 ir_node **block_schedule;
1254 heights = heights_new(irg);
1256 /* register all emitter functions */
1257 sparc_register_emitters();
1258 be_dbg_method_begin(entity);
1260 /* create the block schedule. For now, we don't need it earlier. */
1261 block_schedule = be_create_block_schedule(irg);
1263 sparc_emit_func_prolog(irg);
1264 irg_block_walk_graph(irg, sparc_gen_labels, NULL, NULL);
1266 /* inject block scheduling links & emit code of each block */
1267 n = ARR_LEN(block_schedule);
1268 for (i = 0; i < n; ++i) {
1269 ir_node *block = block_schedule[i];
1270 ir_node *next_block = i+1 < n ? block_schedule[i+1] : NULL;
1271 set_irn_link(block, next_block);
1274 for (i = 0; i < n; ++i) {
1275 ir_node *block = block_schedule[i];
1276 ir_node *prev = i>=1 ? block_schedule[i-1] : NULL;
1277 if (block == get_irg_end_block(irg))
1279 sparc_emit_block(block, prev);
1282 /* emit function epilog */
1283 sparc_emit_func_epilog(irg);
1285 heights_free(heights);
1288 void sparc_init_emitter(void)
1290 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.emit");