2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief emit assembler for a backend graph
23 * @author Hannes Rapp, Matthias Braun
30 #include "bitfiddle.h"
42 #include "raw_bitset.h"
47 #include "beblocksched.h"
50 #include "be_dbgout.h"
54 #include "sparc_emitter.h"
55 #include "gen_sparc_emitter.h"
56 #include "sparc_nodes_attr.h"
57 #include "sparc_new_nodes.h"
58 #include "gen_sparc_regalloc_if.h"
60 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Heights of the graph currently being emitted; computed in
 * sparc_emit_routine() and consulted by pick_delay_slot_for(). */
62 static ir_heights_t *heights;
63 static const ir_node *delay_slot_filler; /**< this node has been chosen to fill
64 the next delay slot */
/* forward declaration: fill_delay_slot() must be able to emit a node */
66 static void sparc_emit_node(const ir_node *node);
68 void sparc_emit_immediate(const ir_node *node)
70 const sparc_attr_t *attr = get_sparc_attr_const(node);
71 ir_entity *entity = attr->immediate_value_entity;
74 int32_t value = attr->immediate_value;
75 assert(sparc_is_value_imm_encodeable(value));
76 be_emit_irprintf("%d", value);
78 if (get_entity_owner(entity) == get_tls_type()) {
79 be_emit_cstring("%tle_lox10(");
81 be_emit_cstring("%lo(");
83 be_gas_emit_entity(entity);
84 if (attr->immediate_value != 0) {
85 be_emit_irprintf("%+d", attr->immediate_value);
91 void sparc_emit_high_immediate(const ir_node *node)
93 const sparc_attr_t *attr = get_sparc_attr_const(node);
94 ir_entity *entity = attr->immediate_value_entity;
97 uint32_t value = (uint32_t) attr->immediate_value;
98 be_emit_irprintf("%%hi(0x%X)", value);
100 if (get_entity_owner(entity) == get_tls_type()) {
101 be_emit_cstring("%tle_hix22(");
103 be_emit_cstring("%hi(");
105 be_gas_emit_entity(entity);
106 if (attr->immediate_value != 0) {
107 be_emit_irprintf("%+d", attr->immediate_value);
113 void sparc_emit_source_register(const ir_node *node, int pos)
115 const arch_register_t *reg = arch_get_irn_register_in(node, pos);
117 be_emit_string(arch_register_get_name(reg));
120 void sparc_emit_dest_register(const ir_node *node, int pos)
122 const arch_register_t *reg = arch_get_irn_register_out(node, pos);
124 be_emit_string(arch_register_get_name(reg));
128 * Emits either a imm or register depending on arity of node
130 * @param register no (-1 if no register)
132 void sparc_emit_reg_or_imm(const ir_node *node, int pos)
134 if (arch_get_irn_flags(node) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
135 // we have a imm input
136 sparc_emit_immediate(node);
139 sparc_emit_source_register(node, pos);
146 void sparc_emit_offset(const ir_node *node, int offset_node_pos)
148 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
150 if (attr->is_reg_reg) {
151 assert(!attr->is_frame_entity);
152 assert(attr->base.immediate_value == 0);
153 assert(attr->base.immediate_value_entity == NULL);
155 sparc_emit_source_register(node, offset_node_pos);
156 } else if (attr->is_frame_entity) {
157 int32_t offset = attr->base.immediate_value;
159 assert(sparc_is_value_imm_encodeable(offset));
160 be_emit_irprintf("%+ld", offset);
162 } else if (attr->base.immediate_value != 0
163 || attr->base.immediate_value_entity != NULL) {
165 sparc_emit_immediate(node);
169 void sparc_emit_source_reg_and_offset(const ir_node *node, int regpos,
172 const arch_register_t *reg = arch_get_irn_register_in(node, regpos);
173 const sparc_load_store_attr_t *attr;
175 if (reg == &sparc_registers[REG_SP]) {
176 attr = get_sparc_load_store_attr_const(node);
177 if (!attr->is_reg_reg
178 && attr->base.immediate_value < SPARC_MIN_STACKSIZE) {
180 ir_fprintf(stderr, "warning: emitting stack pointer relative load/store with offset < %d\n", SPARC_MIN_STACKSIZE);
184 sparc_emit_source_register(node, regpos);
185 sparc_emit_offset(node, offpos);
188 void sparc_emit_float_load_store_mode(const ir_node *node)
190 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
191 ir_mode *mode = attr->load_store_mode;
192 int bits = get_mode_size_bits(mode);
194 assert(mode_is_float(mode));
198 case 64: be_emit_char('d'); return;
199 case 128: be_emit_char('q'); return;
201 panic("invalid float load/store mode %+F", mode);
205 * Emit load mode char
207 void sparc_emit_load_mode(const ir_node *node)
209 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
210 ir_mode *mode = attr->load_store_mode;
211 int bits = get_mode_size_bits(mode);
212 bool is_signed = mode_is_signed(mode);
215 be_emit_string(is_signed ? "sh" : "uh");
216 } else if (bits == 8) {
217 be_emit_string(is_signed ? "sb" : "ub");
218 } else if (bits == 64) {
226 * Emit store mode char
228 void sparc_emit_store_mode(const ir_node *node)
230 const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
231 ir_mode *mode = attr->load_store_mode;
232 int bits = get_mode_size_bits(mode);
236 } else if (bits == 8) {
238 } else if (bits == 64) {
245 static void emit_fp_suffix(const ir_mode *mode)
247 unsigned bits = get_mode_size_bits(mode);
248 assert(mode_is_float(mode));
252 } else if (bits == 64) {
254 } else if (bits == 128) {
257 panic("invalid FP mode");
261 void sparc_emit_fp_conv_source(const ir_node *node)
263 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
264 emit_fp_suffix(attr->src_mode);
267 void sparc_emit_fp_conv_destination(const ir_node *node)
269 const sparc_fp_conv_attr_t *attr = get_sparc_fp_conv_attr_const(node);
270 emit_fp_suffix(attr->dest_mode);
274 * emits the FP mode suffix char
276 void sparc_emit_fp_mode_suffix(const ir_node *node)
278 const sparc_fp_attr_t *attr = get_sparc_fp_attr_const(node);
279 emit_fp_suffix(attr->fp_mode);
282 static ir_node *get_jump_target(const ir_node *jump)
284 return (ir_node*)get_irn_link(jump);
288 * Returns the target label for a control flow node.
290 static void sparc_emit_cfop_target(const ir_node *node)
292 ir_node *block = get_jump_target(node);
293 be_gas_emit_block_name(block);
296 static int get_sparc_Call_dest_addr_pos(const ir_node *node)
298 return get_irn_arity(node)-1;
301 static bool ba_is_fallthrough(const ir_node *node)
303 ir_node *block = get_nodes_block(node);
304 ir_node *next_block = (ir_node*)get_irn_link(block);
305 return get_irn_link(node) == next_block;
308 static bool is_no_instruction(const ir_node *node)
310 /* copies are nops if src_reg == dest_reg */
311 if (be_is_Copy(node) || be_is_CopyKeep(node)) {
312 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
313 const arch_register_t *dest_reg = arch_get_irn_register_out(node, 0);
315 if (src_reg == dest_reg)
318 if (be_is_IncSP(node) && be_get_IncSP_offset(node) == 0)
320 /* Ba is not emitted if it is a simple fallthrough */
321 if (is_sparc_Ba(node) && ba_is_fallthrough(node))
324 return be_is_Keep(node) || be_is_Start(node) || is_Phi(node);
327 static bool has_delay_slot(const ir_node *node)
329 if (is_sparc_Ba(node)) {
330 return !ba_is_fallthrough(node);
333 return arch_get_irn_flags(node) & sparc_arch_irn_flag_has_delay_slot;
336 /** returns true if the emitter for this sparc node can produce more than one
337 * actual sparc instruction.
338 * Usually it is a bad sign if we have to add instructions here. We should
339 * rather try to get them lowered down. So we can actually put them into
340 * delay slots and make them more accessible to the scheduler.
342 static bool emits_multiple_instructions(const ir_node *node)
344 if (has_delay_slot(node))
347 if (is_sparc_Call(node)) {
348 return arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return;
351 return is_sparc_SMulh(node) || is_sparc_UMulh(node)
352 || is_sparc_SDiv(node) || is_sparc_UDiv(node)
353 || be_is_MemPerm(node) || be_is_Perm(node);
357 * search for an instruction that can fill the delay slot of @p node
359 static const ir_node *pick_delay_slot_for(const ir_node *node)
361 const ir_node *check = node;
362 const ir_node *schedpoint = node;
364 /* currently we don't track which registers are still alive, so we can't
365 * pick any other instructions other than the one directly preceding */
366 static const unsigned PICK_DELAY_SLOT_MAX_DISTANCE = 1;
368 assert(has_delay_slot(node));
370 if (is_sparc_Call(node)) {
371 const sparc_attr_t *attr = get_sparc_attr_const(node);
372 ir_entity *entity = attr->immediate_value_entity;
373 if (entity != NULL) {
374 check = NULL; /* pick any instruction, dependencies on Call
377 /* we only need to check the value for the call destination */
378 check = get_irn_n(node, get_sparc_Call_dest_addr_pos(node));
381 /* the Call also destroys the value of %o7, but since this is currently
382 * marked as ignore register in the backend, it should never be used by
383 * the instruction in the delay slot. */
384 } else if (is_sparc_Return(node)) {
385 /* we only have to check the jump destination value */
386 int arity = get_irn_arity(node);
390 for (i = 0; i < arity; ++i) {
391 ir_node *in = get_irn_n(node, i);
392 const arch_register_t *reg = arch_get_irn_register(in);
393 if (reg == &sparc_registers[REG_O7]) {
394 check = skip_Proj(in);
402 while (sched_has_prev(schedpoint)) {
403 schedpoint = sched_prev(schedpoint);
405 if (has_delay_slot(schedpoint))
408 /* skip things which don't really result in instructions */
409 if (is_no_instruction(schedpoint))
412 if (tries++ >= PICK_DELAY_SLOT_MAX_DISTANCE)
415 if (emits_multiple_instructions(schedpoint))
418 /* if check and schedpoint are not in the same block, give up. */
420 && get_nodes_block(check) != get_nodes_block(schedpoint))
423 /* allowed for delayslot: any instruction which is not necessary to
424 * compute an input to the branch. */
426 && heights_reachable_in_block(heights, check, schedpoint))
429 /* found something */
437 * Emits code for stack space management
439 static void emit_be_IncSP(const ir_node *irn)
441 int offset = be_get_IncSP_offset(irn);
446 /* SPARC stack grows downwards */
448 be_emit_cstring("\tsub ");
451 be_emit_cstring("\tadd ");
454 sparc_emit_source_register(irn, 0);
455 be_emit_irprintf(", %d", -offset);
456 be_emit_cstring(", ");
457 sparc_emit_dest_register(irn, 0);
458 be_emit_finish_line_gas(irn);
462 * emits code for mulh
464 static void emit_sparc_Mulh(const ir_node *irn)
466 be_emit_cstring("\t");
467 if (is_sparc_UMulh(irn)) {
470 assert(is_sparc_SMulh(irn));
473 be_emit_cstring("mul ");
475 sparc_emit_source_register(irn, 0);
476 be_emit_cstring(", ");
477 sparc_emit_reg_or_imm(irn, 1);
478 be_emit_cstring(", ");
479 sparc_emit_dest_register(irn, 0);
480 be_emit_finish_line_gas(irn);
482 // our result is in the y register now
483 // we just copy it to the assigned target reg
484 be_emit_cstring("\tmov %y, ");
485 sparc_emit_dest_register(irn, 0);
486 be_emit_finish_line_gas(irn);
489 static void fill_delay_slot(void)
491 if (delay_slot_filler != NULL) {
492 sparc_emit_node(delay_slot_filler);
493 delay_slot_filler = NULL;
495 be_emit_cstring("\tnop\n");
496 be_emit_write_line();
500 static void emit_sparc_Div(const ir_node *node, bool is_signed)
502 /* can we get the delay count of the wr instruction somewhere? */
503 unsigned wry_delay_count = 3;
506 be_emit_cstring("\twr ");
507 sparc_emit_source_register(node, 0);
508 be_emit_cstring(", 0, %y");
509 be_emit_finish_line_gas(node);
511 for (i = 0; i < wry_delay_count; ++i) {
515 be_emit_irprintf("\t%s ", is_signed ? "sdiv" : "udiv");
516 sparc_emit_source_register(node, 1);
517 be_emit_cstring(", ");
518 sparc_emit_reg_or_imm(node, 2);
519 be_emit_cstring(", ");
520 sparc_emit_dest_register(node, 0);
521 be_emit_finish_line_gas(node);
524 static void emit_sparc_SDiv(const ir_node *node)
526 emit_sparc_Div(node, true);
529 static void emit_sparc_UDiv(const ir_node *node)
531 emit_sparc_Div(node, false);
535 * Emits code for Call node
537 static void emit_sparc_Call(const ir_node *node)
539 const sparc_attr_t *attr = get_sparc_attr_const(node);
540 ir_entity *entity = attr->immediate_value_entity;
542 be_emit_cstring("\tcall ");
543 if (entity != NULL) {
544 be_gas_emit_entity(entity);
545 if (attr->immediate_value != 0) {
546 be_emit_irprintf("%+d", attr->immediate_value);
548 be_emit_cstring(", 0");
550 int dest_addr = get_sparc_Call_dest_addr_pos(node);
551 sparc_emit_source_register(node, dest_addr);
553 be_emit_finish_line_gas(node);
557 if (arch_get_irn_flags(node) & sparc_arch_irn_flag_aggregate_return) {
558 be_emit_cstring("\tunimp 8\n");
559 be_emit_write_line();
564 * Emit code for Perm node
566 static void emit_be_Perm(const ir_node *irn)
568 be_emit_cstring("\txor ");
569 sparc_emit_source_register(irn, 1);
570 be_emit_cstring(", ");
571 sparc_emit_source_register(irn, 0);
572 be_emit_cstring(", ");
573 sparc_emit_source_register(irn, 0);
574 be_emit_finish_line_gas(NULL);
576 be_emit_cstring("\txor ");
577 sparc_emit_source_register(irn, 1);
578 be_emit_cstring(", ");
579 sparc_emit_source_register(irn, 0);
580 be_emit_cstring(", ");
581 sparc_emit_source_register(irn, 1);
582 be_emit_finish_line_gas(NULL);
584 be_emit_cstring("\txor ");
585 sparc_emit_source_register(irn, 1);
586 be_emit_cstring(", ");
587 sparc_emit_source_register(irn, 0);
588 be_emit_cstring(", ");
589 sparc_emit_source_register(irn, 0);
590 be_emit_finish_line_gas(irn);
593 /* The stack pointer must always be SPARC_STACK_ALIGNMENT bytes aligned, so get
594 * the next bigger integer that's evenly divisible by it. */
595 static unsigned get_aligned_sp_change(const unsigned num_regs)
597 const unsigned bytes = num_regs * SPARC_REGISTER_SIZE;
598 return round_up2(bytes, SPARC_STACK_ALIGNMENT);
601 /* Spill register l0 or both l0 and l1, depending on n_spilled and n_to_spill.*/
602 static void memperm_emit_spill_registers(const ir_node *node, int n_spilled,
605 assert(n_spilled < n_to_spill);
607 if (n_spilled == 0) {
608 /* We always reserve stack space for two registers because during copy
609 * processing we don't know yet if we also need to handle a cycle which
610 * needs two registers. More complicated code in emit_MemPerm would
611 * prevent wasting SPARC_REGISTER_SIZE bytes of stack space but
612 * it is not worth the worse readability of emit_MemPerm. */
614 /* Keep stack pointer aligned. */
615 unsigned sp_change = get_aligned_sp_change(2);
616 be_emit_irprintf("\tsub %%sp, %u, %%sp", sp_change);
617 be_emit_finish_line_gas(node);
619 /* Spill register l0. */
620 be_emit_irprintf("\tst %%l0, [%%sp%+d]", SPARC_MIN_STACKSIZE);
621 be_emit_finish_line_gas(node);
624 if (n_to_spill == 2) {
625 /* Spill register l1. */
626 be_emit_irprintf("\tst %%l1, [%%sp%+d]", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
627 be_emit_finish_line_gas(node);
631 /* Restore register l0 or both l0 and l1, depending on n_spilled. */
632 static void memperm_emit_restore_registers(const ir_node *node, int n_spilled)
636 if (n_spilled == 2) {
637 /* Restore register l1. */
638 be_emit_irprintf("\tld [%%sp%+d], %%l1", SPARC_MIN_STACKSIZE + SPARC_REGISTER_SIZE);
639 be_emit_finish_line_gas(node);
642 /* Restore register l0. */
643 be_emit_irprintf("\tld [%%sp%+d], %%l0", SPARC_MIN_STACKSIZE);
644 be_emit_finish_line_gas(node);
646 /* Restore stack pointer. */
647 sp_change = get_aligned_sp_change(2);
648 be_emit_irprintf("\tadd %%sp, %u, %%sp", sp_change);
649 be_emit_finish_line_gas(node);
652 /* Emit code to copy in_ent to out_ent. Only uses l0. */
653 static void memperm_emit_copy(const ir_node *node, ir_entity *in_ent,
656 ir_graph *irg = get_irn_irg(node);
657 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
658 int off_in = be_get_stack_entity_offset(layout, in_ent, 0);
659 int off_out = be_get_stack_entity_offset(layout, out_ent, 0);
661 /* Load from input entity. */
662 be_emit_irprintf("\tld [%%fp%+d], %%l0", off_in);
663 be_emit_finish_line_gas(node);
665 /* Store to output entity. */
666 be_emit_irprintf("\tst %%l0, [%%fp%+d]", off_out);
667 be_emit_finish_line_gas(node);
670 /* Emit code to swap ent1 and ent2. Uses l0 and l1. */
671 static void memperm_emit_swap(const ir_node *node, ir_entity *ent1,
674 ir_graph *irg = get_irn_irg(node);
675 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
676 int off1 = be_get_stack_entity_offset(layout, ent1, 0);
677 int off2 = be_get_stack_entity_offset(layout, ent2, 0);
679 /* Load from first input entity. */
680 be_emit_irprintf("\tld [%%fp%+d], %%l0", off1);
681 be_emit_finish_line_gas(node);
683 /* Load from second input entity. */
684 be_emit_irprintf("\tld [%%fp%+d], %%l1", off2);
685 be_emit_finish_line_gas(node);
687 /* Store first value to second output entity. */
688 be_emit_irprintf("\tst %%l0, [%%fp%+d]", off2);
689 be_emit_finish_line_gas(node);
691 /* Store second value to first output entity. */
692 be_emit_irprintf("\tst %%l1, [%%fp%+d]", off1);
693 be_emit_finish_line_gas(node);
696 /* Find the index of ent in ents or return -1 if not found. */
697 static int get_index(ir_entity **ents, int n, ir_entity *ent)
701 for (i = 0; i < n; ++i)
709 * Emit code for a MemPerm node.
711 * Analyze MemPerm for copy chains and cyclic swaps and resolve them using
713 * This function is conceptually very similar to permute_values in
716 static void emit_be_MemPerm(const ir_node *node)
718 int memperm_arity = be_get_MemPerm_entity_arity(node);
719 /* Upper limit for the number of participating entities is twice the
720 * arity, e.g., for a simple copying MemPerm node with one input/output. */
721 int max_size = 2 * memperm_arity;
722 ir_entity **entities = ALLOCANZ(ir_entity *, max_size);
723 /* sourceof contains the input entity for each entity. If an entity is
724 * never used as an output, its entry in sourceof is a fix point. */
725 int *sourceof = ALLOCANZ(int, max_size);
726 /* n_users counts how many output entities use this entity as their input.*/
727 int *n_users = ALLOCANZ(int, max_size);
728 /* n_spilled records the number of spilled registers, either 1 or 2. */
732 for (i = 0; i < max_size; ++i) {
736 for (i = n = 0; i < memperm_arity; ++i) {
737 ir_entity *out = be_get_MemPerm_out_entity(node, i);
738 ir_entity *in = be_get_MemPerm_in_entity(node, i);
739 int oidx; /* Out index */
740 int iidx; /* In index */
742 /* Insert into entities to be able to operate on unique indices. */
743 if (get_index(entities, n, out) == -1)
745 if (get_index(entities, n, in) == -1)
748 oidx = get_index(entities, n, out);
749 iidx = get_index(entities, n, in);
751 sourceof[oidx] = iidx; /* Remember the source. */
752 ++n_users[iidx]; /* Increment number of users of this entity. */
755 /* First do all the copies. */
756 for (oidx = 0; oidx < n; /* empty */) {
757 int iidx = sourceof[oidx];
759 /* Nothing to do for fix points.
760 * Also, if entities[oidx] is used as an input by another copy, we
761 * can't overwrite entities[oidx] yet.*/
762 if (iidx == oidx || n_users[oidx] > 0) {
767 /* We found the end of a 'chain', so do the copy. */
768 if (n_spilled == 0) {
769 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/1);
772 memperm_emit_copy(node, entities[iidx], entities[oidx]);
775 sourceof[oidx] = oidx;
777 assert(n_users[iidx] > 0);
778 /* Decrementing the number of users might enable us to do another
782 if (iidx < oidx && n_users[iidx] == 0) {
789 /* The rest are cycles. */
790 for (oidx = 0; oidx < n; /* empty */) {
791 int iidx = sourceof[oidx];
794 /* Nothing to do for fix points. */
800 assert(n_users[iidx] == 1);
802 /* Swap the two values to resolve the cycle. */
804 memperm_emit_spill_registers(node, n_spilled, /*n_to_spill=*/2);
807 memperm_emit_swap(node, entities[iidx], entities[oidx]);
809 tidx = sourceof[iidx];
811 sourceof[iidx] = iidx;
813 /* The source of oidx is now the old source of iidx, because we swapped
814 * the two entities. */
815 sourceof[oidx] = tidx;
819 /* Only fix points should remain. */
820 for (i = 0; i < max_size; ++i) {
821 assert(sourceof[i] == i);
825 assert(n_spilled > 0 && "Useless MemPerm node");
827 memperm_emit_restore_registers(node, n_spilled);
830 static void emit_sparc_Return(const ir_node *node)
832 ir_graph *irg = get_irn_irg(node);
833 ir_entity *entity = get_irg_entity(irg);
834 ir_type *type = get_entity_type(entity);
836 const char *destreg = "%o7";
838 /* hack: we don't explicitely model register changes because of the
839 * restore node. So we have to do it manually here */
840 if (delay_slot_filler != NULL &&
841 (is_sparc_Restore(delay_slot_filler)
842 || is_sparc_RestoreZero(delay_slot_filler))) {
845 be_emit_cstring("\tjmp ");
846 be_emit_string(destreg);
847 if (get_method_calling_convention(type) & cc_compound_ret) {
848 be_emit_cstring("+12");
850 be_emit_cstring("+8");
852 be_emit_finish_line_gas(node);
856 static void emit_sparc_FrameAddr(const ir_node *node)
858 const sparc_attr_t *attr = get_sparc_attr_const(node);
859 int32_t offset = attr->immediate_value;
862 be_emit_cstring("\tadd ");
863 sparc_emit_source_register(node, 0);
864 be_emit_cstring(", ");
865 assert(sparc_is_value_imm_encodeable(offset));
866 be_emit_irprintf("%ld", offset);
868 be_emit_cstring("\tsub ");
869 sparc_emit_source_register(node, 0);
870 be_emit_cstring(", ");
871 assert(sparc_is_value_imm_encodeable(-offset));
872 be_emit_irprintf("%ld", -offset);
875 be_emit_cstring(", ");
876 sparc_emit_dest_register(node, 0);
877 be_emit_finish_line_gas(node);
880 static const char *get_icc_unsigned(ir_relation relation)
882 switch (relation & (ir_relation_less_equal_greater)) {
883 case ir_relation_false: return "bn";
884 case ir_relation_equal: return "be";
885 case ir_relation_less: return "blu";
886 case ir_relation_less_equal: return "bleu";
887 case ir_relation_greater: return "bgu";
888 case ir_relation_greater_equal: return "bgeu";
889 case ir_relation_less_greater: return "bne";
890 case ir_relation_less_equal_greater: return "ba";
891 default: panic("Cmp has unsupported relation");
895 static const char *get_icc_signed(ir_relation relation)
897 switch (relation & (ir_relation_less_equal_greater)) {
898 case ir_relation_false: return "bn";
899 case ir_relation_equal: return "be";
900 case ir_relation_less: return "bl";
901 case ir_relation_less_equal: return "ble";
902 case ir_relation_greater: return "bg";
903 case ir_relation_greater_equal: return "bge";
904 case ir_relation_less_greater: return "bne";
905 case ir_relation_less_equal_greater: return "ba";
906 default: panic("Cmp has unsupported relation");
910 static const char *get_fcc(ir_relation relation)
913 case ir_relation_false: return "fbn";
914 case ir_relation_equal: return "fbe";
915 case ir_relation_less: return "fbl";
916 case ir_relation_less_equal: return "fble";
917 case ir_relation_greater: return "fbg";
918 case ir_relation_greater_equal: return "fbge";
919 case ir_relation_less_greater: return "fblg";
920 case ir_relation_less_equal_greater: return "fbo";
921 case ir_relation_unordered: return "fbu";
922 case ir_relation_unordered_equal: return "fbue";
923 case ir_relation_unordered_less: return "fbul";
924 case ir_relation_unordered_less_equal: return "fbule";
925 case ir_relation_unordered_greater: return "fbug";
926 case ir_relation_unordered_greater_equal: return "fbuge";
927 case ir_relation_unordered_less_greater: return "fbne";
928 case ir_relation_true: return "fba";
930 panic("invalid relation");
933 typedef const char* (*get_cc_func)(ir_relation relation);
935 static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
937 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
938 ir_relation relation = attr->relation;
939 const ir_node *proj_true = NULL;
940 const ir_node *proj_false = NULL;
941 const ir_edge_t *edge;
942 const ir_node *block;
943 const ir_node *next_block;
945 foreach_out_edge(node, edge) {
946 ir_node *proj = get_edge_src_irn(edge);
947 long nr = get_Proj_proj(proj);
948 if (nr == pn_Cond_true) {
955 /* for now, the code works for scheduled and non-schedules blocks */
956 block = get_nodes_block(node);
958 /* we have a block schedule */
959 next_block = (ir_node*)get_irn_link(block);
961 if (get_irn_link(proj_true) == next_block) {
962 /* exchange both proj's so the second one can be omitted */
963 const ir_node *t = proj_true;
965 proj_true = proj_false;
967 relation = get_negated_relation(relation);
970 /* emit the true proj */
971 be_emit_cstring("\t");
972 be_emit_string(get_cc(relation));
974 sparc_emit_cfop_target(proj_true);
975 be_emit_finish_line_gas(proj_true);
979 if (get_irn_link(proj_false) == next_block) {
980 be_emit_cstring("\t/* fallthrough to ");
981 sparc_emit_cfop_target(proj_false);
982 be_emit_cstring(" */");
983 be_emit_finish_line_gas(proj_false);
985 be_emit_cstring("\tba ");
986 sparc_emit_cfop_target(proj_false);
987 be_emit_finish_line_gas(proj_false);
992 static void emit_sparc_Bicc(const ir_node *node)
994 const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
995 bool is_unsigned = attr->is_unsigned;
996 emit_sparc_branch(node, is_unsigned ? get_icc_unsigned : get_icc_signed);
999 static void emit_sparc_fbfcc(const ir_node *node)
1001 /* if the flags producing node was immediately in front of us, emit
1003 ir_node *flags = get_irn_n(node, n_sparc_fbfcc_flags);
1004 ir_node *prev = sched_prev(node);
1005 if (is_Block(prev)) {
1006 /* TODO: when the flags come from another block, then we have to do
1007 * more complicated tests to see wether the flag producing node is
1008 * potentially in front of us (could happen for fallthroughs) */
1009 panic("TODO: fbfcc flags come from other block");
1011 if (skip_Proj(flags) == prev) {
1012 be_emit_cstring("\tnop\n");
1014 emit_sparc_branch(node, get_fcc);
1017 static void emit_sparc_Ba(const ir_node *node)
1019 if (ba_is_fallthrough(node)) {
1020 be_emit_cstring("\t/* fallthrough to ");
1021 sparc_emit_cfop_target(node);
1022 be_emit_cstring(" */");
1023 be_emit_finish_line_gas(node);
1025 be_emit_cstring("\tba ");
1026 sparc_emit_cfop_target(node);
1027 be_emit_finish_line_gas(node);
1032 static void emit_sparc_SwitchJmp(const ir_node *node)
1034 const sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr_const(node);
1036 be_emit_cstring("\tjmp ");
1037 sparc_emit_source_register(node, 0);
1038 be_emit_finish_line_gas(node);
1041 emit_jump_table(node, attr->default_proj_num, attr->jump_table,
1045 static void emit_fmov(const ir_node *node, const arch_register_t *src_reg,
1046 const arch_register_t *dst_reg)
1048 be_emit_cstring("\tfmovs %");
1049 be_emit_string(arch_register_get_name(src_reg));
1050 be_emit_cstring(", %");
1051 be_emit_string(arch_register_get_name(dst_reg));
1052 be_emit_finish_line_gas(node);
1055 static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
1057 unsigned idx = reg->global_index;
1058 assert(reg == &sparc_registers[idx]);
1060 assert(idx - REG_F0 < N_sparc_fp_REGS);
1061 return &sparc_registers[idx];
1064 static void emit_be_Copy(const ir_node *node)
1066 ir_mode *mode = get_irn_mode(node);
1067 const arch_register_t *src_reg = arch_get_irn_register_in(node, 0);
1068 const arch_register_t *dst_reg = arch_get_irn_register_out(node, 0);
1070 if (src_reg == dst_reg)
1073 if (mode_is_float(mode)) {
1074 unsigned bits = get_mode_size_bits(mode);
1075 int n = bits > 32 ? bits > 64 ? 3 : 1 : 0;
1077 emit_fmov(node, src_reg, dst_reg);
1078 for (i = 0; i < n; ++i) {
1079 src_reg = get_next_fp_reg(src_reg);
1080 dst_reg = get_next_fp_reg(dst_reg);
1081 emit_fmov(node, src_reg, dst_reg);
1083 } else if (mode_is_data(mode)) {
1084 be_emit_cstring("\tmov ");
1085 sparc_emit_source_register(node, 0);
1086 be_emit_cstring(", ");
1087 sparc_emit_dest_register(node, 0);
1088 be_emit_finish_line_gas(node);
1090 panic("emit_be_Copy: invalid mode");
1094 static void emit_nothing(const ir_node *irn)
1099 typedef void (*emit_func) (const ir_node *);
1101 static inline void set_emitter(ir_op *op, emit_func sparc_emit_node)
1103 op->ops.generic = (op_func)sparc_emit_node;
1107 * Enters the emitter functions for handled nodes into the generic
1108 * pointer of an opcode.
1110 static void sparc_register_emitters(void)
1112 /* first clear the generic function pointer for all ops */
1113 clear_irp_opcodes_generic_func();
1114 /* register all emitter functions defined in spec */
1115 sparc_register_spec_emitters();
1117 /* custom emitter */
1118 set_emitter(op_be_Copy, emit_be_Copy);
1119 set_emitter(op_be_CopyKeep, emit_be_Copy);
1120 set_emitter(op_be_IncSP, emit_be_IncSP);
1121 set_emitter(op_be_MemPerm, emit_be_MemPerm);
1122 set_emitter(op_be_Perm, emit_be_Perm);
1123 set_emitter(op_sparc_Ba, emit_sparc_Ba);
1124 set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
1125 set_emitter(op_sparc_Call, emit_sparc_Call);
1126 set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
1127 set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
1128 set_emitter(op_sparc_SMulh, emit_sparc_Mulh);
1129 set_emitter(op_sparc_UMulh, emit_sparc_Mulh);
1130 set_emitter(op_sparc_Return, emit_sparc_Return);
1131 set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
1132 set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
1133 set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
1135 /* no need to emit anything for the following nodes */
1136 set_emitter(op_be_Keep, emit_nothing);
1137 set_emitter(op_sparc_Start, emit_nothing);
1138 set_emitter(op_Phi, emit_nothing);
1142 * Emits code for a node.
1144 static void sparc_emit_node(const ir_node *node)
1146 ir_op *op = get_irn_op(node);
1148 if (op->ops.generic) {
1149 emit_func func = (emit_func) op->ops.generic;
1150 be_dbg_set_dbg_info(get_irn_dbg_info(node));
1153 panic("No emit handler for node %+F (graph %+F)\n", node,
1158 static ir_node *find_next_delay_slot(ir_node *from)
1160 ir_node *schedpoint = from;
1161 while (!has_delay_slot(schedpoint)) {
1162 if (!sched_has_next(schedpoint))
1164 schedpoint = sched_next(schedpoint);
1169 static bool block_needs_label(const ir_node *block, const ir_node *sched_prev)
1173 if (has_Block_entity(block))
1176 n_cfgpreds = get_Block_n_cfgpreds(block);
1177 if (n_cfgpreds == 0) {
1179 } else if (n_cfgpreds > 1) {
1182 ir_node *cfgpred = get_Block_cfgpred(block, 0);
1183 ir_node *cfgpred_block = get_nodes_block(cfgpred);
1184 if (is_Proj(cfgpred) && is_sparc_SwitchJmp(get_Proj_pred(cfgpred)))
1186 return sched_prev != cfgpred_block || get_irn_link(cfgpred) != block;
1191 * Walks over the nodes in a block connected by scheduling edges
1192 * and emits code for each node.
1194 static void sparc_emit_block(ir_node *block, ir_node *prev)
1197 ir_node *next_delay_slot;
1199 assert(is_Block(block));
1201 if (block_needs_label(block, prev)) {
1202 be_gas_emit_block_name(block);
1203 be_emit_cstring(":\n");
1204 be_emit_write_line();
1207 next_delay_slot = find_next_delay_slot(sched_first(block));
1208 if (next_delay_slot != NULL)
1209 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1211 sched_foreach(block, node) {
1212 if (node == delay_slot_filler) {
1216 sparc_emit_node(node);
1218 if (node == next_delay_slot) {
1219 assert(delay_slot_filler == NULL);
1220 next_delay_slot = find_next_delay_slot(sched_next(node));
1221 if (next_delay_slot != NULL)
1222 delay_slot_filler = pick_delay_slot_for(next_delay_slot);
1228 * Emits code for function start.
1230 static void sparc_emit_func_prolog(ir_graph *irg)
1232 ir_entity *ent = get_irg_entity(irg);
1233 be_gas_emit_function_prolog(ent, 4);
1234 be_emit_write_line();
1238 * Emits code for function end
1240 static void sparc_emit_func_epilog(ir_graph *irg)
1242 ir_entity *ent = get_irg_entity(irg);
1243 const char *irg_name = get_entity_ld_name(ent);
1244 be_emit_write_line();
1245 be_emit_irprintf("\t.size %s, .-%s\n", irg_name, irg_name);
1246 be_emit_cstring("# -- End ");
1247 be_emit_string(irg_name);
1248 be_emit_cstring("\n");
1249 be_emit_write_line();
1252 static void sparc_gen_labels(ir_node *block, void *env)
1255 int n = get_Block_n_cfgpreds(block);
1258 for (n--; n >= 0; n--) {
1259 pred = get_Block_cfgpred(block, n);
1260 set_irn_link(pred, block); // link the pred of a block (which is a jmp)
1264 void sparc_emit_routine(ir_graph *irg)
1266 ir_entity *entity = get_irg_entity(irg);
1267 ir_node **block_schedule;
1271 heights = heights_new(irg);
1273 /* register all emitter functions */
1274 sparc_register_emitters();
1275 be_dbg_method_begin(entity);
1277 /* create the block schedule. For now, we don't need it earlier. */
1278 block_schedule = be_create_block_schedule(irg);
1280 sparc_emit_func_prolog(irg);
1281 irg_block_walk_graph(irg, sparc_gen_labels, NULL, NULL);
1283 /* inject block scheduling links & emit code of each block */
1284 n = ARR_LEN(block_schedule);
1285 for (i = 0; i < n; ++i) {
1286 ir_node *block = block_schedule[i];
1287 ir_node *next_block = i+1 < n ? block_schedule[i+1] : NULL;
1288 set_irn_link(block, next_block);
1291 for (i = 0; i < n; ++i) {
1292 ir_node *block = block_schedule[i];
1293 ir_node *prev = i>=1 ? block_schedule[i-1] : NULL;
1294 if (block == get_irg_end_block(irg))
1296 sparc_emit_block(block, prev);
1299 /* emit function epilog */
1300 sparc_emit_func_epilog(irg);
1302 heights_free(heights);
1305 void sparc_init_emitter(void)
1307 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.emit");