/**
 * Emit code for a conditional branch node (shared by integer and
 * float-compare branches).  The condition-code mnemonic is produced by
 * the @p get_cc callback applied to the node's relation.
 *
 * If the true target is the next block in the block schedule, the two
 * projs are swapped (and the relation negated) so the second jump can
 * become a fallthrough.
 */
static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
{
	const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
	ir_relation relation = attr->relation;
	const ir_node *proj_true = NULL;
	const ir_node *proj_false = NULL;
	const ir_edge_t *edge;
	const ir_node *block;
	const ir_node *next_block;

	/* find the Projs for the true and the false branch */
	foreach_out_edge(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long nr = get_Proj_proj(proj);
		if (nr == pn_Cond_true) {
			proj_true = proj;
		} else {
			proj_false = proj;
		}
	}

	/* for now, the code works for scheduled and non-scheduled blocks */
	block = get_nodes_block(node);

	/* we have a block schedule: the link field holds the successor block */
	next_block = (ir_node*)get_irn_link(block);

	if (get_irn_link(proj_true) == next_block) {
		/* exchange both proj's so the second one can be omitted */
		const ir_node *t = proj_true;

		proj_true = proj_false;
		proj_false = t;
		/* negate the condition to keep the branch semantics after the swap */
		relation = get_negated_relation(relation);
	}

	/* emit the true proj */
	be_emit_cstring("\t");
	be_emit_string(get_cc(relation));
	be_emit_char(' ');
	sparc_emit_cfop_target(proj_true);
	be_emit_finish_line_gas(proj_true);

	fill_delay_slot();

	if (get_irn_link(proj_false) == next_block) {
		/* false target is the next block: no jump needed */
		be_emit_cstring("\t/* fallthrough to ");
		sparc_emit_cfop_target(proj_false);
		be_emit_cstring(" */");
		be_emit_finish_line_gas(proj_false);
	} else {
		be_emit_cstring("\tba ");
		sparc_emit_cfop_target(proj_false);
		be_emit_finish_line_gas(proj_false);
		fill_delay_slot();
	}
}
+
+static void emit_sparc_Bicc(const ir_node *node)
+{
+ const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
+ bool is_unsigned = attr->is_unsigned;
+ emit_sparc_branch(node, is_unsigned ? get_icc_unsigned : get_icc_signed);
+}
+
+static void emit_sparc_fbfcc(const ir_node *node)
+{
+ emit_sparc_branch(node, get_fcc);
+}
+
+static void emit_sparc_Ba(const ir_node *node)
+{
+ if (ba_is_fallthrough(node)) {
+ be_emit_cstring("\t/* fallthrough to ");
+ sparc_emit_cfop_target(node);
+ be_emit_cstring(" */");
+ } else {
+ be_emit_cstring("\tba ");
+ sparc_emit_cfop_target(node);
+ be_emit_finish_line_gas(node);
+ fill_delay_slot();
+ }
+ be_emit_finish_line_gas(node);
+}
+
/**
 * Emit the jump table for a SwitchJmp node into the read-only data
 * section.  Collects the jump targets from the node's Projs, fills
 * unused slots with the default target and emits one .long entry per
 * possible switch value.
 */
static void emit_jump_table(const ir_node *node)
{
	const sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr_const(node);
	long switch_max = LONG_MIN;
	long default_pn = attr->default_proj_num;
	ir_entity *entity = attr->jump_table;
	ir_node *default_block = NULL;
	unsigned long length;
	const ir_edge_t *edge;
	unsigned i;
	ir_node **table;

	/* go over all proj's and collect them */
	foreach_out_edge(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long pn = get_Proj_proj(proj);

		/* check for default proj */
		if (pn == default_pn) {
			assert(default_block == NULL); /* more than 1 default_pn? */
			default_block = get_jump_target(proj);
		} else {
			/* remember the highest proj number so we can size the table */
			switch_max = pn > switch_max ? pn : switch_max;
		}
	}
	assert(switch_max > LONG_MIN);

	length = (unsigned long) switch_max + 1;
	/* the 16000 isn't a real limit of the architecture. But should protect us
	 * from seemingly endless compiler runs */
	if (length > 16000) {
		/* switch lowerer should have broken this monster to pieces... */
		panic("too large switch encountered");
	}

	/* second pass: build a dense table (index = proj number, zero-filled) */
	table = XMALLOCNZ(ir_node*, length);
	foreach_out_edge(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long pn = get_Proj_proj(proj);
		if (pn == default_pn)
			continue;

		table[pn] = get_jump_target(proj);
	}

	/* emit table */
	be_gas_emit_switch_section(GAS_SECTION_RODATA);
	be_emit_cstring("\t.align 4\n");
	be_gas_emit_entity(entity);
	be_emit_cstring(":\n");
	for (i = 0; i < length; ++i) {
		ir_node *block = table[i];
		/* holes in the proj range jump to the default target */
		if (block == NULL)
			block = default_block;
		be_emit_cstring("\t.long ");
		be_gas_emit_block_name(block);
		be_emit_char('\n');
		be_emit_write_line();
	}
	be_gas_emit_switch_section(GAS_SECTION_TEXT);

	xfree(table);
}
+
+static void emit_sparc_SwitchJmp(const ir_node *node)
+{
+ be_emit_cstring("\tjmp ");
+ sparc_emit_source_register(node, 0);
+ be_emit_finish_line_gas(node);
+ fill_delay_slot();
+
+ emit_jump_table(node);
+}
+
/**
 * Emit a single-precision float register-to-register move:
 *   fmovs %src, %dst
 * Wider float copies are expanded into several of these by the caller.
 */
static void emit_fmov(const ir_node *node, const arch_register_t *src_reg,
                      const arch_register_t *dst_reg)
{
	be_emit_cstring("\tfmovs %");
	be_emit_string(arch_register_get_name(src_reg));
	be_emit_cstring(", %");
	be_emit_string(arch_register_get_name(dst_reg));
	be_emit_finish_line_gas(node);
}
+
+static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
+{
+ unsigned index = reg->global_index;
+ assert(reg == &sparc_registers[index]);
+ index++;
+ assert(index - REG_F0 < N_sparc_fp_REGS);
+ return &sparc_registers[index];
+}
+
+static void emit_be_Copy(const ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+ const arch_register_t *src_reg = get_in_reg(node, 0);
+ const arch_register_t *dst_reg = get_out_reg(node, 0);
+
+ if (src_reg == dst_reg)
+ return;
+
+ if (mode_is_float(mode)) {
+ unsigned bits = get_mode_size_bits(mode);
+ int n = bits > 32 ? bits > 64 ? 3 : 1 : 0;
+ int i;
+ emit_fmov(node, src_reg, dst_reg);
+ for (i = 0; i < n; ++i) {
+ src_reg = get_next_fp_reg(src_reg);
+ dst_reg = get_next_fp_reg(dst_reg);
+ emit_fmov(node, src_reg, dst_reg);
+ }
+ } else if (mode_is_data(mode)) {
+ be_emit_cstring("\tmov ");
+ sparc_emit_source_register(node, 0);
+ be_emit_cstring(", ");
+ sparc_emit_dest_register(node, 0);
+ be_emit_finish_line_gas(node);
+ } else {
+ panic("emit_be_Copy: invalid mode");
+ }
+}
+
/** Emitter callback for nodes that produce no assembly output. */
static void emit_nothing(const ir_node *irn)
{
	(void) irn;
}