static unsigned *delay_slot_fillers;
static pmap *delay_slots;
-static void sparc_emit_node(const ir_node *node);
static bool emitting_delay_slot;
/**
{
const arch_register_t *reg = arch_get_irn_register_in(node, pos);
be_emit_char('%');
- be_emit_string(arch_register_get_name(reg));
+ be_emit_string(reg->name);
}
/**
 * Emits the name of the register assigned to output @p pos of @p node,
 * prefixed with '%' (SPARC assembler register syntax).
 * The hunk replaces the arch_register_get_name() accessor with a direct
 * read of reg->name, matching the same change in the source-register
 * emitter above.
 */
static void sparc_emit_dest_register(ir_node const *const node, int const pos)
{
const arch_register_t *reg = arch_get_irn_register_out(node, pos);
be_emit_char('%');
- be_emit_string(arch_register_get_name(reg));
+ be_emit_string(reg->name);
}
/**
static bool writes_reg(const ir_node *node, unsigned reg_index, unsigned width)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
- for (unsigned o = 0; o < n_outs; ++o) {
+ be_foreach_out(node, o) {
const arch_register_t *out_reg = arch_get_irn_register_out(node, o);
if (out_reg == NULL)
continue;
/* register window cycling effects at Restore aren't correctly represented
* in the graph yet so we need this exception here */
- if (is_sparc_Restore(to) || is_sparc_RestoreZero(to)) {
+ if (is_sparc_Restore(node) || is_sparc_RestoreZero(node)) {
return false;
} else if (is_sparc_Call(to)) {
/* node must not overwrite any of the inputs of the call,
}
/* node must not write to one of the call outputs */
- unsigned n_call_outs = arch_get_irn_n_outs(to);
- for (unsigned o = 0; o < n_call_outs; ++o) {
+ be_foreach_out(to, o) {
const arch_register_t *reg = arch_get_irn_register_out(to, o);
if (reg == NULL)
continue;
case 'R': {
arch_register_t const *const reg = va_arg(ap, const arch_register_t*);
be_emit_char('%');
- be_emit_string(arch_register_get_name(reg));
+ be_emit_string(reg->name);
break;
}
if (filler != NULL) {
assert(!is_no_instruction(filler));
assert(!emits_multiple_instructions(filler));
- sparc_emit_node(filler);
+ be_emit_node(filler);
} else {
sparc_emitf(NULL, "nop");
}
/**
 * Emits code for a be_Perm node, which exchanges the values of its two
 * input registers in place.
 *
 * Integer registers are swapped with the classic three-xor trick, which
 * needs no scratch register. The hunk adds a float path: float register
 * pairs are swapped component-wise with fmovs, using %f31 as scratch —
 * one three-instruction swap per 32-bit sub-register of the requirement's
 * width (NOTE(review): this presumes %f31 is reserved/free at this point —
 * confirm against the register allocator's constraints).
 */
static void emit_be_Perm(const ir_node *irn)
{
- sparc_emitf(irn, "xor %S1, %S0, %S0");
- sparc_emitf(irn, "xor %S1, %S0, %S1");
- sparc_emitf(irn, "xor %S1, %S0, %S0");
+ ir_mode *mode = get_irn_mode(get_irn_n(irn, 0));
+ if (mode_is_float(mode)) {
+ const arch_register_t *reg0 = arch_get_irn_register_in(irn, 0);
+ const arch_register_t *reg1 = arch_get_irn_register_in(irn, 1);
+ unsigned reg_idx0 = reg0->global_index;
+ unsigned reg_idx1 = reg1->global_index;
+ unsigned width = arch_get_irn_register_req_in(irn, 0)->width;
+ for (unsigned i = 0; i < width; ++i) {
+ const arch_register_t *r0 = &sparc_registers[reg_idx0+i];
+ const arch_register_t *r1 = &sparc_registers[reg_idx1+i];
+ sparc_emitf(irn, "fmovs %R, %%f31", r0);
+ sparc_emitf(irn, "fmovs %R, %R", r1, r0);
+ sparc_emitf(irn, "fmovs %%f31, %R", r1);
+ }
+ } else {
+ sparc_emitf(irn, "xor %S1, %S0, %S0");
+ sparc_emitf(irn, "xor %S1, %S0, %S1");
+ sparc_emitf(irn, "xor %S1, %S0, %S0");
+ }
}
/* The stack pointer must always be SPARC_STACK_ALIGNMENT bytes aligned, so get
}
}
-static void emit_nothing(const ir_node *irn)
-{
- (void) irn;
-}
-
-typedef void (*emit_func) (const ir_node *);
-
-static inline void set_emitter(ir_op *op, emit_func sparc_emit_node)
-{
- op->ops.generic = (op_func)sparc_emit_node;
-}
-
/**
* Enters the emitter functions for handled nodes into the generic
* pointer of an opcode.
sparc_register_spec_emitters();
/* custom emitter */
- set_emitter(op_be_Copy, emit_be_Copy);
- set_emitter(op_be_CopyKeep, emit_be_Copy);
- set_emitter(op_be_IncSP, emit_be_IncSP);
- set_emitter(op_be_MemPerm, emit_be_MemPerm);
- set_emitter(op_be_Perm, emit_be_Perm);
- set_emitter(op_sparc_Ba, emit_sparc_Ba);
- set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
- set_emitter(op_sparc_Call, emit_sparc_Call);
- set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
- set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
- set_emitter(op_sparc_SubSP, emit_sparc_SubSP);
- set_emitter(op_sparc_Restore, emit_sparc_Restore);
- set_emitter(op_sparc_Return, emit_sparc_Return);
- set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
- set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
- set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
+ be_set_emitter(op_be_Copy, emit_be_Copy);
+ be_set_emitter(op_be_CopyKeep, emit_be_Copy);
+ be_set_emitter(op_be_IncSP, emit_be_IncSP);
+ be_set_emitter(op_be_MemPerm, emit_be_MemPerm);
+ be_set_emitter(op_be_Perm, emit_be_Perm);
+ be_set_emitter(op_sparc_Ba, emit_sparc_Ba);
+ be_set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
+ be_set_emitter(op_sparc_Call, emit_sparc_Call);
+ be_set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
+ be_set_emitter(op_sparc_Restore, emit_sparc_Restore);
+ be_set_emitter(op_sparc_Return, emit_sparc_Return);
+ be_set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
+ be_set_emitter(op_sparc_SubSP, emit_sparc_SubSP);
+ be_set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
+ be_set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
+ be_set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
/* no need to emit anything for the following nodes */
- set_emitter(op_be_Keep, emit_nothing);
- set_emitter(op_sparc_Start, emit_nothing);
- set_emitter(op_Phi, emit_nothing);
-}
-
-/**
- * Emits code for a node.
- */
-static void sparc_emit_node(const ir_node *node)
-{
- ir_op *op = get_irn_op(node);
-
- if (op->ops.generic) {
- emit_func func = (emit_func) op->ops.generic;
- be_dwarf_location(get_irn_dbg_info(node));
- (*func) (node);
- } else {
- panic("No emit handler for node %+F (graph %+F)\n", node,
- get_irn_irg(node));
- }
+ be_set_emitter(op_Phi, be_emit_nothing);
+ be_set_emitter(op_be_Keep, be_emit_nothing);
+ be_set_emitter(op_sparc_Start, be_emit_nothing);
}
static bool block_needs_label(const ir_node *block, const ir_node *sched_prev)
sched_foreach(block, node) {
if (rbitset_is_set(delay_slot_fillers, get_irn_idx(node)))
continue;
- sparc_emit_node(node);
+ be_emit_node(node);
}
}
cmp_block_execfreqs);
for (size_t i = 0; i < n_blocks; ++i) {
- const ir_node *block = blocks[i];
+ const ir_node *block = sorted_blocks[i];
sched_foreach(block, node) {
if (!has_delay_slot(node))
continue;