diff --git a/ir/be/sparc/sparc_emitter.c b/ir/be/sparc/sparc_emitter.c
index b8ad1c38f..15de55d3a 100644
--- a/ir/be/sparc/sparc_emitter.c
+++ b/ir/be/sparc/sparc_emitter.c
@@ -65,7 +65,6 @@ static ir_heights_t *heights;
 static unsigned     *delay_slot_fillers;
 static pmap         *delay_slots;
 
-static void sparc_emit_node(const ir_node *node);
 static bool          emitting_delay_slot;
 
 /**
@@ -128,14 +127,14 @@ static void sparc_emit_source_register(ir_node const *node, int const pos)
 {
 	const arch_register_t *reg = arch_get_irn_register_in(node, pos);
 	be_emit_char('%');
-	be_emit_string(arch_register_get_name(reg));
+	be_emit_string(reg->name);
 }
 
 static void sparc_emit_dest_register(ir_node const *const node, int const pos)
 {
 	const arch_register_t *reg = arch_get_irn_register_out(node, pos);
 	be_emit_char('%');
-	be_emit_string(arch_register_get_name(reg));
+	be_emit_string(reg->name);
 }
 
 /**
@@ -320,8 +319,7 @@ static bool uses_reg(const ir_node *node, unsigned reg_index, unsigned width)
 
 static bool writes_reg(const ir_node *node, unsigned reg_index, unsigned width)
 {
-	unsigned n_outs = arch_get_irn_n_outs(node);
-	for (unsigned o = 0; o < n_outs; ++o) {
+	be_foreach_out(node, o) {
 		const arch_register_t *out_reg = arch_get_irn_register_out(node, o);
 		if (out_reg == NULL)
 			continue;
@@ -402,7 +400,7 @@ static bool can_move_up_into_delayslot(const ir_node *node, const ir_node *to)
 
 	/* register window cycling effects at Restore aren't correctly represented
 	 * in the graph yet so we need this exception here */
-	if (is_sparc_Restore(to) || is_sparc_RestoreZero(to)) {
+	if (is_sparc_Restore(node) || is_sparc_RestoreZero(node)) {
 		return false;
 	} else if (is_sparc_Call(to)) {
 		/* node must not overwrite any of the inputs of the call,
@@ -423,8 +421,7 @@ static bool can_move_up_into_delayslot(const ir_node *node, const ir_node *to)
 		}
 
 		/* node must not write to one of the call outputs */
-		unsigned n_call_outs = arch_get_irn_n_outs(to);
-		for (unsigned o = 0; o < n_call_outs; ++o) {
+		be_foreach_out(to, o) {
 			const arch_register_t *reg = arch_get_irn_register_out(to, o);
 			if (reg == NULL)
 				continue;
@@ -664,7 +661,7 @@ void sparc_emitf(ir_node const *const node, char const *fmt, ...)
 		case 'R': {
 			arch_register_t const *const reg = va_arg(ap, const arch_register_t*);
 			be_emit_char('%');
-			be_emit_string(arch_register_get_name(reg));
+			be_emit_string(reg->name);
 			break;
 		}
 
@@ -743,7 +740,7 @@ static void fill_delay_slot(const ir_node *node)
 	if (filler != NULL) {
 		assert(!is_no_instruction(filler));
 		assert(!emits_multiple_instructions(filler));
-		sparc_emit_node(filler);
+		be_emit_node(filler);
 	} else {
 		sparc_emitf(NULL, "nop");
 	}
@@ -1268,18 +1265,6 @@ static void emit_be_Copy(const ir_node *node)
 	}
 }
 
-static void emit_nothing(const ir_node *irn)
-{
-	(void) irn;
-}
-
-typedef void (*emit_func) (const ir_node *);
-
-static inline void set_emitter(ir_op *op, emit_func sparc_emit_node)
-{
-	op->ops.generic = (op_func)sparc_emit_node;
-}
-
 /**
  * Enters the emitter functions for handled nodes into the generic
  * pointer of an opcode.
@@ -1292,44 +1277,27 @@ static void sparc_register_emitters(void)
 	sparc_register_spec_emitters();
 
 	/* custom emitter */
-	set_emitter(op_be_Copy, emit_be_Copy);
-	set_emitter(op_be_CopyKeep, emit_be_Copy);
-	set_emitter(op_be_IncSP, emit_be_IncSP);
-	set_emitter(op_be_MemPerm, emit_be_MemPerm);
-	set_emitter(op_be_Perm, emit_be_Perm);
-	set_emitter(op_sparc_Ba, emit_sparc_Ba);
-	set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
-	set_emitter(op_sparc_Call, emit_sparc_Call);
-	set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
-	set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
-	set_emitter(op_sparc_SubSP, emit_sparc_SubSP);
-	set_emitter(op_sparc_Restore, emit_sparc_Restore);
-	set_emitter(op_sparc_Return, emit_sparc_Return);
-	set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
-	set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
-	set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
+	be_set_emitter(op_be_Copy, emit_be_Copy);
+	be_set_emitter(op_be_CopyKeep, emit_be_Copy);
+	be_set_emitter(op_be_IncSP, emit_be_IncSP);
+	be_set_emitter(op_be_MemPerm, emit_be_MemPerm);
+	be_set_emitter(op_be_Perm, emit_be_Perm);
+	be_set_emitter(op_sparc_Ba, emit_sparc_Ba);
+	be_set_emitter(op_sparc_Bicc, emit_sparc_Bicc);
+	be_set_emitter(op_sparc_Call, emit_sparc_Call);
+	be_set_emitter(op_sparc_FrameAddr, emit_sparc_FrameAddr);
+	be_set_emitter(op_sparc_Restore, emit_sparc_Restore);
+	be_set_emitter(op_sparc_Return, emit_sparc_Return);
+	be_set_emitter(op_sparc_SDiv, emit_sparc_SDiv);
+	be_set_emitter(op_sparc_SubSP, emit_sparc_SubSP);
+	be_set_emitter(op_sparc_SwitchJmp, emit_sparc_SwitchJmp);
+	be_set_emitter(op_sparc_UDiv, emit_sparc_UDiv);
+	be_set_emitter(op_sparc_fbfcc, emit_sparc_fbfcc);
 
 	/* no need to emit anything for the following nodes */
-	set_emitter(op_be_Keep, emit_nothing);
-	set_emitter(op_sparc_Start, emit_nothing);
-	set_emitter(op_Phi, emit_nothing);
-}
-
-/**
- * Emits code for a node.
- */
-static void sparc_emit_node(const ir_node *node)
-{
-	ir_op *op = get_irn_op(node);
-
-	if (op->ops.generic) {
-		emit_func func = (emit_func) op->ops.generic;
-		be_dwarf_location(get_irn_dbg_info(node));
-		(*func) (node);
-	} else {
-		panic("No emit handler for node %+F (graph %+F)\n", node,
-		      get_irn_irg(node));
-	}
+	be_set_emitter(op_Phi, be_emit_nothing);
+	be_set_emitter(op_be_Keep, be_emit_nothing);
+	be_set_emitter(op_sparc_Start, be_emit_nothing);
 }
 
 static bool block_needs_label(const ir_node *block, const ir_node *sched_prev)
@@ -1363,7 +1331,7 @@ static void sparc_emit_block(ir_node *block, ir_node *prev)
 	sched_foreach(block, node) {
 		if (rbitset_is_set(delay_slot_fillers, get_irn_idx(node)))
 			continue;
-		sparc_emit_node(node);
+		be_emit_node(node);
 	}
 }
 
@@ -1418,7 +1386,7 @@ static void pick_delay_slots(size_t n_blocks, ir_node **blocks)
 	      cmp_block_execfreqs);
 
 	for (size_t i = 0; i < n_blocks; ++i) {
-		const ir_node *block = blocks[i];
+		const ir_node *block = sorted_blocks[i];
 		sched_foreach(block, node) {
 			if (!has_delay_slot(node))
 				continue;
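Note on the dispatch scheme this patch switches to: the removed sparc-local code above shows how emitting works, i.e. each ir_op carries an emit handler in op->ops.generic, and emitting a node means looking that handler up and calling it after recording the debug location. The sketch below reconstructs what the shared be_set_emitter()/be_emit_node()/be_emit_nothing() helpers presumably look like, modelled directly on the deleted set_emitter()/sparc_emit_node()/emit_nothing(); the actual implementations live in libfirm's common backend (ir/be), and the header names, function bodies and panic message here are assumptions, not the real code.

/* Sketch only: modelled on the sparc-local code removed by this patch.
 * The real be_set_emitter()/be_emit_node()/be_emit_nothing() come from
 * libfirm's shared backend; exact headers and bodies are assumed here. */
#include "irnode_t.h"   /* get_irn_op(), get_irn_dbg_info() (assumed path) */
#include "irop_t.h"     /* ir_op, op->ops.generic, op_func (assumed path) */
#include "bedwarf.h"    /* be_dwarf_location() (assumed path) */
#include "panic.h"      /* panic() (assumed path) */

typedef void (*emit_func)(ir_node const *node);

/* Register an emit handler in the generic pointer of an opcode. */
static inline void be_set_emitter(ir_op *const op, emit_func const func)
{
	op->ops.generic = (op_func)func;
}

/* Handler for nodes that produce no code (Phi, Keep, Start, ...). */
static void be_emit_nothing(ir_node const *const node)
{
	(void)node;
}

/* Dispatch: emit debug location info, then call the registered handler. */
static void be_emit_node(ir_node const *const node)
{
	ir_op *const op = get_irn_op(node);
	if (op->ops.generic == NULL)
		panic("no emit handler for node %+F", node);
	be_dwarf_location(get_irn_dbg_info(node));
	((emit_func)op->ops.generic)(node);
}

With this in place, a backend only fills the per-opcode table in its register_emitters() function (as sparc_register_emitters() does above), and the generic scheduling walk calls be_emit_node() for each scheduled node instead of a backend-private dispatcher.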