X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fsparc%2Fbearch_sparc.c;h=4dc330fe2c1158b3e22481e09350d1d8a89a6ce3;hb=5474a1c188c9d59eea2c915515980cd9cbab58d8;hp=7108dbdb923b6e28efd8fc8bcef354fb9788f51c;hpb=f46792d0e49d452ef92de2a802ae894c0ce30bbb;p=libfirm diff --git a/ir/be/sparc/bearch_sparc.c b/ir/be/sparc/bearch_sparc.c index 7108dbdb9..4dc330fe2 100644 --- a/ir/be/sparc/bearch_sparc.c +++ b/ir/be/sparc/bearch_sparc.c @@ -20,7 +20,7 @@ /** * @file * @brief The main sparc backend driver file. - * @version $Id$ + * @author Hannes Rapp, Matthias Braun */ #include "config.h" @@ -36,7 +36,14 @@ #include "iroptimize.h" #include "irtools.h" #include "irdump.h" +#include "iropt_t.h" #include "lowering.h" +#include "lower_dw.h" +#include "lower_alloc.h" +#include "lower_builtins.h" +#include "lower_calls.h" +#include "lower_mode_b.h" +#include "lower_softfloat.h" #include "bitset.h" #include "debug.h" @@ -44,18 +51,17 @@ #include "error.h" #include "util.h" -#include "../bearch.h" -#include "../benode.h" -#include "../belower.h" -#include "../besched.h" +#include "bearch.h" +#include "benode.h" +#include "belower.h" +#include "besched.h" #include "be.h" -#include "../bemachine.h" -#include "../bemodule.h" -#include "../beirg.h" -#include "../bespillslots.h" -#include "../begnuas.h" -#include "../belistsched.h" -#include "../beflags.h" +#include "bemodule.h" +#include "beirg.h" +#include "begnuas.h" +#include "belistsched.h" +#include "beflags.h" +#include "beutil.h" #include "bearch_sparc_t.h" @@ -63,15 +69,10 @@ #include "gen_sparc_regalloc_if.h" #include "sparc_transform.h" #include "sparc_emitter.h" +#include "sparc_cconv.h" DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) -static arch_irn_class_t sparc_classify(const ir_node *node) -{ - (void) node; - return 0; -} - static ir_entity *sparc_get_frame_entity(const ir_node *node) { if (is_sparc_FrameAddr(node)) { @@ -107,20 +108,20 @@ static void sparc_set_frame_offset(ir_node *node, int offset) static int sparc_get_sp_bias(const ir_node *node) { if (is_sparc_Save(node)) { - const sparc_save_attr_t *attr = get_sparc_save_attr_const(node); - /* Note we do not retport the change of the SPARC_MIN_STACKSIZE - * size, since we have additional magic in the emitter which - * calculates that! 
*/ - assert(attr->initial_stacksize >= SPARC_MIN_STACKSIZE); - return attr->initial_stacksize - SPARC_MIN_STACKSIZE; + const sparc_attr_t *attr = get_sparc_attr_const(node); + if (get_irn_arity(node) == 3) + panic("no support for _reg variant yet"); + + return -attr->immediate_value; + } else if (is_sparc_RestoreZero(node)) { + return SP_BIAS_RESET; } return 0; } /* fill register allocator interface */ -static const arch_irn_ops_t sparc_irn_ops = { - sparc_classify, +const arch_irn_ops_t sparc_irn_ops = { sparc_get_frame_entity, sparc_set_frame_offset, sparc_get_sp_bias, @@ -141,12 +142,12 @@ static void sparc_prepare_graph(ir_graph *irg) static bool sparc_modifies_flags(const ir_node *node) { - return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_flags; + return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_flags; } static bool sparc_modifies_fp_flags(const ir_node *node) { - return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_fp_flags; + return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_fp_flags; } static void sparc_before_ra(ir_graph *irg) @@ -158,146 +159,22 @@ static void sparc_before_ra(ir_graph *irg) NULL, sparc_modifies_fp_flags); } -/** - * transform reload node => load - */ -static void transform_Reload(ir_node *node) -{ - ir_node *block = get_nodes_block(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *ptr = get_irn_n(node, be_pos_Spill_frame); - ir_node *mem = get_irn_n(node, be_pos_Reload_mem); - ir_mode *mode = get_irn_mode(node); - ir_entity *entity = be_get_frame_entity(node); - const arch_register_t *reg; - ir_node *proj; - ir_node *load; - - ir_node *sched_point = sched_prev(node); - - load = new_bd_sparc_Ld_imm(dbgi, block, ptr, mem, mode, entity, 0, true); - sched_add_after(sched_point, load); - sched_remove(node); - - proj = new_rd_Proj(dbgi, load, mode, pn_sparc_Ld_res); - - reg = arch_get_irn_register(node); - arch_set_irn_register(proj, reg); - - exchange(node, proj); -} - -/** - * transform spill node => store - */ -static void transform_Spill(ir_node *node) -{ - ir_node *block = get_nodes_block(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *ptr = get_irn_n(node, be_pos_Spill_frame); - ir_node *mem = new_NoMem(); - ir_node *val = get_irn_n(node, be_pos_Spill_val); - ir_mode *mode = get_irn_mode(val); - ir_entity *entity = be_get_frame_entity(node); - ir_node *sched_point; - ir_node *store; - - sched_point = sched_prev(node); - store = new_bd_sparc_St_imm(dbgi, block, val, ptr, mem, mode, entity, 0, true); - sched_remove(node); - sched_add_after(sched_point, store); - - exchange(node, store); -} - -/** - * walker to transform be_Spill and be_Reload nodes - */ -static void sparc_after_ra_walker(ir_node *block, void *data) -{ - ir_node *node, *prev; - (void) data; - - for (node = sched_last(block); !sched_is_begin(node); node = prev) { - prev = sched_prev(node); - - if (be_is_Reload(node)) { - transform_Reload(node); - } else if (be_is_Spill(node)) { - transform_Spill(node); - } - } -} - -static void sparc_collect_frame_entity_nodes(ir_node *node, void *data) -{ - be_fec_env_t *env = data; - const ir_mode *mode; - int align; - ir_entity *entity; - const sparc_load_store_attr_t *attr; - - if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) { - mode = get_irn_mode(node); - align = get_mode_size_bytes(mode); - be_node_needs_frame_entity(env, node, mode, align); - return; - } - - if (!is_sparc_Ld(node) && !is_sparc_Ldf(node)) - return; - - attr = get_sparc_load_store_attr_const(node); - entity 
= attr->base.immediate_value_entity; - mode = attr->load_store_mode; - if (entity != NULL) - return; - if (!attr->is_frame_entity) - return; - if (arch_irn_get_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot) - mode = mode_Lu; - align = get_mode_size_bytes(mode); - be_node_needs_frame_entity(env, node, mode, align); -} - -static void sparc_set_frame_entity(ir_node *node, ir_entity *entity) -{ - if (is_be_node(node)) { - be_node_set_frame_entity(node, entity); - } else { - /* we only say be_node_needs_frame_entity on nodes with load_store - * attributes, so this should be fine */ - sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node); - assert(attr->is_frame_entity); - assert(attr->base.immediate_value_entity == NULL); - attr->base.immediate_value_entity = entity; - } -} - -static void sparc_after_ra(ir_graph *irg) -{ - be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg); - - irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env); - be_assign_entities(fec_env, sparc_set_frame_entity); - be_free_frame_entity_coalescer(fec_env); - - irg_block_walk_graph(irg, NULL, sparc_after_ra_walker, NULL); -} - static void sparc_init_graph(ir_graph *irg) { (void) irg; } -const arch_isa_if_t sparc_isa_if; +extern const arch_isa_if_t sparc_isa_if; static sparc_isa_t sparc_isa_template = { { &sparc_isa_if, /* isa interface implementation */ - &sparc_gp_regs[REG_SP], /* stack pointer register */ - &sparc_gp_regs[REG_FRAME_POINTER], /* base pointer register */ + N_SPARC_REGISTERS, + sparc_registers, + N_SPARC_CLASSES, + sparc_reg_classes, + &sparc_registers[REG_SP], /* stack pointer register */ + &sparc_registers[REG_FRAME_POINTER],/* base pointer register */ &sparc_reg_classes[CLASS_sparc_gp], /* link pointer register class */ - -1, /* stack direction */ 3, /* power of two stack alignment for calls */ NULL, /* main environment */ @@ -305,7 +182,8 @@ static sparc_isa_t sparc_isa_template = { 5, /* costs for a reload instruction */ true, /* custom abi handling */ }, - NULL, /* constants */ + NULL, /* constants */ + SPARC_FPU_ARCH_FPU, /* FPU architecture */ }; /** @@ -327,28 +205,28 @@ static void rewrite_unsigned_float_Conv(ir_node *node) part_block(node); { - ir_node *block = get_nodes_block(node); - ir_node *unsigned_x = get_Conv_op(node); - ir_mode *mode_u = get_irn_mode(unsigned_x); - ir_mode *mode_s = find_signed_mode(mode_u); - ir_mode *mode_d = mode_D; - ir_node *signed_x = new_rd_Conv(dbgi, block, unsigned_x, mode_s); - ir_node *res = new_rd_Conv(dbgi, block, signed_x, mode_d); - ir_node *zero = new_r_Const(irg, get_mode_null(mode_s)); - ir_node *cmp = new_rd_Cmp(dbgi, block, signed_x, zero); - ir_node *proj_lt = new_r_Proj(cmp, mode_b, pn_Cmp_Lt); - ir_node *cond = new_rd_Cond(dbgi, block, proj_lt); - ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true); - ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false); - ir_node *in_true[1] = { proj_true }; - ir_node *in_false[1] = { proj_false }; - ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true); - ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false); - ir_node *true_jmp = new_r_Jmp(true_block); - ir_node *false_jmp = new_r_Jmp(false_block); - tarval *correction = new_tarval_from_double(4294967296., mode_d); - ir_node *c_const = new_r_Const(irg, correction); - ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const, + ir_node *block = get_nodes_block(node); + ir_node *unsigned_x = get_Conv_op(node); + ir_mode *mode_u = get_irn_mode(unsigned_x); + ir_mode *mode_s = 
find_signed_mode(mode_u); + ir_mode *mode_d = mode_D; + ir_node *signed_x = new_rd_Conv(dbgi, block, unsigned_x, mode_s); + ir_node *res = new_rd_Conv(dbgi, block, signed_x, mode_d); + ir_node *zero = new_r_Const(irg, get_mode_null(mode_s)); + ir_node *cmp = new_rd_Cmp(dbgi, block, signed_x, zero, + ir_relation_less); + ir_node *cond = new_rd_Cond(dbgi, block, cmp); + ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true); + ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false); + ir_node *in_true[1] = { proj_true }; + ir_node *in_false[1] = { proj_false }; + ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true); + ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false); + ir_node *true_jmp = new_r_Jmp(true_block); + ir_node *false_jmp = new_r_Jmp(false_block); + ir_tarval *correction = new_tarval_from_double(4294967296., mode_d); + ir_node *c_const = new_r_Const(irg, correction); + ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const, mode_d); ir_node *lower_in[2] = { true_jmp, false_jmp }; @@ -369,19 +247,90 @@ static void rewrite_unsigned_float_Conv(ir_node *node) } } +/** + * rewrite float->unsigned conversions. + * Sparc has no instruction for this so instead we do the following: + * + * if (x >= 2147483648.) { + * converted ^= (int)(x-2147483648.) ^ 0x80000000; + * } else { + * converted = (int)x; + * } + * return (unsigned)converted; + */ +static void rewrite_float_unsigned_Conv(ir_node *node) +{ + ir_graph *irg = get_irn_irg(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *lower_block = get_nodes_block(node); + + part_block(node); + + { + ir_node *block = get_nodes_block(node); + ir_node *float_x = get_Conv_op(node); + ir_mode *mode_u = get_irn_mode(node); + ir_mode *mode_s = find_signed_mode(mode_u); + ir_mode *mode_f = get_irn_mode(float_x); + ir_tarval *limit = new_tarval_from_double(2147483648., mode_f); + ir_node *limitc = new_r_Const(irg, limit); + ir_node *cmp = new_rd_Cmp(dbgi, block, float_x, limitc, + ir_relation_greater_equal); + ir_node *cond = new_rd_Cond(dbgi, block, cmp); + ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true); + ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false); + ir_node *in_true[1] = { proj_true }; + ir_node *in_false[1] = { proj_false }; + ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true); + ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false); + ir_node *true_jmp = new_r_Jmp(true_block); + ir_node *false_jmp = new_r_Jmp(false_block); + + ir_tarval *correction = new_tarval_from_long(0x80000000l, mode_s); + ir_node *c_const = new_r_Const(irg, correction); + ir_node *sub = new_rd_Sub(dbgi, true_block, float_x, limitc, + mode_f); + ir_node *sub_conv = new_rd_Conv(dbgi, true_block, sub, mode_s); + ir_node *xor = new_rd_Eor(dbgi, true_block, sub_conv, c_const, + mode_s); + + ir_node *converted = new_rd_Conv(dbgi, false_block, float_x,mode_s); + + ir_node *lower_in[2] = { true_jmp, false_jmp }; + ir_node *phi_in[2] = { xor, converted }; + ir_node *phi; + ir_node *res_conv; + + set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in); + phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_s); + assert(get_Block_phis(lower_block) == NULL); + set_Block_phis(lower_block, phi); + set_Phi_next(phi, NULL); + + res_conv = new_rd_Conv(dbgi, lower_block, phi, mode_u); + exchange(node, res_conv); + } +} + static int sparc_rewrite_Conv(ir_node *node, void *ctx) { - (void) ctx; ir_mode *to_mode = get_irn_mode(node); ir_node *op = 
get_Conv_op(node); ir_mode *from_mode = get_irn_mode(op); + (void) ctx; if (mode_is_float(to_mode) && mode_is_int(from_mode) - && get_mode_size_bits(from_mode) == 32 - && !mode_is_signed(from_mode)) { + && get_mode_size_bits(from_mode) == 32 + && !mode_is_signed(from_mode)) { rewrite_unsigned_float_Conv(node); return 1; } + if (mode_is_float(from_mode) && mode_is_int(to_mode) + && get_mode_size_bits(to_mode) == 32 + && !mode_is_signed(to_mode)) { + rewrite_float_unsigned_Conv(node); + return 1; + } return 0; } @@ -423,7 +372,6 @@ static void sparc_handle_intrinsics(void) rt_iMod.mem_proj_nr = pn_Mod_M; rt_iMod.regular_proj_nr = pn_Mod_X_regular; rt_iMod.exc_proj_nr = pn_Mod_X_except; - rt_iMod.exc_mem_proj_nr = pn_Mod_M; rt_iMod.res_proj_nr = pn_Mod_res; set_entity_visibility(rt_iMod.ent, ir_visibility_external); @@ -449,7 +397,6 @@ static void sparc_handle_intrinsics(void) rt_uMod.mem_proj_nr = pn_Mod_M; rt_uMod.regular_proj_nr = pn_Mod_X_regular; rt_uMod.exc_proj_nr = pn_Mod_X_except; - rt_uMod.exc_mem_proj_nr = pn_Mod_M; rt_uMod.res_proj_nr = pn_Mod_res; set_entity_visibility(rt_uMod.ent, ir_visibility_external); @@ -464,27 +411,30 @@ static void sparc_handle_intrinsics(void) lower_intrinsics(records, n_records, /*part_block_used=*/ true); } -/** - * Initializes the backend ISA - */ -static arch_env_t *sparc_init(FILE *outfile) +static void sparc_init(void) { - static int run_once = 0; - sparc_isa_t *isa; + sparc_register_init(); + sparc_create_opcodes(&sparc_irn_ops); + sparc_cconv_init(); +} - if (run_once) - return NULL; - run_once = 1; +static void sparc_finish(void) +{ + sparc_free_opcodes(); +} - isa = XMALLOC(sparc_isa_t); - memcpy(isa, &sparc_isa_template, sizeof(*isa)); +static arch_env_t *sparc_begin_codegeneration(const be_main_env_t *env) +{ + sparc_isa_t *isa = XMALLOC(sparc_isa_t); + *isa = sparc_isa_template; isa->constants = pmap_create(); - be_emit_init(outfile); + be_gas_elf_type_char = '#'; + be_gas_object_file_format = OBJECT_FILE_FORMAT_ELF; + be_gas_elf_variant = ELF_VARIANT_SPARC; - sparc_register_init(); - sparc_create_opcodes(&sparc_irn_ops); - sparc_handle_intrinsics(); + be_emit_init(env->file_handle); + be_gas_begin_compilation_unit(env); return &isa->base; } @@ -492,67 +442,52 @@ static arch_env_t *sparc_init(FILE *outfile) /** * Closes the output file and frees the ISA structure. */ -static void sparc_done(void *self) +static void sparc_end_codegeneration(void *self) { - sparc_isa_t *isa = self; + sparc_isa_t *isa = (sparc_isa_t*)self; /* emit now all global declarations */ - be_gas_emit_decls(isa->base.main_env); + be_gas_end_compilation_unit(isa->base.main_env); pmap_destroy(isa->constants); be_emit_exit(); free(isa); } -static unsigned sparc_get_n_reg_class(void) -{ - return N_CLASSES; -} - -static const arch_register_class_t *sparc_get_reg_class(unsigned i) +static void sparc_lower_for_target(void) { - assert(i < N_CLASSES); - return &sparc_reg_classes[i]; -} + size_t i, n_irgs = get_irp_n_irgs(); + lower_calls_with_compounds(LF_RETURN_HIDDEN); + for (i = 0; i < n_irgs; ++i) { + ir_graph *irg = get_irp_irg(i); + /* Turn all small CopyBs into loads/stores and all bigger CopyBs into + * memcpy calls. */ + lower_CopyB(irg, 31, 32, false); + } -/** - * Get the register class which shall be used to store a value of a given mode. - * @param self The this pointer. - * @param mode The mode in question. - * @return A register class which can hold values of the given mode. 
- */ -static const arch_register_class_t *sparc_get_reg_class_for_mode(const ir_mode *mode) -{ - if (mode_is_float(mode)) - return &sparc_reg_classes[CLASS_sparc_fp]; - else - return &sparc_reg_classes[CLASS_sparc_gp]; -} + if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT) + lower_floating_point(); -/** - * Returns the necessary byte alignment for storing a register of given class. - */ -static int sparc_get_reg_class_alignment(const arch_register_class_t *cls) -{ - ir_mode *mode = arch_register_class_mode(cls); - return get_mode_size_bytes(mode); -} + lower_builtins(0, NULL); -static void sparc_lower_for_target(void) -{ - int i; - int n_irgs = get_irp_n_irgs(); - - /* TODO, doubleword lowering and others */ + sparc_lower_64bit(); for (i = 0; i < n_irgs; ++i) { ir_graph *irg = get_irp_irg(i); - lower_switch(irg, 256, false); + ir_lower_mode_b(irg, mode_Iu); + lower_switch(irg, 4, 256, false); + lower_alloc(irg, SPARC_STACK_ALIGNMENT, false, SPARC_MIN_STACKSIZE); } } +static int sparc_is_mux_allowed(ir_node *sel, ir_node *mux_false, + ir_node *mux_true) +{ + return ir_is_optimizable_mux(sel, mux_false, mux_true); +} + /** * Returns the libFirm configuration parameter for this backend. */ @@ -571,24 +506,43 @@ static const backend_params *sparc_get_backend_params(void) 0, /* no inline assembly */ 0, /* no support for RotL nodes */ 1, /* big endian */ - sparc_lower_for_target, /* lowering callback */ + 1, /* modulo shift efficient */ + 0, /* non-modulo shift not efficient */ &arch_dep, /* will be set later */ - NULL, /* parameter for if conversion */ + sparc_is_mux_allowed, /* parameter for if conversion */ + 32, /* machine size */ NULL, /* float arithmetic mode */ + NULL, /* long long type */ + NULL, /* usigned long long type */ + NULL, /* long double type */ 0, /* no trampoline support: size 0 */ 0, /* no trampoline support: align 0 */ NULL, /* no trampoline support: no trampoline builder */ 4 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */ }; - return &p; -} -static ir_graph **sparc_get_backend_irg_list(const void *self, - ir_graph ***irgs) -{ - (void) self; - (void) irgs; - return NULL; + ir_mode *mode_long_long + = new_int_mode("long long", irma_twos_complement, 64, 1, 64); + ir_type *type_long_long = new_type_primitive(mode_long_long); + ir_mode *mode_unsigned_long_long + = new_int_mode("unsigned long long", irma_twos_complement, 64, 0, 64); + ir_type *type_unsigned_long_long + = new_type_primitive(mode_unsigned_long_long); + + p.type_long_long = type_long_long; + p.type_unsigned_long_long = type_unsigned_long_long; + + if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT) { + p.mode_float_arithmetic = NULL; + p.type_long_double = NULL; + } else { + ir_type *type_long_double = new_type_primitive(mode_Q); + + set_type_alignment_bytes(type_long_double, 8); + set_type_size_bytes(type_long_double, 16); + p.type_long_double = type_long_double; + } + return &p; } static asm_constraint_flags_t sparc_parse_asm_constraint(const char **c) @@ -603,34 +557,98 @@ static int sparc_is_valid_clobber(const char *clobber) return 0; } +/* fpu set architectures. 
*/ +static const lc_opt_enum_int_items_t sparc_fpu_items[] = { + { "fpu", SPARC_FPU_ARCH_FPU }, + { "softfloat", SPARC_FPU_ARCH_SOFTFLOAT }, + { NULL, 0 } +}; + +static lc_opt_enum_int_var_t arch_fpu_var = { + &sparc_isa_template.fpu_arch, sparc_fpu_items +}; + +static const lc_opt_table_entry_t sparc_options[] = { + LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var), + LC_OPT_LAST +}; + +static ir_node *sparc_new_spill(ir_node *value, ir_node *after) +{ + ir_node *block = get_block(after); + ir_graph *irg = get_irn_irg(value); + ir_node *frame = get_irg_frame(irg); + ir_node *mem = get_irg_no_mem(irg); + ir_mode *mode = get_irn_mode(value); + ir_node *store; + + if (mode_is_float(mode)) { + store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true); + } else { + store = new_bd_sparc_St_imm(NULL, block, value, frame, mem, mode, NULL, + 0, true); + } + sched_add_after(after, store); + return store; +} + +static ir_node *sparc_new_reload(ir_node *value, ir_node *spill, + ir_node *before) +{ + ir_node *block = get_block(before); + ir_graph *irg = get_irn_irg(value); + ir_node *frame = get_irg_frame(irg); + ir_mode *mode = get_irn_mode(value); + ir_node *load; + ir_node *res; + + if (mode_is_float(mode)) { + load = create_ldf(NULL, block, frame, spill, mode, NULL, 0, true); + } else { + load = new_bd_sparc_Ld_imm(NULL, block, frame, spill, mode, NULL, 0, + true); + } + sched_add_before(before, load); + assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res); + res = new_r_Proj(load, mode, pn_sparc_Ld_res); + + return res; +} + const arch_isa_if_t sparc_isa_if = { sparc_init, - sparc_done, - NULL, /* handle intrinsics */ - sparc_get_n_reg_class, - sparc_get_reg_class, - sparc_get_reg_class_for_mode, - NULL, - sparc_get_reg_class_alignment, + sparc_finish, sparc_get_backend_params, - sparc_get_backend_irg_list, - NULL, /* mark remat */ + sparc_lower_for_target, sparc_parse_asm_constraint, sparc_is_valid_clobber, + sparc_begin_codegeneration, + sparc_end_codegeneration, sparc_init_graph, - NULL, /* get_pic_base */ - NULL, /* before_abi */ + NULL, /* get call abi */ + NULL, /* mark remat */ + NULL, /* get_pic_base */ + sparc_new_spill, + sparc_new_reload, + NULL, /* register_saved_by */ + + sparc_handle_intrinsics, + NULL, /* before_abi */ sparc_prepare_graph, sparc_before_ra, - sparc_after_ra, - NULL, /* finish */ + sparc_finish_graph, sparc_emit_routine, }; -BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc); +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc) void be_init_arch_sparc(void) { + lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be"); + lc_opt_entry_t *sparc_grp = lc_opt_get_grp(be_grp, "sparc"); + + lc_opt_add_table(sparc_grp, sparc_options); + be_register_isa_if("sparc", &sparc_isa_if); FIRM_DBG_REGISTER(dbg, "firm.be.sparc.cg"); sparc_init_transform();
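
Note (not part of the patch): the two Conv rewrites added above are the usual SPARC workarounds for 32-bit unsigned<->float conversion, built here as Firm graph transformations. As a plain-C sketch of what the generated graphs compute — the helper names below are illustrative only and do not appear anywhere in the patch — the scheme follows the if/else given in the rewrite_float_unsigned_Conv comment and the +2^32 correction constructed by rewrite_unsigned_float_Conv:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* What rewrite_unsigned_float_Conv builds as a graph: convert as signed,
 * then add 2^32 back if the sign bit was set. */
static double u32_to_double(uint32_t u)
{
	int32_t s   = (int32_t)u;   /* the conversion itself is done signed */
	double  res = (double)s;
	if (s < 0)                  /* sign bit set: u was >= 2^31 */
		res += 4294967296.;     /* correct by 2^32 */
	return res;
}

/* What rewrite_float_unsigned_Conv builds: values >= 2^31 are shifted into
 * signed range before the float->int conversion, then the sign bit is
 * patched back in with an xor. */
static uint32_t double_to_u32(double d)
{
	if (d >= 2147483648.)
		return (uint32_t)(int32_t)(d - 2147483648.) ^ 0x80000000u;
	return (uint32_t)(int32_t)d;
}

int main(void)
{
	printf("%f\n", u32_to_double(0xCAFEBABEu));            /* 3405691582.000000 */
	printf("%" PRIu32 "\n", double_to_u32(3405691582.0));  /* 3405691582 */
	return 0;
}

The constants 2147483648. and 4294967296. are 2^31 and 2^32; the xor restores the sign bit that the preceding subtraction removed.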