X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fsparc%2Fbearch_sparc.c;h=9224f7242680b8291ddee8bad42bf3d8869ad644;hb=f8cc15664f571aa7ef89d6f6bc8d5bd2b8ca7d53;hp=56e4a7d60cc948f09e141fab5548e52510bd308f;hpb=a24d3e41bc7fe1fe1bb42e9982f0021f23ab8fb8;p=libfirm diff --git a/ir/be/sparc/bearch_sparc.c b/ir/be/sparc/bearch_sparc.c index 56e4a7d60..9224f7242 100644 --- a/ir/be/sparc/bearch_sparc.c +++ b/ir/be/sparc/bearch_sparc.c @@ -20,7 +20,7 @@ /** * @file * @brief The main sparc backend driver file. - * @version $Id$ + * @author Hannes Rapp, Matthias Braun */ #include "config.h" @@ -36,26 +36,33 @@ #include "iroptimize.h" #include "irtools.h" #include "irdump.h" +#include "iropt_t.h" #include "lowering.h" +#include "lower_dw.h" +#include "lower_alloc.h" +#include "lower_builtins.h" +#include "lower_calls.h" +#include "lower_mode_b.h" +#include "lower_softfloat.h" #include "bitset.h" #include "debug.h" #include "array_t.h" #include "error.h" +#include "util.h" -#include "../bearch.h" -#include "../benode.h" -#include "../belower.h" -#include "../besched.h" +#include "bearch.h" +#include "benode.h" +#include "belower.h" +#include "besched.h" #include "be.h" -#include "../bemachine.h" -#include "../beilpsched.h" -#include "../bemodule.h" -#include "../beirg.h" -#include "../bespillslots.h" -#include "../begnuas.h" -#include "../belistsched.h" -#include "../beflags.h" +#include "bemachine.h" +#include "bemodule.h" +#include "beirg.h" +#include "begnuas.h" +#include "belistsched.h" +#include "beflags.h" +#include "beutil.h" #include "bearch_sparc_t.h" @@ -63,13 +70,14 @@ #include "gen_sparc_regalloc_if.h" #include "sparc_transform.h" #include "sparc_emitter.h" +#include "sparc_cconv.h" DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) static arch_irn_class_t sparc_classify(const ir_node *node) { (void) node; - return 0; + return arch_irn_class_none; } static ir_entity *sparc_get_frame_entity(const ir_node *node) @@ -107,20 +115,20 @@ static void sparc_set_frame_offset(ir_node *node, int offset) static int sparc_get_sp_bias(const ir_node *node) { if (is_sparc_Save(node)) { - const sparc_save_attr_t *attr = get_sparc_save_attr_const(node); - /* Note we do not retport the change of the SPARC_MIN_STACKSIZE - * size, since we have additional magic in the emitter which - * calculates that! 
*/ - assert(attr->initial_stacksize >= SPARC_MIN_STACKSIZE); - return attr->initial_stacksize - SPARC_MIN_STACKSIZE; + const sparc_attr_t *attr = get_sparc_attr_const(node); + if (get_irn_arity(node) == 3) + panic("no support for _reg variant yet"); + + return -attr->immediate_value; + } else if (is_sparc_RestoreZero(node)) { + return SP_BIAS_RESET; } return 0; } /* fill register allocator interface */ -static const arch_irn_ops_t sparc_irn_ops = { - get_sparc_in_req, +const arch_irn_ops_t sparc_irn_ops = { sparc_classify, sparc_get_frame_entity, sparc_set_frame_offset, @@ -131,248 +139,144 @@ static const arch_irn_ops_t sparc_irn_ops = { NULL, /* perform_memory_operand */ }; - - /** * Transforms the standard firm graph into * a SPARC firm graph */ -static void sparc_prepare_graph(void *self) +static void sparc_prepare_graph(ir_graph *irg) { - sparc_code_gen_t *cg = self; - - /* transform FIRM into SPARC asm nodes */ - sparc_transform_graph(cg); - - if (cg->dump) - dump_ir_graph(cg->irg, "transformed"); + sparc_transform_graph(irg); } static bool sparc_modifies_flags(const ir_node *node) { - return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_flags; + return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_flags; } static bool sparc_modifies_fp_flags(const ir_node *node) { - return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_fp_flags; + return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_fp_flags; } -static void sparc_before_ra(void *self) +static void sparc_before_ra(ir_graph *irg) { - sparc_code_gen_t *cg = self; /* fixup flags register */ - be_sched_fix_flags(cg->irg, &sparc_reg_classes[CLASS_sparc_flags_class], + be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_flags_class], NULL, sparc_modifies_flags); - be_sched_fix_flags(cg->irg, &sparc_reg_classes[CLASS_sparc_fpflags_class], + be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_fpflags_class], NULL, sparc_modifies_fp_flags); } -/** - * transform reload node => load - */ -static void transform_Reload(ir_node *node) +static void sparc_init_graph(ir_graph *irg) { - ir_node *block = get_nodes_block(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *ptr = get_irn_n(node, be_pos_Spill_frame); - ir_node *mem = get_irn_n(node, be_pos_Reload_mem); - ir_mode *mode = get_irn_mode(node); - ir_entity *entity = be_get_frame_entity(node); - const arch_register_t *reg; - ir_node *proj; - ir_node *load; - - ir_node *sched_point = sched_prev(node); - - load = new_bd_sparc_Ld_imm(dbgi, block, ptr, mem, mode, entity, 0, true); - sched_add_after(sched_point, load); - sched_remove(node); - - proj = new_rd_Proj(dbgi, load, mode, pn_sparc_Ld_res); - - reg = arch_get_irn_register(node); - arch_set_irn_register(proj, reg); - - exchange(node, proj); + (void) irg; } -/** - * transform spill node => store - */ -static void transform_Spill(ir_node *node) -{ - ir_node *block = get_nodes_block(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *ptr = get_irn_n(node, be_pos_Spill_frame); - ir_node *mem = new_NoMem(); - ir_node *val = get_irn_n(node, be_pos_Spill_val); - ir_mode *mode = get_irn_mode(val); - ir_entity *entity = be_get_frame_entity(node); - ir_node *sched_point; - ir_node *store; - - sched_point = sched_prev(node); - store = new_bd_sparc_St_imm(dbgi, block, val, ptr, mem, mode, entity, 0, true); - sched_remove(node); - sched_add_after(sched_point, store); - - exchange(node, store); -} +extern const arch_isa_if_t sparc_isa_if; +static sparc_isa_t sparc_isa_template = { + { + 
&sparc_isa_if, /* isa interface implementation */ + N_SPARC_REGISTERS, + sparc_registers, + N_SPARC_CLASSES, + sparc_reg_classes, + &sparc_registers[REG_SP], /* stack pointer register */ + &sparc_registers[REG_FRAME_POINTER],/* base pointer register */ + &sparc_reg_classes[CLASS_sparc_gp], /* link pointer register class */ + 3, /* power of two stack alignment + for calls */ + NULL, /* main environment */ + 7, /* costs for a spill instruction */ + 5, /* costs for a reload instruction */ + true, /* custom abi handling */ + }, + NULL, /* constants */ + SPARC_FPU_ARCH_FPU, /* FPU architecture */ +}; /** - * walker to transform be_Spill and be_Reload nodes + * rewrite unsigned->float conversion. + * Sparc has no instruction for this so instead we do the following: + * + * int signed_x = unsigned_value_x; + * double res = signed_x; + * if (signed_x < 0) + * res += 4294967296. ; + * return (float) res; */ -static void sparc_after_ra_walker(ir_node *block, void *data) +static void rewrite_unsigned_float_Conv(ir_node *node) { - ir_node *node, *prev; - (void) data; + ir_graph *irg = get_irn_irg(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *lower_block = get_nodes_block(node); - for (node = sched_last(block); !sched_is_begin(node); node = prev) { - prev = sched_prev(node); + part_block(node); - if (be_is_Reload(node)) { - transform_Reload(node); - } else if (be_is_Spill(node)) { - transform_Spill(node); - } + { + ir_node *block = get_nodes_block(node); + ir_node *unsigned_x = get_Conv_op(node); + ir_mode *mode_u = get_irn_mode(unsigned_x); + ir_mode *mode_s = find_signed_mode(mode_u); + ir_mode *mode_d = mode_D; + ir_node *signed_x = new_rd_Conv(dbgi, block, unsigned_x, mode_s); + ir_node *res = new_rd_Conv(dbgi, block, signed_x, mode_d); + ir_node *zero = new_r_Const(irg, get_mode_null(mode_s)); + ir_node *cmp = new_rd_Cmp(dbgi, block, signed_x, zero, + ir_relation_less); + ir_node *cond = new_rd_Cond(dbgi, block, cmp); + ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true); + ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false); + ir_node *in_true[1] = { proj_true }; + ir_node *in_false[1] = { proj_false }; + ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true); + ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false); + ir_node *true_jmp = new_r_Jmp(true_block); + ir_node *false_jmp = new_r_Jmp(false_block); + ir_tarval *correction = new_tarval_from_double(4294967296., mode_d); + ir_node *c_const = new_r_Const(irg, correction); + ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const, + mode_d); + + ir_node *lower_in[2] = { true_jmp, false_jmp }; + ir_node *phi_in[2] = { fadd, res }; + ir_mode *dest_mode = get_irn_mode(node); + ir_node *phi; + ir_node *res_conv; + + set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in); + phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_d); + assert(get_Block_phis(lower_block) == NULL); + set_Block_phis(lower_block, phi); + set_Phi_next(phi, NULL); + + res_conv = new_rd_Conv(dbgi, lower_block, phi, dest_mode); + + exchange(node, res_conv); } } -static void sparc_collect_frame_entity_nodes(ir_node *node, void *data) +static int sparc_rewrite_Conv(ir_node *node, void *ctx) { - be_fec_env_t *env = data; - const ir_mode *mode; - int align; - ir_entity *entity; - const sparc_load_store_attr_t *attr; - - if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) { - mode = get_irn_mode(node); - align = get_mode_size_bytes(mode); - be_node_needs_frame_entity(env, node, mode, align); - 
return; - } - - if (!is_sparc_Ld(node) && !is_sparc_Ldf(node)) - return; - - attr = get_sparc_load_store_attr_const(node); - entity = attr->base.immediate_value_entity; - mode = attr->load_store_mode; - if (entity != NULL) - return; - if (!attr->is_frame_entity) - return; - if (arch_irn_get_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot) - mode = mode_Lu; - align = get_mode_size_bytes(mode); - be_node_needs_frame_entity(env, node, mode, align); -} + ir_mode *to_mode = get_irn_mode(node); + ir_node *op = get_Conv_op(node); + ir_mode *from_mode = get_irn_mode(op); + (void) ctx; -static void sparc_set_frame_entity(ir_node *node, ir_entity *entity) -{ - if (is_be_node(node)) { - be_node_set_frame_entity(node, entity); - } else { - /* we only say be_node_needs_frame_entity on nodes with load_store - * attributes, so this should be fine */ - sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node); - assert(attr->is_frame_entity); - assert(attr->base.immediate_value_entity == NULL); - attr->base.immediate_value_entity = entity; + if (mode_is_float(to_mode) && mode_is_int(from_mode) + && get_mode_size_bits(from_mode) == 32 + && !mode_is_signed(from_mode)) { + rewrite_unsigned_float_Conv(node); + return 1; } -} - - -static void sparc_after_ra(void *self) -{ - sparc_code_gen_t *cg = self; - ir_graph *irg = cg->irg; - be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg); - - irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env); - be_assign_entities(fec_env, sparc_set_frame_entity); - be_free_frame_entity_coalescer(fec_env); - - irg_block_walk_graph(cg->irg, NULL, sparc_after_ra_walker, NULL); -} - - - -/** - * Emits the code, closes the output file and frees - * the code generator interface. - */ -static void sparc_emit_and_done(void *self) -{ - sparc_code_gen_t *cg = self; - ir_graph *irg = cg->irg; - - sparc_gen_routine(cg, irg); - /* de-allocate code generator */ - free(cg); -} - -static void *sparc_cg_init(ir_graph *irg); - -static const arch_code_generator_if_t sparc_code_gen_if = { - sparc_cg_init, - NULL, /* get_pic_base hook */ - NULL, /* before abi introduce hook */ - sparc_prepare_graph, - NULL, /* spill hook */ - sparc_before_ra, /* before register allocation hook */ - sparc_after_ra, /* after register allocation hook */ - NULL, - sparc_emit_and_done -}; - -/** - * Initializes the code generator. 
- */ -static void *sparc_cg_init(ir_graph *irg) -{ - sparc_isa_t *isa = (sparc_isa_t *) be_get_irg_arch_env(irg); - sparc_code_gen_t *cg = XMALLOCZ(sparc_code_gen_t); - - cg->impl = &sparc_code_gen_if; - cg->irg = irg; - cg->isa = isa; - cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) != 0; - cg->constants = pmap_create(); - - /* enter the current code generator */ - isa->cg = cg; - - return (arch_code_generator_t*) cg; + return 0; } -const arch_isa_if_t sparc_isa_if; -static sparc_isa_t sparc_isa_template = { - { - &sparc_isa_if, /* isa interface implementation */ - &sparc_gp_regs[REG_SP], /* stack pointer register */ - &sparc_gp_regs[REG_FRAME_POINTER], /* base pointer register */ - &sparc_reg_classes[CLASS_sparc_gp], /* link pointer register class */ - -1, /* stack direction */ - 3, /* power of two stack alignment - for calls */ - NULL, /* main environment */ - 7, /* costs for a spill instruction */ - 5, /* costs for a reload instruction */ - true, /* custom abi handling */ - }, - NULL /* current code generator */ -}; - - static void sparc_handle_intrinsics(void) { ir_type *tp, *int_tp, *uint_tp; i_record records[8]; - int n_records = 0; + size_t n_records = 0; runtime_rt rt_iMod, rt_uMod; @@ -381,7 +285,14 @@ static void sparc_handle_intrinsics(void) int_tp = new_type_primitive(mode_Is); uint_tp = new_type_primitive(mode_Iu); + /* we need to rewrite some forms of int->float conversions */ + { + i_instr_record *map_Conv = &records[n_records++].i_instr; + map_Conv->kind = INTRINSIC_INSTR; + map_Conv->op = op_Conv; + map_Conv->i_mapper = sparc_rewrite_Conv; + } /* SPARC has no signed mod instruction ... */ { i_instr_record *map_Mod = &records[n_records++].i_instr; @@ -398,7 +309,6 @@ static void sparc_handle_intrinsics(void) rt_iMod.mem_proj_nr = pn_Mod_M; rt_iMod.regular_proj_nr = pn_Mod_X_regular; rt_iMod.exc_proj_nr = pn_Mod_X_except; - rt_iMod.exc_mem_proj_nr = pn_Mod_M; rt_iMod.res_proj_nr = pn_Mod_res; set_entity_visibility(rt_iMod.ent, ir_visibility_external); @@ -424,7 +334,6 @@ static void sparc_handle_intrinsics(void) rt_uMod.mem_proj_nr = pn_Mod_M; rt_uMod.regular_proj_nr = pn_Mod_X_regular; rt_uMod.exc_proj_nr = pn_Mod_X_except; - rt_uMod.exc_mem_proj_nr = pn_Mod_M; rt_uMod.res_proj_nr = pn_Mod_res; set_entity_visibility(rt_uMod.ent, ir_visibility_external); @@ -435,65 +344,50 @@ static void sparc_handle_intrinsics(void) map_Mod->ctx = &rt_uMod; } - if (n_records > 0) - lower_intrinsics(records, n_records, /*part_block_used=*/0); + assert(n_records < ARRAY_SIZE(records)); + lower_intrinsics(records, n_records, /*part_block_used=*/ true); } - /** * Initializes the backend ISA */ -static arch_env_t *sparc_init(FILE *outfile) +static arch_env_t *sparc_init(const be_main_env_t *env) { - static int run_once = 0; - sparc_isa_t *isa; - - if (run_once) - return NULL; - run_once = 1; - - isa = XMALLOC(sparc_isa_t); - memcpy(isa, &sparc_isa_template, sizeof(*isa)); + sparc_isa_t *isa = XMALLOC(sparc_isa_t); + *isa = sparc_isa_template; + isa->constants = pmap_create(); - be_emit_init(outfile); + be_gas_elf_type_char = '#'; + be_gas_object_file_format = OBJECT_FILE_FORMAT_ELF; + be_gas_elf_variant = ELF_VARIANT_SPARC; sparc_register_init(); sparc_create_opcodes(&sparc_irn_ops); sparc_handle_intrinsics(); + sparc_cconv_init(); + + be_emit_init(env->file_handle); + be_gas_begin_compilation_unit(env); return &isa->base; } - - /** * Closes the output file and frees the ISA structure. 
*/ static void sparc_done(void *self) { - sparc_isa_t *isa = self; + sparc_isa_t *isa = (sparc_isa_t*)self; /* emit now all global declarations */ - be_gas_emit_decls(isa->base.main_env); + be_gas_end_compilation_unit(isa->base.main_env); + pmap_destroy(isa->constants); be_emit_exit(); - free(self); + free(isa); } -static unsigned sparc_get_n_reg_class(void) -{ - return N_CLASSES; -} - -static const arch_register_class_t *sparc_get_reg_class(unsigned i) -{ - assert(i < N_CLASSES); - return &sparc_reg_classes[i]; -} - - - /** * Get the register class which shall be used to store a value of a given mode. * @param self The this pointer. @@ -508,61 +402,47 @@ static const arch_register_class_t *sparc_get_reg_class_for_mode(const ir_mode * return &sparc_reg_classes[CLASS_sparc_gp]; } -static int sparc_to_appear_in_schedule(void *block_env, const ir_node *irn) -{ - (void) block_env; - - if (!is_sparc_irn(irn)) - return -1; - - return 1; -} - /** - * Initializes the code generator interface. + * Returns the necessary byte alignment for storing a register of given class. */ -static const arch_code_generator_if_t *sparc_get_code_generator_if( - void *self) +static int sparc_get_reg_class_alignment(const arch_register_class_t *cls) { - (void) self; - return &sparc_code_gen_if; + ir_mode *mode = arch_register_class_mode(cls); + return get_mode_size_bytes(mode); } -list_sched_selector_t sparc_sched_selector; - -/** - * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded - */ -static const list_sched_selector_t *sparc_get_list_sched_selector( - const void *self, list_sched_selector_t *selector) +static void sparc_lower_for_target(void) { - (void) self; - (void) selector; + size_t i, n_irgs = get_irp_n_irgs(); - sparc_sched_selector = trivial_selector; - sparc_sched_selector.to_appear_in_schedule = sparc_to_appear_in_schedule; - return &sparc_sched_selector; -} + lower_calls_with_compounds(LF_RETURN_HIDDEN); -static const ilp_sched_selector_t *sparc_get_ilp_sched_selector( - const void *self) -{ - (void) self; - return NULL; -} + for (i = 0; i < n_irgs; ++i) { + ir_graph *irg = get_irp_irg(i); + /* Turn all small CopyBs into loads/stores and all bigger CopyBs into + * memcpy calls. */ + lower_CopyB(irg, 31, 32, false); + } -/** - * Returns the necessary byte alignment for storing a register of given class. 
- */ -static int sparc_get_reg_class_alignment(const arch_register_class_t *cls) -{ - ir_mode *mode = arch_register_class_mode(cls); - return get_mode_size_bytes(mode); + if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT) + lower_floating_point(); + + lower_builtins(0, NULL); + + sparc_lower_64bit(); + + for (i = 0; i < n_irgs; ++i) { + ir_graph *irg = get_irp_irg(i); + ir_lower_mode_b(irg, mode_Iu); + lower_switch(irg, 4, 256, false); + lower_alloc(irg, SPARC_STACK_ALIGNMENT, false, SPARC_MIN_STACKSIZE); + } } -static void sparc_lower_for_target(void) +static int sparc_is_mux_allowed(ir_node *sel, ir_node *mux_false, + ir_node *mux_true) { - /* TODO, doubleword lowering and others */ + return ir_is_optimizable_mux(sel, mux_false, mux_true); } /** @@ -570,34 +450,56 @@ static void sparc_lower_for_target(void) */ static const backend_params *sparc_get_backend_params(void) { + static const ir_settings_arch_dep_t arch_dep = { + 1, /* also_use_subs */ + 1, /* maximum_shifts */ + 31, /* highest_shift_amount */ + NULL, /* evaluate_cost_func */ + 1, /* allow mulhs */ + 1, /* allow mulhu */ + 32, /* max_bits_for_mulh */ + }; static backend_params p = { 0, /* no inline assembly */ 0, /* no support for RotL nodes */ - sparc_lower_for_target, /* lowering callback */ - NULL, /* will be set later */ - NULL, /* parameter for if conversion */ + 1, /* big endian */ + 1, /* modulo shift efficient */ + 0, /* non-modulo shift not efficient */ + &arch_dep, /* will be set later */ + sparc_is_mux_allowed, /* parameter for if conversion */ + 32, /* machine size */ NULL, /* float arithmetic mode */ + NULL, /* long long type */ + NULL, /* usigned long long type */ + NULL, /* long double type */ 0, /* no trampoline support: size 0 */ 0, /* no trampoline support: align 0 */ NULL, /* no trampoline support: no trampoline builder */ 4 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */ }; - return &p; -} -static const be_execution_unit_t ***sparc_get_allowed_execution_units( - const ir_node *irn) -{ - (void) irn; - /* TODO */ - panic("sparc_get_allowed_execution_units not implemented yet"); -} + ir_mode *mode_long_long + = new_int_mode("long long", irma_twos_complement, 64, 1, 64); + ir_type *type_long_long = new_type_primitive(mode_long_long); + ir_mode *mode_unsigned_long_long + = new_int_mode("unsigned long long", irma_twos_complement, 64, 0, 64); + ir_type *type_unsigned_long_long + = new_type_primitive(mode_unsigned_long_long); -static const be_machine_t *sparc_get_machine(const void *self) -{ - (void) self; - /* TODO */ - panic("sparc_get_machine not implemented yet"); + p.type_long_long = type_long_long; + p.type_unsigned_long_long = type_unsigned_long_long; + + if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT) { + p.mode_float_arithmetic = NULL; + p.type_long_double = NULL; + } else { + ir_type *type_long_double = new_type_primitive(mode_Q); + + set_type_alignment_bytes(type_long_double, 8); + set_type_size_bytes(type_long_double, 16); + p.type_long_double = type_long_double; + } + return &p; } static ir_graph **sparc_get_backend_irg_list(const void *self, @@ -620,30 +522,98 @@ static int sparc_is_valid_clobber(const char *clobber) return 0; } +/* fpu set architectures. 
*/ +static const lc_opt_enum_int_items_t sparc_fpu_items[] = { + { "fpu", SPARC_FPU_ARCH_FPU }, + { "softfloat", SPARC_FPU_ARCH_SOFTFLOAT }, + { NULL, 0 } +}; + +static lc_opt_enum_int_var_t arch_fpu_var = { + &sparc_isa_template.fpu_arch, sparc_fpu_items +}; + +static const lc_opt_table_entry_t sparc_options[] = { + LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var), + LC_OPT_LAST +}; + +static ir_node *sparc_new_spill(ir_node *value, ir_node *after) +{ + ir_node *block = get_block(after); + ir_graph *irg = get_irn_irg(value); + ir_node *frame = get_irg_frame(irg); + ir_node *mem = get_irg_no_mem(irg); + ir_mode *mode = get_irn_mode(value); + ir_node *store; + + if (mode_is_float(mode)) { + store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true); + } else { + store = new_bd_sparc_St_imm(NULL, block, value, frame, mem, mode, NULL, + 0, true); + } + sched_add_after(after, store); + return store; +} + +static ir_node *sparc_new_reload(ir_node *value, ir_node *spill, + ir_node *before) +{ + ir_node *block = get_block(before); + ir_graph *irg = get_irn_irg(value); + ir_node *frame = get_irg_frame(irg); + ir_mode *mode = get_irn_mode(value); + ir_node *load; + ir_node *res; + + if (mode_is_float(mode)) { + load = create_ldf(NULL, block, frame, spill, mode, NULL, 0, true); + } else { + load = new_bd_sparc_Ld_imm(NULL, block, frame, spill, mode, NULL, 0, + true); + } + sched_add_before(before, load); + assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res); + res = new_r_Proj(load, mode, pn_sparc_Ld_res); + + return res; +} + const arch_isa_if_t sparc_isa_if = { sparc_init, + sparc_lower_for_target, sparc_done, NULL, /* handle intrinsics */ - sparc_get_n_reg_class, - sparc_get_reg_class, sparc_get_reg_class_for_mode, NULL, - sparc_get_code_generator_if, - sparc_get_list_sched_selector, - sparc_get_ilp_sched_selector, sparc_get_reg_class_alignment, sparc_get_backend_params, - sparc_get_allowed_execution_units, - sparc_get_machine, sparc_get_backend_irg_list, NULL, /* mark remat */ sparc_parse_asm_constraint, - sparc_is_valid_clobber + sparc_is_valid_clobber, + + sparc_init_graph, + NULL, /* get_pic_base */ + NULL, /* before_abi */ + sparc_prepare_graph, + sparc_before_ra, + sparc_finish, + sparc_emit_routine, + NULL, /* register_saved_by */ + sparc_new_spill, + sparc_new_reload }; -BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc); +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc) void be_init_arch_sparc(void) { + lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be"); + lc_opt_entry_t *sparc_grp = lc_opt_get_grp(be_grp, "sparc"); + + lc_opt_add_table(sparc_grp, sparc_options); + be_register_isa_if("sparc", &sparc_isa_if); FIRM_DBG_REGISTER(dbg, "firm.be.sparc.cg"); sparc_init_transform();
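
For reference, rewrite_unsigned_float_Conv() above implements at the graph level the scheme sketched in its own comment. A minimal C equivalent of what the rewritten graph computes might look like the following sketch (the function name uint32_to_float and the <stdint.h> types are illustrative only, not part of the patch):

#include <stdint.h>

/* Source-level sketch of the unsigned->float lowering: SPARC only provides a
 * signed int->float conversion, so the value is first converted as signed and
 * then corrected by 2^32 when the sign bit was set.  4294967296.0 (2^32) is
 * exactly representable as an IEEE double, so the correction is exact and the
 * only rounding happens in the final cast down to float. */
static float uint32_to_float(uint32_t u)
{
	int32_t signed_x = (int32_t)u;       /* reinterpret the bits as signed
	                                        (two's complement, as on SPARC) */
	double  res      = (double)signed_x; /* exact: 32 bits fit into a double */
	if (signed_x < 0)
		res += 4294967296.0;             /* undo the 2^32 wrap-around */
	return (float)res;                   /* single rounding step */
}

In the graph, part_block() splits the block at the Conv; the Cmp/Cond pair with its two Proj X outputs and the two freshly created blocks form the sign test, and the Phi in the lower block merges the corrected (fadd) and uncorrected (res) values before the final Conv to the destination mode.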