/*
- * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
- *
* This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
*/
/**
* @file
* @brief The main sparc backend driver file.
* @author Hannes Rapp, Matthias Braun
- * @version $Id$
*/
#include "config.h"
#include "iroptimize.h"
#include "irtools.h"
#include "irdump.h"
+#include "iropt_t.h"
#include "lowering.h"
#include "lower_dw.h"
+#include "lower_alloc.h"
+#include "lower_builtins.h"
+#include "lower_calls.h"
+#include "lower_mode_b.h"
+#include "lower_softfloat.h"
#include "bitset.h"
#include "debug.h"
#include "array_t.h"
#include "error.h"
#include "util.h"
-
-#include "../bearch.h"
-#include "../benode.h"
-#include "../belower.h"
-#include "../besched.h"
-#include "be.h"
-#include "../bemachine.h"
-#include "../bemodule.h"
-#include "../beirg.h"
-#include "../bespillslots.h"
-#include "../begnuas.h"
-#include "../belistsched.h"
-#include "../beflags.h"
+#include "be_t.h"
+#include "bearch.h"
+#include "benode.h"
+#include "belower.h"
+#include "besched.h"
+#include "bemodule.h"
+#include "begnuas.h"
+#include "belistsched.h"
+#include "beflags.h"
+#include "beutil.h"
#include "bearch_sparc_t.h"
#include "gen_sparc_regalloc_if.h"
#include "sparc_transform.h"
#include "sparc_emitter.h"
+#include "sparc_cconv.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static arch_irn_class_t sparc_classify(const ir_node *node)
-{
- (void) node;
- return arch_irn_class_none;
-}
-
static ir_entity *sparc_get_frame_entity(const ir_node *node)
{
if (is_sparc_FrameAddr(node)) {
if (get_irn_arity(node) == 3)
panic("no support for _reg variant yet");
- /* Note we do not report the change of the SPARC_MIN_STACKSIZE
- * size, since we have additional magic in the emitter which
- * calculates that! */
- assert(attr->immediate_value <= -SPARC_MIN_STACKSIZE);
- return attr->immediate_value + SPARC_MIN_STACKSIZE;
+ return -attr->immediate_value;
} else if (is_sparc_RestoreZero(node)) {
return SP_BIAS_RESET;
}
/* fill register allocator interface */
const arch_irn_ops_t sparc_irn_ops = {
- sparc_classify,
sparc_get_frame_entity,
sparc_set_frame_offset,
sparc_get_sp_bias,
- NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
NULL, /* perform_memory_operand */
+/* A node modifies the integer condition flags iff one of its outputs is
+ * required to live in the flags register class (replaces the old explicit
+ * sparc_arch_irn_flag_modifies_flags node flag). */
static bool sparc_modifies_flags(const ir_node *node)
{
- return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_flags;
+ be_foreach_out(node, o) {
+ const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
+ if (req->cls == &sparc_reg_classes[CLASS_sparc_flags_class])
+ return true;
+ }
+ return false;
}
+/* Same as sparc_modifies_flags() but for the floating point condition
+ * flags: true iff some output lives in the fpflags register class. */
static bool sparc_modifies_fp_flags(const ir_node *node)
{
- return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_fp_flags;
+ be_foreach_out(node, o) {
+ const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
+ if (req->cls == &sparc_reg_classes[CLASS_sparc_fpflags_class])
+ return true;
+ }
+ return false;
}
static void sparc_before_ra(ir_graph *irg)
NULL, sparc_modifies_fp_flags);
}
-/**
- * transform reload node => load
- */
-static void transform_Reload(ir_node *node)
-{
- ir_node *block = get_nodes_block(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
- ir_node *mem = get_irn_n(node, n_be_Reload_mem);
- ir_mode *mode = get_irn_mode(node);
- ir_entity *entity = be_get_frame_entity(node);
- const arch_register_t *reg;
- ir_node *proj;
- ir_node *load;
-
- ir_node *sched_point = sched_prev(node);
-
- load = new_bd_sparc_Ld_imm(dbgi, block, ptr, mem, mode, entity, 0, true);
- sched_add_after(sched_point, load);
- sched_remove(node);
-
- proj = new_rd_Proj(dbgi, load, mode, pn_sparc_Ld_res);
-
- reg = arch_get_irn_register(node);
- arch_set_irn_register(proj, reg);
-
- exchange(node, proj);
-}
-
-/**
- * transform spill node => store
- */
-static void transform_Spill(ir_node *node)
-{
- ir_node *block = get_nodes_block(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
- ir_graph *irg = get_irn_irg(node);
- ir_node *mem = get_irg_no_mem(irg);
- ir_node *val = get_irn_n(node, n_be_Spill_val);
- ir_mode *mode = get_irn_mode(val);
- ir_entity *entity = be_get_frame_entity(node);
- ir_node *sched_point;
- ir_node *store;
-
- sched_point = sched_prev(node);
- store = new_bd_sparc_St_imm(dbgi, block, val, ptr, mem, mode, entity, 0, true);
- sched_remove(node);
- sched_add_after(sched_point, store);
-
- exchange(node, store);
-}
-
-/**
- * walker to transform be_Spill and be_Reload nodes
- */
-static void sparc_after_ra_walker(ir_node *block, void *data)
-{
- ir_node *node, *prev;
- (void) data;
-
- for (node = sched_last(block); !sched_is_begin(node); node = prev) {
- prev = sched_prev(node);
-
- if (be_is_Reload(node)) {
- transform_Reload(node);
- } else if (be_is_Spill(node)) {
- transform_Spill(node);
- }
- }
-}
-
-static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
-{
- be_fec_env_t *env = (be_fec_env_t*)data;
- const ir_mode *mode;
- int align;
- ir_entity *entity;
- const sparc_load_store_attr_t *attr;
-
- if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
- mode = get_irn_mode(node);
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- return;
- }
-
- if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
- return;
-
- attr = get_sparc_load_store_attr_const(node);
- entity = attr->base.immediate_value_entity;
- mode = attr->load_store_mode;
- if (entity != NULL)
- return;
- if (!attr->is_frame_entity)
- return;
- if (arch_irn_get_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
- mode = mode_Lu;
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
-}
-
-static void sparc_set_frame_entity(ir_node *node, ir_entity *entity)
-{
- if (is_be_node(node)) {
- be_node_set_frame_entity(node, entity);
- } else {
- /* we only say be_node_needs_frame_entity on nodes with load_store
- * attributes, so this should be fine */
- sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
- assert(attr->is_frame_entity);
- assert(attr->base.immediate_value_entity == NULL);
- attr->base.immediate_value_entity = entity;
- }
-}
-
-static void sparc_after_ra(ir_graph *irg)
-{
- be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
- bool at_begin = stack_layout->sp_relative ? true : false;
- be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
-
- irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
- be_assign_entities(fec_env, sparc_set_frame_entity, at_begin);
- be_free_frame_entity_coalescer(fec_env);
-
- irg_block_walk_graph(irg, NULL, sparc_after_ra_walker, NULL);
-
- sparc_introduce_prolog_epilog(irg);
-}
-
-static void sparc_init_graph(ir_graph *irg)
-{
- (void) irg;
-}
-
extern const arch_isa_if_t sparc_isa_if;
static sparc_isa_t sparc_isa_template = {
{
- &sparc_isa_if, /* isa interface implementation */
+ &sparc_isa_if, /* isa interface implementation */
N_SPARC_REGISTERS,
sparc_registers,
N_SPARC_CLASSES,
sparc_reg_classes,
- &sparc_registers[REG_SP], /* stack pointer register */
- &sparc_registers[REG_FRAME_POINTER],/* base pointer register */
- &sparc_reg_classes[CLASS_sparc_gp], /* link pointer register class */
- 3, /* power of two stack alignment
- for calls */
- NULL, /* main environment */
- 7, /* costs for a spill instruction */
- 5, /* costs for a reload instruction */
- true, /* custom abi handling */
+ &sparc_registers[REG_SP], /* stack pointer register */
+ &sparc_registers[REG_FRAME_POINTER], /* base pointer register */
+ 3, /* power of two stack alignment
+ for calls */
+ 7, /* costs for a spill instruction */
+ 5, /* costs for a reload instruction */
+ true, /* custom abi handling */
},
- NULL, /* constants */
+ NULL, /* constants */
+ SPARC_FPU_ARCH_FPU, /* FPU architecture */
};
/**
}
}
+/**
+ * rewrite float->unsigned conversions.
+ * Sparc has no instruction for this so instead we do the following:
+ *
+ * if (x >= 2147483648.) {
+ *     converted = (int)(x-2147483648.) ^ 0x80000000;
+ * } else {
+ * converted = (int)x;
+ * }
+ * return (unsigned)converted;
+ */
+static void rewrite_float_unsigned_Conv(ir_node *node)
+{
+ ir_graph *irg = get_irn_irg(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *lower_block = get_nodes_block(node);
+
+ /* split the block at the Conv so the compare/branch diamond can be
+ * built above it; lower_block will merge both branches via a Phi */
+ part_block(node);
+
+ {
+ ir_node *block = get_nodes_block(node);
+ ir_node *float_x = get_Conv_op(node);
+ ir_mode *mode_u = get_irn_mode(node);
+ ir_mode *mode_s = find_signed_mode(mode_u);
+ ir_mode *mode_f = get_irn_mode(float_x);
+ /* 2147483648. == 2^31, the first value that does not fit into the
+ * signed 32bit conversion the hardware provides */
+ ir_tarval *limit = new_tarval_from_double(2147483648., mode_f);
+ ir_node *limitc = new_r_Const(irg, limit);
+ ir_node *cmp = new_rd_Cmp(dbgi, block, float_x, limitc,
+ ir_relation_greater_equal);
+ ir_node *cond = new_rd_Cond(dbgi, block, cmp);
+ ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
+ ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
+ ir_node *in_true[1] = { proj_true };
+ ir_node *in_false[1] = { proj_false };
+ ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
+ ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false);
+ ir_node *true_jmp = new_r_Jmp(true_block);
+ ir_node *false_jmp = new_r_Jmp(false_block);
+
+ /* true branch (x >= 2^31): convert x-2^31 signed, then set the sign
+ * bit again by xoring with 0x80000000 */
+ ir_tarval *correction = new_tarval_from_long(0x80000000l, mode_s);
+ ir_node *c_const = new_r_Const(irg, correction);
+ ir_node *sub = new_rd_Sub(dbgi, true_block, float_x, limitc,
+ mode_f);
+ ir_node *sub_conv = new_rd_Conv(dbgi, true_block, sub, mode_s);
+ ir_node *xorn = new_rd_Eor(dbgi, true_block, sub_conv, c_const,
+ mode_s);
+
+ /* false branch (x < 2^31): the plain signed conversion suffices */
+ ir_node *converted = new_rd_Conv(dbgi, false_block, float_x,mode_s);
+
+ ir_node *lower_in[2] = { true_jmp, false_jmp };
+ ir_node *phi_in[2] = { xorn, converted };
+ ir_node *phi;
+ ir_node *res_conv;
+
+ /* merge both branches; keep lower_block's phi list consistent so
+ * later phases that walk Block_phis still work */
+ set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
+ phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_s);
+ assert(get_Block_phis(lower_block) == NULL);
+ set_Block_phis(lower_block, phi);
+ set_Phi_next(phi, NULL);
+
+ /* reinterpret the signed result in the requested unsigned mode */
+ res_conv = new_rd_Conv(dbgi, lower_block, phi, mode_u);
+ exchange(node, res_conv);
+ }
+}
+
static int sparc_rewrite_Conv(ir_node *node, void *ctx)
{
ir_mode *to_mode = get_irn_mode(node);
(void) ctx;
if (mode_is_float(to_mode) && mode_is_int(from_mode)
- && get_mode_size_bits(from_mode) == 32
- && !mode_is_signed(from_mode)) {
+ && get_mode_size_bits(from_mode) == 32
+ && !mode_is_signed(from_mode)) {
rewrite_unsigned_float_Conv(node);
return 1;
}
+ if (mode_is_float(from_mode) && mode_is_int(to_mode)
+ && get_mode_size_bits(to_mode) <= 32
+ && !mode_is_signed(to_mode)) {
+ rewrite_float_unsigned_Conv(node);
+ return 1;
+ }
return 0;
}
rt_iMod.mem_proj_nr = pn_Mod_M;
rt_iMod.regular_proj_nr = pn_Mod_X_regular;
rt_iMod.exc_proj_nr = pn_Mod_X_except;
- rt_iMod.exc_mem_proj_nr = pn_Mod_M;
rt_iMod.res_proj_nr = pn_Mod_res;
set_entity_visibility(rt_iMod.ent, ir_visibility_external);
rt_uMod.mem_proj_nr = pn_Mod_M;
rt_uMod.regular_proj_nr = pn_Mod_X_regular;
rt_uMod.exc_proj_nr = pn_Mod_X_except;
- rt_uMod.exc_mem_proj_nr = pn_Mod_M;
rt_uMod.res_proj_nr = pn_Mod_res;
set_entity_visibility(rt_uMod.ent, ir_visibility_external);
lower_intrinsics(records, n_records, /*part_block_used=*/ true);
}
-/**
- * Initializes the backend ISA
- */
-static arch_env_t *sparc_init(FILE *outfile)
+/**
+ * One-time module initialization: registers, opcodes and the sparc
+ * calling convention.  Output-file handling now happens in
+ * sparc_begin_codegeneration().
+ */
+static void sparc_init(void)
+{
+ sparc_register_init();
+ sparc_create_opcodes(&sparc_irn_ops);
+ sparc_cconv_init();
+}
+
+/** Module teardown: release the opcodes created in sparc_init(). */
+static void sparc_finish(void)
+{
+ sparc_free_opcodes();
+}
+
+/**
+ * Start code generation: allocate the isa struct from the template,
+ * create the constants pmap and configure the gas emitter for
+ * SPARC-flavoured ELF.  Counterpart of sparc_end_codegeneration().
+ */
+static arch_env_t *sparc_begin_codegeneration(void)
{
	sparc_isa_t *isa = XMALLOC(sparc_isa_t);
	*isa = sparc_isa_template;
	isa->constants = pmap_create();
- be_emit_init(outfile);
-
- sparc_register_init();
- sparc_create_opcodes(&sparc_irn_ops);
- sparc_handle_intrinsics();
+ be_gas_elf_type_char = '#';
+ be_gas_elf_variant = ELF_VARIANT_SPARC;
	return &isa->base;
}
/**
* Closes the output file and frees the ISA structure.
*/
-static void sparc_done(void *self)
+static void sparc_end_codegeneration(void *self)
{
sparc_isa_t *isa = (sparc_isa_t*)self;
-
- /* emit now all global declarations */
- be_gas_emit_decls(isa->base.main_env);
-
pmap_destroy(isa->constants);
- be_emit_exit();
free(isa);
}
-
-/**
- * Get the register class which shall be used to store a value of a given mode.
- * @param self The this pointer.
- * @param mode The mode in question.
- * @return A register class which can hold values of the given mode.
- */
-static const arch_register_class_t *sparc_get_reg_class_for_mode(const ir_mode *mode)
+static void sparc_lower_for_target(void)
{
- if (mode_is_float(mode))
- return &sparc_reg_classes[CLASS_sparc_fp];
- else
- return &sparc_reg_classes[CLASS_sparc_gp];
-}
+ ir_mode *mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode;
+ size_t i, n_irgs = get_irp_n_irgs();
-/**
- * Returns the necessary byte alignment for storing a register of given class.
- */
-static int sparc_get_reg_class_alignment(const arch_register_class_t *cls)
-{
- ir_mode *mode = arch_register_class_mode(cls);
- return get_mode_size_bytes(mode);
-}
+ lower_calls_with_compounds(LF_RETURN_HIDDEN);
-static ir_node *sparc_create_set(ir_node *cond)
-{
- return ir_create_cond_set(cond, mode_Iu);
-}
+ for (i = 0; i < n_irgs; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+ /* Turn all small CopyBs into loads/stores and all bigger CopyBs into
+ * memcpy calls. */
+ lower_CopyB(irg, 31, 32, false);
+ }
-static void sparc_lower_for_target(void)
-{
- size_t i, n_irgs = get_irp_n_irgs();
- lower_mode_b_config_t lower_mode_b_config = {
- mode_Iu,
- sparc_create_set,
- 0,
- };
- lower_params_t params = {
- 4, /* def_ptr_alignment */
- LF_COMPOUND_RETURN | LF_RETURN_HIDDEN, /* flags */
- ADD_HIDDEN_ALWAYS_IN_FRONT, /* hidden_params */
- NULL, /* find pointer type */
- NULL, /* ret_compound_in_regs */
- };
- lower_calls_with_compounds(¶ms);
+ if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT)
+ lower_floating_point();
+
+ lower_builtins(0, NULL);
sparc_lower_64bit();
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
- ir_lower_mode_b(irg, &lower_mode_b_config);
- lower_switch(irg, 4, 256, false);
+ ir_lower_mode_b(irg, mode_Iu);
+ lower_switch(irg, 4, 256, mode_gp);
+ /* TODO: Pass SPARC_MIN_STACKSIZE as addr_delta as soon as
+ * Alloc nodes are implemented more efficiently. */
+ lower_alloc(irg, SPARC_STACK_ALIGNMENT, true, 0);
}
}
+/* if-conversion callback: delegate the Mux legality check to the generic
+ * ir_is_optimizable_mux() predicate instead of a hand-rolled test */
static int sparc_is_mux_allowed(ir_node *sel, ir_node *mux_false,
ir_node *mux_true)
{
- ir_graph *irg = get_irn_irg(sel);
- ir_mode *mode = get_irn_mode(mux_true);
-
- if (get_irg_phase_state(irg) == phase_low)
- return false;
-
- if (!mode_is_int(mode) && !mode_is_reference(mode) && mode != mode_b)
- return false;
- if (is_Const(mux_true) && is_Const_one(mux_true) &&
- is_Const(mux_false) && is_Const_null(mux_false))
- return true;
- return false;
+ return ir_is_optimizable_mux(sel, mux_false, mux_true);
}
/**
0, /* no inline assembly */
0, /* no support for RotL nodes */
1, /* big endian */
+ 1, /* modulo shift efficient */
+ 0, /* non-modulo shift not efficient */
&arch_dep, /* will be set later */
sparc_is_mux_allowed, /* parameter for if conversion */
+ 32, /* machine size */
NULL, /* float arithmetic mode */
+ NULL, /* long long type */
+ NULL, /* unsigned long long type */
+ NULL, /* long double type */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
4 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
};
- return &p;
-}
-static ir_graph **sparc_get_backend_irg_list(const void *self,
- ir_graph ***irgs)
-{
- (void) self;
- (void) irgs;
- return NULL;
+ ir_mode *mode_long_long
+ = new_int_mode("long long", irma_twos_complement, 64, 1, 64);
+ ir_type *type_long_long = new_type_primitive(mode_long_long);
+ ir_mode *mode_unsigned_long_long
+ = new_int_mode("unsigned long long", irma_twos_complement, 64, 0, 64);
+ ir_type *type_unsigned_long_long
+ = new_type_primitive(mode_unsigned_long_long);
+
+ p.type_long_long = type_long_long;
+ p.type_unsigned_long_long = type_unsigned_long_long;
+
+ ir_type *type_long_double = new_type_primitive(mode_Q);
+
+ set_type_alignment_bytes(type_long_double, 8);
+ set_type_size_bytes(type_long_double, 16);
+ p.type_long_double = type_long_double;
+ return &p;
}
static asm_constraint_flags_t sparc_parse_asm_constraint(const char **c)
return 0;
}
+/* fpu set architectures. */
+static const lc_opt_enum_int_items_t sparc_fpu_items[] = {
+ { "fpu", SPARC_FPU_ARCH_FPU },
+ { "softfloat", SPARC_FPU_ARCH_SOFTFLOAT },
+ { NULL, 0 }
+};
+
+static lc_opt_enum_int_var_t arch_fpu_var = {
+ &sparc_isa_template.fpu_arch, sparc_fpu_items
+};
+
+static const lc_opt_table_entry_t sparc_options[] = {
+ LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
+ LC_OPT_LAST
+};
+
+/**
+ * Backend callback (see sparc_isa_if): create a spill, i.e. a store of
+ * @p value into the stack frame, and schedule it after @p after.
+ * Stf is used for float modes, St otherwise.  The frame entity is not
+ * known yet and passed as NULL; it is assigned later.
+ */
+static ir_node *sparc_new_spill(ir_node *value, ir_node *after)
+{
+ ir_node *block = get_block(after);
+ ir_graph *irg = get_irn_irg(value);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *mem = get_irg_no_mem(irg);
+ ir_mode *mode = get_irn_mode(value);
+ ir_node *store;
+
+ if (mode_is_float(mode)) {
+ store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true);
+ } else {
+ store = new_bd_sparc_St_imm(NULL, block, value, frame, mem, mode, NULL,
+ 0, true);
+ }
+ sched_add_after(after, store);
+ return store;
+}
+
+/**
+ * Backend callback (see sparc_isa_if): create a reload, i.e. a load from
+ * the stack frame of the value stored by @p spill, scheduled before
+ * @p before.  Ldf is used for float modes, Ld otherwise; the frame
+ * entity is assigned later (NULL here).
+ */
+static ir_node *sparc_new_reload(ir_node *value, ir_node *spill,
+ ir_node *before)
+{
+ ir_node *block = get_block(before);
+ ir_graph *irg = get_irn_irg(value);
+ ir_node *frame = get_irg_frame(irg);
+ ir_mode *mode = get_irn_mode(value);
+ ir_node *load;
+ ir_node *res;
+
+ if (mode_is_float(mode)) {
+ load = create_ldf(NULL, block, frame, spill, mode, NULL, 0, true);
+ } else {
+ load = new_bd_sparc_Ld_imm(NULL, block, frame, spill, mode, NULL, 0,
+ true);
+ }
+ sched_add_before(before, load);
+ /* Ld and Ldf share the same result proj number, so one Proj
+ * constructor covers both cases */
+ assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res);
+ res = new_r_Proj(load, mode, pn_sparc_Ld_res);
+
+ return res;
+}
+
const arch_isa_if_t sparc_isa_if = {
sparc_init,
- sparc_lower_for_target,
- sparc_done,
- NULL, /* handle intrinsics */
- sparc_get_reg_class_for_mode,
- NULL,
- sparc_get_reg_class_alignment,
+ sparc_finish,
sparc_get_backend_params,
- sparc_get_backend_irg_list,
- NULL, /* mark remat */
+ sparc_lower_for_target,
sparc_parse_asm_constraint,
sparc_is_valid_clobber,
- sparc_init_graph,
- NULL, /* get_pic_base */
- NULL, /* before_abi */
+ sparc_begin_codegeneration,
+ sparc_end_codegeneration,
+ NULL,
+ NULL, /* get call abi */
+ NULL, /* mark remat */
+ NULL, /* get_pic_base */
+ sparc_new_spill,
+ sparc_new_reload,
+ NULL, /* register_saved_by */
+
+ sparc_handle_intrinsics,
+ NULL, /* before_abi */
sparc_prepare_graph,
sparc_before_ra,
- sparc_after_ra,
- sparc_finish,
+ sparc_finish_graph,
sparc_emit_routine,
- NULL, /* register_saved_by */
};
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc)
void be_init_arch_sparc(void)
{
+ lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
+ lc_opt_entry_t *sparc_grp = lc_opt_get_grp(be_grp, "sparc");
+
+ lc_opt_add_table(sparc_grp, sparc_options);
+
be_register_isa_if("sparc", &sparc_isa_if);
FIRM_DBG_REGISTER(dbg, "firm.be.sparc.cg");
sparc_init_transform();