/**
 * @file
 * @brief   The main SPARC backend driver file.
 * @author  Hannes Rapp, Matthias Braun
 * @version $Id$
 */
#include "config.h"
#include "iroptimize.h"
#include "irtools.h"
#include "irdump.h"
+#include "iropt_t.h"
#include "lowering.h"
+#include "lower_dw.h"
+#include "lower_alloc.h"
+#include "lower_builtins.h"
+#include "lower_calls.h"
+#include "lower_mode_b.h"
+#include "lower_softfloat.h"
#include "bitset.h"
#include "debug.h"
#include "error.h"
#include "util.h"
-#include "../bearch.h"
-#include "../benode.h"
-#include "../belower.h"
-#include "../besched.h"
+#include "bearch.h"
+#include "benode.h"
+#include "belower.h"
+#include "besched.h"
#include "be.h"
-#include "../bemachine.h"
-#include "../bemodule.h"
-#include "../beirg.h"
-#include "../bespillslots.h"
-#include "../begnuas.h"
-#include "../belistsched.h"
-#include "../beflags.h"
+#include "bemachine.h"
+#include "bemodule.h"
+#include "beirg.h"
+#include "begnuas.h"
+#include "belistsched.h"
+#include "beflags.h"
+#include "beutil.h"
#include "bearch_sparc_t.h"
#include "gen_sparc_regalloc_if.h"
#include "sparc_transform.h"
#include "sparc_emitter.h"
+#include "sparc_cconv.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static arch_irn_class_t sparc_classify(const ir_node *node)
{
(void) node;
- return 0;
+ return arch_irn_class_none;
}
static ir_entity *sparc_get_frame_entity(const ir_node *node)
static int sparc_get_sp_bias(const ir_node *node)
{
if (is_sparc_Save(node)) {
- const sparc_save_attr_t *attr = get_sparc_save_attr_const(node);
- /* Note we do not retport the change of the SPARC_MIN_STACKSIZE
- * size, since we have additional magic in the emitter which
- * calculates that! */
- assert(attr->initial_stacksize >= SPARC_MIN_STACKSIZE);
- return attr->initial_stacksize - SPARC_MIN_STACKSIZE;
+ const sparc_attr_t *attr = get_sparc_attr_const(node);
+ if (get_irn_arity(node) == 3)
+ panic("no support for _reg variant yet");
+
+ return -attr->immediate_value;
+ } else if (is_sparc_RestoreZero(node)) {
+ return SP_BIAS_RESET;
}
return 0;
}
/* fill register allocator interface */
-static const arch_irn_ops_t sparc_irn_ops = {
+const arch_irn_ops_t sparc_irn_ops = {
sparc_classify,
sparc_get_frame_entity,
sparc_set_frame_offset,
static bool sparc_modifies_flags(const ir_node *node)
{
- return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_flags;
+ return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_flags;
}
static bool sparc_modifies_fp_flags(const ir_node *node)
{
- return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_fp_flags;
+ return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_fp_flags;
}
static void sparc_before_ra(ir_graph *irg)
NULL, sparc_modifies_fp_flags);
}
-/**
- * transform reload node => load
- */
-static void transform_Reload(ir_node *node)
-{
- ir_node *block = get_nodes_block(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *ptr = get_irn_n(node, be_pos_Spill_frame);
- ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
- ir_mode *mode = get_irn_mode(node);
- ir_entity *entity = be_get_frame_entity(node);
- const arch_register_t *reg;
- ir_node *proj;
- ir_node *load;
-
- ir_node *sched_point = sched_prev(node);
-
- load = new_bd_sparc_Ld_imm(dbgi, block, ptr, mem, mode, entity, 0, true);
- sched_add_after(sched_point, load);
- sched_remove(node);
-
- proj = new_rd_Proj(dbgi, load, mode, pn_sparc_Ld_res);
-
- reg = arch_get_irn_register(node);
- arch_set_irn_register(proj, reg);
-
- exchange(node, proj);
-}
-
-/**
- * transform spill node => store
- */
-static void transform_Spill(ir_node *node)
-{
- ir_node *block = get_nodes_block(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *ptr = get_irn_n(node, be_pos_Spill_frame);
- ir_graph *irg = get_irn_irg(node);
- ir_node *mem = new_r_NoMem(irg);
- ir_node *val = get_irn_n(node, be_pos_Spill_val);
- ir_mode *mode = get_irn_mode(val);
- ir_entity *entity = be_get_frame_entity(node);
- ir_node *sched_point;
- ir_node *store;
-
- sched_point = sched_prev(node);
- store = new_bd_sparc_St_imm(dbgi, block, val, ptr, mem, mode, entity, 0, true);
- sched_remove(node);
- sched_add_after(sched_point, store);
-
- exchange(node, store);
-}
-
-/**
- * walker to transform be_Spill and be_Reload nodes
- */
-static void sparc_after_ra_walker(ir_node *block, void *data)
-{
- ir_node *node, *prev;
- (void) data;
-
- for (node = sched_last(block); !sched_is_begin(node); node = prev) {
- prev = sched_prev(node);
-
- if (be_is_Reload(node)) {
- transform_Reload(node);
- } else if (be_is_Spill(node)) {
- transform_Spill(node);
- }
- }
-}
-
-static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
-{
- be_fec_env_t *env = data;
- const ir_mode *mode;
- int align;
- ir_entity *entity;
- const sparc_load_store_attr_t *attr;
-
- if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
- mode = get_irn_mode(node);
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- return;
- }
-
- if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
- return;
-
- attr = get_sparc_load_store_attr_const(node);
- entity = attr->base.immediate_value_entity;
- mode = attr->load_store_mode;
- if (entity != NULL)
- return;
- if (!attr->is_frame_entity)
- return;
- if (arch_irn_get_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
- mode = mode_Lu;
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
-}
-
-static void sparc_set_frame_entity(ir_node *node, ir_entity *entity)
-{
- if (is_be_node(node)) {
- be_node_set_frame_entity(node, entity);
- } else {
- /* we only say be_node_needs_frame_entity on nodes with load_store
- * attributes, so this should be fine */
- sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
- assert(attr->is_frame_entity);
- assert(attr->base.immediate_value_entity == NULL);
- attr->base.immediate_value_entity = entity;
- }
-}
-
-static void sparc_after_ra(ir_graph *irg)
-{
- be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
-
- irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
- be_assign_entities(fec_env, sparc_set_frame_entity);
- be_free_frame_entity_coalescer(fec_env);
-
- irg_block_walk_graph(irg, NULL, sparc_after_ra_walker, NULL);
-}
-
/* Per-graph initialisation hook; the SPARC backend needs no setup here. */
static void sparc_init_graph(ir_graph *irg)
{
	(void)irg;
}
-const arch_isa_if_t sparc_isa_if;
+extern const arch_isa_if_t sparc_isa_if;
static sparc_isa_t sparc_isa_template = {
{
&sparc_isa_if, /* isa interface implementation */
- &sparc_gp_regs[REG_SP], /* stack pointer register */
- &sparc_gp_regs[REG_FRAME_POINTER], /* base pointer register */
+ N_SPARC_REGISTERS,
+ sparc_registers,
+ N_SPARC_CLASSES,
+ sparc_reg_classes,
+ &sparc_registers[REG_SP], /* stack pointer register */
+ &sparc_registers[REG_FRAME_POINTER],/* base pointer register */
&sparc_reg_classes[CLASS_sparc_gp], /* link pointer register class */
- -1, /* stack direction */
3, /* power of two stack alignment
for calls */
NULL, /* main environment */
5, /* costs for a reload instruction */
true, /* custom abi handling */
},
- NULL, /* constants */
+ NULL, /* constants */
+ SPARC_FPU_ARCH_FPU, /* FPU architecture */
};
/**
part_block(node);
{
- ir_node *block = get_nodes_block(node);
- ir_node *unsigned_x = get_Conv_op(node);
- ir_mode *mode_u = get_irn_mode(unsigned_x);
- ir_mode *mode_s = find_signed_mode(mode_u);
- ir_mode *mode_d = mode_D;
- ir_node *signed_x = new_rd_Conv(dbgi, block, unsigned_x, mode_s);
- ir_node *res = new_rd_Conv(dbgi, block, signed_x, mode_d);
- ir_node *zero = new_r_Const(irg, get_mode_null(mode_s));
- ir_node *cmp = new_rd_Cmp(dbgi, block, signed_x, zero);
- ir_node *proj_lt = new_r_Proj(cmp, mode_b, pn_Cmp_Lt);
- ir_node *cond = new_rd_Cond(dbgi, block, proj_lt);
- ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
- ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
- ir_node *in_true[1] = { proj_true };
- ir_node *in_false[1] = { proj_false };
- ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
- ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false);
- ir_node *true_jmp = new_r_Jmp(true_block);
- ir_node *false_jmp = new_r_Jmp(false_block);
- tarval *correction = new_tarval_from_double(4294967296., mode_d);
- ir_node *c_const = new_r_Const(irg, correction);
- ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const,
+ ir_node *block = get_nodes_block(node);
+ ir_node *unsigned_x = get_Conv_op(node);
+ ir_mode *mode_u = get_irn_mode(unsigned_x);
+ ir_mode *mode_s = find_signed_mode(mode_u);
+ ir_mode *mode_d = mode_D;
+ ir_node *signed_x = new_rd_Conv(dbgi, block, unsigned_x, mode_s);
+ ir_node *res = new_rd_Conv(dbgi, block, signed_x, mode_d);
+ ir_node *zero = new_r_Const(irg, get_mode_null(mode_s));
+ ir_node *cmp = new_rd_Cmp(dbgi, block, signed_x, zero,
+ ir_relation_less);
+ ir_node *cond = new_rd_Cond(dbgi, block, cmp);
+ ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
+ ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
+ ir_node *in_true[1] = { proj_true };
+ ir_node *in_false[1] = { proj_false };
+ ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
+ ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false);
+ ir_node *true_jmp = new_r_Jmp(true_block);
+ ir_node *false_jmp = new_r_Jmp(false_block);
+ ir_tarval *correction = new_tarval_from_double(4294967296., mode_d);
+ ir_node *c_const = new_r_Const(irg, correction);
+ ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const,
mode_d);
ir_node *lower_in[2] = { true_jmp, false_jmp };
rt_iMod.mem_proj_nr = pn_Mod_M;
rt_iMod.regular_proj_nr = pn_Mod_X_regular;
rt_iMod.exc_proj_nr = pn_Mod_X_except;
- rt_iMod.exc_mem_proj_nr = pn_Mod_M;
rt_iMod.res_proj_nr = pn_Mod_res;
set_entity_visibility(rt_iMod.ent, ir_visibility_external);
rt_uMod.mem_proj_nr = pn_Mod_M;
rt_uMod.regular_proj_nr = pn_Mod_X_regular;
rt_uMod.exc_proj_nr = pn_Mod_X_except;
- rt_uMod.exc_mem_proj_nr = pn_Mod_M;
rt_uMod.res_proj_nr = pn_Mod_res;
set_entity_visibility(rt_uMod.ent, ir_visibility_external);
*/
static arch_env_t *sparc_init(FILE *outfile)
{
- static int run_once = 0;
- sparc_isa_t *isa;
-
- if (run_once)
- return NULL;
- run_once = 1;
-
- isa = XMALLOC(sparc_isa_t);
- memcpy(isa, &sparc_isa_template, sizeof(*isa));
+ sparc_isa_t *isa = XMALLOC(sparc_isa_t);
+ *isa = sparc_isa_template;
isa->constants = pmap_create();
+ be_gas_elf_type_char = '#';
+ be_gas_object_file_format = OBJECT_FILE_FORMAT_ELF;
+ be_gas_elf_variant = ELF_VARIANT_SPARC;
+
be_emit_init(outfile);
sparc_register_init();
sparc_create_opcodes(&sparc_irn_ops);
sparc_handle_intrinsics();
+ sparc_cconv_init();
return &isa->base;
}
*/
static void sparc_done(void *self)
{
- sparc_isa_t *isa = self;
+ sparc_isa_t *isa = (sparc_isa_t*)self;
/* emit now all global declarations */
be_gas_emit_decls(isa->base.main_env);
free(isa);
}
-static unsigned sparc_get_n_reg_class(void)
-{
- return N_CLASSES;
-}
-
-static const arch_register_class_t *sparc_get_reg_class(unsigned i)
-{
- assert(i < N_CLASSES);
- return &sparc_reg_classes[i];
-}
-
-
/**
* Get the register class which shall be used to store a value of a given mode.
static void sparc_lower_for_target(void)
{
- int i;
- int n_irgs = get_irp_n_irgs();
+ size_t i, n_irgs = get_irp_n_irgs();
- /* TODO, doubleword lowering and others */
+ lower_calls_with_compounds(LF_RETURN_HIDDEN);
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
- lower_switch(irg, 256, false);
+ /* Turn all small CopyBs into loads/stores and all bigger CopyBs into
+ * memcpy calls. */
+ lower_CopyB(irg, 31, 32, false);
+ }
+
+ if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT)
+ lower_floating_point();
+
+ lower_builtins(0, NULL);
+
+ sparc_lower_64bit();
+
+ for (i = 0; i < n_irgs; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+ ir_lower_mode_b(irg, mode_Iu);
+ lower_switch(irg, 4, 256, false);
+ lower_alloc(irg, SPARC_STACK_ALIGNMENT, false, -SPARC_MIN_STACKSIZE);
}
}
/**
 * If-conversion callback: allow a Mux whenever the generic optimiser
 * would be able to fold or handle it.
 */
static int sparc_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                                ir_node *mux_true)
{
	return ir_is_optimizable_mux(sel, mux_false, mux_true);
}
/**
0, /* no inline assembly */
0, /* no support for RotL nodes */
1, /* big endian */
- sparc_lower_for_target, /* lowering callback */
+ 1, /* modulo shift efficient */
+ 0, /* non-modulo shift not efficient */
&arch_dep, /* will be set later */
sparc_is_mux_allowed, /* parameter for if conversion */
+ 32, /* machine size */
NULL, /* float arithmetic mode */
+ NULL, /* long long type */
+ NULL, /* usigned long long type */
+ NULL, /* long double type */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
4 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
};
+
+ ir_mode *mode_long_long
+ = new_ir_mode("long long", irms_int_number, 64, 1, irma_twos_complement,
+ 64);
+ ir_type *type_long_long = new_type_primitive(mode_long_long);
+ ir_mode *mode_unsigned_long_long
+ = new_ir_mode("unsigned long long", irms_int_number, 64, 0,
+ irma_twos_complement, 64);
+ ir_type *type_unsigned_long_long
+ = new_type_primitive(mode_unsigned_long_long);
+
+ p.type_long_long = type_long_long;
+ p.type_unsigned_long_long = type_unsigned_long_long;
+
+ if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT) {
+ p.mode_float_arithmetic = NULL;
+ p.type_long_double = NULL;
+ } else {
+ ir_mode *mode_long_double
+ = new_ir_mode("long double", irms_float_number, 128, 1,
+ irma_ieee754, 0);
+ ir_type *type_long_double = new_type_primitive(mode_long_double);
+
+ set_type_alignment_bytes(type_long_double, 8);
+ p.type_long_double = type_long_double;
+ }
return &p;
}
return 0;
}
+/* fpu set architectures. */
+static const lc_opt_enum_int_items_t sparc_fpu_items[] = {
+ { "fpu", SPARC_FPU_ARCH_FPU },
+ { "softfloat", SPARC_FPU_ARCH_SOFTFLOAT },
+ { NULL, 0 }
+};
+
+static lc_opt_enum_int_var_t arch_fpu_var = {
+ &sparc_isa_template.fpu_arch, sparc_fpu_items
+};
+
+static const lc_opt_table_entry_t sparc_options[] = {
+ LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
+ LC_OPT_LAST
+};
+
+static ir_node *sparc_new_spill(ir_node *value, ir_node *after)
+{
+ ir_node *block = get_block(after);
+ ir_graph *irg = get_irn_irg(value);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *mem = get_irg_no_mem(irg);
+ ir_mode *mode = get_irn_mode(value);
+ ir_node *store;
+
+ if (mode_is_float(mode)) {
+ store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true);
+ } else {
+ store = new_bd_sparc_St_imm(NULL, block, value, frame, mem, mode, NULL,
+ 0, true);
+ }
+ sched_add_after(after, store);
+ return store;
+}
+
+static ir_node *sparc_new_reload(ir_node *value, ir_node *spill,
+ ir_node *before)
+{
+ ir_node *block = get_block(before);
+ ir_graph *irg = get_irn_irg(value);
+ ir_node *frame = get_irg_frame(irg);
+ ir_mode *mode = get_irn_mode(value);
+ ir_node *load;
+ ir_node *res;
+
+ if (mode_is_float(mode)) {
+ load = create_ldf(NULL, block, frame, spill, mode, NULL, 0, true);
+ } else {
+ load = new_bd_sparc_Ld_imm(NULL, block, frame, spill, mode, NULL, 0,
+ true);
+ }
+ sched_add_before(before, load);
+ assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res);
+ res = new_r_Proj(load, mode, pn_sparc_Ld_res);
+
+ return res;
+}
+
/** The SPARC implementation of the backend isa interface. */
const arch_isa_if_t sparc_isa_if = {
	sparc_init,
	sparc_lower_for_target,
	sparc_done,
	NULL,                /* handle intrinsics */
	sparc_get_reg_class_for_mode,
	NULL,
	sparc_get_reg_class_alignment,
	NULL,                /* before_abi */
	sparc_prepare_graph,
	sparc_before_ra,
	sparc_finish,
	sparc_emit_routine,
	NULL,                /* register_saved_by */
	sparc_new_spill,
	sparc_new_reload
};
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc)
void be_init_arch_sparc(void)
{
+ lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
+ lc_opt_entry_t *sparc_grp = lc_opt_get_grp(be_grp, "sparc");
+
+ lc_opt_add_table(sparc_grp, sparc_options);
+
be_register_isa_if("sparc", &sparc_isa_if);
FIRM_DBG_REGISTER(dbg, "firm.be.sparc.cg");
sparc_init_transform();