2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief The main sparc backend driver file.
9 * @author Hannes Rapp, Matthias Braun
14 #include "lc_opts_enum.h"
22 #include "iroptimize.h"
28 #include "lower_alloc.h"
29 #include "lower_builtins.h"
30 #include "lower_calls.h"
31 #include "lower_mode_b.h"
32 #include "lower_softfloat.h"
46 #include "belistsched.h"
50 #include "bearch_sparc_t.h"
52 #include "sparc_new_nodes.h"
53 #include "gen_sparc_regalloc_if.h"
54 #include "sparc_transform.h"
55 #include "sparc_emitter.h"
56 #include "sparc_cconv.h"
/** Debug module handle for this backend (compiled in only for debug builds). */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
60 static ir_entity *sparc_get_frame_entity(const ir_node *node)
62 if (is_sparc_FrameAddr(node)) {
63 const sparc_attr_t *attr = get_sparc_attr_const(node);
64 return attr->immediate_value_entity;
67 if (sparc_has_load_store_attr(node)) {
68 const sparc_load_store_attr_t *load_store_attr
69 = get_sparc_load_store_attr_const(node);
70 if (load_store_attr->is_frame_entity) {
71 return load_store_attr->base.immediate_value_entity;
79 * This function is called by the generic backend to correct offsets for
80 * nodes accessing the stack.
82 static void sparc_set_frame_offset(ir_node *node, int offset)
84 sparc_attr_t *attr = get_sparc_attr(node);
85 attr->immediate_value += offset;
87 /* must be a FrameAddr or a load/store node with frame_entity */
88 assert(is_sparc_FrameAddr(node) ||
89 get_sparc_load_store_attr_const(node)->is_frame_entity);
92 static int sparc_get_sp_bias(const ir_node *node)
94 if (is_sparc_Save(node)) {
95 const sparc_attr_t *attr = get_sparc_attr_const(node);
96 if (get_irn_arity(node) == 3)
97 panic("no support for _reg variant yet");
99 return -attr->immediate_value;
100 } else if (is_sparc_RestoreZero(node)) {
101 return SP_BIAS_RESET;
106 /* fill register allocator interface */
108 const arch_irn_ops_t sparc_irn_ops = {
109 sparc_get_frame_entity,
110 sparc_set_frame_offset,
112 NULL, /* get_op_estimated_cost */
113 NULL, /* possible_memory_operand */
114 NULL, /* perform_memory_operand */
118 * Transforms the standard firm graph into
121 static void sparc_prepare_graph(ir_graph *irg)
123 sparc_transform_graph(irg);
126 static bool sparc_modifies_flags(const ir_node *node)
128 be_foreach_out(node, o) {
129 const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
130 if (req->cls == &sparc_reg_classes[CLASS_sparc_flags_class])
136 static bool sparc_modifies_fp_flags(const ir_node *node)
138 be_foreach_out(node, o) {
139 const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
140 if (req->cls == &sparc_reg_classes[CLASS_sparc_fpflags_class])
146 static void sparc_before_ra(ir_graph *irg)
148 /* fixup flags register */
149 be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_flags_class],
150 NULL, sparc_modifies_flags);
151 be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_fpflags_class],
152 NULL, sparc_modifies_fp_flags);
/* Template for the per-run ISA data; sparc_begin_codegeneration copies it
 * and then allocates the constants pmap.
 * NOTE(review): some initializer fields and the closing braces are not
 * visible in this view of the file — confirm against the full source. */
extern const arch_isa_if_t sparc_isa_if;
static sparc_isa_t sparc_isa_template = {
	&sparc_isa_if, /* isa interface implementation */
	&sparc_registers[REG_SP], /* stack pointer register */
	&sparc_registers[REG_FRAME_POINTER], /* base pointer register */
	3, /* power of two stack alignment for calls */
	7, /* costs for a spill instruction */
	5, /* costs for a reload instruction */
	true, /* custom abi handling */
	NULL, /* constants */
	SPARC_FPU_ARCH_FPU, /* FPU architecture */
176 * rewrite unsigned->float conversion.
177 * Sparc has no instruction for this so instead we do the following:
179 * int signed_x = unsigned_value_x;
180 * double res = signed_x;
182 * res += 4294967296. ;
183 * return (float) res;
185 static void rewrite_unsigned_float_Conv(ir_node *node)
187 ir_graph *irg = get_irn_irg(node);
188 dbg_info *dbgi = get_irn_dbg_info(node);
189 ir_node *lower_block = get_nodes_block(node);
194 ir_node *block = get_nodes_block(node);
195 ir_node *unsigned_x = get_Conv_op(node);
196 ir_mode *mode_u = get_irn_mode(unsigned_x);
197 ir_mode *mode_s = find_signed_mode(mode_u);
198 ir_mode *mode_d = mode_D;
199 ir_node *signed_x = new_rd_Conv(dbgi, block, unsigned_x, mode_s);
200 ir_node *res = new_rd_Conv(dbgi, block, signed_x, mode_d);
201 ir_node *zero = new_r_Const(irg, get_mode_null(mode_s));
202 ir_node *cmp = new_rd_Cmp(dbgi, block, signed_x, zero,
204 ir_node *cond = new_rd_Cond(dbgi, block, cmp);
205 ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
206 ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
207 ir_node *in_true[1] = { proj_true };
208 ir_node *in_false[1] = { proj_false };
209 ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
210 ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false);
211 ir_node *true_jmp = new_r_Jmp(true_block);
212 ir_node *false_jmp = new_r_Jmp(false_block);
213 ir_tarval *correction = new_tarval_from_double(4294967296., mode_d);
214 ir_node *c_const = new_r_Const(irg, correction);
215 ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const,
218 ir_node *lower_in[2] = { true_jmp, false_jmp };
219 ir_node *phi_in[2] = { fadd, res };
220 ir_mode *dest_mode = get_irn_mode(node);
224 set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
225 phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_d);
226 assert(get_Block_phis(lower_block) == NULL);
227 set_Block_phis(lower_block, phi);
228 set_Phi_next(phi, NULL);
230 res_conv = new_rd_Conv(dbgi, lower_block, phi, dest_mode);
232 exchange(node, res_conv);
237 * rewrite float->unsigned conversions.
238 * Sparc has no instruction for this so instead we do the following:
240 * if (x >= 2147483648.) {
241 * converted ^= (int)(x-2147483648.) ^ 0x80000000;
243 * converted = (int)x;
245 * return (unsigned)converted;
247 static void rewrite_float_unsigned_Conv(ir_node *node)
249 ir_graph *irg = get_irn_irg(node);
250 dbg_info *dbgi = get_irn_dbg_info(node);
251 ir_node *lower_block = get_nodes_block(node);
256 ir_node *block = get_nodes_block(node);
257 ir_node *float_x = get_Conv_op(node);
258 ir_mode *mode_u = get_irn_mode(node);
259 ir_mode *mode_s = find_signed_mode(mode_u);
260 ir_mode *mode_f = get_irn_mode(float_x);
261 ir_tarval *limit = new_tarval_from_double(2147483648., mode_f);
262 ir_node *limitc = new_r_Const(irg, limit);
263 ir_node *cmp = new_rd_Cmp(dbgi, block, float_x, limitc,
264 ir_relation_greater_equal);
265 ir_node *cond = new_rd_Cond(dbgi, block, cmp);
266 ir_node *proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
267 ir_node *proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
268 ir_node *in_true[1] = { proj_true };
269 ir_node *in_false[1] = { proj_false };
270 ir_node *true_block = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
271 ir_node *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false);
272 ir_node *true_jmp = new_r_Jmp(true_block);
273 ir_node *false_jmp = new_r_Jmp(false_block);
275 ir_tarval *correction = new_tarval_from_long(0x80000000l, mode_s);
276 ir_node *c_const = new_r_Const(irg, correction);
277 ir_node *sub = new_rd_Sub(dbgi, true_block, float_x, limitc,
279 ir_node *sub_conv = new_rd_Conv(dbgi, true_block, sub, mode_s);
280 ir_node *xorn = new_rd_Eor(dbgi, true_block, sub_conv, c_const,
283 ir_node *converted = new_rd_Conv(dbgi, false_block, float_x,mode_s);
285 ir_node *lower_in[2] = { true_jmp, false_jmp };
286 ir_node *phi_in[2] = { xorn, converted };
290 set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
291 phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_s);
292 assert(get_Block_phis(lower_block) == NULL);
293 set_Block_phis(lower_block, phi);
294 set_Phi_next(phi, NULL);
296 res_conv = new_rd_Conv(dbgi, lower_block, phi, mode_u);
297 exchange(node, res_conv);
301 static int sparc_rewrite_Conv(ir_node *node, void *ctx)
303 ir_mode *to_mode = get_irn_mode(node);
304 ir_node *op = get_Conv_op(node);
305 ir_mode *from_mode = get_irn_mode(op);
308 if (mode_is_float(to_mode) && mode_is_int(from_mode)
309 && get_mode_size_bits(from_mode) == 32
310 && !mode_is_signed(from_mode)) {
311 rewrite_unsigned_float_Conv(node);
314 if (mode_is_float(from_mode) && mode_is_int(to_mode)
315 && get_mode_size_bits(to_mode) <= 32
316 && !mode_is_signed(to_mode)) {
317 rewrite_float_unsigned_Conv(node);
/**
 * Registers intrinsic lowerings and runs them over the program:
 *  - Conv nodes are rewritten via sparc_rewrite_Conv (unsigned<->float),
 *  - signed Mod is mapped to a runtime call to ".rem",
 *  - unsigned Mod is mapped to a runtime call to ".urem".
 * NOTE(review): the declaration of `records` and several braces are not
 * visible in this view of the file.
 */
static void sparc_handle_intrinsics(void)
	ir_type *tp, *int_tp, *uint_tp;
	size_t n_records = 0;
	runtime_rt rt_iMod, rt_uMod;

/* helper: build an ident from a string literal */
#define ID(x) new_id_from_chars(x, sizeof(x)-1)

	int_tp = new_type_primitive(mode_Is);
	uint_tp = new_type_primitive(mode_Iu);

	/* we need to rewrite some forms of int->float conversions */
	i_instr_record *map_Conv = &records[n_records++].i_instr;
	map_Conv->kind = INTRINSIC_INSTR;
	map_Conv->op = op_Conv;
	map_Conv->i_mapper = sparc_rewrite_Conv;

	/* SPARC has no signed mod instruction ... */
	i_instr_record *map_Mod = &records[n_records++].i_instr;
	/* method type: int (.rem)(int, int) */
	tp = new_type_method(2, 1);
	set_method_param_type(tp, 0, int_tp);
	set_method_param_type(tp, 1, int_tp);
	set_method_res_type(tp, 0, int_tp);
	rt_iMod.ent = new_entity(get_glob_type(), ID(".rem"), tp);
	set_entity_ld_ident(rt_iMod.ent, ID(".rem"));
	rt_iMod.mode = mode_T;
	rt_iMod.res_mode = mode_Is;
	rt_iMod.mem_proj_nr = pn_Mod_M;
	rt_iMod.regular_proj_nr = pn_Mod_X_regular;
	rt_iMod.exc_proj_nr = pn_Mod_X_except;
	rt_iMod.res_proj_nr = pn_Mod_res;
	set_entity_visibility(rt_iMod.ent, ir_visibility_external);
	map_Mod->kind = INTRINSIC_INSTR;
	map_Mod->op = op_Mod;
	map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
	map_Mod->ctx = &rt_iMod;

	/* ... nor an unsigned mod. */
	i_instr_record *map_Mod = &records[n_records++].i_instr;
	/* method type: unsigned (.urem)(unsigned, unsigned) */
	tp = new_type_method(2, 1);
	set_method_param_type(tp, 0, uint_tp);
	set_method_param_type(tp, 1, uint_tp);
	set_method_res_type(tp, 0, uint_tp);
	rt_uMod.ent = new_entity(get_glob_type(), ID(".urem"), tp);
	set_entity_ld_ident(rt_uMod.ent, ID(".urem"));
	rt_uMod.mode = mode_T;
	rt_uMod.res_mode = mode_Iu;
	rt_uMod.mem_proj_nr = pn_Mod_M;
	rt_uMod.regular_proj_nr = pn_Mod_X_regular;
	rt_uMod.exc_proj_nr = pn_Mod_X_except;
	rt_uMod.res_proj_nr = pn_Mod_res;
	set_entity_visibility(rt_uMod.ent, ir_visibility_external);
	map_Mod->kind = INTRINSIC_INSTR;
	map_Mod->op = op_Mod;
	map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
	map_Mod->ctx = &rt_uMod;

	/* NOTE(review): three records are filled above; if `records` has exactly
	 * three entries this bound should probably be `<=` — the declaration is
	 * not visible here, confirm against the full source. */
	assert(n_records < ARRAY_SIZE(records));
	lower_intrinsics(records, n_records, /*part_block_used=*/ true);
400 static void sparc_init(void)
402 sparc_register_init();
403 sparc_create_opcodes(&sparc_irn_ops);
/** One-time backend teardown: frees the opcodes created in sparc_init(). */
static void sparc_finish(void)
{
	sparc_free_opcodes();
}
/**
 * Allocates the ISA instance for one code generation run (copied from
 * sparc_isa_template), creates the constants pmap and configures the GAS
 * emitter for SPARC-flavoured ELF output.
 * NOTE(review): the return statement is not visible in this view of the
 * file; presumably the embedded arch_env of `isa` is returned — confirm.
 */
static arch_env_t *sparc_begin_codegeneration(void)
	sparc_isa_t *isa = XMALLOC(sparc_isa_t);
	*isa = sparc_isa_template;
	isa->constants = pmap_create();
	/* SPARC assembly uses '#' as the ELF type character */
	be_gas_elf_type_char = '#';
	be_gas_elf_variant = ELF_VARIANT_SPARC;
425 * Closes the output file and frees the ISA structure.
427 static void sparc_end_codegeneration(void *self)
429 sparc_isa_t *isa = (sparc_isa_t*)self;
430 pmap_destroy(isa->constants);
/**
 * Target-specific lowering pass run before code selection: compound-return
 * call lowering, CopyB expansion, optional softfloat lowering, builtin
 * lowering and per-graph mode_b / switch / Alloc lowering.
 * NOTE(review): a few lines between lower_builtins() and the second loop
 * are not visible in this view of the file.
 */
static void sparc_lower_for_target(void)
	ir_mode *mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode;
	size_t i, n_irgs = get_irp_n_irgs();

	lower_calls_with_compounds(LF_RETURN_HIDDEN);

	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		/* Turn all small CopyBs into loads/stores and all bigger CopyBs into
		 * memcopy calls. */
		lower_CopyB(irg, 31, 32, false);

	/* without an FPU every float operation becomes a softfloat call */
	if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT)
		lower_floating_point();

	lower_builtins(0, NULL);

	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		ir_lower_mode_b(irg, mode_Iu);
		lower_switch(irg, 4, 256, mode_gp);
		/* TODO: Pass SPARC_MIN_STACKSIZE as addr_delta as soon as
		 * Alloc nodes are implemented more efficiently. */
		lower_alloc(irg, SPARC_STACK_ALIGNMENT, true, 0);
465 static int sparc_is_mux_allowed(ir_node *sel, ir_node *mux_false,
468 return ir_is_optimizable_mux(sel, mux_false, mux_true);
472 * Returns the libFirm configuration parameter for this backend.
474 static const backend_params *sparc_get_backend_params(void)
476 static const ir_settings_arch_dep_t arch_dep = {
477 1, /* also_use_subs */
478 1, /* maximum_shifts */
479 31, /* highest_shift_amount */
480 NULL, /* evaluate_cost_func */
483 32, /* max_bits_for_mulh */
485 static backend_params p = {
486 0, /* no inline assembly */
487 0, /* no support for RotL nodes */
489 1, /* modulo shift efficient */
490 0, /* non-modulo shift not efficient */
491 &arch_dep, /* will be set later */
492 sparc_is_mux_allowed, /* parameter for if conversion */
493 32, /* machine size */
494 NULL, /* float arithmetic mode */
495 NULL, /* long long type */
496 NULL, /* usigned long long type */
497 NULL, /* long double type */
498 0, /* no trampoline support: size 0 */
499 0, /* no trampoline support: align 0 */
500 NULL, /* no trampoline support: no trampoline builder */
501 4 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
504 ir_mode *mode_long_long
505 = new_int_mode("long long", irma_twos_complement, 64, 1, 64);
506 ir_type *type_long_long = new_type_primitive(mode_long_long);
507 ir_mode *mode_unsigned_long_long
508 = new_int_mode("unsigned long long", irma_twos_complement, 64, 0, 64);
509 ir_type *type_unsigned_long_long
510 = new_type_primitive(mode_unsigned_long_long);
512 p.type_long_long = type_long_long;
513 p.type_unsigned_long_long = type_unsigned_long_long;
515 ir_type *type_long_double = new_type_primitive(mode_Q);
517 set_type_alignment_bytes(type_long_double, 8);
518 set_type_size_bytes(type_long_double, 16);
519 p.type_long_double = type_long_double;
523 static asm_constraint_flags_t sparc_parse_asm_constraint(const char **c)
526 return ASM_CONSTRAINT_FLAG_INVALID;
/** Inline assembly is not supported: no clobber specification is valid. */
static int sparc_is_valid_clobber(const char *clobber)
{
	(void) clobber;
	return 0;
}
535 /* fpu set architectures. */
536 static const lc_opt_enum_int_items_t sparc_fpu_items[] = {
537 { "fpu", SPARC_FPU_ARCH_FPU },
538 { "softfloat", SPARC_FPU_ARCH_SOFTFLOAT },
542 static lc_opt_enum_int_var_t arch_fpu_var = {
543 &sparc_isa_template.fpu_arch, sparc_fpu_items
546 static const lc_opt_table_entry_t sparc_options[] = {
547 LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
551 static ir_node *sparc_new_spill(ir_node *value, ir_node *after)
553 ir_node *block = get_block(after);
554 ir_graph *irg = get_irn_irg(value);
555 ir_node *frame = get_irg_frame(irg);
556 ir_node *mem = get_irg_no_mem(irg);
557 ir_mode *mode = get_irn_mode(value);
560 if (mode_is_float(mode)) {
561 store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true);
563 store = new_bd_sparc_St_imm(NULL, block, value, frame, mem, mode, NULL,
566 sched_add_after(after, store);
570 static ir_node *sparc_new_reload(ir_node *value, ir_node *spill,
573 ir_node *block = get_block(before);
574 ir_graph *irg = get_irn_irg(value);
575 ir_node *frame = get_irg_frame(irg);
576 ir_mode *mode = get_irn_mode(value);
580 if (mode_is_float(mode)) {
581 load = create_ldf(NULL, block, frame, spill, mode, NULL, 0, true);
583 load = new_bd_sparc_Ld_imm(NULL, block, frame, spill, mode, NULL, 0,
586 sched_add_before(before, load);
587 assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res);
588 res = new_r_Proj(load, mode, pn_sparc_Ld_res);
/* ISA interface implementation of the SPARC backend.
 * NOTE(review): several function-pointer entries of this initializer are
 * not visible in this view of the file — confirm order against the
 * arch_isa_if_t declaration. */
const arch_isa_if_t sparc_isa_if = {
	sparc_get_backend_params,
	sparc_lower_for_target,
	sparc_parse_asm_constraint,
	sparc_is_valid_clobber,
	sparc_begin_codegeneration,
	sparc_end_codegeneration,
	NULL, /* get call abi */
	NULL, /* mark remat */
	NULL, /* get_pic_base */
	NULL, /* register_saved_by */
	sparc_handle_intrinsics,
	NULL, /* before_abi */
619 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc)
620 void be_init_arch_sparc(void)
622 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
623 lc_opt_entry_t *sparc_grp = lc_opt_get_grp(be_grp, "sparc");
625 lc_opt_add_table(sparc_grp, sparc_options);
627 be_register_isa_if("sparc", &sparc_isa_if);
628 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.cg");
629 sparc_init_transform();
630 sparc_init_emitter();