/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   The main arm backend driver file.
 * @author  Matthias Braun, Oliver Richter, Tobias Gneist
 */
#include "config.h"

#include <assert.h>
#include <stdlib.h>

#include "lc_opts.h"
#include "lc_opts_enum.h"

#include "irgwalk.h"
#include "irprog.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgopt.h"
#include "iroptimize.h"
#include "lowering.h"
#include "lower_calls.h"
#include "xmalloc.h"

#include "bearch.h"
#include "benode.h"
#include "besched.h"
#include "be.h"
#include "bemodule.h"
#include "beirg.h"
#include "begnuas.h"
#include "beflags.h"
#include "bespillslots.h"
#include "bespillutil.h"
#include "belistsched.h"

#include "bearch_arm_t.h"

#include "arm_new_nodes.h"
#include "gen_arm_regalloc_if.h"
#include "arm_transform.h"
#include "arm_optimize.h"
#include "arm_emitter.h"
#include "arm_map_regs.h"
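/**
 * Returns the frame entity referenced by a node, or NULL if the node does
 * not reference one (only FrameAddr and frame load/store nodes do).
 */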
static ir_entity *arm_get_frame_entity(const ir_node *irn)
{
    const arm_attr_t *attr = get_arm_attr_const(irn);

    if (is_arm_FrameAddr(irn)) {
        const arm_SymConst_attr_t *frame_attr = get_arm_SymConst_attr_const(irn);
        return frame_attr->entity;
    }
    if (attr->is_load_store) {
        const arm_load_store_attr_t *load_store_attr
            = get_arm_load_store_attr_const(irn);
        if (load_store_attr->is_frame_entity) {
            return load_store_attr->entity;
        }
    }
    return NULL;
}
/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void arm_set_stack_bias(ir_node *irn, int bias)
{
    if (is_arm_FrameAddr(irn)) {
        arm_SymConst_attr_t *attr = get_arm_SymConst_attr(irn);
        attr->fp_offset += bias;
    } else {
        arm_load_store_attr_t *attr = get_arm_load_store_attr(irn);
        assert(attr->base.is_load_store);
        attr->offset += bias;
    }
}
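/**
 * Returns the stack pointer change caused by a node: always 0, since no
 * ARM node modifies the stack pointer yet.
 */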
static int arm_get_sp_bias(const ir_node *irn)
{
    /* We don't have any nodes changing the stack pointer.
       We probably want to support post-/pre-increment/decrement later. */
    (void) irn;
    return 0;
}
/* fill register allocator interface */

static const arch_irn_ops_t arm_irn_ops = {
    arm_get_frame_entity,
    arm_set_stack_bias,
    arm_get_sp_bias,
    NULL,    /* get_op_estimated_cost   */
    NULL,    /* possible_memory_operand */
    NULL,    /* perform_memory_operand  */
};
/**
 * Transforms the standard Firm graph into an ARM Firm graph.
 */
static void arm_prepare_graph(ir_graph *irg)
{
    /* transform nodes into assembler instructions */
    arm_transform_graph(irg);

    /* do local optimizations (mainly CSE) */
    local_optimize_graph(irg);

    /* do code placement, to optimize the position of constants */
    place_code(irg);
}
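/**
 * Walker: collects all Reload and frame load nodes that still lack a
 * frame entity and announces them to the frame entity coalescer.
 */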
static void arm_collect_frame_entity_nodes(ir_node *node, void *data)
{
    be_fec_env_t  *env = (be_fec_env_t*)data;
    const ir_mode *mode;
    int            align;
    ir_entity     *entity;
    const arm_load_store_attr_t *attr;

    if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
        mode  = get_irn_mode(node);
        align = get_mode_size_bytes(mode);
        be_node_needs_frame_entity(env, node, mode, align);
        return;
    }

    switch (get_arm_irn_opcode(node)) {
    case iro_arm_Ldf:
    case iro_arm_Ldr:
        break;
    default:
        return;
    }

    attr   = get_arm_load_store_attr_const(node);
    entity = attr->entity;
    mode   = attr->load_store_mode;
    align  = get_mode_size_bytes(mode);
    if (entity != NULL)
        return;
    if (!attr->is_frame_entity)
        return;
    be_node_needs_frame_entity(env, node, mode, align);
}
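/**
 * Callback of the frame entity coalescer: assigns the selected entity to a
 * generic backend node or an ARM load/store node.
 */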
static void arm_set_frame_entity(ir_node *node, ir_entity *entity)
{
    if (is_be_node(node)) {
        be_node_set_frame_entity(node, entity);
    } else {
        arm_load_store_attr_t *attr = get_arm_load_store_attr(node);
        attr->entity = entity;
    }
}
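/**
 * Replaces a be_Reload node by an arm_Ldr from its spill slot, preserving
 * the schedule position and the result register.
 */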
static void transform_Reload(ir_node *node)
{
    ir_node   *block  = get_nodes_block(node);
    dbg_info  *dbgi   = get_irn_dbg_info(node);
    ir_node   *ptr    = get_irn_n(node, n_be_Reload_frame);
    ir_node   *mem    = get_irn_n(node, n_be_Reload_mem);
    ir_mode   *mode   = get_irn_mode(node);
    ir_entity *entity = be_get_frame_entity(node);
    const arch_register_t *reg;
    ir_node   *load;
    ir_node   *proj;

    ir_node   *sched_point = sched_prev(node);

    load = new_bd_arm_Ldr(dbgi, block, ptr, mem, mode, entity, false, 0, true);
    sched_add_after(sched_point, load);
    sched_remove(node);

    proj = new_rd_Proj(dbgi, load, mode, pn_arm_Ldr_res);

    reg = arch_get_irn_register(node);
    arch_set_irn_register(proj, reg);

    exchange(node, proj);
}
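/**
 * Replaces a be_Spill node by an arm_Str to its spill slot.
 */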
static void transform_Spill(ir_node *node)
{
    ir_node   *block  = get_nodes_block(node);
    dbg_info  *dbgi   = get_irn_dbg_info(node);
    ir_node   *ptr    = get_irn_n(node, n_be_Spill_frame);
    ir_graph  *irg    = get_irn_irg(node);
    ir_node   *mem    = get_irg_no_mem(irg);
    ir_node   *val    = get_irn_n(node, n_be_Spill_val);
    ir_mode   *mode   = get_irn_mode(val);
    ir_entity *entity = be_get_frame_entity(node);
    ir_node   *sched_point;
    ir_node   *store;

    sched_point = sched_prev(node);
    store = new_bd_arm_Str(dbgi, block, ptr, val, mem, mode, entity, false, 0,
                           true);
    sched_remove(node);
    sched_add_after(sched_point, store);

    exchange(node, store);
}
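/**
 * Block walker: materializes all remaining be_Spill/be_Reload nodes as real
 * ARM store/load instructions after register allocation.
 */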
static void arm_after_ra_walker(ir_node *block, void *data)
{
    ir_node *node, *prev;
    (void) data;

    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);

        if (be_is_Reload(node)) {
            transform_Reload(node);
        } else if (be_is_Spill(node)) {
            transform_Spill(node);
        }
    }
}
/**
 * Called immediately before the emit phase.
 */
static void arm_finish_irg(ir_graph *irg)
{
    be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
    bool               at_begin     = stack_layout->sp_relative;
    be_fec_env_t      *fec_env      = be_new_frame_entity_coalescer(irg);

    irg_walk_graph(irg, NULL, arm_collect_frame_entity_nodes, fec_env);
    be_assign_entities(fec_env, arm_set_frame_entity, at_begin);
    be_free_frame_entity_coalescer(fec_env);

    irg_block_walk_graph(irg, NULL, arm_after_ra_walker, NULL);

    /* fix stack entity offsets */
    be_abi_fix_stack_nodes(irg);
    be_abi_fix_stack_bias(irg);

    /* do peephole optimizations */
    arm_peephole_optimization(irg);
}
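/**
 * Called before register allocation to fix the scheduling of
 * flag-producing nodes.
 */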
static void arm_before_ra(ir_graph *irg)
{
    be_sched_fix_flags(irg, &arm_reg_classes[CLASS_arm_flags], NULL, NULL);
}
/**
 * Maps all intrinsic calls that the backend supports
 * and maps all instructions the backend does not support
 * to runtime calls.
 */
static void arm_handle_intrinsics(void)
{
    ir_type *tp, *int_tp, *uint_tp;
    i_record records[8];
    size_t   n_records = 0;

    runtime_rt rt_iDiv, rt_uDiv, rt_iMod, rt_uMod;

#define ID(x) new_id_from_chars(x, sizeof(x)-1)

    int_tp  = get_type_for_mode(mode_Is);
    uint_tp = get_type_for_mode(mode_Iu);
    /* ARM has neither a signed div instruction ... */
    {
        i_instr_record *map_Div = &records[n_records++].i_instr;

        tp = new_type_method(2, 1);
        set_method_param_type(tp, 0, int_tp);
        set_method_param_type(tp, 1, int_tp);
        set_method_res_type(tp, 0, int_tp);

        rt_iDiv.ent             = new_entity(get_glob_type(), ID("__divsi3"), tp);
        set_entity_ld_ident(rt_iDiv.ent, ID("__divsi3"));
        rt_iDiv.mode            = mode_T;
        rt_iDiv.res_mode        = mode_Is;
        rt_iDiv.mem_proj_nr     = pn_Div_M;
        rt_iDiv.regular_proj_nr = pn_Div_X_regular;
        rt_iDiv.exc_proj_nr     = pn_Div_X_except;
        rt_iDiv.res_proj_nr     = pn_Div_res;

        add_entity_linkage(rt_iDiv.ent, IR_LINKAGE_CONSTANT);
        set_entity_visibility(rt_iDiv.ent, ir_visibility_external);

        map_Div->kind     = INTRINSIC_INSTR;
        map_Div->op       = op_Div;
        map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
        map_Div->ctx      = &rt_iDiv;
    }
    /* ... nor an unsigned div instruction ... */
    {
        i_instr_record *map_Div = &records[n_records++].i_instr;

        tp = new_type_method(2, 1);
        set_method_param_type(tp, 0, uint_tp);
        set_method_param_type(tp, 1, uint_tp);
        set_method_res_type(tp, 0, uint_tp);

        rt_uDiv.ent             = new_entity(get_glob_type(), ID("__udivsi3"), tp);
        set_entity_ld_ident(rt_uDiv.ent, ID("__udivsi3"));
        rt_uDiv.mode            = mode_T;
        rt_uDiv.res_mode        = mode_Iu;
        rt_uDiv.mem_proj_nr     = pn_Div_M;
        rt_uDiv.regular_proj_nr = pn_Div_X_regular;
        rt_uDiv.exc_proj_nr     = pn_Div_X_except;
        rt_uDiv.res_proj_nr     = pn_Div_res;

        set_entity_visibility(rt_uDiv.ent, ir_visibility_external);

        map_Div->kind     = INTRINSIC_INSTR;
        map_Div->op       = op_Div;
        map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
        map_Div->ctx      = &rt_uDiv;
    }
    /* ... nor a signed mod instruction ... */
    {
        i_instr_record *map_Mod = &records[n_records++].i_instr;

        tp = new_type_method(2, 1);
        set_method_param_type(tp, 0, int_tp);
        set_method_param_type(tp, 1, int_tp);
        set_method_res_type(tp, 0, int_tp);

        rt_iMod.ent             = new_entity(get_glob_type(), ID("__modsi3"), tp);
        set_entity_ld_ident(rt_iMod.ent, ID("__modsi3"));
        rt_iMod.mode            = mode_T;
        rt_iMod.res_mode        = mode_Is;
        rt_iMod.mem_proj_nr     = pn_Mod_M;
        rt_iMod.regular_proj_nr = pn_Mod_X_regular;
        rt_iMod.exc_proj_nr     = pn_Mod_X_except;
        rt_iMod.res_proj_nr     = pn_Mod_res;

        set_entity_visibility(rt_iMod.ent, ir_visibility_external);

        map_Mod->kind     = INTRINSIC_INSTR;
        map_Mod->op       = op_Mod;
        map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
        map_Mod->ctx      = &rt_iMod;
    }
    /* ... nor an unsigned mod. */
    {
        i_instr_record *map_Mod = &records[n_records++].i_instr;

        tp = new_type_method(2, 1);
        set_method_param_type(tp, 0, uint_tp);
        set_method_param_type(tp, 1, uint_tp);
        set_method_res_type(tp, 0, uint_tp);

        rt_uMod.ent             = new_entity(get_glob_type(), ID("__umodsi3"), tp);
        set_entity_ld_ident(rt_uMod.ent, ID("__umodsi3"));
        rt_uMod.mode            = mode_T;
        rt_uMod.res_mode        = mode_Iu;
        rt_uMod.mem_proj_nr     = pn_Mod_M;
        rt_uMod.regular_proj_nr = pn_Mod_X_regular;
        rt_uMod.exc_proj_nr     = pn_Mod_X_except;
        rt_uMod.res_proj_nr     = pn_Mod_res;

        set_entity_visibility(rt_uMod.ent, ir_visibility_external);

        map_Mod->kind     = INTRINSIC_INSTR;
        map_Mod->op       = op_Mod;
        map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
        map_Mod->ctx      = &rt_uMod;
    }
    lower_intrinsics(records, n_records, /*part_block_used=*/0);
}
extern const arch_isa_if_t arm_isa_if;
static arm_isa_t arm_isa_template = {
    {
        &arm_isa_if,             /* isa interface */
        N_ARM_REGISTERS,
        arm_registers,
        N_ARM_CLASSES,
        arm_reg_classes,
        &arm_registers[REG_SP],  /* stack pointer */
        &arm_registers[REG_R11], /* base pointer */
        2,                       /* power of two stack alignment for calls, 2^2 == 4 */
        NULL,                    /* main environment */
        7,                       /* spill costs */
        5,                       /* reload costs */
        true,                    /* we do have custom abi handling */
    },
    ARM_FPU_ARCH_FPE,            /* FPU architecture */
};
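/**
 * Initializes the ARM backend: sets up the register and opcode data
 * structures.
 */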
static void arm_init(void)
{
    arm_register_init();

    arm_create_opcodes(&arm_irn_ops);
}

static void arm_finish(void)
{
    arm_free_opcodes();
}
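/**
 * Creates the architecture environment for one compilation unit and opens
 * the assembler output.
 */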
static arch_env_t *arm_begin_codegeneration(const be_main_env_t *env)
{
    arm_isa_t *isa = XMALLOC(arm_isa_t);
    *isa = arm_isa_template;

    be_gas_emit_types = false;

    be_emit_init(env->file_handle);
    be_gas_begin_compilation_unit(env);

    return &isa->base;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void arm_end_codegeneration(void *self)
{
    arm_isa_t *isa = (arm_isa_t*)self;

    be_gas_end_compilation_unit(isa->base.main_env);
    be_emit_exit();
    free(self);
}
/**
 * Allows or disallows the creation of Mux nodes for the given Phi nodes.
 * @return 1 if allowed, 0 otherwise
 */
static int arm_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                              ir_node *mux_true)
{
    (void) sel;
    (void) mux_false;
    (void) mux_true;
    return false;
}
static asm_constraint_flags_t arm_parse_asm_constraint(const char **c)
{
    /* asm not supported */
    (void) c;
    return ASM_CONSTRAINT_FLAG_INVALID;
}
static int arm_is_valid_clobber(const char *clobber)
{
    (void) clobber;
    return false;
}
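/**
 * Performs whole-program lowerings required by the ARM backend: compound
 * call parameters, switch statements and CopyB nodes.
 */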
static void arm_lower_for_target(void)
{
    ir_mode *mode_gp = arm_reg_classes[CLASS_arm_gp].mode;
    size_t i, n_irgs = get_irp_n_irgs();

    /* lower compound param handling */
    lower_calls_with_compounds(LF_RETURN_HIDDEN);

    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);
        lower_switch(irg, 4, 256, mode_gp);
    }

    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);
        /* Turn all small CopyBs into loads/stores and all bigger CopyBs into
         * memcpy calls.
         * TODO: These constants need arm-specific tuning. */
        lower_CopyB(irg, 31, 32, false);
    }
}
/**
 * Returns the libFirm configuration parameters for this backend.
 */
static const backend_params *arm_get_libfirm_params(void)
{
    static ir_settings_arch_dep_t ad = {
        1,    /* allow subs */
        1,    /* Muls are fast enough on ARM but ... */
        31,   /* ... one shift would possibly be better */
        NULL, /* no evaluator function */
        0,    /* SMUL is needed, only in Arch M */
        0,    /* UMUL is needed, only in Arch M */
        32,   /* SMUL & UMUL available for 32 bit */
    };
    static backend_params p = {
        0,     /* don't support inline assembler yet */
        1,     /* support Rotl nodes */
        1,     /* big endian */
        1,     /* modulo shift efficient */
        0,     /* non-modulo shift not efficient */
        &ad,   /* arch dependent settings */
        arm_is_mux_allowed, /* allow_ifconv function */
        32,    /* machine size */
        NULL,  /* float arithmetic mode (TODO) */
        NULL,  /* long long type */
        NULL,  /* unsigned long long type */
        NULL,  /* long double type */
        0,     /* no trampoline support: size 0 */
        0,     /* no trampoline support: align 0 */
        NULL,  /* no trampoline support: no trampoline builder */
        4      /* alignment of stack parameter */
    };

    return &p;
}
/* supported FPU architectures */
static const lc_opt_enum_int_items_t arm_fpu_items[] = {
    { "softfloat", ARM_FPU_ARCH_SOFTFLOAT },
    { "fpe",       ARM_FPU_ARCH_FPE },
    { "fpa",       ARM_FPU_ARCH_FPA },
    { "vfp1xd",    ARM_FPU_ARCH_VFP_V1xD },
    { "vfp1",      ARM_FPU_ARCH_VFP_V1 },
    { "vfp2",      ARM_FPU_ARCH_VFP_V2 },
    { NULL,        0 }
};

static lc_opt_enum_int_var_t arch_fpu_var = {
    &arm_isa_template.fpu_arch, arm_fpu_items
};

static const lc_opt_table_entry_t arm_options[] = {
    LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
    LC_OPT_LAST
};
const arch_isa_if_t arm_isa_if = {
    arm_init,
    arm_finish,
    arm_get_libfirm_params,
    arm_lower_for_target,
    arm_parse_asm_constraint,
    arm_is_valid_clobber,

    arm_begin_codegeneration,
    arm_end_codegeneration,

    NULL,                  /* get call abi */
    NULL,                  /* mark remat */
    NULL,                  /* get_pic_base */
    be_new_spill,
    be_new_reload,
    NULL,                  /* register_saved_by */

    arm_handle_intrinsics, /* handle_intrinsics */
    NULL,                  /* before_abi */
    arm_prepare_graph,
    arm_before_ra,
    arm_finish_irg,
    arm_gen_routine,       /* emit */
};
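/**
 * Module constructor: registers the ARM backend and its command line
 * options with the libFirm backend framework.
 */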
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_arm)
void be_init_arch_arm(void)
{
    lc_opt_entry_t *be_grp  = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *arm_grp = lc_opt_get_grp(be_grp, "arm");

    lc_opt_add_table(arm_grp, arm_options);

    be_register_isa_if("arm", &arm_isa_if);

    arm_init_transform();
    arm_init_emitter();
}