/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   The main arm backend driver file.
 * @author  Matthias Braun, Oliver Richter, Tobias Gneist
 */
#include "lc_opts_enum.h"

#include "pseudo_irg.h"
#include "iroptimize.h"

#include "../bearch.h"
#include "../benode.h"
#include "../belower.h"
#include "../besched.h"
#include "../bemachine.h"
#include "../beilpsched.h"
#include "../bemodule.h"
#include "../bespillslots.h"
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"

#include "bearch_arm_t.h"

#include "arm_new_nodes.h"
#include "gen_arm_regalloc_if.h"
#include "arm_transform.h"
#include "arm_optimize.h"
#include "arm_emitter.h"
#include "arm_map_regs.h"
static arch_irn_class_t arm_classify(const ir_node *irn)
{
	(void) irn;
	/* TODO: we should mark reload/spill instructions and classify them here */
	return 0;
}
static ir_entity *arm_get_frame_entity(const ir_node *irn)
{
	const arm_attr_t *attr = get_arm_attr_const(irn);

	if (is_arm_FrameAddr(irn)) {
		const arm_SymConst_attr_t *attr = get_irn_generic_attr_const(irn);
		return attr->entity;
	}
	if (attr->is_load_store) {
		const arm_load_store_attr_t *load_store_attr
			= get_arm_load_store_attr_const(irn);
		if (load_store_attr->is_frame_entity) {
			return load_store_attr->entity;
		}
	}
	return NULL;
}
static void arm_set_frame_entity(ir_node *irn, ir_entity *ent)
{
	(void) irn;
	(void) ent;
	panic("arm_set_frame_entity() called. This should not happen.");
}
/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void arm_set_stack_bias(ir_node *irn, int bias)
{
	if (is_arm_FrameAddr(irn)) {
		arm_SymConst_attr_t *attr = get_irn_generic_attr(irn);
		attr->fp_offset += bias;
	} else {
		arm_load_store_attr_t *attr = get_arm_load_store_attr(irn);
		assert(attr->base.is_load_store);
		attr->offset += bias;
	}
}
static int arm_get_sp_bias(const ir_node *irn)
{
	(void) irn;
	/* We don't have any nodes changing the stack pointer.
	   We probably want to support post-/pre increment/decrement later. */
	return 0;
}
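/* Illustrative only (such nodes are not generated yet): a post-indexed
 * access like
 *     ldr r0, [sp], #4
 * loads through sp and then adds 4 to it in the same instruction; a node
 * emitting this would have to report a bias of 4 here. */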
/* fill register allocator interface */

static const arch_irn_ops_t arm_irn_ops = {
	get_arm_in_req,
	arm_classify,
	arm_get_frame_entity,
	arm_set_frame_entity,
	arm_set_stack_bias,
	arm_get_sp_bias,
	NULL, /* get_inverse             */
	NULL, /* get_op_estimated_cost   */
	NULL, /* possible_memory_operand */
	NULL, /* perform_memory_operand  */
};
/**
 * Transforms the standard Firm graph into an ARM firm graph.
 */
static void arm_prepare_graph(void *self)
{
	arm_code_gen_t *cg = self;

	/* transform nodes into assembler instructions */
	arm_transform_graph(cg);

	/* do local optimizations (mainly CSE) */
	local_optimize_graph(cg->irg);

	if (cg->dump)
		dump_ir_graph(cg->irg, "transformed");

	/* do code placement, to optimize the position of constants */
	place_code(cg->irg);

	if (cg->dump)
		dump_ir_graph(cg->irg, "place");
}
/**
 * Called immediately before the emit phase.
 */
static void arm_finish_irg(void *self)
{
	arm_code_gen_t *cg = self;

	/* do peephole optimizations and fix stack offsets */
	arm_peephole_optimization(cg);
}
/**
 * Rematerializes a flags-producing node by copying it to the position right
 * after @p after (used when the flags value is clobbered between definition
 * and use).
 */
static ir_node *arm_flags_remat(ir_node *node, ir_node *after)
{
	ir_node *block;
	ir_node *copy;

	if (is_Block(after)) {
		block = after;
	} else {
		block = get_nodes_block(after);
	}
	copy = exact_copy(node);
	set_nodes_block(copy, block);
	sched_add_after(after, copy);
	return copy;
}
static void arm_before_ra(void *self)
{
	arm_code_gen_t *cg = self;

	be_sched_fix_flags(cg->irg, &arm_reg_classes[CLASS_arm_flags],
	                   NULL, arm_flags_remat);
}
static void transform_Reload(ir_node *node)
{
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *ptr    = get_irn_n(node, be_pos_Reload_frame);
	ir_node   *mem    = get_irn_n(node, be_pos_Reload_mem);
	ir_mode   *mode   = get_irn_mode(node);
	ir_entity *entity = be_get_frame_entity(node);
	const arch_register_t *reg;
	ir_node   *proj;
	ir_node   *load;

	ir_node *sched_point = sched_prev(node);

	load = new_bd_arm_Ldr(dbgi, block, ptr, mem, mode, entity, false, 0, true);
	sched_add_after(sched_point, load);
	sched_remove(node);

	proj = new_rd_Proj(dbgi, load, mode, pn_arm_Ldr_res);

	reg = arch_get_irn_register(node);
	arch_set_irn_register(proj, reg);

	exchange(node, proj);
}
static void transform_Spill(ir_node *node)
{
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *ptr    = get_irn_n(node, be_pos_Spill_frame);
	ir_node   *mem    = new_NoMem();
	ir_node   *val    = get_irn_n(node, be_pos_Spill_val);
	ir_mode   *mode   = get_irn_mode(val);
	ir_entity *entity = be_get_frame_entity(node);
	ir_node   *sched_point;
	ir_node   *store;

	sched_point = sched_prev(node);
	store = new_bd_arm_Str(dbgi, block, ptr, val, mem, mode, entity, false, 0,
	                       false);
	sched_remove(node);
	sched_add_after(sched_point, store);

	exchange(node, store);
}
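/* Sketch of the effect of the two transformations above; the register and
 * the frame-entity offset are chosen by register allocation and frame
 * layout, shown here only for illustration:
 *     be_Spill  of val  ->  str rX, [frame base, #<entity offset>]
 *     be_Reload         ->  ldr rX, [frame base, #<entity offset>]
 */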
static void arm_after_ra_walker(ir_node *block, void *data)
{
	ir_node *node, *prev;
	(void) data;

	/* walk the schedule backwards, so exchanging a node does not
	 * disturb the iteration */
	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);

		if (be_is_Reload(node)) {
			transform_Reload(node);
		} else if (be_is_Spill(node)) {
			transform_Spill(node);
		}
	}
}
static void arm_after_ra(void *self)
{
	arm_code_gen_t *cg = self;

	be_coalesce_spillslots(cg->irg);
	irg_block_walk_graph(cg->irg, NULL, arm_after_ra_walker, NULL);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void arm_emit_and_done(void *self)
{
	arm_code_gen_t *cg  = self;
	ir_graph       *irg = cg->irg;

	arm_gen_routine(cg, irg);

	/* de-allocate code generator */
	del_set(cg->reg_set);
	free(cg);
}
static void *arm_cg_init(ir_graph *irg);

static const arch_code_generator_if_t arm_code_gen_if = {
	arm_cg_init,
	NULL,              /* get_pic_base */
	NULL,              /* before abi introduce */
	arm_prepare_graph,
	NULL,              /* spill */
	arm_before_ra,     /* before register allocation hook */
	arm_after_ra,      /* after register allocation hook */
	arm_finish_irg,
	arm_emit_and_done,
};
/**
 * Initializes the code generator.
 */
static void *arm_cg_init(ir_graph *irg)
{
	static ir_type *int_tp = NULL;
	arm_isa_t      *isa = (arm_isa_t *) be_get_irg_arch_env(irg);
	arm_code_gen_t *cg;

	if (int_tp == NULL) {
		/* create an integer type with machine size */
		int_tp = new_type_primitive(mode_Is);
	}

	cg               = XMALLOC(arm_code_gen_t);
	cg->impl         = &arm_code_gen_if;
	cg->irg          = irg;
	cg->reg_set      = new_set(arm_cmp_irn_reg_assoc, 1024);
	cg->isa          = isa;
	cg->int_tp       = int_tp;
	cg->have_fp_insn = 0;
	cg->dump         = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;

	FIRM_DBG_REGISTER(cg->mod, "firm.be.arm.cg");

	/* enter the current code generator */
	isa->cg = cg;

	return (arch_code_generator_t *)cg;
}
/**
 * Maps all intrinsic calls that the backend supports
 * and maps all instructions the backend does not support
 * to runtime calls.
 */
static void arm_handle_intrinsics(void)
{
	ir_type *tp, *int_tp, *uint_tp;
	i_record records[4];
	int n_records = 0;

	runtime_rt rt_iDiv, rt_uDiv, rt_iMod, rt_uMod;

#define ID(x) new_id_from_chars(x, sizeof(x)-1)

	int_tp  = new_type_primitive(mode_Is);
	uint_tp = new_type_primitive(mode_Iu);
	/* ARM has neither a signed div instruction ... */
	{
		i_instr_record *map_Div = &records[n_records++].i_instr;

		tp = new_type_method(2, 1);
		set_method_param_type(tp, 0, int_tp);
		set_method_param_type(tp, 1, int_tp);
		set_method_res_type(tp, 0, int_tp);

		rt_iDiv.ent             = new_entity(get_glob_type(), ID("__divsi3"), tp);
		set_entity_ld_ident(rt_iDiv.ent, ID("__divsi3"));
		rt_iDiv.mode            = mode_T;
		rt_iDiv.res_mode        = mode_Is;
		rt_iDiv.mem_proj_nr     = pn_Div_M;
		rt_iDiv.regular_proj_nr = pn_Div_X_regular;
		rt_iDiv.exc_proj_nr     = pn_Div_X_except;
		rt_iDiv.exc_mem_proj_nr = pn_Div_M;
		rt_iDiv.res_proj_nr     = pn_Div_res;

		add_entity_linkage(rt_iDiv.ent, IR_LINKAGE_CONSTANT);
		set_entity_visibility(rt_iDiv.ent, ir_visibility_external);

		map_Div->kind     = INTRINSIC_INSTR;
		map_Div->op       = op_Div;
		map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Div->ctx      = &rt_iDiv;
	}
	/* ... nor an unsigned div instruction ... */
	{
		i_instr_record *map_Div = &records[n_records++].i_instr;

		tp = new_type_method(2, 1);
		set_method_param_type(tp, 0, uint_tp);
		set_method_param_type(tp, 1, uint_tp);
		set_method_res_type(tp, 0, uint_tp);

		rt_uDiv.ent             = new_entity(get_glob_type(), ID("__udivsi3"), tp);
		set_entity_ld_ident(rt_uDiv.ent, ID("__udivsi3"));
		rt_uDiv.mode            = mode_T;
		rt_uDiv.res_mode        = mode_Iu;
		rt_uDiv.mem_proj_nr     = pn_Div_M;
		rt_uDiv.regular_proj_nr = pn_Div_X_regular;
		rt_uDiv.exc_proj_nr     = pn_Div_X_except;
		rt_uDiv.exc_mem_proj_nr = pn_Div_M;
		rt_uDiv.res_proj_nr     = pn_Div_res;

		set_entity_visibility(rt_uDiv.ent, ir_visibility_external);

		map_Div->kind     = INTRINSIC_INSTR;
		map_Div->op       = op_Div;
		map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Div->ctx      = &rt_uDiv;
	}
	/* ... nor a signed mod instruction ... */
	{
		i_instr_record *map_Mod = &records[n_records++].i_instr;

		tp = new_type_method(2, 1);
		set_method_param_type(tp, 0, int_tp);
		set_method_param_type(tp, 1, int_tp);
		set_method_res_type(tp, 0, int_tp);

		rt_iMod.ent             = new_entity(get_glob_type(), ID("__modsi3"), tp);
		set_entity_ld_ident(rt_iMod.ent, ID("__modsi3"));
		rt_iMod.mode            = mode_T;
		rt_iMod.res_mode        = mode_Is;
		rt_iMod.mem_proj_nr     = pn_Mod_M;
		rt_iMod.regular_proj_nr = pn_Mod_X_regular;
		rt_iMod.exc_proj_nr     = pn_Mod_X_except;
		rt_iMod.exc_mem_proj_nr = pn_Mod_M;
		rt_iMod.res_proj_nr     = pn_Mod_res;

		set_entity_visibility(rt_iMod.ent, ir_visibility_external);

		map_Mod->kind     = INTRINSIC_INSTR;
		map_Mod->op       = op_Mod;
		map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Mod->ctx      = &rt_iMod;
	}
	/* ... nor an unsigned mod. */
	{
		i_instr_record *map_Mod = &records[n_records++].i_instr;

		tp = new_type_method(2, 1);
		set_method_param_type(tp, 0, uint_tp);
		set_method_param_type(tp, 1, uint_tp);
		set_method_res_type(tp, 0, uint_tp);

		rt_uMod.ent             = new_entity(get_glob_type(), ID("__umodsi3"), tp);
		set_entity_ld_ident(rt_uMod.ent, ID("__umodsi3"));
		rt_uMod.mode            = mode_T;
		rt_uMod.res_mode        = mode_Iu;
		rt_uMod.mem_proj_nr     = pn_Mod_M;
		rt_uMod.regular_proj_nr = pn_Mod_X_regular;
		rt_uMod.exc_proj_nr     = pn_Mod_X_except;
		rt_uMod.exc_mem_proj_nr = pn_Mod_M;
		rt_uMod.res_proj_nr     = pn_Mod_res;

		set_entity_visibility(rt_uMod.ent, ir_visibility_external);

		map_Mod->kind     = INTRINSIC_INSTR;
		map_Mod->op       = op_Mod;
		map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Mod->ctx      = &rt_uMod;
	}

	if (n_records > 0)
		lower_intrinsics(records, n_records, /*part_block_used=*/0);
}
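/* Example of the effect of this mapping: since the targeted ARM cores have
 * no hardware divider, a source-level division such as
 *     int q = a / b;
 * is rewritten by lower_intrinsics() into a runtime call, conceptually
 *     int q = __divsi3(a, b);
 * and analogously for the unsigned and modulo variants set up above. */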
static arm_isa_t arm_isa_template = {
	{
		&arm_isa_if,                    /* isa interface */
		&arm_gp_regs[REG_SP],           /* stack pointer */
		&arm_gp_regs[REG_R11],          /* base pointer */
		&arm_reg_classes[CLASS_arm_gp], /* static link pointer class */
		-1,                             /* stack direction */
		2,     /* power of two stack alignment for calls, 2^2 == 4 */
		NULL,                           /* main environment */
		7,                              /* spill costs */
		5,                              /* reload costs */
		true,                           /* we do have custom abi handling */
	},
	0,                /* use generic register names instead of SP, LR, PC */
	ARM_FPU_ARCH_FPE, /* FPU architecture */
	NULL,             /* current code generator */
};
/**
 * Initializes the backend ISA and opens the output file.
 */
static arch_env_t *arm_init(FILE *file_handle)
{
	static int inited = 0;
	arm_isa_t *isa;

	if (inited)
		return NULL;

	isa = XMALLOC(arm_isa_t);
	memcpy(isa, &arm_isa_template, sizeof(*isa));

	arm_register_init();

	isa->cg = NULL;
	be_emit_init(file_handle);

	arm_create_opcodes(&arm_irn_ops);
	arm_handle_intrinsics();

	be_gas_emit_types = false;

	/* needed for the debug support */
	be_gas_emit_switch_section(GAS_SECTION_TEXT);
	be_emit_irprintf("%stext0:\n", be_gas_get_private_prefix());
	be_emit_write_line();

	inited = 1;
	return &isa->arch_env;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void arm_done(void *self)
{
	arm_isa_t *isa = self;

	be_gas_emit_decls(isa->arch_env.main_env);

	be_emit_exit();
	free(self);
}
/**
 * Reports the number of register classes.
 * If we don't have fp instructions, report only GP
 * here to speed up register allocation (and make dumps
 * smaller and more readable).
 */
static unsigned arm_get_n_reg_class(void)
{
	return N_CLASSES;
}
/**
 * Returns the register class with the requested index.
 */
static const arch_register_class_t *arm_get_reg_class(unsigned i)
{
	assert(i < N_CLASSES);
	return &arm_reg_classes[i];
}
/**
 * Gets the register class which shall be used to store a value of a given mode.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
static const arch_register_class_t *arm_get_reg_class_for_mode(const ir_mode *mode)
{
	if (mode_is_float(mode))
		return &arm_reg_classes[CLASS_arm_fpa];
	else
		return &arm_reg_classes[CLASS_arm_gp];
}
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *arm_get_between_type(void *self)
{
	static ir_type *between_type = NULL;
	(void) self;

	if (between_type == NULL) {
		between_type = new_type_class(new_id_from_str("arm_between_type"));
		set_type_size_bytes(between_type, 0);
	}

	return between_type;
}
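/* Note: the between type is deliberately empty (size 0). This backend does
 * its own ABI handling (see arm_abi_prologue below), where return address
 * and old frame pointer are stored by the prologue's store-multiple rather
 * than being modeled as between-type members. */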
typedef struct {
	be_abi_call_flags_bits_t flags;
	const arch_env_t        *arch_env;
	ir_graph                *irg;
} arm_abi_env_t;

static void *arm_abi_init(const be_abi_call_t *call, const arch_env_t *arch_env, ir_graph *irg)
{
	arm_abi_env_t       *env = XMALLOC(arm_abi_env_t);
	be_abi_call_flags_t  fl  = be_abi_call_get_flags(call);
	env->flags    = fl.bits;
	env->irg      = irg;
	env->arch_env = arch_env;
	return env;
}
/**
 * Generates the routine prologue.
 *
 * @param self       The callback object.
 * @param mem        A pointer to the mem node. Update this if you define new memory.
 * @param reg_map    A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 * @param stack_bias Points to the current stack bias, can be modified if needed.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *arm_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
{
	arm_abi_env_t         *env = self;
	ir_node               *store;
	ir_graph              *irg;
	ir_node               *block;
	arch_register_class_t *gp;

	ir_node               *fp, *ip, *lr, *pc;
	ir_node               *sp = be_abi_reg_map_get(reg_map, env->arch_env->sp);

	(void) stack_bias;

	if (env->flags.try_omit_fp)
		return env->arch_env->sp;

	fp = be_abi_reg_map_get(reg_map, env->arch_env->bp);
	ip = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_R12]);
	lr = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_LR]);
	pc = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_PC]);

	gp    = &arm_reg_classes[CLASS_arm_gp];
	irg   = env->irg;
	block = get_irg_start_block(irg);

	/* mark bp register as ignore */
	be_set_constr_single_reg_out(get_Proj_pred(fp),
	                             get_Proj_proj(fp), env->arch_env->bp,
	                             arch_register_req_type_ignore);

	/* copy SP to IP (so we can spill it) */
	ip = be_new_Copy(gp, block, sp);
	be_set_constr_single_reg_out(ip, 0, &arm_gp_regs[REG_R12], 0);

	/* spill fp, ip, lr and pc with a single store-multiple */
	store = new_bd_arm_StoreStackM4Inc(NULL, block, sp, fp, ip, lr, pc, *mem);

	sp = new_r_Proj(store, env->arch_env->sp->reg_class->mode, pn_arm_StoreStackM4Inc_ptr);
	arch_set_irn_register(sp, env->arch_env->sp);
	*mem = new_r_Proj(store, mode_M, pn_arm_StoreStackM4Inc_M);

	/* frame pointer is ip-4 (because ip is our old sp value) */
	fp = new_bd_arm_Sub_imm(NULL, block, ip, 4, 0);
	arch_set_irn_register(fp, env->arch_env->bp);

	/* beware: we change the fp but the StoreStackM4Inc above wants the old
	 * fp value. We are not allowed to spill or anything in the prolog, so we
	 * have to enforce some order here. (scheduler/regalloc are too stupid
	 * to extract this order from register requirements) */
	add_irn_dep(fp, store);

	fp = be_new_Copy(gp, block, fp); /* XXX Gammelfix: only be_ nodes have custom register requirements */
	be_set_constr_single_reg_out(fp, 0, env->arch_env->bp,
	                             arch_register_req_type_ignore);
	arch_set_irn_register(fp, env->arch_env->bp);

	be_abi_reg_map_set(reg_map, env->arch_env->bp, fp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_R12], ip);
	be_abi_reg_map_set(reg_map, env->arch_env->sp, sp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_LR], lr);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_PC], pc);

	return env->arch_env->bp;
}
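/* For orientation, the nodes built above correspond to the classic APCS
 * frame setup (illustrative only; the actual encoding is chosen by the
 * emitter):
 *     mov   ip, sp
 *     stmfd sp!, {fp, ip, lr, pc}
 *     sub   fp, ip, #4
 */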
/**
 * Builds the ARM epilogue.
 */
static void arm_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
	arm_abi_env_t *env     = self;
	ir_node       *curr_sp = be_abi_reg_map_get(reg_map, env->arch_env->sp);
	ir_node       *curr_bp = be_abi_reg_map_get(reg_map, env->arch_env->bp);
	ir_node       *curr_pc = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_PC]);
	ir_node       *curr_lr = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_LR]);

	/* TODO: activate omit fp in epilogue */
	if (env->flags.try_omit_fp) {
		ir_node *incsp = be_new_IncSP(env->arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
		curr_sp = incsp;
	} else {
		ir_node *load_node;

		load_node = new_bd_arm_LoadStackM3Epilogue(NULL, bl, curr_bp, *mem);

		curr_bp = new_r_Proj(load_node, env->arch_env->bp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res0);
		curr_sp = new_r_Proj(load_node, env->arch_env->sp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res1);
		curr_pc = new_r_Proj(load_node, mode_Iu, pn_arm_LoadStackM3Epilogue_res2);
		*mem    = new_r_Proj(load_node, mode_M, pn_arm_LoadStackM3Epilogue_M);
		arch_set_irn_register(curr_bp, env->arch_env->bp);
		arch_set_irn_register(curr_sp, env->arch_env->sp);
		arch_set_irn_register(curr_pc, &arm_gp_regs[REG_PC]);
	}
	be_abi_reg_map_set(reg_map, env->arch_env->sp, curr_sp);
	be_abi_reg_map_set(reg_map, env->arch_env->bp, curr_bp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_LR], curr_lr);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_PC], curr_pc);
}
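/* The non-omit-fp path above is the matching APCS teardown: one
 * load-multiple through the frame pointer restores fp and sp and returns
 * by loading pc, roughly (illustrative only):
 *     ldmea fp, {fp, sp, pc}
 */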
static const be_abi_callbacks_t arm_abi_callbacks = {
	arm_abi_init,
	free,
	arm_get_between_type,
	arm_abi_prologue,
	arm_abi_epilogue,
};
/**
 * Gets the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
static void arm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
{
	ir_type  *tp;
	ir_mode  *mode;
	int       i;
	int       n = get_method_n_params(method_type);
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
	(void) self;

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0;
	call_flags.bits.store_args_sequential = 0;
	/* call_flags.bits.try_omit_fp: don't change this, we can handle both */
	call_flags.bits.fp_free               = 0;
	call_flags.bits.call_has_imm          = 1;

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &arm_abi_callbacks);

	for (i = 0; i < n; i++) {
		/* the first four parameters are passed in registers,
		 * the rest on the stack */
		if (i < 4) {
			be_abi_call_param_reg(abi, i, arm_get_RegParam_reg(i), ABI_CONTEXT_BOTH);
		} else {
			tp   = get_method_param_type(method_type, i);
			mode = get_type_mode(tp);
			be_abi_call_param_stack(abi, i, mode, 4, 0, 0, ABI_CONTEXT_BOTH);
		}
	}

	/* set return registers */
	n = get_method_n_ress(method_type);

	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
	if (n == 2) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

		be_abi_call_res_reg(abi, 0, &arm_gp_regs[REG_R0], ABI_CONTEXT_BOTH);
		be_abi_call_res_reg(abi, 1, &arm_gp_regs[REG_R1], ABI_CONTEXT_BOTH);
	} else if (n == 1) {
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ? &arm_fpa_regs[REG_F0] : &arm_gp_regs[REG_R0];
		be_abi_call_res_reg(abi, 0, reg, ABI_CONTEXT_BOTH);
	}
}
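/* Example of the resulting convention: for a call to
 *     long long f(int a, int b, int c, int d, int e);
 * a..d are passed in r0-r3 (via arm_get_RegParam_reg), e is passed on the
 * stack, and the 64bit result comes back in the register pair r0/r1. */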
static int arm_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
	(void) block_env;

	if (!is_arm_irn(irn))
		return -1;

	return 1;
}
/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *arm_get_code_generator_if(void *self)
{
	(void) self;
	return &arm_code_gen_if;
}
list_sched_selector_t arm_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded.
 */
static const list_sched_selector_t *arm_get_list_sched_selector(const void *self, list_sched_selector_t *selector)
{
	(void) self;
	memcpy(&arm_sched_selector, selector, sizeof(arm_sched_selector));
	/* arm_sched_selector.exectime = arm_sched_exectime; */
	arm_sched_selector.to_appear_in_schedule = arm_to_appear_in_schedule;
	return &arm_sched_selector;
}
static const ilp_sched_selector_t *arm_get_ilp_sched_selector(const void *self)
{
	(void) self;
	return NULL;
}
/**
 * Returns the necessary byte alignment for storing a register of a given class.
 */
static int arm_get_reg_class_alignment(const arch_register_class_t *cls)
{
	(void) cls;
	/* ARM is a 32 bit CPU, no need for other alignment */
	return 4;
}
static const be_execution_unit_t ***arm_get_allowed_execution_units(const ir_node *irn)
{
	(void) irn;
	/* TODO */
	panic("Unimplemented arm_get_allowed_execution_units()");
}
static const be_machine_t *arm_get_machine(const void *self)
{
	(void) self;
	/* TODO */
	panic("Unimplemented arm_get_machine()");
}
/**
 * Returns the irp irgs in the desired order.
 */
static ir_graph **arm_get_irg_list(const void *self, ir_graph ***irg_list)
{
	(void) self;
	(void) irg_list;
	return NULL;
}
/**
 * Allows or disallows the creation of Mux nodes for the given Phi nodes.
 * @return 1 if allowed, 0 otherwise
 */
static int arm_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                              ir_node *mux_true)
{
	(void) sel;
	(void) mux_false;
	(void) mux_true;
	return false;
}
static asm_constraint_flags_t arm_parse_asm_constraint(const char **c)
{
	/* asm not supported */
	(void) c;
	return ASM_CONSTRAINT_FLAG_INVALID;
}
static int arm_is_valid_clobber(const char *clobber)
{
	/* asm not supported */
	(void) clobber;
	return 0;
}
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *arm_get_libfirm_params(void)
{
	static const ir_settings_if_conv_t ifconv = {
		4,                 /* maxdepth, doesn't matter for Mux-conversion */
		arm_is_mux_allowed /* allows or disallows Mux creation for given selector */
	};
	static ir_settings_arch_dep_t ad = {
		1,    /* allow subs */
		1,    /* Muls are fast enough on ARM but ... */
		31,   /* ... one shift would possibly be better */
		NULL, /* no evaluator function */
		0,    /* SMUL is needed, only in Arch M */
		0,    /* UMUL is needed, only in Arch M */
		32,   /* SMUL & UMUL available for 32 bit */
	};
	static backend_params p = {
		1,    /* need dword lowering */
		0,    /* don't support inline assembler yet */
		NULL, /* will be set later */
		NULL, /* but yet no creator function */
		NULL, /* context for create_intrinsic_fkt */
		NULL, /* ifconv_info will be set below */
		NULL, /* float arithmetic mode (TODO) */
		0,    /* no trampoline support: size 0 */
		0,    /* no trampoline support: align 0 */
		NULL, /* no trampoline support: no trampoline builder */
		4     /* alignment of stack parameter */
	};

	p.dep_param    = &ad;
	p.if_conv_info = &ifconv;
	return &p;
}
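/* "need dword lowering" above means 64bit integer operations are split
 * into pairs of 32bit operations before code selection; e.g. a 64bit add
 * maps to an add/add-with-carry pair (illustrative):
 *     adds r0, r0, r2    @ low words, sets carry
 *     adc  r1, r1, r3    @ high words plus carry
 */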
/* fpu set architectures. */
static const lc_opt_enum_int_items_t arm_fpu_items[] = {
	{ "softfloat", ARM_FPU_ARCH_SOFTFLOAT },
	{ "fpe",       ARM_FPU_ARCH_FPE },
	{ "fpa",       ARM_FPU_ARCH_FPA },
	{ "vfp1xd",    ARM_FPU_ARCH_VFP_V1xD },
	{ "vfp1",      ARM_FPU_ARCH_VFP_V1 },
	{ "vfp2",      ARM_FPU_ARCH_VFP_V2 },
	{ NULL,        0 }
};

static lc_opt_enum_int_var_t arch_fpu_var = {
	&arm_isa_template.fpu_arch, arm_fpu_items
};

static const lc_opt_table_entry_t arm_options[] = {
	LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
	LC_OPT_ENT_BOOL("gen_reg_names", "use generic register names", &arm_isa_template.gen_reg_names),
	LC_OPT_LAST
};
const arch_isa_if_t arm_isa_if = {
	arm_init,
	arm_done,
	NULL, /* handle_intrinsics */
	arm_get_n_reg_class,
	arm_get_reg_class,
	arm_get_reg_class_for_mode,
	arm_get_call_abi,
	arm_get_code_generator_if,
	arm_get_list_sched_selector,
	arm_get_ilp_sched_selector,
	arm_get_reg_class_alignment,
	arm_get_libfirm_params,
	arm_get_allowed_execution_units,
	arm_get_machine,
	arm_get_irg_list,
	NULL, /* mark remat */
	arm_parse_asm_constraint,
	arm_is_valid_clobber
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_arm);
void be_init_arch_arm(void)
{
	lc_opt_entry_t *be_grp  = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *arm_grp = lc_opt_get_grp(be_grp, "arm");

	lc_opt_add_table(arm_grp, arm_options);

	be_register_isa_if("arm", &arm_isa_if);
	arm_init_transform();
	arm_init_emitter();
}