/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   The main ARM backend driver file.
 * @author  Oliver Richter, Tobias Gneist
 */
#include "lc_opts_enum.h"

#include "pseudo_irg.h"
#include "iroptimize.h"

#include "../bearch_t.h"   /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"
#include "../bemachine.h"
#include "../beilpsched.h"
#include "../bemodule.h"
#include "../beirg_t.h"
#include "../bespillslots.h"
#include "../begnuas.h"

#include "bearch_arm_t.h"

#include "arm_new_nodes.h"        /* arm nodes interface */
#include "gen_arm_regalloc_if.h"  /* the generated interface (register type and class definitions) */
#include "arm_transform.h"
#include "arm_optimize.h"
#include "arm_emitter.h"
#include "arm_map_regs.h"

#define DEBUG_MODULE "firm.be.arm.isa"
/* TODO: ugly, but we need it to get access to the registers assigned to Phi nodes */
static set *cur_reg_set = NULL;
/**************************************************
 *  _ __ ___  __ _   __ _| | | ___   ___  _| |_
 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
 * | |  | __/ (_| | | (_| | | | (_) | (__ | | |
 * |_|  \___|\__, | \__,_|_|_|\___/ \___| |_|_|
 **************************************************/
/**
 * Return register requirements for an arm node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *arm_get_irn_reg_req(const ir_node *node,
                                                      int pos)
{
	long     node_pos = pos == -1 ? 0 : pos;
	ir_mode *mode     = get_irn_mode(node);

	if (is_Block(node) || mode == mode_X) {
		return arch_no_register_req;
	}

	if (mode == mode_T && pos < 0) {
		return arch_no_register_req;
	}

	if (is_Proj(node)) {
		if (mode == mode_M)
			return arch_no_register_req;

		if (pos >= 0)
			return arch_no_register_req;

		node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
		node     = skip_Proj_const(node);
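		/* Illustration: for a value produced as a Proj of a mode_T arm
		 * node, the Proj number selects which out-requirement of the
		 * underlying node is reported, e.g. Proj #1 asks for its second
		 * result slot. */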
	}

	/* get requirements for our own nodes */
	if (is_arm_irn(node)) {
		const arch_register_req_t *req;
		if (pos >= 0) {
			req = get_arm_in_req(node, pos);
		} else {
			req = get_arm_out_req(node, node_pos);
		}

		return req;
	}

	/* Unknown should be transformed by now */
	assert(!is_Unknown(node));
	return arch_no_register_req;
}
static void arm_set_irn_reg(ir_node *irn, const arch_register_t *reg)
{
	int pos = 0;

	if (get_irn_mode(irn) == mode_X) {
		return;
	}

	if (is_Proj(irn)) {
		pos = get_Proj_proj(irn);
		irn = skip_Proj(irn);
	}

	if (is_arm_irn(irn)) {
		const arch_register_t **slots;

		slots      = get_arm_slots(irn);
		slots[pos] = reg;
	} else {
		/* here we set the registers for the Phi nodes */
		arm_set_firm_reg(irn, reg, cur_reg_set);
	}
}
static const arch_register_t *arm_get_irn_reg(const ir_node *irn)
{
	int pos = 0;
	const arch_register_t *reg = NULL;

	if (is_Proj(irn)) {
		if (get_irn_mode(irn) == mode_X) {
			return NULL;
		}

		pos = get_Proj_proj(irn);
		irn = skip_Proj_const(irn);
	}

	if (is_arm_irn(irn)) {
		const arch_register_t **slots;
		slots = get_arm_slots(irn);
		reg   = slots[pos];
	} else {
		reg = arm_get_firm_reg(irn, cur_reg_set);
	}

	return reg;
}
static arch_irn_class_t arm_classify(const ir_node *irn)
{
	irn = skip_Proj_const(irn);

	if (is_cfop(irn)) {
		return arch_irn_class_branch;
	}
	else if (is_arm_irn(irn)) {
		return arch_irn_class_normal;
	}

	return 0;
}
static arch_irn_flags_t arm_get_flags(const ir_node *irn)
{
	arch_irn_flags_t flags = arch_irn_flags_none;

	if (is_Unknown(irn)) {
		return arch_irn_flags_ignore;
	}

	if (is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
		ir_node *pred = get_Proj_pred(irn);
		if (is_arm_irn(pred)) {
			flags = get_arm_out_flags(pred, get_Proj_proj(irn));
		}
		irn = pred;
	}

	if (is_arm_irn(irn)) {
		flags |= get_arm_flags(irn);
	}

	return flags;
}
static ir_entity *arm_get_frame_entity(const ir_node *irn) {
	(void) irn;
	/* we do NOT transform be_Spill or be_Reload nodes, so we never
	   have frame access using ARM nodes. */
	return NULL;
}

static void arm_set_frame_entity(ir_node *irn, ir_entity *ent) {
	(void) irn;
	(void) ent;
	panic("arm_set_frame_entity() called. This should not happen.");
}

/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void arm_set_stack_bias(ir_node *irn, int bias)
{
	(void) irn;
	(void) bias;
	/* TODO: correct offset if irn accesses the stack */
}

static int arm_get_sp_bias(const ir_node *irn)
{
	(void) irn;
	return 0;
}
/* fill register allocator interface */

static const arch_irn_ops_t arm_irn_ops = {
	arm_get_irn_reg_req,
	arm_set_irn_reg,
	arm_get_irn_reg,
	arm_classify,
	arm_get_flags,
	arm_get_frame_entity,
	arm_set_frame_entity,
	arm_set_stack_bias,
	arm_get_sp_bias,
	NULL,  /* get_inverse             */
	NULL,  /* get_op_estimated_cost   */
	NULL,  /* possible_memory_operand */
	NULL,  /* perform_memory_operand  */
};
/**************************************************
 *   ___ ___   __| | ___ __ _ ___ _ __   _| |_
 *  / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
 * | (_| (_) | (_| | __/ (_| | __/ | | |  | | |
 *  \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
 **************************************************/
/**
 * Transforms the standard Firm graph into
 * an ARM firm graph.
 */
static void arm_prepare_graph(void *self) {
	arm_code_gen_t *cg = self;

	/* transform nodes into assembler instructions */
	arm_transform_graph(cg);

	/* do local optimizations (mainly CSE) */
	local_optimize_graph(cg->irg);

	if (cg->dump)
		be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

	/* do code placement, to optimize the position of constants */
	place_code(cg->irg);

	if (cg->dump)
		be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
}
/**
 * Called immediately before the emit phase.
 */
static void arm_finish_irg(void *self)
{
	arm_code_gen_t *cg = self;

	/* do peephole optimizations and fix stack offsets */
	arm_peephole_optimization(cg);
}

/**
 * These are some hooks which must be filled but are probably not needed.
 */
static void arm_before_sched(void *self)
{
	/* Some stuff you need to do immediately before scheduling */
	(void) self;
}

static void arm_before_ra(void *self)
{
	/* Some stuff you need to do after scheduling but before register allocation */
	(void) self;
}

/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 */
static void arm_after_ra(void *self)
{
	arm_code_gen_t *cg = self;
	be_coalesce_spillslots(cg->birg);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void arm_emit_and_done(void *self) {
	arm_code_gen_t *cg  = self;
	ir_graph       *irg = cg->irg;

	arm_gen_routine(cg, irg);

	cur_reg_set = NULL;

	/* de-allocate code generator */
	del_set(cg->reg_set);
	free(self);
}
/**
 * Move a double floating point value into an integer register.
 * Place the move operation into block bl.
 *
 * Handle some special cases here:
 * 1.) A constant: simply split into two
 * 2.) A load: simply split into two
 */
static ir_node *convert_dbl_to_int(ir_node *bl, ir_node *arg, ir_node *mem,
                                   ir_node **resH, ir_node **resL) {
	if (is_Const(arg)) {
		tarval  *tv = get_Const_tarval(arg);
		unsigned v;

		/* get the upper 32 bits */
		v =            get_tarval_sub_bits(tv, 7);
		v = (v << 8) | get_tarval_sub_bits(tv, 6);
		v = (v << 8) | get_tarval_sub_bits(tv, 5);
		v = (v << 8) | get_tarval_sub_bits(tv, 4);
		*resH = new_Const_long(mode_Is, v);

		/* get the lower 32 bits */
		v =            get_tarval_sub_bits(tv, 3);
		v = (v << 8) | get_tarval_sub_bits(tv, 2);
		v = (v << 8) | get_tarval_sub_bits(tv, 1);
		v = (v << 8) | get_tarval_sub_bits(tv, 0);
		*resL = new_Const_long(mode_Is, v);
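		/* Worked example (an illustration, assuming IEEE-754 doubles):
		 * the constant 1.0 has the bit pattern 0x3FF0000000000000, so the
		 * four high bytes assemble to *resH = 0x3FF00000 and the four low
		 * bytes to *resL = 0x00000000; get_tarval_sub_bits(tv, i) returns
		 * byte i counted from the least significant end. */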
	} else if (is_Load(skip_Proj(arg))) {
		/* FIXME: handling of low/high depends on LE/BE here */
		panic("Unimplemented convert_dbl_to_int() case");
	}
	else {
		ir_graph *irg = current_ir_graph;
		ir_node  *conv;

		conv = new_rd_arm_fpaDbl2GP(NULL, irg, bl, arg, mem);
		/* move high/low */
		*resL = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_low);
		*resH = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_high);
		mem   = new_r_Proj(irg, bl, conv, mode_M,  pn_arm_fpaDbl2GP_M);
	}
	return mem;
}
/**
 * Move a single floating point value into an integer register.
 * Place the move operation into block bl.
 *
 * Handle some special cases here:
 * 1.) A constant: simply move
 * 2.) A load: simply load
 */
static ir_node *convert_sng_to_int(ir_node *bl, ir_node *arg)
{
	(void) bl;

	if (is_Const(arg)) {
		tarval  *tv = get_Const_tarval(arg);
		unsigned v;

		/* get the lower 32 bits */
		v =            get_tarval_sub_bits(tv, 3);
		v = (v << 8) | get_tarval_sub_bits(tv, 2);
		v = (v << 8) | get_tarval_sub_bits(tv, 1);
		v = (v << 8) | get_tarval_sub_bits(tv, 0);
		return new_Const_long(mode_Is, v);
	} else if (is_Load(skip_Proj(arg))) {
		ir_node *load;

		load = skip_Proj(arg);
	}
	panic("Unimplemented convert_sng_to_int() case");
}
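/* Example of what handle_calls() below does (a sketch): a call
 *     g(3.14, x)                      where g has type (double, int)
 * is rewritten so that the double travels in two GP register slots:
 *     g(hi32(3.14), lo32(3.14), x)    with the type (int, int, int)
 * because the ARM soft-float convention passes doubles in register pairs,
 * high word first.
 */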
/**
 * Convert the arguments of a call to support the
 * ARM calling convention of general purpose AND floating
 * point registers.
 */
static void handle_calls(ir_node *call, void *env)
{
	arm_code_gen_t *cg = env;
	int i, j, n, size, idx, flag, n_param, n_res, first_variadic;
	ir_type *mtp, *new_mtd, *new_tp[5];
	ir_node *new_in[5], **in;
	ir_node *bl;

	if (! is_Call(call))
		return;

	/* check, if we need conversions */
	n   = get_Call_n_params(call);
	mtp = get_Call_type(call);
	assert(get_method_n_params(mtp) == n);

	/* it's always enough to handle the first 4 parameters */
	if (n > 4)
		n = 4;
	flag = size = idx = 0;
	bl = get_nodes_block(call);
	for (i = 0; i < n; ++i) {
		ir_type *param_tp = get_method_param_type(mtp, i);

		if (is_compound_type(param_tp)) {
			/* an aggregate parameter: bad case */
			assert(0);
		}
		else {
			/* a primitive parameter */
			ir_mode *mode = get_type_mode(param_tp);

			if (mode_is_float(mode)) {
				if (get_mode_size_bits(mode) > 32) {
					ir_node *mem = get_Call_mem(call);

					/* Beware: ARM wants the high part first */
					size += 2 * 4;
					new_tp[idx]   = cg->int_tp;
					new_tp[idx+1] = cg->int_tp;
					mem = convert_dbl_to_int(bl, get_Call_param(call, i), mem, &new_in[idx], &new_in[idx+1]);
					idx += 2;
					set_Call_mem(call, mem);
				}
				else {
					size += 4;
					new_tp[idx] = cg->int_tp;
					new_in[idx] = convert_sng_to_int(bl, get_Call_param(call, i));
					++idx;
				}
				flag = 1;
			}
			else {
				size += 4;
				new_tp[idx] = param_tp;
				new_in[idx] = get_Call_param(call, i);
				++idx;
			}
		}

		if (size >= 16)
			break;
	}

	/* if flag is NOT set, no need to translate the method type */
	if (! flag)
		return;

	/* construct a new method type */
	n_param = get_method_n_params(mtp) - n + idx;
	n_res   = get_method_n_ress(mtp);
	new_mtd = new_d_type_method(get_type_ident(mtp), n_param, n_res, get_type_dbg_info(mtp));

	for (i = 0; i < idx; ++i)
		set_method_param_type(new_mtd, i, new_tp[i]);
	for (i = n, j = idx; i < get_method_n_params(mtp); ++i)
		set_method_param_type(new_mtd, j++, get_method_param_type(mtp, i));
	for (i = 0; i < n_res; ++i)
		set_method_res_type(new_mtd, i, get_method_res_type(mtp, i));

	set_method_calling_convention(new_mtd, get_method_calling_convention(mtp));
	first_variadic = get_method_first_variadic_param_index(mtp);
	if (first_variadic >= 0)
		set_method_first_variadic_param_index(new_mtd, first_variadic);

	if (is_lowered_type(mtp)) {
		mtp = get_associated_type(mtp);
	}
	set_lowered_type(mtp, new_mtd);

	set_Call_type(call, new_mtd);

	/* calculate new in array of the Call */
	NEW_ARR_A(ir_node *, in, n_param + 2);
	for (i = 0; i < idx; ++i)
		in[2 + i] = new_in[i];
	for (i = n, j = idx; i < get_method_n_params(mtp); ++i)
		in[2 + j++] = get_Call_param(call, i);

	in[0] = get_Call_mem(call);
	in[1] = get_Call_ptr(call);

	/* finally, change the call inputs */
	set_irn_in(call, n_param + 2, in);
}
/**
 * Handle graph transformations before the abi converter does its work.
 */
static void arm_before_abi(void *self) {
	arm_code_gen_t *cg = self;

	irg_walk_graph(cg->irg, NULL, handle_calls, cg);
}
static void *arm_cg_init(be_irg_t *birg);

static const arch_code_generator_if_t arm_code_gen_if = {
	arm_cg_init,
	NULL,             /* get_pic_base */
	arm_before_abi,   /* before abi introduce */
	arm_prepare_graph,
	NULL,             /* spill */
	arm_before_sched, /* before scheduling hook */
	arm_before_ra,    /* before register allocation hook */
	arm_after_ra,     /* after register allocation hook */
	arm_finish_irg,
	arm_emit_and_done,
};
/**
 * Initializes the code generator.
 */
static void *arm_cg_init(be_irg_t *birg) {
	static ir_type *int_tp = NULL;
	arm_isa_t      *isa = (arm_isa_t *)birg->main_env->arch_env;
	arm_code_gen_t *cg;

	if (! int_tp) {
		/* create an integer type with machine size */
		int_tp = new_type_primitive(new_id_from_chars("int", 3), mode_Is);
	}

	cg = XMALLOC(arm_code_gen_t);
	cg->impl         = &arm_code_gen_if;
	cg->irg          = birg->irg;
	cg->reg_set      = new_set(arm_cmp_irn_reg_assoc, 1024);
	cg->arch_env     = birg->main_env->arch_env;
	cg->isa          = isa;
	cg->birg         = birg;
	cg->int_tp       = int_tp;
	cg->have_fp_insn = 0;
	cg->unknown_gp   = NULL;
	cg->unknown_fpa  = NULL;
	cg->dump         = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;

	FIRM_DBG_REGISTER(cg->mod, "firm.be.arm.cg");

	cur_reg_set = cg->reg_set;

	/* enter the current code generator */
	isa->cg = cg;

	return (arch_code_generator_t *)cg;
}
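/* Effect of the mappings below (an illustration): after lower_intrinsics()
 * runs, a signed division
 *     q = a / b;
 * is replaced by a call to a runtime helper
 *     q = __divsi3(a, b);
 * __divsi3, __udivsi3, __modsi3 and __umodsi3 are the usual libgcc helper
 * names on ARM targets without a hardware divide instruction.
 */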
/**
 * Maps all intrinsic calls that the backend supports
 * and maps all instructions the backend does not support
 * to runtime calls.
 */
static void arm_handle_intrinsics(void) {
	ir_type *tp, *int_tp, *uint_tp;
	i_record records[8];
	int n_records = 0;

	runtime_rt rt_iDiv, rt_uDiv, rt_iMod, rt_uMod;

#define ID(x) new_id_from_chars(x, sizeof(x)-1)

	int_tp  = new_type_primitive(ID("int"), mode_Is);
	uint_tp = new_type_primitive(ID("uint"), mode_Iu);
	/* ARM has neither a signed div instruction ... */
	{
		i_instr_record *map_Div = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_iDiv"), 2, 1);
		set_method_param_type(tp, 0, int_tp);
		set_method_param_type(tp, 1, int_tp);
		set_method_res_type(tp, 0, int_tp);

		rt_iDiv.ent             = new_entity(get_glob_type(), ID("__divsi3"), tp);
		set_entity_ld_ident(rt_iDiv.ent, ID("__divsi3"));
		rt_iDiv.mode            = mode_T;
		rt_iDiv.res_mode        = mode_Is;
		rt_iDiv.mem_proj_nr     = pn_Div_M;
		rt_iDiv.regular_proj_nr = pn_Div_X_regular;
		rt_iDiv.exc_proj_nr     = pn_Div_X_except;
		rt_iDiv.exc_mem_proj_nr = pn_Div_M;
		rt_iDiv.res_proj_nr     = pn_Div_res;

		set_entity_visibility(rt_iDiv.ent, visibility_external_allocated);

		map_Div->kind     = INTRINSIC_INSTR;
		map_Div->op       = op_Div;
		map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Div->ctx      = &rt_iDiv;
	}
	/* ... nor an unsigned div instruction ... */
	{
		i_instr_record *map_Div = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_uDiv"), 2, 1);
		set_method_param_type(tp, 0, uint_tp);
		set_method_param_type(tp, 1, uint_tp);
		set_method_res_type(tp, 0, uint_tp);

		rt_uDiv.ent             = new_entity(get_glob_type(), ID("__udivsi3"), tp);
		set_entity_ld_ident(rt_uDiv.ent, ID("__udivsi3"));
		rt_uDiv.mode            = mode_T;
		rt_uDiv.res_mode        = mode_Iu;
		rt_uDiv.mem_proj_nr     = pn_Div_M;
		rt_uDiv.regular_proj_nr = pn_Div_X_regular;
		rt_uDiv.exc_proj_nr     = pn_Div_X_except;
		rt_uDiv.exc_mem_proj_nr = pn_Div_M;
		rt_uDiv.res_proj_nr     = pn_Div_res;

		set_entity_visibility(rt_uDiv.ent, visibility_external_allocated);

		map_Div->kind     = INTRINSIC_INSTR;
		map_Div->op       = op_Div;
		map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Div->ctx      = &rt_uDiv;
	}
	/* ... nor a signed mod instruction ... */
	{
		i_instr_record *map_Mod = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_iMod"), 2, 1);
		set_method_param_type(tp, 0, int_tp);
		set_method_param_type(tp, 1, int_tp);
		set_method_res_type(tp, 0, int_tp);

		rt_iMod.ent             = new_entity(get_glob_type(), ID("__modsi3"), tp);
		set_entity_ld_ident(rt_iMod.ent, ID("__modsi3"));
		rt_iMod.mode            = mode_T;
		rt_iMod.res_mode        = mode_Is;
		rt_iMod.mem_proj_nr     = pn_Mod_M;
		rt_iMod.regular_proj_nr = pn_Mod_X_regular;
		rt_iMod.exc_proj_nr     = pn_Mod_X_except;
		rt_iMod.exc_mem_proj_nr = pn_Mod_M;
		rt_iMod.res_proj_nr     = pn_Mod_res;

		set_entity_visibility(rt_iMod.ent, visibility_external_allocated);

		map_Mod->kind     = INTRINSIC_INSTR;
		map_Mod->op       = op_Mod;
		map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Mod->ctx      = &rt_iMod;
	}
	/* ... nor an unsigned mod. */
	{
		i_instr_record *map_Mod = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_uMod"), 2, 1);
		set_method_param_type(tp, 0, uint_tp);
		set_method_param_type(tp, 1, uint_tp);
		set_method_res_type(tp, 0, uint_tp);

		rt_uMod.ent             = new_entity(get_glob_type(), ID("__umodsi3"), tp);
		set_entity_ld_ident(rt_uMod.ent, ID("__umodsi3"));
		rt_uMod.mode            = mode_T;
		rt_uMod.res_mode        = mode_Iu;
		rt_uMod.mem_proj_nr     = pn_Mod_M;
		rt_uMod.regular_proj_nr = pn_Mod_X_regular;
		rt_uMod.exc_proj_nr     = pn_Mod_X_except;
		rt_uMod.exc_mem_proj_nr = pn_Mod_M;
		rt_uMod.res_proj_nr     = pn_Mod_res;

		set_entity_visibility(rt_uMod.ent, visibility_external_allocated);

		map_Mod->kind     = INTRINSIC_INSTR;
		map_Mod->op       = op_Mod;
		map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Mod->ctx      = &rt_uMod;
	}
	lower_intrinsics(records, n_records, /*part_block_used=*/0);
}
/*****************************************************************
 *  ____             _                  _   _____  _____
 * | _ \            | |                | | |_   _|/ ____|   /\
 * | |_) | __ _  ___| | _____ _ __   __| |  | |  | (___    /  \
 * |  _ < / _` |/ __| |/ / _ \ '_ \ / _` |  | |   \___ \  / /\ \
 * | |_) | (_| | (__|   <  __/ | | | (_| | _| |_  ____) |/ ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_||_____|_____/ /_/    \_\
 *****************************************************************/
static arm_isa_t arm_isa_template = {
	{
		&arm_isa_if,           /* isa interface */
		&arm_gp_regs[REG_SP],  /* stack pointer */
		&arm_gp_regs[REG_R11], /* base pointer */
		-1,                    /* stack direction */
		2,                     /* power of two stack alignment for calls, 2^2 == 4 */
		NULL,                  /* main environment */
		7,                     /* spill costs */
		5,                     /* reload costs */
	},
	0,                 /* use generic register names instead of SP, LR, PC */
	ARM_FPU_ARCH_FPE,  /* FPU architecture */
	NULL,              /* current code generator */
};
/**
 * Initializes the backend ISA and opens the output file.
 */
static arch_env_t *arm_init(FILE *file_handle) {
	static int inited = 0;
	arm_isa_t *isa;

	if (inited)
		return NULL;

	isa = XMALLOC(arm_isa_t);
	memcpy(isa, &arm_isa_template, sizeof(*isa));

	be_emit_init(file_handle);

	arm_create_opcodes(&arm_irn_ops);
	arm_handle_intrinsics();

	/* needed for the debug support */
	be_gas_emit_switch_section(GAS_SECTION_TEXT);
	be_emit_cstring(".Ltext0:\n");
	be_emit_write_line();

	/* we mark referenced global entities, so we can only emit those which
	 * are actually referenced. (Note: you mustn't use the type visited flag
	 * elsewhere in the backend)
	 */
	inc_master_type_visited();

	inited = 1;
	return &isa->arch_env;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void arm_done(void *self) {
	arm_isa_t *isa = self;

	be_gas_emit_decls(isa->arch_env.main_env, 1);

	be_emit_exit();
	free(self);
}
/**
 * Report the number of register classes.
 * If we don't have fp instructions, report only GP
 * here to speed up register allocation (and make dumps
 * smaller and more readable).
 */
static unsigned arm_get_n_reg_class(const void *self) {
	(void) self;
	return N_CLASSES;
}

/**
 * Return the register class with the requested index.
 */
static const arch_register_class_t *arm_get_reg_class(const void *self,
                                                      unsigned i) {
	(void) self;
	assert(i < N_CLASSES);
	return &arm_reg_classes[i];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *arm_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
	(void) self;
	if (mode_is_float(mode))
		return &arm_reg_classes[CLASS_arm_fpa];
	else
		return &arm_reg_classes[CLASS_arm_gp];
}
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *arm_get_between_type(void *self) {
	static ir_type   *between_type = NULL;
	static ir_entity *old_bp_ent   = NULL;
	(void) self;

	if (between_type == NULL) {
		ir_entity *ret_addr_ent;
		ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P);
		ir_type *old_bp_type   = new_type_primitive(new_id_from_str("bp"), mode_P);

		between_type = new_type_class(new_id_from_str("arm_between_type"));
		old_bp_ent   = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, new_id_from_str("return_addr"), ret_addr_type);

		set_entity_offset(old_bp_ent, 0);
		set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
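		/* Resulting layout (an illustration):
		 *     offset 4: return address
		 *     offset 0: old base pointer
		 * i.e. 8 bytes sitting between the stack arguments and the locals. */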
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
	}

	return between_type;
}
typedef struct {
	be_abi_call_flags_bits_t flags;
	const arch_env_t        *arch_env;
	ir_graph                *irg;
} arm_abi_env_t;

static void *arm_abi_init(const be_abi_call_t *call, const arch_env_t *arch_env, ir_graph *irg)
{
	arm_abi_env_t       *env = XMALLOC(arm_abi_env_t);
	be_abi_call_flags_t  fl  = be_abi_call_get_flags(call);
	env->flags    = fl.bits;
	env->irg      = irg;
	env->arch_env = arch_env;
	return env;
}
/**
 * Put all registers which are saved by the prologue/epilogue in a set.
 *
 * @param self The callback object.
 * @param s    The result set.
 */
static void arm_abi_dont_save_regs(void *self, pset *s)
{
	arm_abi_env_t *env = self;
	if (env->flags.try_omit_fp)
		pset_insert_ptr(s, env->arch_env->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self       The callback object.
 * @param mem        A pointer to the mem node. Update this if you define new memory.
 * @param reg_map    A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 * @param stack_bias Points to the current stack bias, can be modified if needed.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *arm_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias) {
	arm_abi_env_t *env = self;
	ir_node  *keep, *store;
	ir_graph *irg;
	ir_node  *block;
	arch_register_class_t *gp;

	ir_node *fp, *ip, *lr, *pc;
	ir_node *sp = be_abi_reg_map_get(reg_map, env->arch_env->sp);

	(void) stack_bias;

	if (env->flags.try_omit_fp)
		return env->arch_env->sp;

	fp = be_abi_reg_map_get(reg_map, env->arch_env->bp);
	ip = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_R12]);
	lr = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_LR]);
	pc = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_PC]);

	gp    = &arm_reg_classes[CLASS_arm_gp];
	irg   = env->irg;
	block = get_irg_start_block(irg);
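	/* The node sequence built below mirrors the classic APCS frame prologue
	 * (a sketch; the exact mnemonics are chosen by the emitter):
	 *     mov   ip, sp
	 *     stmfd sp!, {fp, ip, lr, pc}
	 *     sub   fp, ip, #4
	 */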
	ip = be_new_Copy(gp, irg, block, sp);
	arch_set_irn_register(ip, &arm_gp_regs[REG_R12]);
	be_set_constr_single_reg(ip, BE_OUT_POS(0), &arm_gp_regs[REG_R12]);

	store = new_rd_arm_StoreStackM4Inc(NULL, irg, block, sp, fp, ip, lr, pc, *mem);

	sp = new_r_Proj(irg, block, store, env->arch_env->sp->reg_class->mode, pn_arm_StoreStackM4Inc_ptr);
	arch_set_irn_register(sp, env->arch_env->sp);
	*mem = new_r_Proj(irg, block, store, mode_M, pn_arm_StoreStackM4Inc_M);

	keep = be_new_CopyKeep_single(gp, irg, block, ip, sp, get_irn_mode(ip));
	be_node_set_reg_class(keep, 1, gp);
	arch_set_irn_register(keep, &arm_gp_regs[REG_R12]);
	be_set_constr_single_reg(keep, BE_OUT_POS(0), &arm_gp_regs[REG_R12]);

	fp = new_rd_arm_Sub_i(NULL, irg, block, keep, get_irn_mode(fp), 4);
	arch_set_irn_register(fp, env->arch_env->bp);
	fp = be_new_Copy(gp, irg, block, fp); /* XXX Gammelfix: only be_ nodes can have the ignore flag set */
	arch_set_irn_register(fp, env->arch_env->bp);
	be_node_set_flags(fp, BE_OUT_POS(0), arch_irn_flags_ignore);

	be_abi_reg_map_set(reg_map, env->arch_env->bp,     fp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_R12], keep);
	be_abi_reg_map_set(reg_map, env->arch_env->sp,     sp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_LR],  lr);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_PC],  pc);

	return env->arch_env->bp;
}
/**
 * Builds the ARM epilogue.
 */
static void arm_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map) {
	arm_abi_env_t *env = self;
	ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->arch_env->sp);
	ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->arch_env->bp);
	ir_node *curr_pc = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_PC]);
	ir_node *curr_lr = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_LR]);

	/* TODO: activate omit fp in epilogue */
	if (env->flags.try_omit_fp) {
		curr_sp = be_new_IncSP(env->arch_env->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);

		curr_lr = be_new_CopyKeep_single(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr, curr_sp, get_irn_mode(curr_lr));
		be_node_set_reg_class(curr_lr, 1, &arm_reg_classes[CLASS_arm_gp]);
		arch_set_irn_register(curr_lr, &arm_gp_regs[REG_LR]);
		be_set_constr_single_reg(curr_lr, BE_OUT_POS(0), &arm_gp_regs[REG_LR]);

		curr_pc = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr);
		arch_set_irn_register(curr_pc, &arm_gp_regs[REG_PC]);
		be_set_constr_single_reg(curr_pc, BE_OUT_POS(0), &arm_gp_regs[REG_PC]);
		be_node_set_flags(curr_pc, BE_OUT_POS(0), arch_irn_flags_ignore);
	} else {
		ir_node *sub12_node;
		ir_node *load_node;

		sub12_node = new_rd_arm_Sub_i(NULL, env->irg, bl, curr_bp, mode_Iu, 12);
		/* set_arm_req_out_all(sub12_node, sub12_req); */
		arch_set_irn_register(sub12_node, env->arch_env->sp);
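		/* Together with the LoadStackM3 below this mirrors the classic APCS
		 * epilogue (a sketch; the exact mnemonics are chosen by the emitter):
		 *     sub   sp, fp, #12
		 *     ldmfd sp, {fp, sp, pc}
		 */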
		load_node = new_rd_arm_LoadStackM3(NULL, env->irg, bl, sub12_node, *mem);
		/* set_arm_req_out(load_node, &arm_default_req_arm_gp_r11, 0); */
		/* set_arm_req_out(load_node, &arm_default_req_arm_gp_sp,  1); */
		/* set_arm_req_out(load_node, &arm_default_req_arm_gp_pc,  2); */
		curr_bp = new_r_Proj(env->irg, bl, load_node, env->arch_env->bp->reg_class->mode, pn_arm_LoadStackM3_res0);
		curr_sp = new_r_Proj(env->irg, bl, load_node, env->arch_env->sp->reg_class->mode, pn_arm_LoadStackM3_res1);
		curr_pc = new_r_Proj(env->irg, bl, load_node, mode_Iu, pn_arm_LoadStackM3_res2);
		*mem    = new_r_Proj(env->irg, bl, load_node, mode_M,  pn_arm_LoadStackM3_M);
		arch_set_irn_register(curr_bp, env->arch_env->bp);
		arch_set_irn_register(curr_sp, env->arch_env->sp);
		arch_set_irn_register(curr_pc, &arm_gp_regs[REG_PC]);
	}

	be_abi_reg_map_set(reg_map, env->arch_env->sp,    curr_sp);
	be_abi_reg_map_set(reg_map, env->arch_env->bp,    curr_bp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_LR], curr_lr);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_PC], curr_pc);
}
static const be_abi_callbacks_t arm_abi_callbacks = {
	arm_abi_init,
	free,
	arm_get_between_type,
	arm_abi_dont_save_regs,
	arm_abi_prologue,
	arm_abi_epilogue,
};
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
void arm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
	ir_type  *tp;
	ir_mode  *mode;
	int       i;
	int       n = get_method_n_params(method_type);
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
	(void) self;

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0;
	call_flags.bits.store_args_sequential = 0;
	/* call_flags.bits.try_omit_fp: don't change this, we can handle both */
	call_flags.bits.fp_free               = 0;
	call_flags.bits.call_has_imm          = 1;  /* ARM calls can take an immediate address */

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &arm_abi_callbacks);

	for (i = 0; i < n; i++) {
		/* reg = get reg for param i; */
		/* be_abi_call_param_reg(abi, i, reg); */
		if (i < 4) {
			be_abi_call_param_reg(abi, i, arm_get_RegParam_reg(i));
		} else {
			tp   = get_method_param_type(method_type, i);
			mode = get_type_mode(tp);
			be_abi_call_param_stack(abi, i, mode, 4, 0, 0);
		}
	}
	/* set return registers */
	n = get_method_n_ress(method_type);

	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
	if (n == 2) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

		be_abi_call_res_reg(abi, 0, &arm_gp_regs[REG_R0]);
		be_abi_call_res_reg(abi, 1, &arm_gp_regs[REG_R1]);
	} else if (n == 1) {
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ? &arm_fpa_regs[REG_F0] : &arm_gp_regs[REG_R0];
		be_abi_call_res_reg(abi, 0, reg);
	}
}
int arm_to_appear_in_schedule(void *block_env, const ir_node *irn) {
	(void) block_env;
	if (!is_arm_irn(irn))
		return -1;

	return 1;
}

/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *arm_get_code_generator_if(void *self) {
	(void) self;
	return &arm_code_gen_if;
}
list_sched_selector_t arm_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded.
 */
static const list_sched_selector_t *arm_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
	(void) self;
	memcpy(&arm_sched_selector, selector, sizeof(arm_sched_selector));
	/* arm_sched_selector.exectime              = arm_sched_exectime; */
	arm_sched_selector.to_appear_in_schedule = arm_to_appear_in_schedule;
	return &arm_sched_selector;
}
static const ilp_sched_selector_t *arm_get_ilp_sched_selector(const void *self) {
	(void) self;
	return NULL;
}

/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int arm_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
	(void) self;
	(void) cls;
	/* ARM is a 32 bit CPU, no need for other alignment */
	return 4;
}

static const be_execution_unit_t ***arm_get_allowed_execution_units(const void *self, const ir_node *irn) {
	(void) self;
	(void) irn;
	/* TODO */
	panic("Unimplemented arm_get_allowed_execution_units()");
}

static const be_machine_t *arm_get_machine(const void *self) {
	(void) self;
	/* TODO */
	panic("Unimplemented arm_get_machine()");
}
/**
 * Return irp irgs in the desired order.
 */
static ir_graph **arm_get_irg_list(const void *self, ir_graph ***irg_list) {
	(void) self;
	(void) irg_list;
	return NULL;
}
/**
 * Allows or disallows the creation of Psi nodes for the given Phi nodes.
 * @return 1 if allowed, 0 otherwise
 */
static int arm_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j) {
	ir_node *cmp, *cmp_a, *phi;
	ir_mode *mode;

	/* currently Psi support is not implemented */
	return 0;

	/* we don't want long long Psi */
#define IS_BAD_PSI_MODE(mode) (!mode_is_float(mode) && get_mode_size_bits(mode) > 32)
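	/* e.g. a 64-bit integer mode (non-float, more than 32 bits) is "bad",
	 * while mode_Is or any float mode passes this test (an illustration) */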
	if (get_irn_mode(sel) != mode_b)
		return 0;

	cmp   = get_Proj_pred(sel);
	cmp_a = get_Cmp_left(cmp);
	mode  = get_irn_mode(cmp_a);

	if (IS_BAD_PSI_MODE(mode))
		return 0;

	/* check the Phi nodes */
	for (phi = phi_list; phi; phi = get_irn_link(phi)) {
		ir_node *pred_i = get_irn_n(phi, i);
		ir_node *pred_j = get_irn_n(phi, j);
		ir_mode *mode_i = get_irn_mode(pred_i);
		ir_mode *mode_j = get_irn_mode(pred_j);

		if (IS_BAD_PSI_MODE(mode_i) || IS_BAD_PSI_MODE(mode_j))
			return 0;
	}

#undef IS_BAD_PSI_MODE

	return 1;
}
static asm_constraint_flags_t arm_parse_asm_constraint(const void *self, const char **c)
{
	/* asm not supported */
	(void) self;
	(void) c;
	return ASM_CONSTRAINT_FLAG_INVALID;
}

static int arm_is_valid_clobber(const void *self, const char *clobber)
{
	(void) self;
	(void) clobber;
	return 0;
}
/**
 * Returns the libFirm configuration parameters for this backend.
 */
static const backend_params *arm_get_libfirm_params(void) {
	static const ir_settings_if_conv_t ifconv = {
		4,                  /* maxdepth, doesn't matter for Psi-conversion */
		arm_is_psi_allowed  /* allows or disallows Psi creation for the given selector */
	};
	static ir_settings_arch_dep_t ad = {
		1,    /* also use subs */
		1,    /* Muls are fast enough on ARM but ... */
		31,   /* ... one shift would possibly be better */
		NULL, /* no evaluator function */
		0,    /* SMUL is needed, only in Arch M */
		0,    /* UMUL is needed, only in Arch M */
		32,   /* SMUL & UMUL available for 32 bit */
	};
	static backend_params p = {
		1,    /* need dword lowering */
		0,    /* don't support inline assembler yet */
		0,    /* no immediate floating point mode. */
		NULL, /* no additional opcodes */
		NULL, /* will be set later */
		NULL, /* but yet no creator function */
		NULL, /* context for create_intrinsic_fkt */
		NULL, /* will be set below */
		NULL  /* no immediate fp mode */
	};

	p.dep_param    = &ad;
	p.if_conv_info = &ifconv;
	return &p;
}
/* fpu set architectures. */
static const lc_opt_enum_int_items_t arm_fpu_items[] = {
	{ "softfloat", ARM_FPU_ARCH_SOFTFLOAT },
	{ "fpe",       ARM_FPU_ARCH_FPE },
	{ "fpa",       ARM_FPU_ARCH_FPA },
	{ "vfp1xd",    ARM_FPU_ARCH_VFP_V1xD },
	{ "vfp1",      ARM_FPU_ARCH_VFP_V1 },
	{ "vfp2",      ARM_FPU_ARCH_VFP_V2 },
	{ NULL,        0 }
};

static lc_opt_enum_int_var_t arch_fpu_var = {
	&arm_isa_template.fpu_arch, arm_fpu_items
};

static const lc_opt_table_entry_t arm_options[] = {
	LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
	LC_OPT_ENT_BOOL("gen_reg_names", "use generic register names", &arm_isa_template.gen_reg_names),
	LC_OPT_LAST
};
const arch_isa_if_t arm_isa_if = {
	arm_init,
	arm_done,
	arm_get_n_reg_class,
	arm_get_reg_class,
	arm_get_reg_class_for_mode,
	arm_get_call_abi,
	arm_get_code_generator_if,
	arm_get_list_sched_selector,
	arm_get_ilp_sched_selector,
	arm_get_reg_class_alignment,
	arm_get_libfirm_params,
	arm_get_allowed_execution_units,
	arm_get_machine,
	arm_get_irg_list,
	NULL,  /* mark remat */
	arm_parse_asm_constraint,
	arm_is_valid_clobber
};
void be_init_arch_arm(void)
{
	lc_opt_entry_t *be_grp  = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *arm_grp = lc_opt_get_grp(be_grp, "arm");

	lc_opt_add_table(arm_grp, arm_options);

	be_register_isa_if("arm", &arm_isa_if);

	arm_init_transform();
	arm_init_emitter();
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_arm);