/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   The main arm backend driver file.
 * @author  Oliver Richter, Tobias Gneist
 */
#include "lc_opts_enum.h"

#include "pseudo_irg.h"
#include "iroptimize.h"

#include "../bearch_t.h"             /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"
#include "../bemachine.h"
#include "../beilpsched.h"
#include "../bemodule.h"
#include "../beirg_t.h"
#include "../bespillslots.h"
#include "../begnuas.h"

#include "bearch_arm_t.h"

#include "arm_new_nodes.h"           /* arm nodes interface */
#include "gen_arm_regalloc_if.h"     /* the generated interface (register type and class definitions) */
#include "arm_transform.h"
#include "arm_optimize.h"
#include "arm_emitter.h"
#include "arm_map_regs.h"

#define DEBUG_MODULE "firm.be.arm.isa"
/* TODO: ugly, but we need it to get access to the registers assigned to Phi nodes */
static set *cur_reg_set = NULL;

/**************************************************
 *  _ __ ___  __ _   __ _| | | ___   ___ _| |_
 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
 * | | |  __/ (_| | | (_| | | | (_) | (__ | | |
 * |_|  \___|\__, | \__,_|_|_|\___/ \___| |_|_|
 **************************************************/
/**
 * Return register requirements for an arm node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
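 *
 * For illustration: a Proj with pn 1 hanging off a mode_T arm node is
 * answered below by get_arm_out_req(node, 1) asked of the Proj's
 * predecessor (the concrete pn numbering depends on the node).
 */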
static const arch_register_req_t *arm_get_irn_reg_req(const ir_node *node,
	long     node_pos = pos == -1 ? 0 : pos;
	ir_mode *mode     = get_irn_mode(node);

	if (is_Block(node) || mode == mode_X) {
		return arch_no_register_req;
	if (mode == mode_T && pos < 0) {
		return arch_no_register_req;
		return arch_no_register_req;
		return arch_no_register_req;

	node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
	node     = skip_Proj_const(node);

	/* get requirements for our own nodes */
	if (is_arm_irn(node)) {
		const arch_register_req_t *req;
		req = get_arm_in_req(node, pos);
		req = get_arm_out_req(node, node_pos);

	/* unknown should be transformed by now */
	assert(!is_Unknown(node));
	return arch_no_register_req;
static arch_irn_class_t arm_classify(const ir_node *irn)
	irn = skip_Proj_const(irn);
		return arch_irn_class_branch;

static ir_entity *arm_get_frame_entity(const ir_node *irn) {
	/* we do NOT transform be_Spill or be_Reload nodes, so we never
	   have frame access using ARM nodes. */

static void arm_set_frame_entity(ir_node *irn, ir_entity *ent) {
	panic("arm_set_frame_entity() called. This should not happen.");

/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void arm_set_stack_bias(ir_node *irn, int bias)
	/* TODO: correct offset if irn accesses the stack */

static int arm_get_sp_bias(const ir_node *irn)

/* fill register allocator interface */

static const arch_irn_ops_t arm_irn_ops = {
	arm_get_frame_entity,
	arm_set_frame_entity,
	NULL,    /* get_inverse             */
	NULL,    /* get_op_estimated_cost   */
	NULL,    /* possible_memory_operand */
	NULL,    /* perform_memory_operand  */

/**************************************************
 *   ___ ___   __| | ___  __ _  ___ _ __   _| |_
 *  / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
 * | (_| (_) | (_| |  __/ (_| |  __/ | | | | | |
 *  \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
 **************************************************/
/**
 * Transforms the standard Firm graph into
 * an ARM firm graph.
 */
static void arm_prepare_graph(void *self) {
	arm_code_gen_t *cg = self;

	/* transform nodes into assembler instructions */
	arm_transform_graph(cg);

	/* do local optimizations (mainly CSE) */
	local_optimize_graph(cg->irg);
		be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

	/* do code placement, to optimize the position of constants */
		be_dump(cg->irg, "-place", dump_ir_block_graph_sched);

/**
 * Called immediately before emit phase.
 */
static void arm_finish_irg(void *self)
	arm_code_gen_t *cg = self;

	/* do peephole optimizations and fix stack offsets */
	arm_peephole_optimization(cg);

static void arm_before_ra(void *self)
	/* Some stuff you need to do immediately before register allocation */

/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 */
static void arm_after_ra(void *self)
	arm_code_gen_t *cg = self;
	be_coalesce_spillslots(cg->birg);

/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void arm_emit_and_done(void *self) {
	arm_code_gen_t *cg  = self;
	ir_graph       *irg = cg->irg;

	arm_gen_routine(cg, irg);

	/* de-allocate code generator */
	del_set(cg->reg_set);
/**
 * Move a double floating point value into an integer register.
 * Place the move operation into block bl.
 *
 * Handle some special cases here:
 * 1.) A constant: simply split into two
 * 2.) A load: simply split into two
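 *
 * A worked instance of case 1: the double constant 1.0 has the IEEE-754
 * bit pattern 0x3FF0000000000000, so the byte-wise assembly below yields
 * *resH = 0x3FF00000 and *resL = 0x00000000.
 */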
static ir_node *convert_dbl_to_int(ir_node *bl, ir_node *arg, ir_node *mem,
                                   ir_node **resH, ir_node **resL) {
		tarval *tv = get_Const_tarval(arg);

		/* get the upper 32 bits */
		v =            get_tarval_sub_bits(tv, 7);
		v = (v << 8) | get_tarval_sub_bits(tv, 6);
		v = (v << 8) | get_tarval_sub_bits(tv, 5);
		v = (v << 8) | get_tarval_sub_bits(tv, 4);
		*resH = new_Const_long(mode_Is, v);

		/* get the lower 32 bits */
		v =            get_tarval_sub_bits(tv, 3);
		v = (v << 8) | get_tarval_sub_bits(tv, 2);
		v = (v << 8) | get_tarval_sub_bits(tv, 1);
		v = (v << 8) | get_tarval_sub_bits(tv, 0);
		*resL = new_Const_long(mode_Is, v);
	} else if (is_Load(skip_Proj(arg))) {
		/* FIXME: handling of low/high depends on LE/BE here */
		panic("Unimplemented convert_dbl_to_int() case");
		ir_graph *irg = current_ir_graph;

		conv = new_bd_arm_fpaDbl2GP(NULL, bl, arg, mem);
		*resL = new_r_Proj(bl, conv, mode_Is, pn_arm_fpaDbl2GP_low);
		*resH = new_r_Proj(bl, conv, mode_Is, pn_arm_fpaDbl2GP_high);
		mem   = new_r_Proj(bl, conv, mode_M,  pn_arm_fpaDbl2GP_M);
/**
 * Move a single floating point value into an integer register.
 * Place the move operation into block bl.
 *
 * Handle some special cases here:
 * 1.) A constant: simply move
 * 2.) A load: simply load
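 *
 * Analogous to convert_dbl_to_int() above: the float constant 1.0f, with
 * IEEE-754 bit pattern 0x3F800000, becomes the integer constant 0x3F800000.
 */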
static ir_node *convert_sng_to_int(ir_node *bl, ir_node *arg)
		tarval *tv = get_Const_tarval(arg);

		/* get the lower 32 bits */
		v =            get_tarval_sub_bits(tv, 3);
		v = (v << 8) | get_tarval_sub_bits(tv, 2);
		v = (v << 8) | get_tarval_sub_bits(tv, 1);
		v = (v << 8) | get_tarval_sub_bits(tv, 0);
		return new_Const_long(mode_Is, v);
	} else if (is_Load(skip_Proj(arg))) {
		load = skip_Proj(arg);
	panic("Unimplemented convert_sng_to_int() case");
/**
 * Convert the arguments of a call to support the
 * ARM calling convention of general purpose AND floating
 * point registers.
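 *
 * Sketch of the effect (hypothetical source-level view): a call
 * f(double d) is rewritten as if it were f(int hi, int lo), the double
 * being split into two GP values with the high part passed first.
 */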
static void handle_calls(ir_node *call, void *env)
	arm_code_gen_t *cg = env;
	int i, j, n, size, idx, flag, n_param, n_res, first_variadic;
	ir_type *mtp, *new_mtd, *new_tp[5];
	ir_node *new_in[5], **in;

	/* check, if we need conversions */
	n   = get_Call_n_params(call);
	mtp = get_Call_type(call);
	assert(get_method_n_params(mtp) == n);

	/* it's always enough to handle the first 4 parameters */
	flag = size = idx = 0;
	bl = get_nodes_block(call);
	for (i = 0; i < n; ++i) {
		ir_type *param_tp = get_method_param_type(mtp, i);

		if (is_compound_type(param_tp)) {
			/* an aggregate parameter: bad case */
			/* a primitive parameter */
			ir_mode *mode = get_type_mode(param_tp);

			if (mode_is_float(mode)) {
				if (get_mode_size_bits(mode) > 32) {
					ir_node *mem = get_Call_mem(call);

					/* Beware: ARM wants the high part first */
					new_tp[idx]   = cg->int_tp;
					new_tp[idx+1] = cg->int_tp;
					mem = convert_dbl_to_int(bl, get_Call_param(call, i), mem, &new_in[idx], &new_in[idx+1]);
					set_Call_mem(call, mem);
					new_tp[idx] = cg->int_tp;
					new_in[idx] = convert_sng_to_int(bl, get_Call_param(call, i));
			new_tp[idx] = param_tp;
			new_in[idx] = get_Call_param(call, i);

	/* if flag is NOT set, no need to translate the method type */

	/* construct a new method type */
	n_param = get_method_n_params(mtp) - n + idx;
	n_res   = get_method_n_ress(mtp);
	new_mtd = new_d_type_method(get_type_ident(mtp), n_param, n_res, get_type_dbg_info(mtp));

	for (i = 0; i < idx; ++i)
		set_method_param_type(new_mtd, i, new_tp[i]);
	for (i = n, j = idx; i < get_method_n_params(mtp); ++i)
		set_method_param_type(new_mtd, j++, get_method_param_type(mtp, i));
	for (i = 0; i < n_res; ++i)
		set_method_res_type(new_mtd, i, get_method_res_type(mtp, i));

	set_method_calling_convention(new_mtd, get_method_calling_convention(mtp));
	first_variadic = get_method_first_variadic_param_index(mtp);
	if (first_variadic >= 0)
		set_method_first_variadic_param_index(new_mtd, first_variadic);

	if (is_lowered_type(mtp)) {
		mtp = get_associated_type(mtp);
	set_lowered_type(mtp, new_mtd);

	set_Call_type(call, new_mtd);

	/* calculate new in array of the Call */
	NEW_ARR_A(ir_node *, in, n_param + 2);
	for (i = 0; i < idx; ++i)
		in[2 + i] = new_in[i];
	for (i = n, j = idx; i < get_method_n_params(mtp); ++i)
		in[2 + j++] = get_Call_param(call, i);

	in[0] = get_Call_mem(call);
	in[1] = get_Call_ptr(call);

	/* finally, change the call inputs */
	set_irn_in(call, n_param + 2, in);
/**
 * Handle graph transformations before the abi converter does its work.
 */
static void arm_before_abi(void *self) {
	arm_code_gen_t *cg = self;
	irg_walk_graph(cg->irg, NULL, handle_calls, cg);

static void *arm_cg_init(be_irg_t *birg);

static const arch_code_generator_if_t arm_code_gen_if = {
	NULL,            /* get_pic_base */
	arm_before_abi,  /* before abi introduce */
	arm_before_ra,   /* before register allocation hook */

/**
 * Initializes the code generator.
 */
static void *arm_cg_init(be_irg_t *birg) {
	static ir_type *int_tp = NULL;
	arm_isa_t      *isa    = (arm_isa_t *)birg->main_env->arch_env;

		/* create an integer type with machine size */
		int_tp = new_type_primitive(new_id_from_chars("int", 3), mode_Is);

	cg       = XMALLOC(arm_code_gen_t);
	cg->impl = &arm_code_gen_if;
	cg->reg_set      = new_set(arm_cmp_irn_reg_assoc, 1024);
	cg->have_fp_insn = 0;
	cg->unknown_gp   = NULL;
	cg->unknown_fpa  = NULL;
	cg->dump         = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;

	FIRM_DBG_REGISTER(cg->mod, "firm.be.arm.cg");

	cur_reg_set = cg->reg_set;

	/* enter the current code generator */

	return (arch_code_generator_t *)cg;
/**
 * Maps all intrinsic calls that the backend supports
 * and maps all instructions the backend does not support
 * to runtime calls.
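 *
 * Concretely (see the records built below): a signed Div is turned into a
 * call to the runtime helper __divsi3, and Mod plus the unsigned variants
 * map to __modsi3, __udivsi3 and __umodsi3.
 */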
static void arm_handle_intrinsics(void) {
	ir_type *tp, *int_tp, *uint_tp;
	runtime_rt rt_iDiv, rt_uDiv, rt_iMod, rt_uMod;

#define ID(x) new_id_from_chars(x, sizeof(x)-1)

	int_tp  = new_type_primitive(ID("int"), mode_Is);
	uint_tp = new_type_primitive(ID("uint"), mode_Iu);

	/* ARM has neither a signed div instruction ... */
		i_instr_record *map_Div = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_iDiv"), 2, 1);
		set_method_param_type(tp, 0, int_tp);
		set_method_param_type(tp, 1, int_tp);
		set_method_res_type(tp, 0, int_tp);

		rt_iDiv.ent             = new_entity(get_glob_type(), ID("__divsi3"), tp);
		set_entity_ld_ident(rt_iDiv.ent, ID("__divsi3"));
		rt_iDiv.mode            = mode_T;
		rt_iDiv.res_mode        = mode_Is;
		rt_iDiv.mem_proj_nr     = pn_Div_M;
		rt_iDiv.regular_proj_nr = pn_Div_X_regular;
		rt_iDiv.exc_proj_nr     = pn_Div_X_except;
		rt_iDiv.exc_mem_proj_nr = pn_Div_M;
		rt_iDiv.res_proj_nr     = pn_Div_res;

		set_entity_visibility(rt_iDiv.ent, visibility_external_allocated);

		map_Div->kind     = INTRINSIC_INSTR;
		map_Div->op       = op_Div;
		map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Div->ctx      = &rt_iDiv;

	/* ... nor an unsigned div instruction ... */
		i_instr_record *map_Div = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_uDiv"), 2, 1);
		set_method_param_type(tp, 0, uint_tp);
		set_method_param_type(tp, 1, uint_tp);
		set_method_res_type(tp, 0, uint_tp);

		rt_uDiv.ent             = new_entity(get_glob_type(), ID("__udivsi3"), tp);
		set_entity_ld_ident(rt_uDiv.ent, ID("__udivsi3"));
		rt_uDiv.mode            = mode_T;
		rt_uDiv.res_mode        = mode_Iu;
		rt_uDiv.mem_proj_nr     = pn_Div_M;
		rt_uDiv.regular_proj_nr = pn_Div_X_regular;
		rt_uDiv.exc_proj_nr     = pn_Div_X_except;
		rt_uDiv.exc_mem_proj_nr = pn_Div_M;
		rt_uDiv.res_proj_nr     = pn_Div_res;

		set_entity_visibility(rt_uDiv.ent, visibility_external_allocated);

		map_Div->kind     = INTRINSIC_INSTR;
		map_Div->op       = op_Div;
		map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Div->ctx      = &rt_uDiv;

	/* ... nor a signed mod instruction ... */
		i_instr_record *map_Mod = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_iMod"), 2, 1);
		set_method_param_type(tp, 0, int_tp);
		set_method_param_type(tp, 1, int_tp);
		set_method_res_type(tp, 0, int_tp);

		rt_iMod.ent             = new_entity(get_glob_type(), ID("__modsi3"), tp);
		set_entity_ld_ident(rt_iMod.ent, ID("__modsi3"));
		rt_iMod.mode            = mode_T;
		rt_iMod.res_mode        = mode_Is;
		rt_iMod.mem_proj_nr     = pn_Mod_M;
		rt_iMod.regular_proj_nr = pn_Mod_X_regular;
		rt_iMod.exc_proj_nr     = pn_Mod_X_except;
		rt_iMod.exc_mem_proj_nr = pn_Mod_M;
		rt_iMod.res_proj_nr     = pn_Mod_res;

		set_entity_visibility(rt_iMod.ent, visibility_external_allocated);

		map_Mod->kind     = INTRINSIC_INSTR;
		map_Mod->op       = op_Mod;
		map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Mod->ctx      = &rt_iMod;

	/* ... nor an unsigned mod. */
		i_instr_record *map_Mod = &records[n_records++].i_instr;

		tp = new_type_method(ID("rt_uMod"), 2, 1);
		set_method_param_type(tp, 0, uint_tp);
		set_method_param_type(tp, 1, uint_tp);
		set_method_res_type(tp, 0, uint_tp);

		rt_uMod.ent             = new_entity(get_glob_type(), ID("__umodsi3"), tp);
		set_entity_ld_ident(rt_uMod.ent, ID("__umodsi3"));
		rt_uMod.mode            = mode_T;
		rt_uMod.res_mode        = mode_Iu;
		rt_uMod.mem_proj_nr     = pn_Mod_M;
		rt_uMod.regular_proj_nr = pn_Mod_X_regular;
		rt_uMod.exc_proj_nr     = pn_Mod_X_except;
		rt_uMod.exc_mem_proj_nr = pn_Mod_M;
		rt_uMod.res_proj_nr     = pn_Mod_res;

		set_entity_visibility(rt_uMod.ent, visibility_external_allocated);

		map_Mod->kind     = INTRINSIC_INSTR;
		map_Mod->op       = op_Mod;
		map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
		map_Mod->ctx      = &rt_uMod;

	lower_intrinsics(records, n_records, /*part_block_used=*/0);
/*****************************************************************
 *  ____             _                  _   _____  _____
 * | _ \ | | | | |_ _|/ ____| /\
 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
 *****************************************************************/
static arm_isa_t arm_isa_template = {
	&arm_isa_if,                     /* isa interface */
	&arm_gp_regs[REG_SP],            /* stack pointer */
	&arm_gp_regs[REG_R11],           /* base pointer */
	&arm_reg_classes[CLASS_arm_gp],  /* static link pointer class */
	-1,                              /* stack direction */
	2,                               /* power of two stack alignment for calls, 2^2 == 4 */
	NULL,                            /* main environment */
	5,                               /* reload costs */
	0,                               /* use generic register names instead of SP, LR, PC */
	ARM_FPU_ARCH_FPE,                /* FPU architecture */
	NULL,                            /* current code generator */

/**
 * Initializes the backend ISA and opens the output file.
 */
static arch_env_t *arm_init(FILE *file_handle) {
	static int inited = 0;

	isa = XMALLOC(arm_isa_t);
	memcpy(isa, &arm_isa_template, sizeof(*isa));

	be_emit_init(file_handle);

	arm_create_opcodes(&arm_irn_ops);
	arm_handle_intrinsics();

	/* needed for the debug support */
	be_gas_emit_switch_section(GAS_SECTION_TEXT);
	be_emit_cstring(".Ltext0:\n");
	be_emit_write_line();

	/* we mark referenced global entities, so we can only emit those which
	 * are actually referenced. (Note: you mustn't use the type visited flag
	 * elsewhere in the backend) */
	inc_master_type_visited();

	return &isa->arch_env;
/**
 * Closes the output file and frees the ISA structure.
 */
static void arm_done(void *self) {
	arm_isa_t *isa = self;

	be_gas_emit_decls(isa->arch_env.main_env, 1);

/**
 * Report the number of register classes.
 * If we don't have fp instructions, report only GP
 * here to speed up register allocation (and make dumps
 * smaller and more readable).
 */
static unsigned arm_get_n_reg_class(const void *self) {

/**
 * Return the register class with requested index.
 */
static const arch_register_class_t *arm_get_reg_class(const void *self,
	assert(i < N_CLASSES);
	return &arm_reg_classes[i];

/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *arm_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
	if (mode_is_float(mode))
		return &arm_reg_classes[CLASS_arm_fpa];
	else
		return &arm_reg_classes[CLASS_arm_gp];
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
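 *
 * Resulting layout (assuming 4 byte pointers, matching the offsets set
 * below):
 *     +0  old_bp
 *     +4  return address
 */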
static ir_type *arm_get_between_type(void *self) {
	static ir_type   *between_type = NULL;
	static ir_entity *old_bp_ent   = NULL;

	if (between_type == NULL) {
		ir_entity *ret_addr_ent;
		ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P);
		ir_type *old_bp_type   = new_type_primitive(new_id_from_str("bp"), mode_P);

		between_type = new_type_class(new_id_from_str("arm_between_type"));
		old_bp_ent   = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, new_id_from_str("return_addr"), ret_addr_type);

		set_entity_offset(old_bp_ent, 0);
		set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
	be_abi_call_flags_bits_t flags;
	const arch_env_t *arch_env;

static void *arm_abi_init(const be_abi_call_t *call, const arch_env_t *arch_env, ir_graph *irg)
	arm_abi_env_t *env = XMALLOC(arm_abi_env_t);
	be_abi_call_flags_t fl = be_abi_call_get_flags(call);
	env->flags = fl.bits;
	env->arch_env = arch_env;

/**
 * Generate the routine prologue.
 *
 * @param self       The callback object.
 * @param mem        A pointer to the mem node. Update this if you define new memory.
 * @param reg_map    A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 * @param stack_bias Points to the current stack bias, can be modified if needed.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
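 *
 * For the non-omit-fp case the nodes built below amount to the classic
 * APCS frame setup (a sketch, not literal emitter output):
 *     mov   ip, sp
 *     stmfd sp!, {fp, ip, lr, pc}
 *     sub   fp, ip, #4
 */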
static const arch_register_t *arm_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias) {
	arm_abi_env_t *env = self;
	arch_register_class_t *gp;

	ir_node *fp, *ip, *lr, *pc;
	ir_node *sp = be_abi_reg_map_get(reg_map, env->arch_env->sp);

	if (env->flags.try_omit_fp)
		return env->arch_env->sp;

	fp = be_abi_reg_map_get(reg_map, env->arch_env->bp);
	ip = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_R12]);
	lr = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_LR]);
	pc = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_PC]);

	gp    = &arm_reg_classes[CLASS_arm_gp];
	block = get_irg_start_block(irg);

	/* mark bp register as ignore */
	be_set_constr_single_reg_out(get_Proj_pred(fp),
	                             get_Proj_proj(fp), env->arch_env->bp,
	                             arch_register_req_type_ignore);

	/* copy SP to IP (so we can spill it) */
	ip = be_new_Copy(gp, block, sp);
	be_set_constr_single_reg_out(ip, 0, &arm_gp_regs[REG_R12], 0);

	store = new_bd_arm_StoreStackM4Inc(NULL, block, sp, fp, ip, lr, pc, *mem);

	sp = new_r_Proj(block, store, env->arch_env->sp->reg_class->mode, pn_arm_StoreStackM4Inc_ptr);
	arch_set_irn_register(sp, env->arch_env->sp);
	*mem = new_r_Proj(block, store, mode_M, pn_arm_StoreStackM4Inc_M);

	/* frame pointer is ip-4 (because ip is our old sp value) */
	fp = new_bd_arm_Sub_i(NULL, block, ip, get_irn_mode(fp), 4);
	arch_set_irn_register(fp, env->arch_env->bp);

	/* beware: we change the fp but the StoreStackM4Inc above wants the old
	 * fp value. We are not allowed to spill or anything in the prologue, so
	 * we have to enforce some order here. (scheduler/regalloc are too stupid
	 * to extract this order from register requirements) */
	add_irn_dep(fp, store);

	fp = be_new_Copy(gp, block, fp); /* XXX hack: only be_ nodes may have custom register requirements */
	be_set_constr_single_reg_out(fp, 0, env->arch_env->bp,
	                             arch_register_req_type_ignore);
	arch_set_irn_register(fp, env->arch_env->bp);

	be_abi_reg_map_set(reg_map, env->arch_env->bp, fp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_R12], ip);
	be_abi_reg_map_set(reg_map, env->arch_env->sp, sp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_LR], lr);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_PC], pc);

	return env->arch_env->bp;
/**
 * Builds the ARM epilogue.
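 *
 * In the frame-pointer case this corresponds to the matching APCS
 * teardown (a sketch; the LoadStackM3Epilogue node below restores fp,
 * sp and pc in one go):
 *     ldmea fp, {fp, sp, pc}
 */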
static void arm_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map) {
	arm_abi_env_t *env = self;
	ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->arch_env->sp);
	ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->arch_env->bp);
	ir_node *curr_pc = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_PC]);
	ir_node *curr_lr = be_abi_reg_map_get(reg_map, &arm_gp_regs[REG_LR]);

	// TODO: Activate Omit fp in epilogue
	if (env->flags.try_omit_fp) {
		curr_sp = be_new_IncSP(env->arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);

		curr_lr = be_new_CopyKeep_single(&arm_reg_classes[CLASS_arm_gp], bl, curr_lr, curr_sp, get_irn_mode(curr_lr));
		be_set_constr_single_reg_out(curr_lr, 0, &arm_gp_regs[REG_LR], 0);

		curr_pc = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], bl, curr_lr);
		be_set_constr_single_reg_out(curr_pc, BE_OUT_POS(0), &arm_gp_regs[REG_PC], 0);

		load_node = new_bd_arm_LoadStackM3Epilogue(NULL, bl, curr_bp, *mem);

		curr_bp = new_r_Proj(bl, load_node, env->arch_env->bp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res0);
		curr_sp = new_r_Proj(bl, load_node, env->arch_env->sp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res1);
		curr_pc = new_r_Proj(bl, load_node, mode_Iu, pn_arm_LoadStackM3Epilogue_res2);
		*mem    = new_r_Proj(bl, load_node, mode_M,  pn_arm_LoadStackM3Epilogue_M);
		arch_set_irn_register(curr_bp, env->arch_env->bp);
		arch_set_irn_register(curr_sp, env->arch_env->sp);
		arch_set_irn_register(curr_pc, &arm_gp_regs[REG_PC]);

	be_abi_reg_map_set(reg_map, env->arch_env->sp, curr_sp);
	be_abi_reg_map_set(reg_map, env->arch_env->bp, curr_bp);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_LR], curr_lr);
	be_abi_reg_map_set(reg_map, &arm_gp_regs[REG_PC], curr_pc);
static const be_abi_callbacks_t arm_abi_callbacks = {
	arm_get_between_type,

/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
void arm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
	int n = get_method_n_params(method_type);
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0;
	call_flags.bits.store_args_sequential = 0;
	/* call_flags.bits.try_omit_fp: don't change this, we can handle both */
	call_flags.bits.fp_free               = 0;
	call_flags.bits.call_has_imm          = 1;  /* ARM calls can have an immediate address */

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &arm_abi_callbacks);

	for (i = 0; i < n; i++) {
		/* reg = get reg for param i; */
		/* be_abi_call_param_reg(abi, i, reg); */
		be_abi_call_param_reg(abi, i, arm_get_RegParam_reg(i));
		tp   = get_method_param_type(method_type, i);
		mode = get_type_mode(tp);
		be_abi_call_param_stack(abi, i, mode, 4, 0, 0);

	/* set return registers */
	n = get_method_n_ress(method_type);
	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);
		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);
		assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

		be_abi_call_res_reg(abi, 0, &arm_gp_regs[REG_R0]);
		be_abi_call_res_reg(abi, 1, &arm_gp_regs[REG_R1]);
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ? &arm_fpa_regs[REG_F0] : &arm_gp_regs[REG_R0];
		be_abi_call_res_reg(abi, 0, reg);
int arm_to_appear_in_schedule(void *block_env, const ir_node *irn) {

/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *arm_get_code_generator_if(void *self) {
	return &arm_code_gen_if;

list_sched_selector_t arm_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded.
 */
static const list_sched_selector_t *arm_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
	memcpy(&arm_sched_selector, selector, sizeof(arm_sched_selector));
	/* arm_sched_selector.exectime = arm_sched_exectime; */
	arm_sched_selector.to_appear_in_schedule = arm_to_appear_in_schedule;
	return &arm_sched_selector;

static const ilp_sched_selector_t *arm_get_ilp_sched_selector(const void *self) {

/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int arm_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
	/* ARM is a 32 bit CPU, no need for other alignment */

static const be_execution_unit_t ***arm_get_allowed_execution_units(const void *self, const ir_node *irn) {
	panic("Unimplemented arm_get_allowed_execution_units()");

static const be_machine_t *arm_get_machine(const void *self) {
	panic("Unimplemented arm_get_machine()");

/**
 * Return irp irgs in the desired order.
 */
static ir_graph **arm_get_irg_list(const void *self, ir_graph ***irg_list) {
/**
 * Allows or disallows the creation of Psi nodes for the given Phi nodes.
 * @return 1 if allowed, 0 otherwise
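 *
 * E.g. a Phi merging two long long (64bit integer) values is rejected by
 * IS_BAD_PSI_MODE below, since it would need two conditional moves.
 */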
static int arm_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j) {
	ir_node *cmp, *cmp_a, *phi;

	/* currently Psi support is not implemented */

	/* we don't want long long Psi */
#define IS_BAD_PSI_MODE(mode) (!mode_is_float(mode) && get_mode_size_bits(mode) > 32)

	if (get_irn_mode(sel) != mode_b)

	cmp   = get_Proj_pred(sel);
	cmp_a = get_Cmp_left(cmp);
	mode  = get_irn_mode(cmp_a);

	if (IS_BAD_PSI_MODE(mode))

	/* check the Phi nodes */
	for (phi = phi_list; phi; phi = get_irn_link(phi)) {
		ir_node *pred_i = get_irn_n(phi, i);
		ir_node *pred_j = get_irn_n(phi, j);
		ir_mode *mode_i = get_irn_mode(pred_i);
		ir_mode *mode_j = get_irn_mode(pred_j);

		if (IS_BAD_PSI_MODE(mode_i) || IS_BAD_PSI_MODE(mode_j))

#undef IS_BAD_PSI_MODE

static asm_constraint_flags_t arm_parse_asm_constraint(const void *self, const char **c)
	/* asm not supported */
	return ASM_CONSTRAINT_FLAG_INVALID;

static int arm_is_valid_clobber(const void *self, const char *clobber)
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *arm_get_libfirm_params(void) {
	static const ir_settings_if_conv_t ifconv = {
		4,                  /* maxdepth, doesn't matter for Psi-conversion */
		arm_is_psi_allowed  /* allows or disallows Psi creation for given selector */
	static ir_settings_arch_dep_t ad = {
		1,    /* Muls are fast enough on ARM but ... */
		31,   /* ... one shift would possibly be better */
		NULL, /* no evaluator function */
		0,    /* SMUL is needed, only in Arch M */
		0,    /* UMUL is needed, only in Arch M */
		32,   /* SMUL & UMUL available for 32 bit */
	static backend_params p = {
		1,     /* need dword lowering */
		0,     /* don't support inline assembler yet */
		NULL,  /* will be set later */
		NULL,  /* but yet no creator function */
		NULL,  /* context for create_intrinsic_fkt */
		NULL,  /* ifconv_info will be set below */
		NULL,  /* float arithmetic mode (TODO) */
		0,     /* no trampoline support: size 0 */
		0,     /* no trampoline support: align 0 */
		NULL,  /* no trampoline support: no trampoline builder */
		4      /* alignment of stack parameter */

	p.if_conv_info = &ifconv;
/* supported FPU architectures */
static const lc_opt_enum_int_items_t arm_fpu_items[] = {
	{ "softfloat", ARM_FPU_ARCH_SOFTFLOAT },
	{ "fpe",       ARM_FPU_ARCH_FPE },
	{ "fpa",       ARM_FPU_ARCH_FPA },
	{ "vfp1xd",    ARM_FPU_ARCH_VFP_V1xD },
	{ "vfp1",      ARM_FPU_ARCH_VFP_V1 },
	{ "vfp2",      ARM_FPU_ARCH_VFP_V2 },

static lc_opt_enum_int_var_t arch_fpu_var = {
	&arm_isa_template.fpu_arch, arm_fpu_items

static const lc_opt_table_entry_t arm_options[] = {
	LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
	LC_OPT_ENT_BOOL("gen_reg_names", "use generic register names", &arm_isa_template.gen_reg_names),
const arch_isa_if_t arm_isa_if = {
	NULL,  /* handle_intrinsics */
	arm_get_n_reg_class,
	arm_get_reg_class_for_mode,
	arm_get_code_generator_if,
	arm_get_list_sched_selector,
	arm_get_ilp_sched_selector,
	arm_get_reg_class_alignment,
	arm_get_libfirm_params,
	arm_get_allowed_execution_units,
	NULL,  /* mark remat */
	arm_parse_asm_constraint,
	arm_is_valid_clobber

void be_init_arch_arm(void)
	lc_opt_entry_t *be_grp  = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *arm_grp = lc_opt_get_grp(be_grp, "arm");

	lc_opt_add_table(arm_grp, arm_options);

	be_register_isa_if("arm", &arm_isa_if);

	arm_init_transform();

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_arm);