4 * @author Sebastian Hack
19 #include "irgraph_t.h"
22 #include "iredges_t.h"
25 #include "irprintf_t.h"
34 #include "besched_t.h"
36 #define MAX(x, y) ((x) > (y) ? (x) : (y))
37 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/** Description of a single call argument or result; stored in the call's
 *  param set and keyed by the (is_res, pos) pair (see cmp_call_arg()). */
39 typedef struct _be_abi_call_arg_t {
40 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
41 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
42 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
45 const arch_register_t *reg; /**< The register assigned to an in_reg argument/result. */
48 unsigned space_before; /**< Bytes of padding inserted before this stack argument. */
/** An ABI call object: flags, backend callbacks and the per-argument
 *  information collected for one call site / method type. */
52 struct _be_abi_call_t {
53 be_abi_call_flags_t flags; /**< Flags set via be_abi_call_set_flags(). */
54 const be_abi_callbacks_t *cb; /**< Backend-provided ABI callbacks. */
55 ir_type *between_type; /**< NOTE(review): purpose not visible here — presumably the "between" stack layout type; confirm. */
59 #define N_FRAME_TYPES 3
62 * This type describes the stack layout.
63 * The stack is divided into 3 parts:
64 * - arg_type: A struct type describing the stack arguments and its order.
65 * - between_type: A struct type describing the stack layout between arguments
67 * - frame_type: A class type describing the frame layout
69 typedef struct _be_stack_layout_t {
70 ir_type *arg_type; /**< A type describing the stack argument layout. */
71 ir_type *between_type; /**< A type describing the "between" layout. */
72 ir_type *frame_type; /**< The frame type. */
74 ir_type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
/* NOTE(review): an initial_offset field is read/written by get_stack_entity_offset()
 * and stack_frame_compute_initial_offset() but is not visible in this excerpt. */
77 int stack_dir; /**< -1 for decreasing, 1 for increasing. */
/** Per-irg ABI environment: everything the ABI lowering phase needs for one graph. */
80 struct _be_abi_irg_t {
82 be_stack_layout_t *frame; /**< The stack frame model. */
83 const be_irg_t *birg; /**< The back end IRG. */
84 const arch_isa_t *isa; /**< The isa. */
85 survive_dce_t *dce_survivor; /**< Keeps selected nodes alive across dead code elimination. */
87 be_abi_call_t *call; /**< The ABI call information. */
88 ir_type *method_type; /**< The type of the method of the IRG. */
90 ir_node *init_sp; /**< The node representing the stack pointer
91 at the start of the function. */
93 ir_node *reg_params; /**< The reg params node. */
94 pmap *regs; /**< A map of all callee-save and ignore regs to
95 their Projs to the RegParams node. */
97 pset *stack_phis; /**< The set of all Phi nodes inserted due to
98 stack pointer modifying nodes. */
100 int start_block_bias; /**< The stack bias at the end of the start block. */
102 void *cb; /**< ABI Callback self pointer. */
104 pmap *keep_map; /**< mapping blocks to keep nodes. */
105 pset *ignore_regs; /**< Additional registers which shall be ignored. */
107 arch_irn_handler_t irn_handler; /**< Handler registered for ABI-introduced nodes (see get_abi_from_handler). */
108 arch_irn_ops_t irn_ops; /**< Node ops for ABI-introduced nodes (see get_abi_from_ops). */
109 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
112 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
113 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
115 /* Forward, since we need it in be_abi_introduce(). */
116 static const arch_irn_ops_if_t abi_irn_ops;
117 static const arch_irn_handler_t abi_irn_handler;
119 /* Flag: if set, try to omit the frame pointer if called by the backend */
123 _ ____ ___ ____ _ _ _ _
124 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
125 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
126 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
127 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
129 These callbacks are used by the backend to set the parameters
130 for a specific call type.
134 * Set compare function: compares two ABI call object arguments.
136 static int cmp_call_arg(const void *a, const void *b, size_t n)
138 const be_abi_call_arg_t *p = a, *q = b;
/* set_* comparison convention: return 0 iff the entries are equal,
 * i.e. same kind (result vs. parameter) and same position. */
139 return !(p->is_res == q->is_res && p->pos == q->pos);
143 * Get or set an ABI call object argument.
145 * @param call the abi call
146 * @param is_res true for call results, false for call arguments
147 * @param pos position of the argument
148 * @param do_insert true if the argument is set, false if it's retrieved
150 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
152 be_abi_call_arg_t arg;
/* Zero the stack-allocated key so padding/unused fields compare deterministically
 * and an inserted entry starts out fully cleared. */
155 memset(&arg, 0, sizeof(arg));
/* Simple hash combining the kind bit and the position. */
159 hash = is_res * 128 + pos;
162 ? set_insert(call->params, &arg, sizeof(arg), hash)
/* The lookup path may yield NULL when the argument was never set;
 * callers such as is_on_stack() rely on that. */
163 : set_find(call->params, &arg, sizeof(arg), hash);
167 * Retrieve an ABI call object argument.
169 * @param call the ABI call object
170 * @param is_res true for call results, false for call arguments
171 * @param pos position of the argument
/* Lookup-only wrapper (do_insert = 0); returns NULL if the argument was never registered. */
173 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
175 return get_or_set_call_arg(call, is_res, pos, 0);
178 /* Set the flags for a call. */
179 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/** Record that call parameter arg_pos is passed on the stack, with the
 *  given alignment and padding before/after it (all in bytes). */
185 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
187 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
189 arg->alignment = alignment;
190 arg->space_before = space_before;
191 arg->space_after = space_after;
/* NOTE(review): this assert runs after alignment has already been stored;
 * checking before the assignments would fail faster — confirm and consider moving. */
192 assert(alignment > 0 && "Alignment must be greater than 0");
/** Record that call parameter arg_pos is passed in register reg. */
195 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
197 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/** Record that call result arg_pos is returned in register reg. */
202 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
204 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
209 /* Get the flags of a ABI call object. */
210 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
216 * Constructor for a new ABI call object.
218 * @return the new ABI call object
220 static be_abi_call_t *be_abi_call_new()
222 be_abi_call_t *call = xmalloc(sizeof(call[0]));
/* 16 is the initial slot count of the argument set. */
224 call->params = new_set(cmp_call_arg, 16);
/* Default frame-pointer omission from the global backend flag (be_omit_fp). */
227 call->flags.bits.try_omit_fp = be_omit_fp;
232 * Destructor for an ABI call object.
234 static void be_abi_call_free(be_abi_call_t *call)
/* Releases the argument set; the call object itself is presumably freed
 * on a line elided from this view — confirm. */
236 del_set(call->params);
242 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
243 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
244 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
245 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
248 Handling of the stack frame. It is composed of three types:
249 1) The type of the arguments which are pushed on the stack.
250 2) The "between type" which consists of stuff the call of the
251 function pushes on the stack (like the return address and
252 the old base pointer for ia32).
253 3) The Firm frame type which consists of all local variables
/**
 * Compute the offset of a frame entity relative to the frame pointer.
 * @param frame  the stack layout
 * @param ent    the entity (must be a member of one of frame->order[])
 * @param bias   the current stack bias to fold in
 * @return the frame-pointer-relative offset of ent
 */
257 static int get_stack_entity_offset(be_stack_layout_t *frame, entity *ent, int bias)
259 ir_type *t = get_entity_owner(ent);
260 int ofs = get_entity_offset_bytes(ent);
264 /* Find the type the entity is contained in. */
265 for(index = 0; index < N_FRAME_TYPES; ++index) {
266 if(frame->order[index] == t)
270 /* Add the size of all the types below the one of the entity to the entity's offset */
271 for(i = 0; i < index; ++i)
272 ofs += get_type_size_bytes(frame->order[i]);
274 /* correct the offset by the initial position of the frame pointer */
275 ofs -= frame->initial_offset;
277 /* correct the offset with the current bias. */
284 * Retrieve the entity with given offset from a frame type.
/* NOTE(review): when no member has the requested offset this presumably
 * returns NULL (the fall-through return is elided here) — callers must check. */
286 static entity *search_ent_with_offset(ir_type *t, int offset)
290 for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
291 entity *ent = get_compound_member(t, i);
292 if(get_entity_offset_bytes(ent) == offset)
/** Compute and cache the initial frame-pointer offset: the offset of the
 *  entity at offset 0 in the base type (between type for downward stacks,
 *  frame type otherwise), measured with initial_offset temporarily zeroed. */
299 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
301 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
302 entity *ent = search_ent_with_offset(base, 0);
/* NOTE(review): ent is not checked for NULL before use below —
 * confirm the base type always has a member at offset 0. */
303 frame->initial_offset = 0;
304 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
305 return frame->initial_offset;
309 * Initializes the frame layout from parts
311 * @param frame the stack layout that will be initialized
312 * @param args the stack argument layout type
313 * @param between the between layout type
314 * @param locals the method frame type
315 * @param stack_dir the stack direction
317 * @return the initialized stack layout
319 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
320 ir_type *between, ir_type *locals, int stack_dir)
322 frame->arg_type = args;
323 frame->between_type = between;
324 frame->frame_type = locals;
325 frame->initial_offset = 0;
326 frame->stack_dir = stack_dir;
/* The between type always sits in the middle of the ordering. */
327 frame->order[1] = between;
/* NOTE(review): the two assignment pairs below are alternatives chosen by a
 * stack-direction test whose if/else lines are elided from this view —
 * args-first for one direction, locals-first for the other. */
330 frame->order[0] = args;
331 frame->order[2] = locals;
334 frame->order[0] = locals;
335 frame->order[2] = args;
340 /** Dumps the stack layout to file. */
341 static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
345 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
/* One section per frame part (arg, between, frame), in stack order. */
346 for (j = 0; j < N_FRAME_TYPES; ++j) {
347 ir_type *t = frame->order[j];
349 ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
/* List every member with both its type-internal and frame-global offset. */
350 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
351 entity *ent = get_compound_member(t, i);
352 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
358 * Returns non-zero if the call argument at given position
359 * is transferred on the stack.
361 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
363 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
/* Unknown arguments (NULL) count as not-on-stack. */
364 return arg && !arg->in_reg;
374 Adjustment of the calls inside a graph.
379 * Transform a call node.
380 * @param env The ABI environment for the current irg.
381 * @param irn The call node.
382 * @param curr_sp The stack pointer node to use.
383 * @return The stack pointer after the call.
385 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
387 ir_graph *irg = env->birg->irg;
388 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
389 be_abi_call_t *call = be_abi_call_new();
390 ir_type *mt = get_Call_type(irn);
391 ir_node *call_ptr = get_Call_ptr(irn);
392 int n_params = get_method_n_params(mt);
393 ir_node *curr_mem = get_Call_mem(irn);
394 ir_node *bl = get_nodes_block(irn);
395 pset *results = pset_new_ptr(8);
396 pset *caller_save = pset_new_ptr(8);
398 int stack_dir = arch_isa_stack_dir(isa);
399 const arch_register_t *sp = arch_isa_sp(isa);
400 ir_mode *mach_mode = sp->reg_class->mode;
401 struct obstack *obst = &env->obst;
402 ir_node *no_mem = get_irg_no_mem(irg);
403 int no_alloc = call->flags.bits.frame_is_setup_on_call;
405 ir_node *res_proj = NULL;
406 int curr_res_proj = pn_Call_max;
413 const ir_edge_t *edge;
418 /* Let the isa fill out the abi description for that call node. */
419 arch_isa_get_call_abi(isa, mt, call);
421 /* Insert code to put the stack arguments on the stack. */
422 assert(get_Call_n_params(irn) == n_params);
/* First pass: accumulate the total stack size (padding + aligned sizes)
 * and remember the indices of stack-passed parameters on the obstack. */
423 for(i = 0; i < n_params; ++i) {
424 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
427 stack_size += arg->space_before;
428 stack_size = round_up2(stack_size, arg->alignment);
429 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
430 stack_size += arg->space_after;
431 obstack_int_grow(obst, i);
435 pos = obstack_finish(obst);
437 /* Collect all arguments which are passed in registers. */
438 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
439 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
440 if(arg && arg->in_reg) {
441 obstack_int_grow(obst, i);
445 low_args = obstack_finish(obst);
447 /* If there are some parameters which shall be passed on the stack. */
450 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
453 * Reverse list of stack parameters if call arguments are from left to right.
454 * We must reverse them again if they are pushed (not stored) and the stack
455 * direction is downwards.
457 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
458 for(i = 0; i < n_pos >> 1; ++i) {
459 int other = n_pos - i - 1;
467 * If the stack is decreasing and we do not want to store sequentially,
468 * or someone else allocated the call frame
469 * we allocate as much space on the stack all parameters need, by
470 * moving the stack pointer along the stack's direction.
472 if(stack_dir < 0 && !do_seq && !no_alloc) {
473 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
476 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
/* Second pass: emit the actual stores/copies of stack arguments. */
477 for(i = 0; i < n_pos; ++i) {
479 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
480 ir_node *param = get_Call_param(irn, p);
481 ir_node *addr = curr_sp;
483 ir_type *param_type = get_method_param_type(mt, p);
484 int param_size = get_type_size_bytes(param_type) + arg->space_after;
487 * If we wanted to build the arguments sequentially,
488 * the stack pointer for the next must be incremented,
489 * and the memory value propagated.
493 addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
494 param_size + arg->space_before, be_stack_dir_expand);
497 curr_ofs += arg->space_before;
498 curr_ofs = round_up2(curr_ofs, arg->alignment);
500 /* Make the expression to compute the argument's offset. */
502 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
503 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
507 /* Insert a store for primitive arguments. */
508 if (is_atomic_type(param_type)) {
509 mem = new_r_Store(irg, bl, curr_mem, addr, param);
510 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
513 /* Make a mem copy for compound arguments. */
515 assert(mode_is_reference(get_irn_mode(param)));
516 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
517 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
520 curr_ofs += param_size;
525 obstack_ptr_grow(obst, mem);
528 in = (ir_node **) obstack_finish(obst);
530 /* We need the sync only, if we didn't build the stores sequentially. */
532 curr_mem = new_r_Sync(irg, bl, n_pos, in);
533 obstack_free(obst, in);
536 /* Collect caller save registers */
537 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
539 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
540 for(j = 0; j < cls->n_regs; ++j) {
541 const arch_register_t *reg = arch_register_for_index(cls, j);
542 if(arch_register_type_is(reg, caller_save))
543 pset_insert_ptr(caller_save, (void *) reg);
547 /* search the greatest result proj number */
549 /* TODO: what if the result is NOT used? Currently there is
550 * no way to detect this later, especially there is no way to
551 * see this in the proj numbers.
552 * While this is ok for the register allocator, it is bad for
553 * backends which need to change the be_Call further (x87 simulator
554 * for instance. However for this particular case the call_type is
557 foreach_out_edge(irn, edge) {
558 const ir_edge_t *res_edge;
/* NOTE(review): this inner 'irn' shadows the function parameter 'irn'. */
559 ir_node *irn = get_edge_src_irn(edge);
561 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
563 foreach_out_edge(irn, res_edge) {
565 be_abi_call_arg_t *arg;
566 ir_node *res = get_edge_src_irn(res_edge);
568 assert(is_Proj(res));
570 proj = get_Proj_proj(res);
571 arg = get_call_arg(call, 1, proj);
574 shift the proj number to the right, since we will drop the
575 unspeakable Proj_T from the Call. Therefore, all real argument
576 Proj numbers must be increased by pn_be_Call_first_res
578 proj += pn_be_Call_first_res;
579 set_Proj_proj(res, proj);
580 obstack_ptr_grow(obst, res);
582 if(proj > curr_res_proj)
583 curr_res_proj = proj;
/* A register-returned result keeps that register live across the call,
 * so it must not be treated as caller-save clobbered here. */
585 pset_remove_ptr(caller_save, arg->reg);
586 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
/* NULL-terminate the result proj array. */
593 obstack_ptr_grow(obst, NULL);
594 res_projs = obstack_finish(obst);
596 /* make the back end call node and set its register requirements. */
597 for(i = 0; i < n_low_args; ++i)
598 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
600 in = obstack_finish(obst);
/* Calls to a SymConst address can be emitted as immediate calls. */
602 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
603 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
604 curr_res_proj + pset_count(caller_save), n_low_args, in,
606 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
610 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
611 curr_res_proj + pset_count(caller_save), n_low_args, in,
616 Set the register class of the call address to the same as the stack pointer's.
617 That's probably buggy for some architectures.
619 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
621 DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
623 /* Set the register classes and constraints of the Call parameters. */
624 for(i = 0; i < n_low_args; ++i) {
625 int index = low_args[i];
626 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
627 assert(arg->reg != NULL);
629 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
632 /* Set the register constraints of the results. */
633 for(i = 0; res_projs[i]; ++i) {
634 ir_node *irn = res_projs[i];
635 int proj = get_Proj_proj(irn);
637 /* Correct Proj number since it has been adjusted! (see above) */
638 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
641 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
643 obstack_free(obst, in);
644 exchange(irn, low_call);
646 /* redirect the result projs to the lowered call instead of the Proj_T */
647 for(i = 0; res_projs[i]; ++i)
648 set_Proj_pred(res_projs[i], low_call);
650 /* Make additional projs for the caller save registers
651 and the Keep node which keeps them alive. */
652 if(pset_count(caller_save) > 0) {
653 const arch_register_t *reg;
657 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
658 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
660 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
661 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
662 set_irn_link(proj, (void *) reg);
663 obstack_ptr_grow(obst, proj);
667 in = (ir_node **) obstack_finish(obst);
668 keep = be_new_Keep(NULL, irg, bl, n, in);
669 for(i = 0; i < n; ++i) {
670 const arch_register_t *reg = get_irn_link(in[i]);
671 be_node_set_reg_class(keep, i, reg->reg_class);
673 obstack_free(obst, in);
676 /* Clean up the stack. */
678 ir_node *mem_proj = NULL;
/* Reuse the call's memory Proj if one already exists. */
680 foreach_out_edge(low_call, edge) {
681 ir_node *irn = get_edge_src_irn(edge);
682 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
689 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
691 /* Clean up the stack frame if we allocated it */
693 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
695 be_abi_call_free(call) /* release temporaries */;
696 be_abi_call_free(call);
697 obstack_free(obst, pos);
699 del_pset(caller_save);
706 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
708 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
/* Only stack allocations are lowered; heap Allocs are left untouched. */
710 if (get_Alloc_where(alloc) == stack_alloc) {
711 ir_node *bl = get_nodes_block(alloc);
712 ir_graph *irg = get_irn_irg(bl);
713 ir_node *alloc_mem = NULL;
714 ir_node *alloc_res = NULL;
716 const ir_edge_t *edge;
/* Find the Alloc's memory and result Projs among its users. */
719 foreach_out_edge(alloc, edge) {
720 ir_node *irn = get_edge_src_irn(edge);
722 assert(is_Proj(irn));
723 switch(get_Proj_proj(irn)) {
735 /* Beware: currently Alloc nodes without a result might happen,
736 only escape analysis kills them and this phase runs only for object
737 oriented source. We kill the Alloc here. */
738 if (alloc_res == NULL) {
739 exchange(alloc_mem, get_Alloc_mem(alloc));
743 /* The stack pointer will be modified in an unknown manner.
744 We cannot omit it. */
745 env->call->flags.bits.try_omit_fp = 0;
746 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
/* On a downward stack the new SP is the allocated block's address;
 * on an upward stack the old SP already points at it. */
748 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
750 if(alloc_mem != NULL)
751 exchange(alloc_mem, new_r_NoMem(irg));
760 * Walker for dependent_on().
761 * This function searches a node tgt recursively from a given node
762 * but is restricted to the given block.
763 * @return 1 if tgt was reachable from curr, 0 if not.
765 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
/* Leaving the block terminates the search along this path. */
769 if (get_nodes_block(curr) != bl)
775 /* Phi functions stop the recursion inside a basic block */
776 if (! is_Phi(curr)) {
777 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
/* NOTE(review): the recursive call passes a 4th argument (visited_nr) that the
 * signature above does not declare — the real parameter list appears to be
 * elided from this view; confirm against the full source. */
778 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
787 * Check if a node is somehow data dependent on another one.
788 * both nodes must be in the same basic block.
789 * @param n1 The first node.
790 * @param n2 The second node.
791 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
793 static int dependent_on(ir_node *n1, ir_node *n2)
795 ir_node *bl = get_nodes_block(n1);
796 ir_graph *irg = get_irn_irg(bl);
/* Dependence is only defined within one basic block. */
798 assert(bl == get_nodes_block(n2));
799 return check_dependence(n1, n2, bl);
/* qsort comparator ordering Call nodes by data dependence (dependent
 * nodes sort after their dependencies). Note: the misspelled name
 * ("dependecy") is kept — renaming would break its users. */
802 static int cmp_call_dependecy(const void *c1, const void *c2)
804 ir_node *n1 = *(ir_node **) c1;
805 ir_node *n2 = *(ir_node **) c2;
808 Classical qsort() comparison function behavior:
809 0 if both elements are equal
810 1 if second is "smaller" that first
811 -1 if first is "smaller" that second
813 if (dependent_on(n1, n2))
816 if (dependent_on(n2, n1))
823 * Walker: links all Call nodes to the Block they are contained.
825 static void link_calls_in_block_walker(ir_node *irn, void *data)
828 be_abi_irg_t *env = data;
829 ir_node *bl = get_nodes_block(irn);
830 void *save = get_irn_link(bl);
/* A graph containing a Call is no leaf function. */
832 env->call->flags.bits.irg_is_leaf = 0;
/* Prepend irn to the block's singly-linked list kept in the link fields. */
834 set_irn_link(irn, save);
835 set_irn_link(bl, irn);
841 * Process all Call nodes inside a basic block.
842 * Note that the link field of the block must contain a linked list of all
843 * Call nodes inside the Block. We first order this list according to data dependency
844 * and then connect the calls together.
846 static void process_calls_in_block(ir_node *bl, void *data)
848 be_abi_irg_t *env = data;
849 ir_node *curr_sp = env->init_sp;
/* Gather the block's linked Call/Alloc nodes into an array on the obstack. */
853 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
854 obstack_ptr_grow(&env->obst, irn);
856 /* If there were call nodes in the block. */
862 nodes = obstack_finish(&env->obst);
864 /* order the call nodes according to data dependency */
865 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* Walk in reverse (dependencies first) and thread the stack pointer through. */
867 for(i = n - 1; i >= 0; --i) {
868 ir_node *irn = nodes[i];
870 DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
871 switch(get_irn_opcode(irn)) {
873 curr_sp = adjust_call(env, irn, curr_sp);
876 curr_sp = adjust_alloc(env, irn, curr_sp);
883 obstack_free(&env->obst, nodes);
885 /* Keep the last stack state in the block by tying it to Keep node */
887 keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
888 pmap_insert(env->keep_map, bl, keep);
/* Remember the block's final stack state for later phases (e.g. create_be_return). */
891 set_irn_link(bl, curr_sp);
895 * Adjust all call nodes in the graph to the ABI conventions.
897 static void process_calls(be_abi_irg_t *env)
899 ir_graph *irg = env->birg->irg;
/* Assume leaf until link_calls_in_block_walker sees a Call. */
901 env->call->flags.bits.irg_is_leaf = 1;
902 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
903 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
/** Walker: collects all Return nodes of the graph on the given obstack. */
906 static void collect_return_walker(ir_node *irn, void *data)
908 if(get_irn_opcode(irn) == iro_Return) {
909 struct obstack *obst = data;
910 obstack_ptr_grow(obst, irn);
/** Build the function prologue's stack/frame pointer setup in the start block
 *  and reroute all users of the old frame node to the new frame pointer. */
915 static ir_node *setup_frame(be_abi_irg_t *env)
917 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
918 const arch_register_t *sp = isa->sp;
919 const arch_register_t *bp = isa->bp;
920 be_abi_call_flags_bits_t flags = env->call->flags.bits;
921 ir_graph *irg = env->birg->irg;
922 ir_node *bl = get_irg_start_block(irg);
923 ir_node *no_mem = get_irg_no_mem(irg);
924 ir_node *old_frame = get_irg_frame(irg);
925 ir_node *stack = pmap_get(env->regs, (void *) sp);
926 ir_node *frame = pmap_get(env->regs, (void *) bp);
928 int stack_nr = get_Proj_proj(stack);
/* Without a frame pointer the frame is addressed through SP only. */
930 if(flags.try_omit_fp) {
931 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
/* Otherwise copy SP into BP and pin BP so it is never spilled or reassigned. */
936 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
938 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
940 be_set_constr_single_reg(frame, -1, bp);
941 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
942 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
945 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
948 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
949 env->init_sp = stack;
950 set_irg_frame(irg, frame);
951 edges_reroute(old_frame, frame, irg);
/** Build the epilogue stack teardown before a Return and collect the
 *  nodes (stack, frame, callee-save/ignore registers) the Return must keep. */
956 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
958 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
959 const arch_register_t *sp = isa->sp;
960 const arch_register_t *bp = isa->bp;
961 ir_graph *irg = env->birg->irg;
962 ir_node *ret_mem = get_Return_mem(ret);
963 ir_node *frame = get_irg_frame(irg);
964 ir_node *bl = get_nodes_block(ret);
/* The block's final stack state was stored in its link field by process_calls_in_block(). */
965 ir_node *stack = get_irn_link(bl);
/* Frame-pointer-less: shrink SP by the frame size; otherwise restore SP from BP. */
969 if(env->call->flags.bits.try_omit_fp) {
970 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
974 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
975 be_set_constr_single_reg(stack, -1, sp);
976 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
979 pmap_foreach(env->regs, ent) {
980 const arch_register_t *reg = ent->key;
981 ir_node *irn = ent->value;
984 obstack_ptr_grow(&env->obst, stack);
986 obstack_ptr_grow(&env->obst, frame);
987 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
988 obstack_ptr_grow(obst, irn);
995 * Computes the stack argument layout type.
996 * Changes a possibly allocated value param type by moving
997 * entities to the stack layout type.
999 * @param env the ABI environment
1000 * @param call the current call ABI
1001 * @param method_type the method type
1003 * @return the stack argument layout type
1005 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
/* Iterate parameters in stack-layout order: combination of argument
 * order (left_to_right) and the ISA's stack growth direction. */
1007 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1008 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
1009 int n = get_method_n_params(method_type);
1010 int curr = inc > 0 ? 0 : n - 1;
1016 ir_type *val_param_tp = get_method_value_param_type(method_type);
1017 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1019 res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
1020 for (i = 0; i < n; ++i, curr += inc) {
1021 ir_type *param_type = get_method_param_type(method_type, curr);
1022 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1024 if (arg->on_stack) {
1026 /* the entity was already created, move it to the param type */
1027 arg->stack_ent = get_method_value_param_ent(method_type, i);
1028 remove_struct_member(val_param_tp, arg->stack_ent);
1029 set_entity_owner(arg->stack_ent, res);
1030 add_struct_member(res, arg->stack_ent);
1031 /* must be automatic to set a fixed layout */
1032 set_entity_allocation(arg->stack_ent, allocation_automatic);
1035 snprintf(buf, sizeof(buf), "param_%d", i);
1036 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
/* Assign byte offsets with the argument's padding and alignment applied. */
1038 ofs += arg->space_before;
1039 ofs = round_up2(ofs, arg->alignment);
1040 set_entity_offset_bytes(arg->stack_ent, ofs);
1041 ofs += arg->space_after;
1042 ofs += get_type_size_bytes(param_type);
1045 set_type_size_bytes(res, ofs);
1046 set_type_state(res, layout_fixed);
/** For each register class, insert a Perm over the live non-ignore register
 *  Projs after the RegParams node and re-map the registers to the Perm's
 *  outputs, delimiting the RegParams. */
1050 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1053 struct obstack obst;
1055 obstack_init(&obst);
1057 /* Create a Perm after the RegParams node to delimit it. */
1058 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1059 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
/* Collect this class's mapped, non-ignore registers; remember each
 * register in its node's link field for the re-mapping below. */
1064 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1065 const arch_register_t *reg = &cls->regs[j];
1066 ir_node *irn = pmap_get(regs, (void *) reg);
1068 if(irn && !arch_register_type_is(reg, ignore)) {
1070 obstack_ptr_grow(&obst, irn);
1071 set_irn_link(irn, (void *) reg);
1075 obstack_ptr_grow(&obst, NULL);
1076 in = obstack_finish(&obst);
1078 perm = be_new_Perm(cls, irg, bl, n_regs, in);
1079 for(j = 0; j < n_regs; ++j) {
1080 ir_node *arg = in[j];
1081 arch_register_t *reg = get_irn_link(arg);
1082 pmap_insert(regs, reg, arg);
1083 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1086 obstack_free(&obst, in);
1089 obstack_free(&obst, NULL);
1093 const arch_register_t *reg;
/* qsort comparator: order register/node pairs by register class, then by
 * register index within the class. */
1097 static int cmp_regs(const void *a, const void *b)
1099 const reg_node_map_t *p = a;
1100 const reg_node_map_t *q = b;
1102 if(p->reg->reg_class == q->reg->reg_class)
1103 return p->reg->index - q->reg->index;
/* NOTE(review): if reg_class is a pointer this subtraction orders by pointer
 * value (unspecified across objects, possibly truncated) — confirm intended. */
1105 return p->reg->reg_class - q->reg->reg_class;
/** Flatten a register->node pmap into a sorted (by cmp_regs) obstack-allocated
 *  array of reg_node_map_t; caller frees via the obstack. */
1108 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1111 int n = pmap_count(reg_map);
1113 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1115 pmap_foreach(reg_map, ent) {
1116 res[i].reg = ent->key;
1117 res[i].irn = ent->value;
/* Sort so the barrier/epilogue code sees a deterministic register order. */
1121 qsort(res, n, sizeof(res[0]), cmp_regs);
1126 * Creates a barrier.
1128 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1130 ir_graph *irg = env->birg->irg;
1131 int n_regs = pmap_count(regs);
/* Barrier inputs: one per mapped register (sorted), plus the current memory. */
1137 rm = reg_map_to_arr(&env->obst, regs);
1139 for(n = 0; n < n_regs; ++n)
1140 obstack_ptr_grow(&env->obst, rm[n].irn);
1143 obstack_ptr_grow(&env->obst, *mem);
1147 in = (ir_node **) obstack_finish(&env->obst);
1148 irn = be_new_Barrier(irg, bl, n, in);
1149 obstack_free(&env->obst, in);
/* Wire each register through the barrier: constrain input and output to the
 * same single register and replace the map entry by the barrier's Proj. */
1151 for(n = 0; n < n_regs; ++n) {
1152 const arch_register_t *reg = rm[n].reg;
1154 int pos = BE_OUT_POS(n);
1157 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1158 be_node_set_reg_class(irn, n, reg->reg_class);
1160 be_set_constr_single_reg(irn, n, reg);
1161 be_set_constr_single_reg(irn, pos, reg);
1162 be_node_set_reg_class(irn, pos, reg->reg_class);
1163 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1165 /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
1166 if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
1167 flags |= arch_irn_flags_ignore;
1169 if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
1170 flags |= arch_irn_flags_modify_sp;
1172 be_node_set_flags(irn, pos, flags);
1174 pmap_insert(regs, (void *) reg, proj);
/* Thread the memory through the barrier as well. */
1178 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1181 obstack_free(&env->obst, rm);
1186 * Creates a be_Return for a Return node.
1188 * @param env the abi environment
1189 * @param irn the Return node or NULL if there was none
1190 * @param bl the block where the be_Return should be placed
1191 * @param mem the current memory
1192 * @param n_res number of return results
1194 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
1195 be_abi_call_t *call = env->call;
1196 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1198 pmap *reg_map = pmap_create();
1199 ir_node *keep = pmap_get(env->keep_map, bl);
1205 const arch_register_t **regs;
1209 get the valid stack node in this block.
1210 If we had a call in that block there is a Keep constructed by process_calls()
1211 which points to the last stack modification in that block. we'll use
1212 it then. Else we use the stack from the start block and let
1213 the ssa construction fix the usage.
1215 stack = be_abi_reg_map_get(env->regs, isa->sp);
1217 ir_node *bad = new_r_Bad(env->birg->irg);
1218 stack = get_irn_n(keep, 0);
/* The Keep has served its purpose — detach it by rerouting it to Bad. */
1219 set_nodes_block(keep, bad);
1220 set_irn_n(keep, 0, bad);
1221 // exchange(keep, new_r_Bad(env->birg->irg));
1224 /* Insert results for Return into the register map. */
1225 for(i = 0; i < n_res; ++i) {
1226 ir_node *res = get_Return_res(irn, i);
1227 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1228 assert(arg->in_reg && "return value must be passed in register");
1229 pmap_insert(reg_map, (void *) arg->reg, res);
1232 /* Add uses of the callee save registers. */
1233 pmap_foreach(env->regs, ent) {
1234 const arch_register_t *reg = ent->key;
1235 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1236 pmap_insert(reg_map, ent->key, ent->value);
1239 be_abi_reg_map_set(reg_map, isa->sp, stack);
1241 /* Make the Epilogue node and call the arch's epilogue maker. */
1242 create_barrier(env, bl, &mem, reg_map, 1);
1243 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1246 Maximum size of the in array for Return nodes is
1247 return args + callee save/ignore registers + memory + stack pointer
1249 in_max = pmap_count(reg_map) + n_res + 2;
1251 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1252 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1255 in[1] = be_abi_reg_map_get(reg_map, isa->sp);
1260 /* clear SP entry, since it has already been grown. */
1261 pmap_insert(reg_map, (void *) isa->sp, NULL);
/* First the return values themselves... */
1262 for(i = 0; i < n_res; ++i) {
1263 ir_node *res = get_Return_res(irn, i);
1264 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1266 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1267 regs[n++] = arg->reg;
1269 /* Clear the map entry to mark the register as processed. */
1270 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1273 /* grow the rest of the stuff. */
1274 pmap_foreach(reg_map, ent) {
1277 regs[n++] = ent->key;
1281 /* The in array for the new back end return is now ready. */
1282 ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
1284 /* Set the register classes of the return's parameter accordingly. */
1285 for(i = 0; i < n; ++i)
1287 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1289 /* Free the space of the Epilog's in array and the register <-> proj map. */
1290 obstack_free(&env->obst, in);
1291 pmap_destroy(reg_map);
1296 typedef struct lower_frame_sels_env_t {
/* Walker context for lower_frame_sels_walker(). */
1298 entity *value_param_list; /**< the list of all value param entities */
1299 } lower_frame_sels_env_t;
1302 * Walker: Replaces Sels of frame type and
1303 * value param type entities by FrameAddress.
1305 static void lower_frame_sels_walker(ir_node *irn, void *data)
/* Walker: replace Sels based on the frame or the value-param base by
 * be_FrameAddr nodes; value-param entities are additionally collected
 * in ctx->value_param_list (chained through their entity link). */
1307 lower_frame_sels_env_t *ctx = data;
1310 ir_graph *irg = current_ir_graph;
1311 ir_node *frame = get_irg_frame(irg);
1312 ir_node *param_base = get_irg_value_param_base(irg);
1313 ir_node *ptr = get_Sel_ptr(irn);
1315 if (ptr == frame || ptr == param_base) {
1316 be_abi_irg_t *env = ctx->env;
1317 entity *ent = get_Sel_entity(irn);
1318 ir_node *bl = get_nodes_block(irn);
/* Build the FrameAddr in the stack pointer's register class; the
 * exchange with the old Sel is presumably on an elided line. */
1321 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
/* Remember value-param entities for later backing-store creation. */
1324 if (ptr == param_base) {
1325 set_entity_link(ent, ctx->value_param_list);
1326 ctx->value_param_list = ent;
1333 * Check if a value parameter is transmitted as a register.
1334 * This might happen if the address of a parameter is taken which is
1335 * transmitted in registers.
1337 * Note that on some architectures this case must be handled specially
1338 * because the place of the backing store is determined by their ABI.
1340 * In the default case we move the entity to the frame type and create
1341 * a backing store into the first block.
1343 static void fix_address_of_parameter_access(be_abi_irg_t *env, entity *value_param_list) {
/* For value parameters whose address is taken but which are transmitted
 * in a register (see comment above): move the entity to the frame type
 * and create a backing store in the first block after the start block. */
1344 be_abi_call_t *call = env->call;
1345 ir_graph *irg = env->birg->irg;
1346 entity *ent, *next_ent, *new_list;
1348 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
/* Collect the entities that need a backing store into new_list
 * (the selecting condition — presumably arg->in_reg — is elided). */
1351 for (ent = value_param_list; ent; ent = next_ent) {
1352 int i = get_struct_member_index(get_entity_owner(ent), ent);
1353 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1355 next_ent = get_entity_link(ent);
1357 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
1358 set_entity_link(ent, new_list);
1363 /* ok, change the graph */
1364 ir_node *start_bl = get_irg_start_block(irg);
1365 ir_node *first_bl = NULL;
1366 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1367 const ir_edge_t *edge;
1368 optimization_state_t state;
/* Find the (unique) successor block of the start block. */
1371 foreach_block_succ(start_bl, edge) {
1372 ir_node *succ = get_edge_src_irn(edge);
1373 if (start_bl != succ) {
1379 /* we had already removed critical edges, so the following
1380 assertion should be always true. */
1381 assert(get_Block_n_cfgpreds(first_bl) == 1);
1383 /* now create backing stores */
1384 frame = get_irg_frame(irg);
1385 imem = get_irg_initial_mem(irg);
1387 save_optimization_state(&state);
/* NOTE(review): optimizations are presumably switched off around this
 * Proj creation (elided line) to avoid CSE — confirm. */
1389 nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
1390 restore_optimization_state(&state);
1392 /* reroute all edges to the new memory source */
1393 edges_reroute(imem, nmem, irg);
1397 args = get_irg_args(irg);
1398 args_bl = get_nodes_block(args);
/* Emit one Store per collected entity: store the incoming argument Proj
 * to the entity's frame address, chaining the memory through the loop.
 * NOTE(review): 'mem' reads 'store' before the visible assignment below;
 * elided lines presumably establish the first-iteration value — verify. */
1399 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1400 int i = get_struct_member_index(get_entity_owner(ent), ent);
1401 ir_type *tp = get_entity_type(ent);
1402 ir_mode *mode = get_type_mode(tp);
1405 /* address for the backing store */
1406 addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
1409 mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
1411 /* the backing store itself */
1412 store = new_r_Store(irg, first_bl, mem, addr,
1413 new_r_Proj(irg, args_bl, args, mode, i));
1415 /* the new memory Proj gets the last Proj from store */
1416 set_Proj_pred(nmem, store);
1417 set_Proj_proj(nmem, pn_Store_M);
1419 /* move all entities to the frame type */
1420 frame_tp = get_irg_frame_type(irg);
1421 offset = get_type_size_bytes(frame_tp);
1422 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1423 ir_type *tp = get_entity_type(ent);
1424 int align = get_type_alignment_bytes(tp);
/* Round the offset up to the entity's alignment (the masking/rounding
 * step after this addition is presumably on an elided line). */
1426 offset += align - 1;
1428 set_entity_owner(ent, frame_tp);
1429 add_class_member(frame_tp, ent);
1430 /* must be automatic to set a fixed layout */
1431 set_entity_allocation(ent, allocation_automatic);
1432 set_entity_offset_bytes(ent, offset);
1433 offset += get_type_size_bytes(tp);
1435 set_type_size_bytes(frame_tp, offset);
1440 * Modify the irg itself and the frame type.
1442 static void modify_irg(be_abi_irg_t *env)
/* Main graph transformation for ABI introduction: lower frame Sels,
 * build the frame layout, create the RegParams node plus prologue and
 * epilogue barriers, replace stack-passed parameters, and turn every
 * Return into a be_Return. */
1444 be_abi_call_t *call = env->call;
1445 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1446 const arch_register_t *sp = arch_isa_sp(isa);
1447 ir_graph *irg = env->birg->irg;
1448 ir_node *bl = get_irg_start_block(irg);
1449 ir_node *end = get_irg_end_block(irg);
1450 ir_node *no_mem = get_irg_no_mem(irg);
1451 ir_node *mem = get_irg_initial_mem(irg);
1452 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1453 pset *dont_save = pset_new_ptr(8);
1459 const arch_register_t *fp_reg;
1460 ir_node *frame_pointer;
1462 ir_node *reg_params_bl;
1465 const ir_edge_t *edge;
1466 ir_type *arg_type, *bet_type;
1467 lower_frame_sels_env_t ctx;
1469 bitset_t *used_proj_nr;
1470 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1472 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1474 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1476 ctx.value_param_list = NULL;
1477 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1479 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1480 env->regs = pmap_create();
1482 used_proj_nr = bitset_alloca(1024);
1483 n_params = get_method_n_params(method_type);
1484 args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
1485 memset(args, 0, n_params * sizeof(args[0]));
1487 /* Check if a value parameter is transmitted as a register.
1488 * This might happen if the address of a parameter is taken which is
1489 * transmitted in registers.
1491 * Note that on some architectures this case must be handled specially
1492 * because the place of the backing store is determined by their ABI.
1494 * In the default case we move the entity to the frame type and create
1495 * a backing store into the first block.
1497 fix_address_of_parameter_access(env, ctx.value_param_list);
1499 /* Fill the argument vector */
1500 arg_tuple = get_irg_args(irg);
1501 foreach_out_edge(arg_tuple, edge) {
1502 ir_node *irn = get_edge_src_irn(edge);
1503 int nr = get_Proj_proj(irn);
/* args[nr] = irn is presumably done on an elided line. */
1505 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1508 arg_type = compute_arg_type(env, call, method_type);
1509 bet_type = call->cb->get_between_type(env->cb);
1510 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1512 /* Count the register params and add them to the number of Projs for the RegParams node */
1513 for(i = 0; i < n_params; ++i) {
1514 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1515 if(arg->in_reg && args[i]) {
1516 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1517 assert(i == get_Proj_proj(args[i]));
1519 /* For now, associate the register with the old Proj from Start representing that argument. */
1520 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1521 bitset_set(used_proj_nr, i);
1522 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1526 /* Collect all callee-save registers */
1527 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1528 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1529 for(j = 0; j < cls->n_regs; ++j) {
1530 const arch_register_t *reg = &cls->regs[j];
1531 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1532 pmap_insert(env->regs, (void *) reg, NULL);
/* The stack and (if any) base pointer always get RegParams Projs. */
1536 pmap_insert(env->regs, (void *) sp, NULL);
1537 pmap_insert(env->regs, (void *) isa->bp, NULL);
1538 reg_params_bl = get_irg_start_block(irg);
1539 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1542 * make proj nodes for the callee save registers.
1543 * memorize them, since Return nodes get those as inputs.
1545 * Note, that if a register corresponds to an argument, the regs map contains
1546 * the old Proj from start for that argument.
1549 rm = reg_map_to_arr(&env->obst, env->regs);
1550 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1551 arch_register_t *reg = (void *) rm[i].reg;
1552 ir_node *arg_proj = rm[i].irn;
1553 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
/* 'nr' (the chosen Proj number) is presumably computed on elided lines. */
1555 int pos = BE_OUT_POS((int) nr);
1561 bitset_set(used_proj_nr, nr);
1562 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1563 pmap_insert(env->regs, (void *) reg, proj);
1564 be_set_constr_single_reg(env->reg_params, pos, reg);
1565 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1568 * If the register is an ignore register,
1569 * The Proj for that register shall also be ignored during register allocation.
1571 if(arch_register_type_is(reg, ignore))
1572 flags |= arch_irn_flags_ignore;
/* The guard for the modify_sp flag (reg == sp) is presumably elided. */
1575 flags |= arch_irn_flags_modify_sp;
1577 be_node_set_flags(env->reg_params, pos, flags);
1579 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1581 obstack_free(&env->obst, rm);
1583 /* Generate the Prologue */
1584 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1586 /* do the stack allocation BEFORE the barrier, or spill code
1587 might be added before it */
1588 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1589 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
1590 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1592 barrier = create_barrier(env, bl, &mem, env->regs, 0);
1594 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1595 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1597 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1598 set_irg_frame(irg, frame_pointer);
1599 pset_insert_ptr(env->ignore_regs, fp_reg);
1601 /* Now, introduce stack param nodes for all parameters passed on the stack */
1602 for(i = 0; i < n_params; ++i) {
1603 ir_node *arg_proj = args[i];
1604 ir_node *repl = NULL;
1606 if(arg_proj != NULL) {
1607 be_abi_call_arg_t *arg;
1608 ir_type *param_type;
1609 int nr = get_Proj_proj(arg_proj);
/* NOTE(review): clamping to n_params (not n_params - 1) looks like a
 * potential off-by-one for get_call_arg/get_method_param_type — confirm. */
1611 nr = MIN(nr, n_params);
1612 arg = get_call_arg(call, 0, nr);
1613 param_type = get_method_param_type(method_type, nr);
/* Register-passed argument: reuse the RegParams Proj as replacement. */
1616 repl = pmap_get(env->regs, (void *) arg->reg);
1619 else if(arg->on_stack) {
1620 /* For atomic parameters which are actually used, we create a StackParam node. */
1621 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1622 ir_mode *mode = get_type_mode(param_type);
1623 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1624 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1627 /* The stack parameter is not primitive (it is a struct or array),
1628 we thus will create a node representing the parameter's address
1631 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1635 assert(repl != NULL);
1636 edges_reroute(args[i], repl, irg);
1640 /* All Return nodes hang on the End node, so look for them there. */
1641 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
1642 ir_node *irn = get_Block_cfgpred(end, i);
1644 if (is_Return(irn)) {
1645 ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
1649 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return than,
1650 the code is dead and will never be executed. */
1652 del_pset(dont_save);
1653 obstack_free(&env->obst, args);
1657 * Walker: puts all Alloc(stack_alloc) on an obstack
1659 static void collect_alloca_walker(ir_node *irn, void *data)
/* Walker: push every stack Alloc node onto the environment's obstack. */
1661 be_abi_irg_t *env = data;
1662 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1663 obstack_ptr_grow(&env->obst, irn);
1666 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
/* Public entry point: allocate and initialize the ABI environment for
 * @birg, query the ISA's calling convention, lower calls, run the graph
 * transformation, and return the environment (return statement outside
 * this view). Ownership: caller releases via be_abi_free(). */
1668 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1669 ir_node *old_frame = get_irg_frame(birg->irg);
1670 ir_graph *irg = birg->irg;
1674 optimization_state_t state;
1676 obstack_init(&env->obst);
1678 env->isa = birg->main_env->arch_env->isa;
1679 env->method_type = get_entity_type(get_irg_entity(irg));
1680 env->call = be_abi_call_new();
1681 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1683 env->ignore_regs = pset_new_ptr_default();
1684 env->keep_map = pmap_create();
1685 env->dce_survivor = new_survive_dce();
1687 env->stack_phis = pset_new_ptr(16);
1688 /* Beware: later we replace this node by the real one, ensure it is not CSE'd
1689 to another Unknown or the stack pointer gets used */
1690 save_optimization_state(&state);
1692 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode)
1693 restore_optimization_state(&state);
1694 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
1696 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1698 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1699 env->irn_ops.impl = &abi_irn_ops;
1701 /* Lower all call nodes in the IRG. */
1704 /* Process the IRG */
1707 /* We don't need the keep map anymore. */
1708 pmap_destroy(env->keep_map);
1710 /* reroute the stack origin of the calls to the true stack origin. */
1711 edges_reroute(dummy, env->init_sp, irg);
1712 edges_reroute(old_frame, get_irg_frame(irg), irg);
1714 /* Make some important node pointers survive the dead node elimination. */
1715 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1716 pmap_foreach(env->regs, ent)
1717 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1719 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
1721 env->call->cb->done(env->cb);
1726 void be_abi_free(be_abi_irg_t *env)
/* Release everything be_abi_introduce() allocated: DCE survivors, the
 * Phi and ignore-register sets, the register map, the obstack, and the
 * irn handler pushed onto the arch environment. (free(env) is presumably
 * on an elided line — confirm.) */
1728 free_survive_dce(env->dce_survivor);
1729 del_pset(env->stack_phis);
1730 del_pset(env->ignore_regs);
1731 pmap_destroy(env->regs);
1732 obstack_free(&env->obst, NULL);
1733 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1737 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
/* Set the bit of every ignore register belonging to class @cls in @bs. */
1739 arch_register_t *reg;
1741 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
1742 if(reg->reg_class == cls)
1743 bitset_set(bs, reg->index);
1750 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1751 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1752 | _| | |> < ___) | || (_| | (__| <
1753 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
1757 struct fix_stack_walker_info {
/* Walker context for collect_stack_nodes_walker(); a 'nodes' pset member
 * is presumably declared on an elided line. */
1759 const arch_env_t *aenv;
1763 * Walker. Collect all stack modifying nodes.
1765 static void collect_stack_nodes_walker(ir_node *irn, void *data)
/* Walker: collect every node flagged modify_sp into info->nodes. */
1767 struct fix_stack_walker_info *info = data;
1769 if(arch_irn_is(info->aenv, irn, modify_sp))
1770 pset_insert_ptr(info->nodes, irn);
1773 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
/* Re-establish SSA form for the stack pointer: collect all stack-modifying
 * nodes (plus init_sp), run SSA construction over them using dominance
 * frontiers, record the created Phis in env->stack_phis, and recompute
 * liveness. */
1775 dom_front_info_t *df;
1776 pset *stack_nodes = pset_new_ptr(16);
1777 struct fix_stack_walker_info info;
1779 info.nodes = stack_nodes;
1780 info.aenv = env->birg->main_env->arch_env;
1782 /* We need dominance frontiers for fix up */
1783 df = be_compute_dominance_frontiers(env->birg->irg);
1784 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
1785 pset_insert_ptr(stack_nodes, env->init_sp);
1786 be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1787 del_pset(stack_nodes);
1789 /* Liveness could have changed due to Phi nodes. */
1790 be_liveness(env->birg->irg);
1792 /* free these dominance frontiers */
1793 be_free_dominance_frontiers(df);
1797 * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
1798 * into -1 or 1, respectively.
1799 * @param irn The node.
1800 * @return 1 if the IncSP direction is along the stack direction (expand), -1 if against it (shrink).
1802 static int get_dir(ir_node *irn)
/* shrink -> 1 - 2*1 = -1; expand (any non-shrink direction) -> 1. */
1804 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
1807 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
/* Walk the schedule of block @bl starting with stack bias @bias: fold
 * constant IncSP offsets into the bias and assign the true frame offset
 * (including the bias) to every node with a frame entity. Returns the
 * bias at the end of the block (return statement outside this view). */
1809 const arch_env_t *aenv = env->birg->main_env->arch_env;
1810 int omit_fp = env->call->flags.bits.try_omit_fp;
1813 sched_foreach(bl, irn) {
1816 If the node modifies the stack pointer by a constant offset,
1817 record that in the bias.
1819 if(be_is_IncSP(irn)) {
1820 int ofs = be_get_IncSP_offset(irn);
1821 int dir = get_dir(irn);
/* BE_STACK_FRAME_SIZE is a placeholder: substitute the real frame size. */
1823 if(ofs == BE_STACK_FRAME_SIZE) {
1824 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1825 be_set_IncSP_offset(irn, ofs);
1833 Else check, if the node relates to an entity on the stack frame.
1834 If so, set the true offset (including the bias) for that
1838 entity *ent = arch_get_frame_entity(aenv, irn);
1840 int offset = get_stack_entity_offset(env->frame, ent, bias);
1841 arch_set_frame_offset(aenv, irn, offset);
1842 DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1851 * A helper struct for the bias walker.
1854 be_abi_irg_t *env; /**< The ABI irg environment. */
1855 int start_block_bias; /**< The bias at the end of the start block. */
1856 ir_node *start_block; /**< The start block of the current graph. */
1860 * Block-Walker: fix all stack offsets
1862 static void stack_bias_walker(ir_node *bl, void *data)
/* Block walker: propagate the start block's end-bias into every other
 * block (the start block itself was already processed by the caller). */
1864 struct bias_walk *bw = data;
1865 if (bl != bw->start_block) {
1866 process_stack_bias(bw->env, bl, bw->start_block_bias);
1870 void be_abi_fix_stack_bias(be_abi_irg_t *env)
/* Compute the initial frame offset, then fix the stack bias in the start
 * block and (via stack_bias_walker) in all remaining blocks. */
1872 ir_graph *irg = env->birg->irg;
1873 struct bias_walk bw;
1875 stack_frame_compute_initial_offset(env->frame);
1876 // stack_layout_dump(stdout, env->frame);
1878 /* Determine the stack bias at the end of the start block. */
1879 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1881 /* fix the bias in all other blocks */
/* bw.env is presumably assigned on an elided line — confirm. */
1883 bw.start_block = get_irg_start_block(irg);
1884 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1887 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
/* Return the Proj (from RegParams) recorded for callee-save register @reg.
 * The register must be callee-save and present in abi->regs. */
1889 assert(arch_register_type_is(reg, callee_save));
1890 assert(pmap_contains(abi->regs, (void *) reg));
1891 return pmap_get(abi->regs, (void *) reg);
1895 _____ _____ _ _ _ _ _ _
1896 |_ _| __ \| \ | | | | | | | | |
1897 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1898 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1899 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1900 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1902 for Phi nodes which are created due to stack modifying nodes
1903 such as IncSP, AddSP and SetSP.
1905 These Phis are always to be ignored by the reg alloc and are
1906 fixed on the SP register of the ISA.
1909 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
/* Hand out the ABI's irn_ops only for Phi nodes recorded in stack_phis;
 * all other nodes fall through to the next handler (NULL result). */
1911 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1912 const void *res = NULL;
1914 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
1915 res = &abi->irn_ops;
1920 static void be_abi_limited(void *data, bitset_t *bs)
/* Limited-register callback: restrict the allowed set to exactly the
 * ISA's stack pointer. */
1922 be_abi_irg_t *abi = data;
1923 bitset_clear_all(bs);
1924 bitset_set(bs, abi->isa->sp->index);
1927 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
/* Register requirements for ABI stack Phis: the single output is limited
 * to the stack pointer (via be_abi_limited); every input only needs the
 * stack pointer's register class. */
1929 be_abi_irg_t *abi = get_abi_from_ops(self);
1930 const arch_register_t *reg = abi->isa->sp;
1932 memset(req, 0, sizeof(req[0]));
1934 if(pos == BE_OUT_POS(0)) {
1935 req->cls = reg->reg_class;
1936 req->type = arch_register_req_type_limited;
1937 req->limited = be_abi_limited;
1938 req->limited_env = abi;
1941 else if(pos >= 0 && pos < get_irn_arity(irn)) {
1942 req->cls = reg->reg_class;
1943 req->type = arch_register_req_type_normal;
/* Stack Phis are pinned to the SP register; presumably a no-op
 * (body elided from this view — confirm). */
1949 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1953 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
/* Every node handled by these ops lives in the stack pointer register. */
1955 const be_abi_irg_t *abi = get_abi_from_ops(self);
1956 return abi->isa->sp;
1959 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
/* ABI stack Phis are ordinary nodes from the classifier's point of view. */
1961 return arch_irn_class_normal;
1964 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
/* Stack Phis are ignored by the register allocator and modify the SP. */
1966 return arch_irn_flags_ignore | arch_irn_flags_modify_sp;
/* Stack Phis have no frame entity; presumably returns NULL
 * (body elided from this view — confirm). */
1969 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
/* No per-node stack bias to set on stack Phis; presumably a no-op
 * (body elided from this view — confirm). */
1974 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* Dispatch table binding the abi_* callbacks above to the backend's
 * irn-ops interface (remaining initializer entries elided from view). */
1978 static const arch_irn_ops_if_t abi_irn_ops = {
1979 abi_get_irn_reg_req,
1984 abi_get_frame_entity,
1988 static const arch_irn_handler_t abi_irn_handler = {