4 * @author Sebastian Hack
19 #include "irgraph_t.h"
22 #include "iredges_t.h"
25 #include "irprintf_t.h"
37 #include "besched_t.h"
39 typedef struct _be_abi_call_arg_t {
40 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
41 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
42 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
45 const arch_register_t *reg;
48 unsigned space_before;
52 struct _be_abi_call_t {
53 be_abi_call_flags_t flags;
54 const be_abi_callbacks_t *cb;
55 ir_type *between_type;
59 struct _be_abi_irg_t {
61 be_stack_layout_t *frame; /**< The stack frame model. */
62 const be_irg_t *birg; /**< The back end IRG. */
63 const arch_isa_t *isa; /**< The isa. */
64 survive_dce_t *dce_survivor;
66 be_abi_call_t *call; /**< The ABI call information. */
67 ir_type *method_type; /**< The type of the method of the IRG. */
69 ir_node *init_sp; /**< The node representing the stack pointer
70 at the start of the function. */
72 ir_node *start_barrier; /**< The barrier of the start block */
74 ir_node *reg_params; /**< The reg params node. */
75 pmap *regs; /**< A map of all callee-save and ignore regs to
76 their Projs to the RegParams node. */
78 pset *stack_phis; /**< The set of all Phi nodes inserted due to
79 stack pointer modifying nodes. */
81 int start_block_bias; /**< The stack bias at the end of the start block. */
83 void *cb; /**< ABI Callback self pointer. */
85 pmap *keep_map; /**< mapping blocks to keep nodes. */
86 pset *ignore_regs; /**< Additional registers which shall be ignored. */
88 arch_irn_handler_t irn_handler;
89 arch_irn_ops_t irn_ops;
90 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
93 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
94 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
96 /* Forward declarations, since we need them in be_abi_introduce(). */
97 static const arch_irn_ops_if_t abi_irn_ops;
98 static const arch_irn_handler_t abi_irn_handler;
99 static heights_t *ir_heights;
101 /* Flag: if set, try to omit the frame pointer if called by the backend */
102 static int be_omit_fp = 1;
105 _ ____ ___ ____ _ _ _ _
106 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
107 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
108 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
109 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
111 These callbacks are used by the backend to set the parameters
112 for a specific call type.
116 * Set compare function: compares two ABI call object arguments.
118 static int cmp_call_arg(const void *a, const void *b, size_t n)
120 const be_abi_call_arg_t *p = a, *q = b;
121 return !(p->is_res == q->is_res && p->pos == q->pos);
125 * Get or set an ABI call object argument.
127 * @param call the abi call
128 * @param is_res true for call results, false for call arguments
129 * @param pos position of the argument
130 * @param do_insert true if the argument is set, false if it's retrieved
132 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
134 be_abi_call_arg_t arg;
137 memset(&arg, 0, sizeof(arg));
141 hash = is_res * 128 + pos;
144 ? set_insert(call->params, &arg, sizeof(arg), hash)
145 : set_find(call->params, &arg, sizeof(arg), hash);
149 * Retrieve an ABI call object argument.
151 * @param call the ABI call object
152 * @param is_res true for call results, false for call arguments
153 * @param pos position of the argument
155 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
157 return get_or_set_call_arg(call, is_res, pos, 0);
160 /* Set the flags for a call. */
161 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
167 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
169 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
171 arg->alignment = alignment;
172 arg->space_before = space_before;
173 arg->space_after = space_after;
174 assert(alignment > 0 && "Alignment must be greater than 0");
177 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
179 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
184 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
186 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
191 /* Get the flags of a ABI call object. */
192 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
198 * Constructor for a new ABI call object.
200 * @return the new ABI call object
202 static be_abi_call_t *be_abi_call_new(void)
204 be_abi_call_t *call = xmalloc(sizeof(call[0]));
206 call->params = new_set(cmp_call_arg, 16);
209 call->flags.bits.try_omit_fp = be_omit_fp;
214 * Destructor for an ABI call object.
216 static void be_abi_call_free(be_abi_call_t *call)
218 del_set(call->params);
224 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
225 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
226 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
227 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
230 Handling of the stack frame. It is composed of three types:
231 1) The type of the arguments which are pushed on the stack.
232 2) The "between type" which consists of stuff the call of the
233 function pushes on the stack (like the return address and
234 the old base pointer for ia32).
235 3) The Firm frame type which consists of all local variables
239 static int get_stack_entity_offset(be_stack_layout_t *frame, entity *ent, int bias)
241 ir_type *t = get_entity_owner(ent);
242 int ofs = get_entity_offset_bytes(ent);
246 /* Find the type the entity is contained in. */
247 for(index = 0; index < N_FRAME_TYPES; ++index) {
248 if(frame->order[index] == t)
252 /* Add the size of all the types below the one of the entity to the entity's offset */
253 for(i = 0; i < index; ++i)
254 ofs += get_type_size_bytes(frame->order[i]);
256 /* correct the offset by the initial position of the frame pointer */
257 ofs -= frame->initial_offset;
259 /* correct the offset with the current bias. */
266 * Retrieve the entity with given offset from a frame type.
268 static entity *search_ent_with_offset(ir_type *t, int offset)
272 for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
273 entity *ent = get_compound_member(t, i);
274 if(get_entity_offset_bytes(ent) == offset)
281 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
283 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
284 entity *ent = search_ent_with_offset(base, 0);
286 frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;
288 return frame->initial_offset;
292 * Initializes the frame layout from parts
294 * @param frame the stack layout that will be initialized
295 * @param args the stack argument layout type
296 * @param between the between layout type
297 * @param locals the method frame type
298 * @param stack_dir the stack direction
299 * @param param_map an array mapping method argument positions to the stack argument type
301 * @return the initialized stack layout
303 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
304 ir_type *between, ir_type *locals, int stack_dir,
307 frame->arg_type = args;
308 frame->between_type = between;
309 frame->frame_type = locals;
310 frame->initial_offset = 0;
311 frame->stack_dir = stack_dir;
312 frame->order[1] = between;
313 frame->param_map = param_map;
316 frame->order[0] = args;
317 frame->order[2] = locals;
320 frame->order[0] = locals;
321 frame->order[2] = args;
327 /** Dumps the stack layout to file. */
328 static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
332 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
333 for (j = 0; j < N_FRAME_TYPES; ++j) {
334 ir_type *t = frame->order[j];
336 ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
337 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
338 entity *ent = get_compound_member(t, i);
339 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
346 * Returns non-zero if the call argument at given position
347 * is transfered on the stack.
349 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
351 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
352 return arg && !arg->in_reg;
362 Adjustment of the calls inside a graph.
367 * Transform a call node.
368 * @param env The ABI environment for the current irg.
369 * @param irn The call node.
370 * @param curr_sp The stack pointer node to use.
371 * @return The stack pointer after the call.
/*
 * NOTE(review): this excerpt of adjust_call() is incomplete — declarations
 * and several statements are missing between the numbered lines — so the
 * code is left byte-identical and only comments are touched. The function
 * lowers a Firm Call node to a backend be_Call: it materializes stack
 * arguments (stores/CopyBs or sequential pushes), collects register
 * arguments, rewires result Projs, constrains argument/result registers,
 * creates Projs + a Keep for caller-save registers, and cleans up the
 * stack afterwards. Returns the stack pointer after the call.
 */
373 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
375 ir_graph *irg = env->birg->irg;
376 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
377 be_abi_call_t *call = be_abi_call_new();
378 ir_type *mt = get_Call_type(irn);
379 ir_node *call_ptr = get_Call_ptr(irn);
380 int n_params = get_method_n_params(mt);
381 ir_node *curr_mem = get_Call_mem(irn);
382 ir_node *bl = get_nodes_block(irn);
383 pset *results = pset_new_ptr(8);
384 pset *caller_save = pset_new_ptr(8);
386 int stack_dir = arch_isa_stack_dir(isa);
387 const arch_register_t *sp = arch_isa_sp(isa);
388 ir_mode *mach_mode = sp->reg_class->mode;
389 struct obstack *obst = &env->obst;
390 int no_alloc = call->flags.bits.frame_is_setup_on_call;
392 ir_node *res_proj = NULL;
393 int curr_res_proj = pn_Call_max;
400 const ir_edge_t *edge;
405 /* Let the isa fill out the abi description for that call node. */
406 arch_isa_get_call_abi(isa, mt, call);
408 /* Insert code to put the stack arguments on the stack. */
409 assert(get_Call_n_params(irn) == n_params);
410 for(i = 0; i < n_params; ++i) {
411 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
414 int arg_size = get_type_size_bytes(get_method_param_type(mt, i));
416 stack_size += round_up2(arg->space_before, arg->alignment);
417 stack_size += round_up2(arg_size, arg->alignment);
418 stack_size += round_up2(arg->space_after, arg->alignment);
419 obstack_int_grow(obst, i);
423 pos = obstack_finish(obst);
425 /* Collect all arguments which are passed in registers. */
426 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
427 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
428 if(arg && arg->in_reg) {
429 obstack_int_grow(obst, i);
433 low_args = obstack_finish(obst);
435 /* If there are some parameters which shall be passed on the stack. */
438 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
441 * Reverse list of stack parameters if call arguments are from left to right.
442 * We must reverse them again if they are pushed (not stored) and the stack
443 * direction is downwards.
445 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
446 for(i = 0; i < n_pos >> 1; ++i) {
447 int other = n_pos - i - 1;
455 * If the stack is decreasing and we do not want to store sequentially,
456 * or someone else allocated the call frame
457 * we allocate as much space on the stack all parameters need, by
458 * moving the stack pointer along the stack's direction.
460 if(stack_dir < 0 && !do_seq && !no_alloc) {
461 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
463 add_irn_dep(curr_sp, alloca_copy);
469 obstack_ptr_grow(obst, get_Call_mem(irn));
470 curr_mem = new_NoMem();
472 curr_mem = get_Call_mem(irn);
475 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
476 for(i = 0; i < n_pos; ++i) {
478 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
479 ir_node *param = get_Call_param(irn, p);
480 ir_node *addr = curr_sp;
482 ir_type *param_type = get_method_param_type(mt, p);
483 int param_size = get_type_size_bytes(param_type) + arg->space_after;
486 * If we wanted to build the arguments sequentially,
487 * the stack pointer for the next must be incremented,
488 * and the memory value propagated.
492 addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
494 add_irn_dep(curr_sp, alloca_copy);
497 add_irn_dep(curr_sp, curr_mem);
500 curr_ofs += arg->space_before;
501 curr_ofs = round_up2(curr_ofs, arg->alignment);
503 /* Make the expression to compute the argument's offset. */
505 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
506 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
510 /* Insert a store for primitive arguments. */
511 if (is_atomic_type(param_type)) {
513 store = new_r_Store(irg, bl, curr_mem, addr, param);
514 mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
517 /* Make a mem copy for compound arguments. */
521 assert(mode_is_reference(get_irn_mode(param)));
522 copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
523 mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
526 curr_ofs += param_size;
531 obstack_ptr_grow(obst, mem);
534 in = (ir_node **) obstack_finish(obst);
536 /* We need the sync only, if we didn't build the stores sequentially. */
539 curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);
541 curr_mem = get_Call_mem(irn);
544 obstack_free(obst, in);
547 /* Collect caller save registers */
548 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
550 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
551 for(j = 0; j < cls->n_regs; ++j) {
552 const arch_register_t *reg = arch_register_for_index(cls, j);
553 if(arch_register_type_is(reg, caller_save))
554 pset_insert_ptr(caller_save, (void *) reg);
558 /* search the greatest result proj number */
560 /* TODO: what if the result is NOT used? Currently there is
561 * no way to detect this later, especially there is no way to
562 * see this in the proj numbers.
563 * While this is ok for the register allocator, it is bad for
564 * backends which need to change the be_Call further (x87 simulator
565 * for instance. However for this particular case the call_type is
568 foreach_out_edge(irn, edge) {
569 const ir_edge_t *res_edge;
570 ir_node *irn = get_edge_src_irn(edge);
572 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
574 foreach_out_edge(irn, res_edge) {
576 be_abi_call_arg_t *arg;
577 ir_node *res = get_edge_src_irn(res_edge);
579 assert(is_Proj(res));
581 proj = get_Proj_proj(res);
582 arg = get_call_arg(call, 1, proj);
585 shift the proj number to the right, since we will drop the
586 unspeakable Proj_T from the Call. Therefore, all real argument
587 Proj numbers must be increased by pn_be_Call_first_res
589 proj += pn_be_Call_first_res;
590 set_Proj_proj(res, proj);
591 obstack_ptr_grow(obst, res);
593 if(proj > curr_res_proj)
594 curr_res_proj = proj;
596 pset_remove_ptr(caller_save, arg->reg);
597 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
604 obstack_ptr_grow(obst, NULL);
605 res_projs = obstack_finish(obst);
607 /* make the back end call node and set its register requirements. */
608 for(i = 0; i < n_low_args; ++i)
609 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
611 in = obstack_finish(obst);
613 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
614 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
615 curr_res_proj + pset_count(caller_save), n_low_args, in,
617 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
621 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
622 curr_res_proj + pset_count(caller_save), n_low_args, in,
627 Set the register class of the call address to the same as the stack pointer's.
628 That's probably buggy for some architectures.
630 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
632 DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
634 /* Set the register classes and constraints of the Call parameters. */
635 for(i = 0; i < n_low_args; ++i) {
636 int index = low_args[i];
637 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
638 assert(arg->reg != NULL);
640 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
643 /* Set the register constraints of the results. */
644 for(i = 0; res_projs[i]; ++i) {
645 ir_node *irn = res_projs[i];
646 int proj = get_Proj_proj(irn);
648 /* Correct Proj number since it has been adjusted! (see above) */
649 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
652 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
654 obstack_free(obst, in);
655 exchange(irn, low_call);
657 /* redirect the result projs to the lowered call instead of the Proj_T */
658 for(i = 0; res_projs[i]; ++i)
659 set_Proj_pred(res_projs[i], low_call);
661 /* Make additional projs for the caller save registers
662 and the Keep node which keeps them alive. */
663 if(pset_count(caller_save) > 0) {
664 const arch_register_t *reg;
668 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
669 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
671 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
672 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
673 set_irn_link(proj, (void *) reg);
674 obstack_ptr_grow(obst, proj);
678 in = (ir_node **) obstack_finish(obst);
679 keep = be_new_Keep(NULL, irg, bl, n, in);
680 for(i = 0; i < n; ++i) {
681 const arch_register_t *reg = get_irn_link(in[i]);
682 be_node_set_reg_class(keep, i, reg->reg_class);
684 obstack_free(obst, in);
687 /* Clean up the stack. */
689 ir_node *mem_proj = NULL;
691 foreach_out_edge(low_call, edge) {
692 ir_node *irn = get_edge_src_irn(edge);
693 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
700 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
701 keep_alive(mem_proj);
704 /* Clean up the stack frame if we allocated it */
706 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
707 add_irn_dep(curr_sp, mem_proj);
709 add_irn_dep(curr_sp, alloca_copy);
715 be_abi_call_free(call);
716 obstack_free(obst, pos);
718 del_pset(caller_save);
725 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
/*
 * NOTE(review): incomplete excerpt (Proj-switch cases and some statements
 * are missing between the numbered lines); code left byte-identical,
 * comments only. Transforms a stack Alloc into a be_AddSP tied into the
 * stack pointer chain, syncs its memory with the Alloc's memory input,
 * and leaves a Copy of the result address in *result_copy so later calls
 * in the same block can depend on it. Presumably returns the new stack
 * pointer — tail not visible here, confirm against the full file.
 */
727 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
729 if (get_Alloc_where(alloc) == stack_alloc) {
730 ir_node *bl = get_nodes_block(alloc);
731 ir_graph *irg = get_irn_irg(bl);
732 ir_node *alloc_mem = NULL;
733 ir_node *alloc_res = NULL;
735 const ir_edge_t *edge;
741 foreach_out_edge(alloc, edge) {
742 ir_node *irn = get_edge_src_irn(edge);
744 assert(is_Proj(irn));
745 switch(get_Proj_proj(irn)) {
757 /* Beware: currently Alloc nodes without a result might happen,
758 only escape analysis kills them and this phase runs only for object
759 oriented source. We kill the Alloc here. */
760 if (alloc_res == NULL && alloc_mem) {
761 exchange(alloc_mem, get_Alloc_mem(alloc));
765 /* The stack pointer will be modified in an unknown manner.
766 We cannot omit it. */
767 env->call->flags.bits.try_omit_fp = 0;
768 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
770 if(alloc_mem != NULL) {
774 addsp_mem = new_r_Proj(irg, bl, new_alloc, mode_M, pn_be_AddSP_M);
776 // We need to sync the output mem of the AddSP with the input mem
777 // edge into the alloc node
778 ins[0] = get_Alloc_mem(alloc);
780 sync = new_r_Sync(irg, bl, 2, ins);
782 exchange(alloc_mem, sync);
785 exchange(alloc, new_alloc);
787 /* fix projnum of alloca res */
788 set_Proj_proj(alloc_res, pn_be_AddSP_res);
790 addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
792 /* copy the address away, since it could be used after further stack pointer modifications. */
793 /* Let it point curr_sp just for the moment, I'll reroute it in a second. */
794 *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
796 /* Let all users of the Alloc() result now point to the copy. */
797 edges_reroute(alloc_res, copy, irg);
799 /* Rewire the copy appropriately. */
800 set_irn_n(copy, be_pos_Copy_op, addr);
809 * The Free is transformed into a back end free node and connected to the stack nodes.
/*
 * NOTE(review): incomplete excerpt — the tail (rewiring of the Free's
 * users and the return) is not visible; code left byte-identical,
 * comments only. Transforms a stack Free into a be_SubSP tied into the
 * stack pointer chain.
 */
811 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
813 if (get_Free_where(free) == stack_alloc) {
814 ir_node *bl = get_nodes_block(free);
815 ir_graph *irg = get_irn_irg(bl);
816 ir_node *addsp, *mem, *res;
818 /* The stack pointer will be modified in an unknown manner.
819 We cannot omit it. */
820 env->call->flags.bits.try_omit_fp = 0;
821 addsp = be_new_SubSP(env->isa->sp, irg, bl, curr_sp, get_Free_size(free));
823 mem = new_r_Proj(irg, bl, addsp, mode_M, pn_be_SubSP_M);
824 res = new_r_Proj(irg, bl, addsp, mode_P_data, pn_be_SubSP_res);
832 /* the following function is replaced by the usage of the heights module */
835 * Walker for dependent_on().
836 * This function searches a node tgt recursively from a given node
837 * but is restricted to the given block.
838 * @return 1 if tgt was reachable from curr, 0 if not.
/*
 * NOTE(review): dead code — replaced by the heights module (see the
 * comment above and dependent_on()); incomplete excerpt, left untouched.
 * Recursive DFS restricted to block bl, looking for tgt from curr.
 */
840 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
844 if (get_nodes_block(curr) != bl)
850 /* Phi functions stop the recursion inside a basic block */
851 if (! is_Phi(curr)) {
852 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
853 if (check_dependence(get_irn_n(curr, i), tgt, bl))
863 * Check if a node is somehow data dependent on another one.
864 * both nodes must be in the same basic block.
865 * @param n1 The first node.
866 * @param n2 The second node.
867 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
869 static int dependent_on(ir_node *n1, ir_node *n2)
871 ir_node *bl = get_nodes_block(n1);
873 assert(bl == get_nodes_block(n2));
875 return heights_reachable_in_block(ir_heights, n1, n2);
876 //return check_dependence(n1, n2, bl);
879 static int cmp_call_dependecy(const void *c1, const void *c2)
881 ir_node *n1 = *(ir_node **) c1;
882 ir_node *n2 = *(ir_node **) c2;
885 Classical qsort() comparison function behavior:
886 0 if both elements are equal
887 1 if second is "smaller" that first
888 -1 if first is "smaller" that second
890 if (dependent_on(n1, n2))
893 if (dependent_on(n2, n1))
900 * Walker: links all Call/alloc/Free nodes to the Block they are contained.
902 static void link_calls_in_block_walker(ir_node *irn, void *data)
904 opcode code = get_irn_opcode(irn);
906 if (code == iro_Call ||
907 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
908 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
909 be_abi_irg_t *env = data;
910 ir_node *bl = get_nodes_block(irn);
911 void *save = get_irn_link(bl);
913 if (code == iro_Call)
914 env->call->flags.bits.irg_is_leaf = 0;
916 set_irn_link(irn, save);
917 set_irn_link(bl, irn);
923 * Process all Call nodes inside a basic block.
924 * Note that the link field of the block must contain a linked list of all
925 * Call nodes inside the Block. We first order this list according to data dependency
926 * and that connect the calls together.
928 static void process_calls_in_block(ir_node *bl, void *data)
930 be_abi_irg_t *env = data;
931 ir_node *curr_sp = env->init_sp;
935 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
936 obstack_ptr_grow(&env->obst, irn);
938 /* If there were call nodes in the block. */
942 ir_node *copy = NULL;
945 nodes = obstack_finish(&env->obst);
947 /* order the call nodes according to data dependency */
948 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
950 for(i = n - 1; i >= 0; --i) {
951 ir_node *irn = nodes[i];
953 DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
954 switch(get_irn_opcode(irn)) {
956 curr_sp = adjust_call(env, irn, curr_sp, copy);
959 curr_sp = adjust_alloc(env, irn, curr_sp, ©);
962 curr_sp = adjust_free(env, irn, curr_sp);
969 obstack_free(&env->obst, nodes);
971 /* Keep the last stack state in the block by tying it to Keep node */
973 keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
974 pmap_insert(env->keep_map, bl, keep);
977 set_irn_link(bl, curr_sp);
978 } /* process_calls_in_block */
981 * Adjust all call nodes in the graph to the ABI conventions.
983 static void process_calls(be_abi_irg_t *env)
985 ir_graph *irg = env->birg->irg;
987 env->call->flags.bits.irg_is_leaf = 1;
988 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
990 ir_heights = heights_new(env->birg->irg);
991 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
992 heights_free(ir_heights);
996 static ir_node *setup_frame(be_abi_irg_t *env)
998 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
999 const arch_register_t *sp = isa->sp;
1000 const arch_register_t *bp = isa->bp;
1001 be_abi_call_flags_bits_t flags = env->call->flags.bits;
1002 ir_graph *irg = env->birg->irg;
1003 ir_node *bl = get_irg_start_block(irg);
1004 ir_node *no_mem = get_irg_no_mem(irg);
1005 ir_node *old_frame = get_irg_frame(irg);
1006 ir_node *stack = pmap_get(env->regs, (void *) sp);
1007 ir_node *frame = pmap_get(env->regs, (void *) bp);
1009 int stack_nr = get_Proj_proj(stack);
1011 if(flags.try_omit_fp) {
1012 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
1017 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
1019 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
1020 if(!flags.fp_free) {
1021 be_set_constr_single_reg(frame, -1, bp);
1022 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
1023 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
1026 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
1029 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
1030 env->init_sp = stack;
1031 set_irg_frame(irg, frame);
1032 edges_reroute(old_frame, frame, irg);
/*
 * NOTE(review): incomplete excerpt (the else-branch head, the pmap entry
 * declaration and the per-register conditions are missing between the
 * numbered lines); code left byte-identical, comments only. Tears the
 * frame down before a Return: shrinks the stack (or restores sp from the
 * frame pointer) and collects the Return's register operands.
 * NOTE(review): lines below mix "&env->obst" and the "obst" parameter —
 * looks intentional only if callers pass &env->obst; verify in the caller.
 */
1037 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
1039 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1040 const arch_register_t *sp = isa->sp;
1041 const arch_register_t *bp = isa->bp;
1042 ir_graph *irg = env->birg->irg;
1043 ir_node *ret_mem = get_Return_mem(ret);
1044 ir_node *frame = get_irg_frame(irg);
1045 ir_node *bl = get_nodes_block(ret);
1046 ir_node *stack = get_irn_link(bl);
1050 if(env->call->flags.bits.try_omit_fp) {
1051 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
1055 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
1056 be_set_constr_single_reg(stack, -1, sp);
1057 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
1060 pmap_foreach(env->regs, ent) {
1061 const arch_register_t *reg = ent->key;
1062 ir_node *irn = ent->value;
1065 obstack_ptr_grow(&env->obst, stack);
1067 obstack_ptr_grow(&env->obst, frame);
1068 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1069 obstack_ptr_grow(obst, irn);
1076 * Computes the stack argument layout type.
1077 * Changes a possibly allocated value param type by moving
1078 * entities to the stack layout type.
1080 * @param env the ABI environment
1081 * @param call the current call ABI
1082 * @param method_type the method type
1083 * @param param_map an array mapping method arguments to the stack layout type
1085 * @return the stack argument layout type
/*
 * NOTE(review): incomplete excerpt (declarations of i, ofs, buf, map, res
 * and the else-branch head are missing between the numbered lines); code
 * left byte-identical, comments only. Builds the stack-argument struct
 * type: moves value-param entities into it (or creates fresh "param_%d"
 * entities), assigns fixed offsets honoring space_before/alignment/
 * space_after, and records each entity in *param_map.
 */
1087 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, entity ***param_map)
1089 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1090 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
1091 int n = get_method_n_params(method_type);
1092 int curr = inc > 0 ? 0 : n - 1;
1098 ir_type *val_param_tp = get_method_value_param_type(method_type);
1099 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1102 *param_map = map = obstack_alloc(&env->obst, n * sizeof(entity *));
1103 res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
1104 for (i = 0; i < n; ++i, curr += inc) {
1105 ir_type *param_type = get_method_param_type(method_type, curr);
1106 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1109 if (arg->on_stack) {
1111 /* the entity was already created, move it to the param type */
1112 arg->stack_ent = get_method_value_param_ent(method_type, i);
1113 remove_struct_member(val_param_tp, arg->stack_ent);
1114 set_entity_owner(arg->stack_ent, res);
1115 add_struct_member(res, arg->stack_ent);
1116 /* must be automatic to set a fixed layout */
1117 set_entity_allocation(arg->stack_ent, allocation_automatic);
1120 snprintf(buf, sizeof(buf), "param_%d", i);
1121 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1123 ofs += arg->space_before;
1124 ofs = round_up2(ofs, arg->alignment);
1125 set_entity_offset_bytes(arg->stack_ent, ofs);
1126 ofs += arg->space_after;
1127 ofs += get_type_size_bytes(param_type);
1128 map[i] = arg->stack_ent;
1131 set_type_size_bytes(res, ofs);
1132 set_type_state(res, layout_fixed);
/*
 * NOTE(review): incomplete excerpt (loop variable, "in"/"perm"
 * declarations and some braces are missing between the numbered lines);
 * code left byte-identical, comments only. For each register class,
 * builds a Perm over all non-ignore registers that have a Proj in the
 * regs map, pins each Perm output to its register, and replaces the map
 * entries with the Perm outputs.
 */
1137 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1140 struct obstack obst;
1142 obstack_init(&obst);
1144 /* Create a Perm after the RegParams node to delimit it. */
1145 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1146 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1151 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1152 const arch_register_t *reg = &cls->regs[j];
1153 ir_node *irn = pmap_get(regs, (void *) reg);
1155 if(irn && !arch_register_type_is(reg, ignore)) {
1157 obstack_ptr_grow(&obst, irn);
1158 set_irn_link(irn, (void *) reg);
1162 obstack_ptr_grow(&obst, NULL);
1163 in = obstack_finish(&obst);
1165 perm = be_new_Perm(cls, irg, bl, n_regs, in);
1166 for(j = 0; j < n_regs; ++j) {
1167 ir_node *arg = in[j];
1168 arch_register_t *reg = get_irn_link(arg);
1169 pmap_insert(regs, reg, arg);
1170 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1173 obstack_free(&obst, in);
1176 obstack_free(&obst, NULL);
1181 const arch_register_t *reg;
1185 static int cmp_regs(const void *a, const void *b)
1187 const reg_node_map_t *p = a;
1188 const reg_node_map_t *q = b;
1190 if(p->reg->reg_class == q->reg->reg_class)
1191 return p->reg->index - q->reg->index;
1193 return p->reg->reg_class - q->reg->reg_class;
1196 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1199 int n = pmap_count(reg_map);
1201 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1203 pmap_foreach(reg_map, ent) {
1204 res[i].reg = ent->key;
1205 res[i].irn = ent->value;
1209 qsort(res, n, sizeof(res[0]), cmp_regs);
1214 * Creates a barrier.
/*
 * NOTE(review): incomplete excerpt (declarations of rm, in, irn, proj,
 * flags, the in_req handling and the return are missing between the
 * numbered lines); code left byte-identical, comments only. Builds a
 * be_Barrier over all mapped registers plus the memory, creates one Proj
 * per register with matching constraints/flags, updates the regs map to
 * the Projs and replaces *mem with the barrier's memory Proj.
 */
1216 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1218 ir_graph *irg = env->birg->irg;
1219 int n_regs = pmap_count(regs);
1225 rm = reg_map_to_arr(&env->obst, regs);
1227 for(n = 0; n < n_regs; ++n)
1228 obstack_ptr_grow(&env->obst, rm[n].irn);
1231 obstack_ptr_grow(&env->obst, *mem);
1235 in = (ir_node **) obstack_finish(&env->obst);
1236 irn = be_new_Barrier(irg, bl, n, in);
1237 obstack_free(&env->obst, in);
1239 for(n = 0; n < n_regs; ++n) {
1240 const arch_register_t *reg = rm[n].reg;
1242 int pos = BE_OUT_POS(n);
1245 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1246 be_node_set_reg_class(irn, n, reg->reg_class);
1248 be_set_constr_single_reg(irn, n, reg);
1249 be_set_constr_single_reg(irn, pos, reg);
1250 be_node_set_reg_class(irn, pos, reg->reg_class);
1251 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1253 /* if the proj projects an ignore register or a node which is set to ignore, propagate this property. */
1254 if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
1255 flags |= arch_irn_flags_ignore;
1257 if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
1258 flags |= arch_irn_flags_modify_sp;
1260 be_node_set_flags(irn, pos, flags);
1262 pmap_insert(regs, (void *) reg, proj);
1266 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1269 obstack_free(&env->obst, rm);
1274 * Creates a be_Return for a Return node.
1276 * @param env   the abi environment
1277 * @param irn   the Return node or NULL if there was none
1278 * @param bl    the block where the be_Return should be placed
1279 * @param mem   the current memory
1280 * @param n_res number of return results
1282 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
1283 be_abi_call_t *call = env->call;
1284 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1286 pmap *reg_map = pmap_create();
1287 ir_node *keep = pmap_get(env->keep_map, bl);
1293 const arch_register_t **regs;
1297 get the valid stack node in this block.
1298 If we had a call in that block there is a Keep constructed by process_calls()
1299 which points to the last stack modification in that block. we'll use
1300 it then. Else we use the stack from the start block and let
1301 the ssa construction fix the usage.
1303 stack = be_abi_reg_map_get(env->regs, isa->sp);
/* A Keep exists: take its stack input and detach the Keep by rerouting
   its block and input to Bad (it has served its purpose). */
1305 ir_node *bad = new_r_Bad(env->birg->irg);
1306 stack = get_irn_n(keep, 0);
1307 set_nodes_block(keep, bad);
1308 set_irn_n(keep, 0, bad);
1309 // exchange(keep, new_r_Bad(env->birg->irg));
1312 /* Insert results for Return into the register map. */
1313 for(i = 0; i < n_res; ++i) {
1314 ir_node *res = get_Return_res(irn, i);
1315 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1316 assert(arg->in_reg && "return value must be passed in register");
1317 pmap_insert(reg_map, (void *) arg->reg, res);
1320 /* Add uses of the callee save registers. */
1321 pmap_foreach(env->regs, ent) {
1322 const arch_register_t *reg = ent->key;
1323 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1324 pmap_insert(reg_map, ent->key, ent->value);
1327 be_abi_reg_map_set(reg_map, isa->sp, stack);
1329 /* Make the Epilogue node and call the arch's epilogue maker. */
1330 create_barrier(env, bl, &mem, reg_map, 1);
1331 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1334 Maximum size of the in array for Return nodes is
1335 return args + callee save/ignore registers + memory + stack pointer
1337 in_max = pmap_count(reg_map) + n_res + 2;
1339 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1340 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
/* Fixed inputs: in[1] is the (possibly epilogue-modified) stack pointer. */
1343 in[1] = be_abi_reg_map_get(reg_map, isa->sp);
1348 /* clear SP entry, since it has already been grown. */
1349 pmap_insert(reg_map, (void *) isa->sp, NULL);
/* Then the return values, each in its ABI-assigned register. */
1350 for(i = 0; i < n_res; ++i) {
1351 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1353 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1354 regs[n++] = arg->reg;
1356 /* Clear the map entry to mark the register as processed. */
1357 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1360 /* grow the rest of the stuff. */
1361 pmap_foreach(reg_map, ent) {
1364 regs[n++] = ent->key;
1368 /* The in array for the new back end return is now ready. */
1369 ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
1371 /* Set the register classes of the return's parameter accordingly. */
1372 for(i = 0; i < n; ++i)
1374 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1376 /* Free the space of the Epilog's in array and the register <-> proj map. */
1377 obstack_free(&env->obst, in);
1378 pmap_destroy(reg_map);
/** Walker context for lower_frame_sels_walker(). */
1383 typedef struct lower_frame_sels_env_t {
1385 entity *value_param_list; /**< the list of all value param entities, linked via entity links */
1386 } lower_frame_sels_env_t;
1389 * Walker: Replaces Sels of frame type and
1390 * value param type entities by FrameAddress.
1392 static void lower_frame_sels_walker(ir_node *irn, void *data)
1394 lower_frame_sels_env_t *ctx = data;
1397 ir_graph *irg = current_ir_graph;
1398 ir_node *frame = get_irg_frame(irg);
1399 ir_node *param_base = get_irg_value_param_base(irg);
1400 ir_node *ptr = get_Sel_ptr(irn);
/* Only Sels hanging off the frame or the value-param base are lowered. */
1402 if (ptr == frame || ptr == param_base) {
1403 be_abi_irg_t *env = ctx->env;
1404 entity *ent = get_Sel_entity(irn);
1405 ir_node *bl = get_nodes_block(irn);
/* Replace the Sel by a backend FrameAddr node for the same entity. */
1408 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
1411 /* check, if it's a param sel and if we have not seen this entity immediately before */
1412 if (ptr == param_base && ctx->value_param_list != ent) {
/* Collect the entity in the singly-linked value_param_list (via entity link). */
1413 set_entity_link(ent, ctx->value_param_list);
1414 ctx->value_param_list = ent;
1421 * Check if a value parameter is transmitted as a register.
1422 * This might happen if the address of a parameter is taken which is
1423 * transmitted in registers.
1425 * Note that on some architectures this case must be handled specially
1426 * because the place of the backing store is determined by their ABI.
1428 * In the default case we move the entity to the frame type and create
1429 * a backing store into the first block.
1431 static void fix_address_of_parameter_access(be_abi_irg_t *env, entity *value_param_list) {
1432 be_abi_call_t *call = env->call;
1433 ir_graph *irg = env->birg->irg;
1434 entity *ent, *next_ent, *new_list;
1436 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
/* Pass 1: collect all value-param entities whose argument is passed in a
   register (they need a backing store) into new_list. */
1439 for (ent = value_param_list; ent; ent = next_ent) {
1440 int i = get_struct_member_index(get_entity_owner(ent), ent);
1441 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1443 next_ent = get_entity_link(ent);
1445 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
1446 set_entity_link(ent, new_list);
1451 /* ok, change the graph */
1452 ir_node *start_bl = get_irg_start_block(irg);
1453 ir_node *first_bl = NULL;
1454 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1455 const ir_edge_t *edge;
1456 optimization_state_t state;
/* Find the (unique) successor block of the start block; the backing
   stores are placed there. */
1459 foreach_block_succ(start_bl, edge) {
1460 ir_node *succ = get_edge_src_irn(edge);
1461 if (start_bl != succ) {
1467 /* we had already removed critical edges, so the following
1468 assertion should be always true. */
1469 assert(get_Block_n_cfgpreds(first_bl) == 1);
1471 /* now create backing stores */
1472 frame = get_irg_frame(irg);
1473 imem = get_irg_initial_mem(irg);
/* Optimizations are switched off while building nmem so the fresh Proj is
   not CSE'd with the existing initial memory. */
1475 save_optimization_state(&state);
1477 nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
1478 restore_optimization_state(&state);
1480 /* reroute all edges to the new memory source */
1481 edges_reroute(imem, nmem, irg);
1485 args = get_irg_args(irg);
1486 args_bl = get_nodes_block(args);
/* Pass 2: emit one Store per collected entity, chaining the memory. */
1487 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1488 int i = get_struct_member_index(get_entity_owner(ent), ent);
1489 ir_type *tp = get_entity_type(ent);
1490 ir_mode *mode = get_type_mode(tp);
1493 /* address for the backing store */
1494 addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
1497 mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
1499 /* the backing store itself */
1500 store = new_r_Store(irg, first_bl, mem, addr,
1501 new_r_Proj(irg, args_bl, args, mode, i));
1503 /* the new memory Proj gets the last Proj from store */
1504 set_Proj_pred(nmem, store);
1505 set_Proj_proj(nmem, pn_Store_M);
1507 /* move all entities to the frame type */
1508 frame_tp = get_irg_frame_type(irg);
1509 offset = get_type_size_bytes(frame_tp);
/* Pass 3: append the entities at the end of the frame type, respecting
   each type's alignment, and grow the frame type accordingly. */
1510 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1511 ir_type *tp = get_entity_type(ent);
1512 int align = get_type_alignment_bytes(tp);
1514 offset += align - 1;
1516 set_entity_owner(ent, frame_tp);
1517 add_class_member(frame_tp, ent);
1518 /* must be automatic to set a fixed layout */
1519 set_entity_allocation(ent, allocation_automatic);
1520 set_entity_offset_bytes(ent, offset);
1521 offset += get_type_size_bytes(tp);
1523 set_type_size_bytes(frame_tp, offset);
1528 * Modify the irg itself and the frame type.
/*
 * Central ABI lowering for one graph: lowers frame Sels, sets up the stack
 * frame model, introduces RegParams/Barrier/IncSP for the prologue, replaces
 * stack-passed arguments by StackParam/FrameAddr nodes and rewrites all
 * Return nodes into be_Return nodes.
 */
1530 static void modify_irg(be_abi_irg_t *env)
1532 be_abi_call_t *call = env->call;
1533 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1534 const arch_register_t *sp = arch_isa_sp(isa);
1535 ir_graph *irg = env->birg->irg;
1536 ir_node *bl = get_irg_start_block(irg);
1537 ir_node *end = get_irg_end_block(irg);
1538 ir_node *mem = get_irg_initial_mem(irg);
1539 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1540 pset *dont_save = pset_new_ptr(8);
1546 const arch_register_t *fp_reg;
1547 ir_node *frame_pointer;
1549 ir_node *reg_params_bl;
1552 const ir_edge_t *edge;
1553 ir_type *arg_type, *bet_type;
1554 lower_frame_sels_env_t ctx;
1557 bitset_t *used_proj_nr;
1558 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1560 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1562 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1564 ctx.value_param_list = NULL;
1565 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1567 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1568 env->regs = pmap_create();
1570 used_proj_nr = bitset_alloca(1024);
1571 n_params = get_method_n_params(method_type);
/* args[i] will hold the Proj from Start for parameter i (or NULL). */
1572 args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
1573 memset(args, 0, n_params * sizeof(args[0]));
1575 /* Check if a value parameter is transmitted as a register.
1576 * This might happen if the address of a parameter is taken which is
1577 * transmitted in registers.
1579 * Note that on some architectures this case must be handled specially
1580 * because the place of the backing store is determined by their ABI.
1582 * In the default case we move the entity to the frame type and create
1583 * a backing store into the first block.
1585 fix_address_of_parameter_access(env, ctx.value_param_list);
1587 /* Fill the argument vector */
1588 arg_tuple = get_irg_args(irg);
1589 foreach_out_edge(arg_tuple, edge) {
1590 ir_node *irn = get_edge_src_irn(edge);
1591 int nr = get_Proj_proj(irn);
1593 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
/* Build the argument frame type and the between type and initialize the
   stack layout model from them. */
1596 arg_type = compute_arg_type(env, call, method_type, ¶m_map);
1597 bet_type = call->cb->get_between_type(env->cb);
1598 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
1600 /* Count the register params and add them to the number of Projs for the RegParams node */
1601 for(i = 0; i < n_params; ++i) {
1602 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1603 if(arg->in_reg && args[i]) {
1604 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1605 assert(i == get_Proj_proj(args[i]));
1607 /* For now, associate the register with the old Proj from Start representing that argument. */
1608 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1609 bitset_set(used_proj_nr, i);
1610 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1614 /* Collect all callee-save registers */
1615 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1616 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1617 for(j = 0; j < cls->n_regs; ++j) {
1618 const arch_register_t *reg = &cls->regs[j];
1619 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1620 pmap_insert(env->regs, (void *) reg, NULL);
/* Stack and (if present) frame pointer always get RegParams outputs. */
1624 pmap_insert(env->regs, (void *) sp, NULL);
1625 pmap_insert(env->regs, (void *) isa->bp, NULL);
1626 reg_params_bl = get_irg_start_block(irg);
1627 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1628 add_irn_dep(env->reg_params, get_irg_start(irg));
1631 * make proj nodes for the callee save registers.
1632 * memorize them, since Return nodes get those as inputs.
1634 * Note, that if a register corresponds to an argument, the regs map contains
1635 * the old Proj from start for that argument.
1638 rm = reg_map_to_arr(&env->obst, env->regs);
1639 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1640 arch_register_t *reg = (void *) rm[i].reg;
1641 ir_node *arg_proj = rm[i].irn;
/* Reuse the old argument Proj's mode when there is one, else the class mode. */
1642 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1644 int pos = BE_OUT_POS((int) nr);
1650 bitset_set(used_proj_nr, nr);
1651 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1652 pmap_insert(env->regs, (void *) reg, proj);
1653 be_set_constr_single_reg(env->reg_params, pos, reg);
1654 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1657 * If the register is an ignore register,
1658 * The Proj for that register shall also be ignored during register allocation.
1660 if(arch_register_type_is(reg, ignore))
1661 flags |= arch_irn_flags_ignore;
1664 flags |= arch_irn_flags_modify_sp;
1666 be_node_set_flags(env->reg_params, pos, flags);
1668 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1670 obstack_free(&env->obst, rm);
1672 /* Generate the Prologue */
1673 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1675 /* do the stack allocation BEFORE the barrier, or spill code
1676 might be added before it */
1677 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1678 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
1679 be_abi_reg_map_set(env->regs, sp, env->init_sp);
/* Barrier separating prologue from function body; remember it. */
1681 env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
1683 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1684 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
/* The frame pointer chosen by the prologue callback becomes the frame node
   and is excluded from allocation. */
1686 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1687 set_irg_frame(irg, frame_pointer);
1688 pset_insert_ptr(env->ignore_regs, fp_reg);
1690 /* Now, introduce stack param nodes for all parameters passed on the stack */
1691 for(i = 0; i < n_params; ++i) {
1692 ir_node *arg_proj = args[i];
1693 ir_node *repl = NULL;
1695 if(arg_proj != NULL) {
1696 be_abi_call_arg_t *arg;
1697 ir_type *param_type;
1698 int nr = get_Proj_proj(arg_proj);
/* NOTE(review): clamps the Proj number to n_params — presumably guards
   against stray Projs; confirm against callers. */
1700 nr = MIN(nr, n_params);
1701 arg = get_call_arg(call, 0, nr);
1702 param_type = get_method_param_type(method_type, nr);
/* Register argument: replaced by the Proj recorded in env->regs. */
1705 repl = pmap_get(env->regs, (void *) arg->reg);
1708 else if(arg->on_stack) {
1709 /* For atomic parameters which are actually used, we create a StackParam node. */
1710 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1711 ir_mode *mode = get_type_mode(param_type);
1712 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1713 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1716 /* The stack parameter is not primitive (it is a struct or array),
1717 we thus will create a node representing the parameter's address
1720 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1724 assert(repl != NULL);
1725 edges_reroute(args[i], repl, irg);
1729 /* All Return nodes hang on the End node, so look for them there. */
1730 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
1731 ir_node *irn = get_Block_cfgpred(end, i);
1733 if (is_Return(irn)) {
1734 ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
1738 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return than,
1739 the code is dead and will never be executed. */
1741 del_pset(dont_save);
1742 obstack_free(&env->obst, args);
/**
 * Create and initialize the ABI environment for a graph and perform the
 * full ABI lowering: query the ISA's calling convention, lower calls,
 * rewrite the graph (prologue/epilogue, parameters, returns) and register
 * important nodes with the dead-node-elimination survivor set.
 * Ownership: the returned be_abi_irg_t must be released with be_abi_free().
 */
1745 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1747 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1748 ir_node *old_frame = get_irg_frame(birg->irg);
1749 ir_graph *irg = birg->irg;
1753 optimization_state_t state;
1755 be_omit_fp = birg->main_env->options->omit_fp;
1757 obstack_init(&env->obst);
1759 env->isa = birg->main_env->arch_env->isa;
1760 env->method_type = get_entity_type(get_irg_entity(irg));
1761 env->call = be_abi_call_new();
/* Let the ISA describe how this method's type is passed/returned. */
1762 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1764 env->ignore_regs = pset_new_ptr_default();
1765 env->keep_map = pmap_create();
1766 env->dce_survivor = new_survive_dce();
1768 env->stack_phis = pset_new_ptr(16);
1769 /* Beware: later we replace this node by the real one, ensure it is not CSE'd
1770 to another Unknown or the stack pointer gets used */
1771 save_optimization_state(&state);
1773 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1774 restore_optimization_state(&state);
1775 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
1777 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1778 env->irn_ops.impl = &abi_irn_ops;
1780 /* Lower all call nodes in the IRG. */
1784 Beware: init backend abi call object after processing calls,
1785 otherwise some information might be not yet available.
1787 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1789 /* Process the IRG */
1792 /* We don't need the keep map anymore. */
1793 pmap_destroy(env->keep_map);
1795 /* reroute the stack origin of the calls to the true stack origin. */
1796 edges_reroute(dummy, env->init_sp, irg);
1797 edges_reroute(old_frame, get_irg_frame(irg), irg);
1799 /* Make some important node pointers survive the dead node elimination. */
1800 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1801 pmap_foreach(env->regs, ent)
1802 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
/* Install the ABI's own irn handler (answers queries for stack Phis). */
1804 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
1806 env->call->cb->done(env->cb);
/**
 * Release all resources owned by an ABI environment created by
 * be_abi_introduce() and pop the ABI's irn handler again.
 */
1811 void be_abi_free(be_abi_irg_t *env)
1813 free_survive_dce(env->dce_survivor);
1814 del_pset(env->stack_phis);
1815 del_pset(env->ignore_regs);
1816 pmap_destroy(env->regs);
1817 obstack_free(&env->obst, NULL);
1818 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
/**
 * Mark, in bitset @p bs, the indices of all additional ignore registers
 * of register class @p cls collected in abi->ignore_regs.
 */
1822 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
1824 arch_register_t *reg;
1826 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
1827 if(reg->reg_class == cls)
1828 bitset_set(bs, reg->index);
1831 /* Returns the stack layout (frame model) from an abi environment. */
1832 const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
1839 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1840 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1841 | _| | |> < ___) | || (_| | (__| <
1842 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/** Walker context for collect_stack_nodes_walker(). */
1846 struct fix_stack_walker_info {
1848 const arch_env_t *aenv; /**< arch env used to query node properties */
1852 * Walker. Collect all stack modifying nodes.
1854 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1856 struct fix_stack_walker_info *info = data;
/* Record every node flagged modify_sp; such nodes must not be mode_M/mode_T
   (they define a value, the new stack pointer). */
1861 if (arch_irn_is(info->aenv, irn, modify_sp)) {
1862 assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T);
1863 pset_insert_ptr(info->nodes, irn);
/**
 * Re-establish SSA form for the stack pointer: collect all stack-modifying
 * nodes (plus the initial SP) and run SSA construction over them using
 * dominance frontiers. Newly created Phis are recorded in env->stack_phis.
 */
1867 void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
1869 dom_front_info_t *df;
1870 pset *stack_nodes = pset_new_ptr(16);
1871 struct fix_stack_walker_info info;
1873 info.nodes = stack_nodes;
1874 info.aenv = env->birg->main_env->arch_env;
1876 /* We need dominance frontiers for fix up */
1877 df = be_compute_dominance_frontiers(env->birg->irg);
1878 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
1879 pset_insert_ptr(stack_nodes, env->init_sp);
1880 be_ssa_constr_set_phis(df, lv, stack_nodes, env->stack_phis);
1881 del_pset(stack_nodes);
1883 /* free these dominance frontiers */
1884 be_free_dominance_frontiers(df);
/**
 * Walk the schedule of block @p bl, assigning real frame offsets (including
 * the incoming stack bias @p bias) to nodes with frame entities and updating
 * the bias for stack-pointer-modifying nodes along the way.
 */
1887 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1889 const arch_env_t *arch_env = env->birg->main_env->arch_env;
1890 int omit_fp = env->call->flags.bits.try_omit_fp;
1893 sched_foreach(bl, irn) {
1896 Check, if the node relates to an entity on the stack frame.
1897 If so, set the true offset (including the bias) for that
1900 entity *ent = arch_get_frame_entity(arch_env, irn);
1902 int offset = get_stack_entity_offset(env->frame, ent, bias);
1903 arch_set_frame_offset(arch_env, irn, offset);
1904 DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
1908 If the node modifies the stack pointer by a constant offset,
1909 record that in the bias.
1911 if(arch_irn_is(arch_env, irn, modify_sp)) {
1912 int ofs = arch_get_sp_bias(arch_env, irn);
/* IncSP offsets may still hold the symbolic expand/shrink markers;
   resolve them to the actual frame type size now. */
1914 if(be_is_IncSP(irn)) {
1915 if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
1916 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1917 be_set_IncSP_offset(irn, ofs);
1918 } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
1919 ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1920 be_set_IncSP_offset(irn, ofs);
1933 * A helper struct for the bias walker.
1936 be_abi_irg_t *env; /**< The ABI irg environment. */
1937 int start_block_bias; /**< The bias at the end of the start block. */
1938 ir_node *start_block; /**< The start block of the current graph. */
1942 * Block-Walker: fix all stack offsets
1944 static void stack_bias_walker(ir_node *bl, void *data)
1946 struct bias_walk *bw = data;
/* The start block was already processed by the caller with bias 0. */
1947 if (bl != bw->start_block) {
1948 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Compute the final stack entity offsets for the whole graph: first the
 * start block (bias 0), then all other blocks with the bias accumulated
 * at the end of the start block.
 */
1952 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1954 ir_graph *irg = env->birg->irg;
1955 struct bias_walk bw;
1957 stack_frame_compute_initial_offset(env->frame);
1958 // stack_layout_dump(stdout, env->frame);
1960 /* Determine the stack bias at the end of the start block. */
1961 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1963 /* fix the bias in all other blocks */
1965 bw.start_block = get_irg_start_block(irg);
1966 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
/** Return the Proj (from RegParams) associated with callee-save register @p reg. */
1969 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1971 assert(arch_register_type_is(reg, callee_save));
1972 assert(pmap_contains(abi->regs, (void *) reg));
1973 return pmap_get(abi->regs, (void *) reg);
/** Return the Proj (from RegParams) associated with ignore register @p reg. */
1976 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1978 assert(arch_register_type_is(reg, ignore));
1979 assert(pmap_contains(abi->regs, (void *) reg));
1980 return pmap_get(abi->regs, (void *) reg);
/** Return the barrier node created at the end of the prologue (start block). */
1983 ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi)
1985 return abi->start_barrier;
1989 _____ _____ _ _ _ _ _ _
1990 |_ _| __ \| \ | | | | | | | | |
1991 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1992 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1993 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1994 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1996 for Phi nodes which are created due to stack modifying nodes
1997 such as IncSP, AddSP and SetSP.
1999 These Phis are always to be ignored by the reg alloc and are
2000 fixed on the SP register of the ISA.
/**
 * irn handler callback: the ABI answers only for Phi nodes it created for
 * stack pointer SSA construction (recorded in abi->stack_phis); for all
 * other nodes NULL is returned so other handlers take over.
 */
2003 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
2005 const be_abi_irg_t *abi = get_abi_from_handler(handler);
2006 const void *res = NULL;
2008 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
2009 res = &abi->irn_ops;
/** "limited" callback: restrict the allowed registers to exactly the stack pointer. */
2014 static void be_abi_limited(void *data, bitset_t *bs)
2016 be_abi_irg_t *abi = data;
2017 bitset_clear_all(bs);
2018 bitset_set(bs, abi->isa->sp->index);
/**
 * Register requirements for ABI-managed stack Phis: the first output is
 * limited to the stack pointer register (via be_abi_limited); all inputs
 * get a plain requirement in the stack pointer's register class.
 */
2021 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
2023 be_abi_irg_t *abi = get_abi_from_ops(self);
2024 const arch_register_t *reg = abi->isa->sp;
2026 memset(req, 0, sizeof(req[0]));
2028 if(pos == BE_OUT_POS(0)) {
2029 req->cls = reg->reg_class;
2030 req->type = arch_register_req_type_limited;
2031 req->limited = be_abi_limited;
2032 req->limited_env = abi;
2035 else if(pos >= 0 && pos < get_irn_arity(irn)) {
2036 req->cls = reg->reg_class;
2037 req->type = arch_register_req_type_normal;
/* --- arch_irn_ops callbacks for ABI-managed stack Phis --- */

/** Setting the register is a no-op: the register is always fixed to SP. */
2043 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
/** The register of a stack Phi is always the ISA's stack pointer. */
2047 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
2049 const be_abi_irg_t *abi = get_abi_from_ops(self);
2050 return abi->isa->sp;
/** Stack Phis are classified as normal nodes. */
2053 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
2055 return arch_irn_class_normal;
/** Stack Phis are ignored by the allocator and modify the stack pointer. */
2058 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
2060 return arch_irn_flags_ignore | arch_irn_flags_modify_sp;
/** Stack Phis carry no frame entity. */
2063 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
/** No frame entity to set — no-op. */
2068 static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent)
/** No frame offset to set — no-op. */
2072 static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias)
/** Stack Phis themselves add no SP bias. */
2076 static int abi_get_sp_bias(const void *self, const ir_node *irn)
/** vtable binding the ABI's arch_irn_ops callbacks (for stack Phis). */
2081 static const arch_irn_ops_if_t abi_irn_ops = {
2082 abi_get_irn_reg_req,
2087 abi_get_frame_entity,
2088 abi_set_frame_entity,
2089 abi_set_frame_offset,
2091 NULL, /* get_inverse */
2092 NULL, /* get_op_estimated_cost */
2093 NULL, /* possible_memory_operand */
2094 NULL, /* perform_memory_operand */
/** Handler that dispatches stack Phis to abi_irn_ops (see abi_get_irn_ops). */
2097 static const arch_irn_handler_t abi_irn_handler = {
2102 * Returns non-zero if the ABI has omitted the frame pointer in
2103 * the current graph.
2105 int be_abi_omit_fp(const be_abi_irg_t *abi) {
2106 return abi->call->flags.bits.try_omit_fp;