4 * @author Sebastian Hack
19 #include "irgraph_t.h"
22 #include "iredges_t.h"
25 #include "irprintf_t.h"
33 #include "besched_t.h"
/* NOTE(review): classic function-like macros — each argument is evaluated
 * twice, so never pass expressions with side effects (e.g. MAX(i++, j)). */
35 #define MAX(x, y) ((x) > (y) ? (x) : (y))
36 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/** Description of a single ABI call argument or result.
 *  NOTE(review): further fields (e.g. the argument's position, used by
 *  cmp_call_arg()) are not visible in this excerpt. */
38 typedef struct _be_abi_call_arg_t {
39 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
40 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
41 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
44 const arch_register_t *reg; /**< The register used when transmitted in a register. */
47 unsigned space_before; /**< Free space (bytes) inserted before the argument on the stack. */
/** An ABI call object: describes how parameters and results of one call
 *  type are passed. */
51 struct _be_abi_call_t {
52 be_abi_call_flags_t flags; /**< Flags controlling the ABI behavior of this call. */
53 const be_abi_callbacks_t *cb; /**< Backend callbacks (e.g. epilogue construction). */
54 ir_type *between_type; /**< The "between" part of the stack layout, see be_stack_layout_t. */
/** Number of stack frame parts: arguments, between area and local frame. */
58 #define N_FRAME_TYPES 3
61 * This type describes the stack layout.
62 * The stack is divided into 3 parts:
63 * - arg_type: A struct type describing the stack arguments and its order.
64 * - between_type: A struct type describing the stack layout between arguments
66 * - frame_type: A class type describing the frame layout
68 typedef struct _be_stack_layout_t {
69 ir_type *arg_type; /**< A type describing the stack argument layout. */
70 ir_type *between_type; /**< A type describing the "between" layout. */
71 ir_type *frame_type; /**< The frame type. */
73 ir_type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
76 int stack_dir; /**< -1 for decreasing, 1 for increasing. */
/** Per-irg ABI environment threaded through all lowering phases here. */
79 struct _be_abi_irg_t {
81 be_stack_layout_t *frame; /**< The stack frame model. */
82 const be_irg_t *birg; /**< The back end IRG. */
83 const arch_isa_t *isa; /**< The isa. */
84 survive_dce_t *dce_survivor; /* NOTE(review): presumably keeps nodes alive across dead code elimination — confirm. */
86 be_abi_call_t *call; /**< The ABI call information. */
87 ir_type *method_type; /**< The type of the method of the IRG. */
89 ir_node *init_sp; /**< The node representing the stack pointer
90 at the start of the function. */
92 ir_node *reg_params; /**< The reg params node. */
93 pmap *regs; /**< A map of all callee-save and ignore regs to
94 their Projs to the RegParams node. */
96 pset *stack_phis; /**< The set of all Phi nodes inserted due to
97 stack pointer modifying nodes. */
99 int start_block_bias; /**< The stack bias at the end of the start block. */
101 void *cb; /**< ABI Callback self pointer. */
103 pmap *keep_map; /**< mapping blocks to keep nodes. */
104 pset *ignore_regs; /**< Additional registers which shall be ignored. */
106 arch_irn_handler_t irn_handler; /**< Embedded irn handler; recovered via get_abi_from_handler(). */
107 arch_irn_ops_t irn_ops; /**< Embedded irn ops; recovered via get_abi_from_ops(). */
108 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/* Recover the surrounding be_abi_irg_t from its embedded handler/ops member. */
111 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
112 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
114 /* Forward declarations, since we need them in be_abi_introduce(). */
115 static const arch_irn_ops_if_t abi_irn_ops;
116 static const arch_irn_handler_t abi_irn_handler;
118 /* Flag: if set, try to omit the frame pointer if called by the backend */
122 _ ____ ___ ____ _ _ _ _
123 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
124 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
125 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
126 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
128 These callbacks are used by the backend to set the parameters
129 for a specific call type.
133 * Set compare function: compares two ABI call object arguments.
135 static int cmp_call_arg(const void *a, const void *b, size_t n)
137 const be_abi_call_arg_t *p = a, *q = b;
138 return !(p->is_res == q->is_res && p->pos == q->pos);
142 * Get or set an ABI call object argument.
144 * @param call the abi call
145 * @param is_res true for call results, false for call arguments
146 * @param pos position of the argument
147 * @param do_insert true if the argument is set, false if it's retrieved
149 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
151 be_abi_call_arg_t arg;
/* Zero-initialize the lookup key; equality only depends on is_res and
   pos (see cmp_call_arg). */
154 memset(&arg, 0, sizeof(arg));
/* Cheap hash combining kind and position; collisions are resolved by
   cmp_call_arg. */
158 hash = is_res * 128 + pos;
/* Insert a fresh entry or look up the existing one. */
161 ? set_insert(call->params, &arg, sizeof(arg), hash)
162 : set_find(call->params, &arg, sizeof(arg), hash);
166 * Retrieve an ABI call object argument.
168 * @param call the ABI call object
169 * @param is_res true for call results, false for call arguments
170 * @param pos position of the argument
172 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
174 return get_or_set_call_arg(call, is_res, pos, 0);
/**
 * Set the flags and backend callbacks for a call.
 *
 * @param call  the ABI call object
 * @param flags the flags to record for this call
 * @param cb    the backend callbacks used for this call
 */
177 /* Set the flags for a call. */
178 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/**
 * Record that a call parameter is passed on the stack.
 *
 * @param call         the ABI call object
 * @param arg_pos      position of the parameter
 * @param alignment    stack alignment of the parameter (must be > 0)
 * @param space_before free space before the parameter (bytes)
 * @param space_after  free space after the parameter (bytes)
 */
184 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
186 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
188 arg->alignment = alignment;
189 arg->space_before = space_before;
190 arg->space_after = space_after;
/* NOTE(review): this assertion fires only after the values were already
   stored; validating alignment before the assignments would be cleaner. */
191 assert(alignment > 0 && "Alignment must be greater than 0");
/**
 * Record that a call parameter is passed in the given register.
 *
 * @param call    the ABI call object
 * @param arg_pos position of the parameter
 * @param reg     the register transmitting the parameter
 */
194 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
196 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/**
 * Record that a call result is returned in the given register.
 *
 * @param call    the ABI call object
 * @param arg_pos position of the result
 * @param reg     the register transmitting the result
 */
201 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
203 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
208 /* Get the flags of an ABI call object. */
209 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
215 * Constructor for a new ABI call object.
217 * @return the new ABI call object
219 static be_abi_call_t *be_abi_call_new()
/* NOTE(review): xmalloc presumably aborts on OOM, so no NULL check here. */
221 be_abi_call_t *call = xmalloc(sizeof(call[0]));
/* The argument set is keyed by (is_res, pos); see cmp_call_arg(). */
223 call->params = new_set(cmp_call_arg, 16);
/* Honor the global "omit frame pointer" backend option by default. */
226 call->flags.bits.try_omit_fp = be_omit_fp;
231 * Destructor for an ABI call object.
/* Releases the argument set owned by the call object. */
233 static void be_abi_call_free(be_abi_call_t *call)
235 del_set(call->params);
241 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
242 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
243 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
244 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
247 Handling of the stack frame. It is composed of three types:
248 1) The type of the arguments which are pushed on the stack.
249 2) The "between type" which consists of stuff the call of the
250 function pushes on the stack (like the return address and
251 the old base pointer for ia32).
252 3) The Firm frame type which consists of all local variables
/**
 * Compute the offset of a frame entity relative to the frame pointer.
 *
 * @param frame the stack layout model
 * @param ent   an entity owned by one of the frame's three types
 * @param bias  the current stack bias to account for
 */
256 static int get_stack_entity_offset(be_stack_layout_t *frame, entity *ent, int bias)
258 ir_type *t = get_entity_owner(ent);
259 int ofs = get_entity_offset_bytes(ent);
263 /* Find the type the entity is contained in. */
264 for(index = 0; index < N_FRAME_TYPES; ++index) {
265 if(frame->order[index] == t)
269 /* Add the size of all the types below the one of the entity to the entity's offset */
270 for(i = 0; i < index; ++i)
271 ofs += get_type_size_bytes(frame->order[i]);
273 /* correct the offset by the initial position of the frame pointer */
274 ofs -= frame->initial_offset;
276 /* correct the offset with the current bias. */
283 * Retrieve the entity with given offset from a frame type.
285 static entity *search_ent_with_offset(ir_type *t, int offset)
/* Linear scan over the compound's members; the first exact match wins. */
289 for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
290 entity *ent = get_compound_member(t, i);
291 if(get_entity_offset_bytes(ent) == offset)
298 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
300 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
301 entity *ent = search_ent_with_offset(base, 0);
302 frame->initial_offset = 0;
303 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
304 return frame->initial_offset;
308 * Initializes the frame layout from parts
310 * @param frame the stack layout that will be initialized
311 * @param args the stack argument layout type
312 * @param between the between layout type
313 * @param locals the method frame type
314 * @param stack_dir the stack direction
316 * @return the initialized stack layout
318 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
319 ir_type *between, ir_type *locals, int stack_dir)
321 frame->arg_type = args;
322 frame->between_type = between;
323 frame->frame_type = locals;
324 frame->initial_offset = 0;
325 frame->stack_dir = stack_dir;
/* The between area always sits in the middle of the ordering. */
326 frame->order[1] = between;
/* Whether args or locals come first depends on the stack growth
   direction (the branching condition is not visible in this excerpt). */
329 frame->order[0] = args;
330 frame->order[2] = locals;
333 frame->order[0] = locals;
334 frame->order[2] = args;
339 /** Dumps the stack layout to file. */
340 static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
344 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
/* Print each frame part in layout order together with its members. */
345 for (j = 0; j < N_FRAME_TYPES; ++j) {
346 ir_type *t = frame->order[j];
348 ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
349 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
350 entity *ent = get_compound_member(t, i);
351 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
357 * Returns non-zero if the call argument at given position
358 * is transfered on the stack.
360 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
362 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
363 return arg && !arg->in_reg;
373 Adjustment of the calls inside a graph.
378 * Transform a call node.
379 * @param env The ABI environment for the current irg.
380 * @param irn The call node.
381 * @param curr_sp The stack pointer node to use.
382 * @return The stack pointer after the call.
384 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
386 ir_graph *irg = env->birg->irg;
387 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
388 be_abi_call_t *call = be_abi_call_new();
389 ir_type *mt = get_Call_type(irn);
390 ir_node *call_ptr = get_Call_ptr(irn);
391 int n_params = get_method_n_params(mt);
392 ir_node *curr_mem = get_Call_mem(irn);
393 ir_node *bl = get_nodes_block(irn);
394 pset *results = pset_new_ptr(8);
395 pset *caller_save = pset_new_ptr(8);
397 int stack_dir = arch_isa_stack_dir(isa);
398 const arch_register_t *sp = arch_isa_sp(isa);
399 ir_mode *mach_mode = sp->reg_class->mode;
400 struct obstack *obst = &env->obst;
401 ir_node *no_mem = get_irg_no_mem(irg);
402 int no_alloc = call->flags.bits.frame_is_setup_on_call;
404 ir_node *res_proj = NULL;
/* Start result proj numbering past the Call's own proj numbers. */
405 int curr_res_proj = pn_Call_max;
412 const ir_edge_t *edge;
417 /* Let the isa fill out the abi description for that call node. */
418 arch_isa_get_call_abi(isa, mt, call);
420 /* Insert code to put the stack arguments on the stack. */
421 assert(get_Call_n_params(irn) == n_params);
/* First pass: compute the total stack size and remember the positions
   of all stack-passed parameters on the obstack. */
422 for(i = 0; i < n_params; ++i) {
423 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
426 stack_size += arg->space_before;
427 stack_size = round_up2(stack_size, arg->alignment);
428 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
429 stack_size += arg->space_after;
430 obstack_int_grow(obst, i);
434 pos = obstack_finish(obst);
436 /* Collect all arguments which are passed in registers. */
437 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
438 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
439 if(arg && arg->in_reg) {
440 obstack_int_grow(obst, i);
444 low_args = obstack_finish(obst);
446 /* If there are some parameters which shall be passed on the stack. */
449 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
452 * Reverse list of stack parameters if call arguments are from left to right.
453 * We must reverse them again if they are pushed (not stored) and the stack
454 * direction is downwards.
456 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
457 for(i = 0; i < n_pos >> 1; ++i) {
458 int other = n_pos - i - 1;
466 * If the stack is decreasing and we do not want to store sequentially,
467 * or someone else allocated the call frame
468 * we allocate as much space on the stack all parameters need, by
469 * moving the stack pointer along the stack's direction.
471 if(stack_dir < 0 && !do_seq && !no_alloc) {
472 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
475 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
/* Second pass: emit the actual stores/copies of the stack parameters. */
476 for(i = 0; i < n_pos; ++i) {
478 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
479 ir_node *param = get_Call_param(irn, p);
480 ir_node *addr = curr_sp;
482 ir_type *param_type = get_method_param_type(mt, p);
483 int param_size = get_type_size_bytes(param_type) + arg->space_after;
486 * If we wanted to build the arguments sequentially,
487 * the stack pointer for the next must be incremented,
488 * and the memory value propagated.
492 addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
493 param_size + arg->space_before, be_stack_dir_expand);
496 curr_ofs += arg->space_before;
497 curr_ofs = round_up2(curr_ofs, arg->alignment);
499 /* Make the expression to compute the argument's offset. */
501 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
502 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
506 /* Insert a store for primitive arguments. */
507 if (is_atomic_type(param_type)) {
508 mem = new_r_Store(irg, bl, curr_mem, addr, param);
509 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
512 /* Make a mem copy for compound arguments. */
514 assert(mode_is_reference(get_irn_mode(param)));
515 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
516 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
519 curr_ofs += param_size;
524 obstack_ptr_grow(obst, mem);
527 in = (ir_node **) obstack_finish(obst);
529 /* We need the sync only, if we didn't build the stores sequentially. */
531 curr_mem = new_r_Sync(irg, bl, n_pos, in);
532 obstack_free(obst, in);
535 /* Collect caller save registers */
536 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
538 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
539 for(j = 0; j < cls->n_regs; ++j) {
540 const arch_register_t *reg = arch_register_for_index(cls, j);
541 if(arch_register_type_is(reg, caller_save))
542 pset_insert_ptr(caller_save, (void *) reg);
546 /* search the greatest result proj number */
548 /* TODO: what if the result is NOT used? Currently there is
549 * no way to detect this later, especially there is no way to
550 * see this in the proj numbers.
551 * While this is ok for the register allocator, it is bad for
552 * backends which need to change the be_Call further (x87 simulator
553 * for instance. However for this particular case the call_type is
556 foreach_out_edge(irn, edge) {
557 const ir_edge_t *res_edge;
558 ir_node *irn = get_edge_src_irn(edge);
560 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
562 foreach_out_edge(irn, res_edge) {
564 be_abi_call_arg_t *arg;
565 ir_node *res = get_edge_src_irn(res_edge);
567 assert(is_Proj(res));
569 proj = get_Proj_proj(res);
570 arg = get_call_arg(call, 1, proj);
573 shift the proj number to the right, since we will drop the
574 unspeakable Proj_T from the Call. Therefore, all real argument
575 Proj numbers must be increased by pn_be_Call_first_res
577 proj += pn_be_Call_first_res;
578 set_Proj_proj(res, proj);
579 obstack_ptr_grow(obst, res);
581 if(proj > curr_res_proj)
582 curr_res_proj = proj;
/* A register carrying a result is no longer merely caller-save here. */
584 pset_remove_ptr(caller_save, arg->reg);
585 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
/* NULL-terminate the collected result projs. */
592 obstack_ptr_grow(obst, NULL);
593 res_projs = obstack_finish(obst);
595 /* make the back end call node and set its register requirements. */
596 for(i = 0; i < n_low_args; ++i)
597 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
599 in = obstack_finish(obst);
/* Direct calls to a SymConst can carry the callee as an immediate. */
601 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
602 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
603 curr_res_proj + pset_count(caller_save), n_low_args, in,
605 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
609 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
610 curr_res_proj + pset_count(caller_save), n_low_args, in,
615 Set the register class of the call address to the same as the stack pointer's.
616 That's probably buggy for some architectures.
618 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
620 /* Set the register classes and constraints of the Call parameters. */
621 for(i = 0; i < n_low_args; ++i) {
622 int index = low_args[i];
623 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
624 assert(arg->reg != NULL);
626 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
629 /* Set the register constraints of the results. */
630 for(i = 0; res_projs[i]; ++i) {
631 ir_node *irn = res_projs[i];
632 int proj = get_Proj_proj(irn);
634 /* Correct Proj number since it has been adjusted! (see above) */
635 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
638 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
640 obstack_free(obst, in);
641 exchange(irn, low_call);
643 /* redirect the result projs to the lowered call instead of the Proj_T */
644 for(i = 0; res_projs[i]; ++i)
645 set_Proj_pred(res_projs[i], low_call);
647 /* Make additional projs for the caller save registers
648 and the Keep node which keeps them alive. */
649 if(pset_count(caller_save) > 0) {
650 const arch_register_t *reg;
654 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
655 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
657 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
658 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
659 set_irn_link(proj, (void *) reg);
660 obstack_ptr_grow(obst, proj);
664 in = (ir_node **) obstack_finish(obst);
665 keep = be_new_Keep(NULL, irg, bl, n, in);
666 for(i = 0; i < n; ++i) {
667 const arch_register_t *reg = get_irn_link(in[i]);
668 be_node_set_reg_class(keep, i, reg->reg_class);
670 obstack_free(obst, in);
673 /* Clean up the stack. */
675 ir_node *mem_proj = NULL;
677 foreach_out_edge(low_call, edge) {
678 ir_node *irn = get_edge_src_irn(edge);
679 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
686 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
688 /* Clean up the stack frame if we allocated it */
690 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
693 be_abi_call_free(call);
694 obstack_free(obst, pos);
696 del_pset(caller_save);
703 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
/**
 * @param env     the ABI environment for the current irg
 * @param alloc   the Alloc node to transform
 * @param curr_sp the current stack pointer node
 * @return the stack pointer after the allocation
 */
705 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
/* Only stack allocations concern the stack pointer chain. */
707 if (get_Alloc_where(alloc) == stack_alloc) {
708 ir_node *bl = get_nodes_block(alloc);
709 ir_graph *irg = get_irn_irg(bl);
710 ir_node *alloc_mem = NULL;
711 ir_node *alloc_res = NULL;
713 const ir_edge_t *edge;
/* Find the Alloc's memory and result Projs. */
716 foreach_out_edge(alloc, edge) {
717 ir_node *irn = get_edge_src_irn(edge);
719 assert(is_Proj(irn));
720 switch(get_Proj_proj(irn)) {
732 /* Beware: currently Alloc nodes without a result might happen,
733 only escape analysis kills them and this phase runs only for object
734 oriented source. We kill the Alloc here. */
735 if (alloc_res == NULL) {
736 exchange(alloc_mem, get_Alloc_mem(alloc))
740 /* The stack pointer will be modified in an unknown manner.
741 We cannot omit it. */
742 env->call->flags.bits.try_omit_fp = 0;
743 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
745 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
747 if(alloc_mem != NULL)
748 exchange(alloc_mem, new_r_NoMem(irg));
757 * Walker for dependent_on().
758 * This function searches a node tgt recursively from a given node
759 * but is restricted to the given block.
760 * @return 1 if tgt was reachable from curr, 0 if not.
762 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
/* Already visited in this traversal: nothing new to find here. */
766 if(get_irn_visited(curr) >= visited_nr)
769 set_irn_visited(curr, visited_nr)
/* Leaving the block terminates this branch of the search. */
770 if(get_nodes_block(curr) != bl)
776 /* Phi functions stop the recursion inside a basic block */
/* Recurse into all operands of the current node. */
778 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
779 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
788 * Check if a node is somehow data dependent on another one.
789 * both nodes must be in the same basic block.
790 * @param n1 The first node.
791 * @param n2 The second node.
792 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
794 static int dependent_on(ir_node *n1, ir_node *n2)
796 ir_node *bl = get_nodes_block(n1);
797 ir_graph *irg = get_irn_irg(bl);
798 long vis_nr = get_irg_visited(irg) + 1;
800 assert(bl == get_nodes_block(n2));
801 set_irg_visited(irg, vis_nr);
802 return check_dependence(n1, n2, bl, vis_nr);
805 static int cmp_call_dependecy(const void *c1, const void *c2)
807 ir_node *n1 = *(ir_node **) c1;
808 ir_node *n2 = *(ir_node **) c2;
811 Classical qsort() comparison function behavior:
812 0 if both elements are equal
813 1 if second is "smaller" that first
814 -1 if first is "smaller" that second
816 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
820 * Walker: links all Call nodes to the Block they are contained.
822 static void link_calls_in_block_walker(ir_node *irn, void *data)
825 be_abi_irg_t *env = data;
826 ir_node *bl = get_nodes_block(irn);
827 void *save = get_irn_link(bl);
/* A Call was seen, so the graph is not a leaf procedure. */
829 env->call->flags.bits.irg_is_leaf = 0;
/* Prepend the call to the block's intrusive list of calls. */
831 set_irn_link(irn, save);
832 set_irn_link(bl, irn);
838 * Process all Call nodes inside a basic block.
839 * Note that the link field of the block must contain a linked list of all
840 * Call nodes inside the Block. We first order this list according to data dependency
841 * and then connect the calls together.
843 static void process_calls_in_block(ir_node *bl, void *data)
845 be_abi_irg_t *env = data;
846 ir_node *curr_sp = env->init_sp;
/* Collect the block's Call nodes from its link-field list. */
850 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
851 obstack_ptr_grow(&env->obst, irn);
853 /* If there were call nodes in the block. */
859 nodes = obstack_finish(&env->obst);
861 /* order the call nodes according to data dependency */
862 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* Walk in dependency order, threading the stack pointer through the
   adjusted calls and allocs. */
864 for(i = n - 1; i >= 0; --i) {
865 ir_node *irn = nodes[i];
867 switch(get_irn_opcode(irn)) {
869 curr_sp = adjust_call(env, irn, curr_sp);
872 curr_sp = adjust_alloc(env, irn, curr_sp);
879 obstack_free(&env->obst, nodes);
881 /* Keep the last stack state in the block by tying it to Keep node */
883 keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
884 pmap_insert(env->keep_map, bl, keep);
/* Remember the final stack state in the block's link field. */
887 set_irn_link(bl, curr_sp);
891 * Adjust all call nodes in the graph to the ABI conventions.
893 static void process_calls(be_abi_irg_t *env)
895 ir_graph *irg = env->birg->irg;
897 env->call->flags.bits.irg_is_leaf = 1;
898 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
899 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
902 static void collect_return_walker(ir_node *irn, void *data)
904 if(get_irn_opcode(irn) == iro_Return) {
905 struct obstack *obst = data;
906 obstack_ptr_grow(obst, irn);
/**
 * Build the function prologue's stack/frame pointer setup.
 * @return presumably the frame pointer node — tail of the function is
 *         not visible in this excerpt.
 */
911 static ir_node *setup_frame(be_abi_irg_t *env)
913 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
914 const arch_register_t *sp = isa->sp;
915 const arch_register_t *bp = isa->bp;
916 be_abi_call_flags_bits_t flags = env->call->flags.bits;
917 ir_graph *irg = env->birg->irg;
918 ir_node *bl = get_irg_start_block(irg);
919 ir_node *no_mem = get_irg_no_mem(irg);
920 ir_node *old_frame = get_irg_frame(irg);
921 ir_node *stack = pmap_get(env->regs, (void *) sp);
922 ir_node *frame = pmap_get(env->regs, (void *) bp);
924 int stack_nr = get_Proj_proj(stack);
/* Without a frame pointer only the stack pointer is adjusted. */
926 if(flags.try_omit_fp) {
927 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
/* Otherwise materialize the frame pointer as a copy of the stack pointer. */
932 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
934 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
936 be_set_constr_single_reg(frame, -1, bp);
937 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
938 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
941 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
944 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
945 env->init_sp = stack;
946 set_irg_frame(irg, frame);
/* Route all users of the old frame node to the new one. */
947 edges_reroute(old_frame, frame, irg);
/**
 * Build the epilogue's stack restore before a Return and collect the
 * Return's register operands on the obstack.
 *
 * @param env     the ABI environment
 * @param ret     the Return node
 * @param reg_map map of registers to their current value nodes
 * @param obst    obstack collecting the Return's operands
 */
952 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
954 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
955 const arch_register_t *sp = isa->sp;
956 const arch_register_t *bp = isa->bp;
957 ir_graph *irg = env->birg->irg;
958 ir_node *ret_mem = get_Return_mem(ret);
959 ir_node *frame = get_irg_frame(irg);
960 ir_node *bl = get_nodes_block(ret);
961 ir_node *stack = get_irn_link(bl);
/* Without a frame pointer, undo the prologue's IncSP... */
965 if(env->call->flags.bits.try_omit_fp) {
966 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
/* ...otherwise restore the stack pointer from the frame pointer. */
970 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
971 be_set_constr_single_reg(stack, -1, sp);
972 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
/* Grow the Return's operands: sp, bp and all callee-save/ignore regs. */
975 pmap_foreach(env->regs, ent) {
976 const arch_register_t *reg = ent->key;
977 ir_node *irn = ent->value;
980 obstack_ptr_grow(&env->obst, stack);
982 obstack_ptr_grow(&env->obst, frame);
983 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
984 obstack_ptr_grow(obst, irn);
991 * Computes the stack argument layout type.
992 * Changes a possibly allocated value param type by moving
993 * entities to the stack layout type.
995 * @param env the ABI environment
996 * @param call the current call ABI
997 * @param method_type the method type
999 * @return the stack argument layout type
1001 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
/* Iterate the parameters in stack layout order: direction depends on
   both the argument order convention and the stack growth direction. */
1003 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1004 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
1005 int n = get_method_n_params(method_type);
1006 int curr = inc > 0 ? 0 : n - 1;
1012 ir_type *val_param_tp = get_method_value_param_type(method_type);
1013 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
/* A fresh struct type named "<function>_arg_type" holds the stack args. */
1015 res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
1016 for (i = 0; i < n; ++i, curr += inc) {
1017 ir_type *param_type = get_method_param_type(method_type, curr);
1018 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1020 if (arg->on_stack) {
1022 /* the entity was already created, move it to the param type */
1023 arg->stack_ent = get_method_value_param_ent(method_type, i);
1024 remove_struct_member(val_param_tp, arg->stack_ent);
1025 set_entity_owner(arg->stack_ent, res);
1026 add_struct_member(res, arg->stack_ent);
1027 /* must be automatic to set a fixed layout */
1028 set_entity_allocation(arg->stack_ent, allocation_automatic);
/* No value-param entity existed: create a fresh "param_<i>" entity. */
1031 snprintf(buf, sizeof(buf), "param_%d", i);
1032 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
/* Lay out the entity: padding before, alignment, then advance past it. */
1034 ofs += arg->space_before;
1035 ofs = round_up2(ofs, arg->alignment);
1036 set_entity_offset_bytes(arg->stack_ent, ofs);
1037 ofs += arg->space_after;
1038 ofs += get_type_size_bytes(param_type);
1041 set_type_size_bytes(res, ofs);
1042 set_type_state(res, layout_fixed);
/**
 * Create a Perm per register class behind the RegParams node and update
 * the register map to point at the Perm's arguments.
 */
1046 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1049 struct obstack obst;
1051 obstack_init(&obst);
1053 /* Create a Perm after the RegParams node to delimit it. */
1054 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1055 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
/* Collect the class's mapped, non-ignore registers and remember each
   register in its node's link field. */
1060 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1061 const arch_register_t *reg = &cls->regs[j];
1062 ir_node *irn = pmap_get(regs, (void *) reg);
1064 if(irn && !arch_register_type_is(reg, ignore)) {
1066 obstack_ptr_grow(&obst, irn);
1067 set_irn_link(irn, (void *) reg);
1071 obstack_ptr_grow(&obst, NULL);
1072 in = obstack_finish(&obst);
1074 perm = be_new_Perm(cls, irg, bl, n_regs, in);
/* Re-register the inputs and pin each Perm output to its register. */
1075 for(j = 0; j < n_regs; ++j) {
1076 ir_node *arg = in[j];
1077 arch_register_t *reg = get_irn_link(arg);
1078 pmap_insert(regs, reg, arg);
1079 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1082 obstack_free(&obst, in);
1085 obstack_free(&obst, NULL);
1089 const arch_register_t *reg;
1093 static int cmp_regs(const void *a, const void *b)
1095 const reg_node_map_t *p = a;
1096 const reg_node_map_t *q = b;
1098 if(p->reg->reg_class == q->reg->reg_class)
1099 return p->reg->index - q->reg->index;
1101 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flatten a register -> node pmap into a sorted array on the obstack.
 * The result is ordered by cmp_regs (class, then index).
 */
1104 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1107 int n = pmap_count(reg_map);
1109 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
/* Copy every map entry into the array. */
1111 pmap_foreach(reg_map, ent) {
1112 res[i].reg = ent->key;
1113 res[i].irn = ent->value;
/* Sort for a deterministic register order. */
1117 qsort(res, n, sizeof(res[0]), cmp_regs);
1122 * Creates a barrier.
/**
 * @param env    the ABI environment
 * @param bl     the block to place the barrier in
 * @param mem    in/out: current memory, replaced by the barrier's memory proj
 * @param regs   in/out: register map, entries replaced by barrier projs
 * @param in_req if set, also constrain the barrier's inputs to the registers
 */
1124 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1126 ir_graph *irg = env->birg->irg;
1127 int n_regs = pmap_count(regs);
/* Feed all mapped register values (plus memory) into one Barrier node. */
1133 rm = reg_map_to_arr(&env->obst, regs);
1135 for(n = 0; n < n_regs; ++n)
1136 obstack_ptr_grow(&env->obst, rm[n].irn);
1139 obstack_ptr_grow(&env->obst, *mem);
1143 in = (ir_node **) obstack_finish(&env->obst);
1144 irn = be_new_Barrier(irg, bl, n, in);
1145 obstack_free(&env->obst, in);
/* Re-create one proj per register and constrain both sides. */
1147 for(n = 0; n < n_regs; ++n) {
1148 const arch_register_t *reg = rm[n].reg;
1150 int pos = BE_OUT_POS(n);
1153 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1154 be_node_set_reg_class(irn, n, reg->reg_class);
1156 be_set_constr_single_reg(irn, n, reg);
1157 be_set_constr_single_reg(irn, pos, reg);
1158 be_node_set_reg_class(irn, pos, reg->reg_class);
1159 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1161 /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
1162 if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
1163 flags |= arch_irn_flags_ignore;
1165 if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
1166 flags |= arch_irn_flags_modify_sp;
1168 be_node_set_flags(irn, pos, flags);
/* The barrier's proj replaces the register's old value in the map. */
1170 pmap_insert(regs, (void *) reg, proj);
/* Memory is threaded through the barrier as well. */
1174 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1177 obstack_free(&env->obst, rm);
1182 * Creates a be_Return for a Return node.
1184 * @param @env the abi environment
1185 * @param irn the Return node or NULL if there was none
1186 * @param bl the block where the be_Return should be placed
1187 * @param mem the current memory
1188 * @param n_res number of return results
1190 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
1191 be_abi_call_t *call = env->call;
1192 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1194 pmap *reg_map = pmap_create();
1195 ir_node *keep = pmap_get(env->keep_map, bl);
1201 const arch_register_t **regs;
1205 get the valid stack node in this block.
1206 If we had a call in that block there is a Keep constructed by process_calls()
1207 which points to the last stack modification in that block. we'll use
1208 it then. Else we use the stack from the start block and let
1209 the ssa construction fix the usage.
1211 stack = be_abi_reg_map_get(env->regs, isa->sp);
/* Detach the Keep: its stack operand becomes our stack node. */
1213 ir_node *bad = new_r_Bad(env->birg->irg);
1214 stack = get_irn_n(keep, 0);
1215 set_nodes_block(keep, bad);
1216 set_irn_n(keep, 0, bad);
1217 // exchange(keep, new_r_Bad(env->birg->irg));
1220 /* Insert results for Return into the register map. */
1221 for(i = 0; i < n_res; ++i) {
1222 ir_node *res = get_Return_res(irn, i);
1223 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1224 assert(arg->in_reg && "return value must be passed in register");
1225 pmap_insert(reg_map, (void *) arg->reg, res);
1228 /* Add uses of the callee save registers. */
1229 pmap_foreach(env->regs, ent) {
1230 const arch_register_t *reg = ent->key;
1231 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1232 pmap_insert(reg_map, ent->key, ent->value);
1235 be_abi_reg_map_set(reg_map, isa->sp, stack);
1237 /* Make the Epilogue node and call the arch's epilogue maker. */
1238 create_barrier(env, bl, &mem, reg_map, 1);
1239 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1242 Maximum size of the in array for Return nodes is
1243 return args + callee save/ignore registers + memory + stack pointer
1245 in_max = pmap_count(reg_map) + n_res + 2;
1247 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1248 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1251 in[1] = be_abi_reg_map_get(reg_map, isa->sp);
1256 /* clear SP entry, since it has already been grown. */
1257 pmap_insert(reg_map, (void *) isa->sp, NULL);
/* Append the result values in their return registers. */
1258 for(i = 0; i < n_res; ++i) {
1259 ir_node *res = get_Return_res(irn, i);
1260 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1262 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1263 regs[n++] = arg->reg;
1265 /* Clear the map entry to mark the register as processed. */
1266 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1269 /* grow the rest of the stuff. */
1270 pmap_foreach(reg_map, ent) {
1273 regs[n++] = ent->key;
1277 /* The in array for the new back end return is now ready. */
1278 ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
1280 /* Set the register classes of the return's parameter accordingly. */
1281 for(i = 0; i < n; ++i)
1283 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1285 /* Free the space of the Epilog's in array and the register <-> proj map. */
1286 obstack_free(&env->obst, in);
1287 pmap_destroy(reg_map);
/** Context passed to lower_frame_sels_walker(). */
typedef struct lower_frame_sels_env_t {
	entity *value_param_list; /**< the list of all value param entities */
} lower_frame_sels_env_t;
1298 * Walker: Replaces Sels of frame type and
1299 * value param type entities by FrameAddress.
static void lower_frame_sels_walker(ir_node *irn, void *data)
	lower_frame_sels_env_t *ctx = data;
	ir_graph *irg = current_ir_graph;
	ir_node *frame = get_irg_frame(irg);
	ir_node *param_base = get_irg_value_param_base(irg);
	ir_node *ptr = get_Sel_ptr(irn);
	/* Only Sels based on the frame or the value-param base are rewritten. */
	if (ptr == frame || ptr == param_base) {
		be_abi_irg_t *env = ctx->env;
		entity *ent = get_Sel_entity(irn);
		ir_node *bl = get_nodes_block(irn);
		/* Replace the Sel by a backend FrameAddr on the frame pointer. */
		nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
		/* Remember value-parameter entities; they may need a backing store later. */
		if (ptr == param_base) {
			set_entity_link(ent, ctx->value_param_list);
			ctx->value_param_list = ent;
1329 * Check if a value parameter is transmitted as a register.
1330 * This might happen if the address of an parameter is taken which is
1331 * transmitted in registers.
1333 * Note that on some architectures this case must be handled specially
1334 * because the place of the backing store is determined by their ABI.
1336 * In the default case we move the entity to the frame type and create
1337 * a backing store into the first block.
static void fix_address_of_parameter_access(be_abi_irg_t *env, entity *value_param_list) {
	be_abi_call_t *call = env->call;
	ir_graph *irg = env->birg->irg;
	entity *ent, *next_ent, *new_list;
	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
	/* Collect all value-param entities whose argument is passed in a register:
	   those need a backing store on the frame. */
	for (ent = value_param_list; ent; ent = next_ent) {
		int i = get_struct_member_index(get_entity_owner(ent), ent);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		next_ent = get_entity_link(ent);
		DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
		set_entity_link(ent, new_list);
	/* ok, change the graph */
	ir_node *start_bl = get_irg_start_block(irg);
	ir_node *first_bl = NULL;
	ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
	const ir_edge_t *edge;
	optimization_state_t state;
	/* Find the first block after the start block; the stores go there. */
	foreach_block_succ(start_bl, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		if (start_bl != succ) {
	/* we had already removed critical edges, so the following
	   assertion should be always true. */
	assert(get_Block_n_cfgpreds(first_bl) == 1);
	/* now create backing stores */
	frame = get_irg_frame(irg);
	imem = get_irg_initial_mem(irg);
	/* Optimizations are disabled so the new Proj is not CSE'd away. */
	save_optimization_state(&state);
	nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
	restore_optimization_state(&state);
	/* reroute all edges to the new memory source */
	edges_reroute(imem, nmem, irg);
	args = get_irg_args(irg);
	args_bl = get_nodes_block(args);
	for (ent = new_list; ent; ent = get_entity_link(ent)) {
		int i = get_struct_member_index(get_entity_owner(ent), ent);
		ir_type *tp = get_entity_type(ent);
		ir_mode *mode = get_type_mode(tp);
		/* address for the backing store */
		addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
		mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
		/* the backing store itself */
		store = new_r_Store(irg, first_bl, mem, addr,
			new_r_Proj(irg, args_bl, args, mode, i));
		/* the new memory Proj gets the last Proj from store */
		set_Proj_pred(nmem, store);
		set_Proj_proj(nmem, pn_Store_M);
	/* move all entities to the frame type */
	frame_tp = get_irg_frame_type(irg);
	offset = get_type_size_bytes(frame_tp);
	for (ent = new_list; ent; ent = get_entity_link(ent)) {
		ir_type *tp = get_entity_type(ent);
		int align = get_type_alignment_bytes(tp);
		/* round the offset up to the entity's alignment */
		offset += align - 1;
		set_entity_owner(ent, frame_tp);
		add_class_member(frame_tp, ent);
		/* must be automatic to set a fixed layout */
		set_entity_allocation(ent, allocation_automatic);
		set_entity_offset_bytes(ent, offset);
		offset += get_type_size_bytes(tp);
	set_type_size_bytes(frame_tp, offset);
1436 * Modify the irg itself and the frame type.
static void modify_irg(be_abi_irg_t *env)
	be_abi_call_t *call = env->call;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *end = get_irg_end_block(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *mem = get_irg_initial_mem(irg);
	ir_type *method_type = get_entity_type(get_irg_entity(irg));
	pset *dont_save = pset_new_ptr(8);
	const arch_register_t *fp_reg;
	ir_node *frame_pointer;
	ir_node *reg_params_bl;
	const ir_edge_t *edge;
	ir_type *arg_type, *bet_type;
	lower_frame_sels_env_t ctx;
	bitset_t *used_proj_nr;
	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
	/* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
	ctx.value_param_list = NULL;
	irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
	env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
	env->regs = pmap_create();
	used_proj_nr = bitset_alloca(1024);
	n_params = get_method_n_params(method_type);
	args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
	memset(args, 0, n_params * sizeof(args[0]));
	/* Check if a value parameter is transmitted as a register.
	 * This might happen if the address of an parameter is taken which is
	 * transmitted in registers.
	 * Note that on some architectures this case must be handled specially
	 * because the place of the backing store is determined by their ABI.
	 * In the default case we move the entity to the frame type and create
	 * a backing store into the first block.
	fix_address_of_parameter_access(env, ctx.value_param_list);
	/* Fill the argument vector */
	arg_tuple = get_irg_args(irg);
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		int nr = get_Proj_proj(irn);
		DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
	arg_type = compute_arg_type(env, call, method_type);
	bet_type = call->cb->get_between_type(env->cb);
	stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
	/* Count the register params and add them to the number of Projs for the RegParams node */
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg->in_reg && args[i]) {
			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
			assert(i == get_Proj_proj(args[i]));
			/* For now, associate the register with the old Proj from Start representing that argument. */
			pmap_insert(env->regs, (void *) arg->reg, args[i]);
			bitset_set(used_proj_nr, i);
			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
	/* Collect all callee-save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
				pmap_insert(env->regs, (void *) reg, NULL);
	/* Stack and base pointer are always part of the map. */
	pmap_insert(env->regs, (void *) sp, NULL);
	pmap_insert(env->regs, (void *) isa->bp, NULL);
	reg_params_bl = get_irg_start_block(irg);
	env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
	 * make proj nodes for the callee save registers.
	 * memorize them, since Return nodes get those as inputs.
	 * Note, that if a register corresponds to an argument, the regs map contains
	 * the old Proj from start for that argument.
	rm = reg_map_to_arr(&env->obst, env->regs);
	for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
		arch_register_t *reg = (void *) rm[i].reg;
		ir_node *arg_proj = rm[i].irn;
		ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
		int pos = BE_OUT_POS((int) nr);
		bitset_set(used_proj_nr, nr);
		proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
		pmap_insert(env->regs, (void *) reg, proj);
		be_set_constr_single_reg(env->reg_params, pos, reg);
		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
		 * If the register is an ignore register,
		 * The Proj for that register shall also be ignored during register allocation.
		if(arch_register_type_is(reg, ignore))
			flags |= arch_irn_flags_ignore;
		flags |= arch_irn_flags_modify_sp;
		be_node_set_flags(env->reg_params, pos, flags);
		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
	obstack_free(&env->obst, rm);
	/* Generate the Prologue */
	fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
	/* do the stack allocation BEFORE the barrier, or spill code
	   might be added before it */
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
	be_abi_reg_map_set(env->regs, sp, env->init_sp);
	barrier = create_barrier(env, bl, &mem, env->regs, 0);
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
	set_irg_frame(irg, frame_pointer);
	pset_insert_ptr(env->ignore_regs, fp_reg);
	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for(i = 0; i < n_params; ++i) {
		ir_node *arg_proj = args[i];
		ir_node *repl = NULL;
		if(arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			ir_type *param_type;
			int nr = get_Proj_proj(arg_proj);
			/* NOTE(review): MIN clamps nr to n_params (not n_params - 1);
			   presumably stray Proj numbers cannot occur here — confirm. */
			nr = MIN(nr, n_params);
			arg = get_call_arg(call, 0, nr);
			param_type = get_method_param_type(method_type, nr);
			repl = pmap_get(env->regs, (void *) arg->reg);
			else if(arg->on_stack) {
				/* For atomic parameters which are actually used, we create a StackParam node. */
				if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
					ir_mode *mode = get_type_mode(param_type);
					const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
					repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
				/* The stack parameter is not primitive (it is a struct or array),
				   we thus will create a node representing the parameter's address
				repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
			assert(repl != NULL);
			edges_reroute(args[i], repl, irg);
	/* All Return nodes hang on the End node, so look for them there. */
	for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
		ir_node *irn = get_Block_cfgpred(end, i);
		if (is_Return(irn)) {
			ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
	/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
	   the code is dead and will never be executed. */
	del_pset(dont_save);
	obstack_free(&env->obst, args);
 * Walker: puts all Alloc(stack_alloc) nodes on an obstack
1655 static void collect_alloca_walker(ir_node *irn, void *data)
1657 be_abi_irg_t *env = data;
1658 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1659 obstack_ptr_grow(&env->obst, irn);
/** Builds the ABI environment for a backend irg and applies the ABI transformation. */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	be_abi_irg_t *env = xmalloc(sizeof(env[0]));
	ir_node *old_frame = get_irg_frame(birg->irg);
	ir_graph *irg = birg->irg;
	optimization_state_t state;
	obstack_init(&env->obst);
	env->isa = birg->main_env->arch_env->isa;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call = be_abi_call_new();
	/* Let the ISA fill in the calling convention for this method type. */
	arch_isa_get_call_abi(env->isa, env->method_type, env->call);
	env->ignore_regs = pset_new_ptr_default();
	env->keep_map = pmap_create();
	env->dce_survivor = new_survive_dce();
	env->stack_phis = pset_new_ptr(16);
	/* Beware: later we replace this node by the real one, ensure it is not CSE'd
	   to another Unknown or the stack pointer gets used */
	save_optimization_state(&state);
	env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
	restore_optimization_state(&state);
	FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
	env->irn_ops.impl = &abi_irn_ops;
	/* Lower all call nodes in the IRG. */
	/* Process the IRG */
	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	/* reroute the stack origin of the calls to the true stack origin. */
	edges_reroute(dummy, env->init_sp, irg);
	edges_reroute(old_frame, get_irg_frame(irg), irg);
	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	pmap_foreach(env->regs, ent)
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
	env->call->cb->done(env->cb);
/** Releases all resources held by the ABI environment. */
void be_abi_free(be_abi_irg_t *env)
	free_survive_dce(env->dce_survivor);
	del_pset(env->stack_phis);
	del_pset(env->ignore_regs);
	pmap_destroy(env->regs);
	/* Frees everything ever allocated on the environment's obstack. */
	obstack_free(&env->obst, NULL);
	arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1733 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
1735 arch_register_t *reg;
1737 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
1738 if(reg->reg_class == cls)
1739 bitset_set(bs, reg->index);
1746 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1747 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1748 | _| | |> < ___) | || (_| | (__| <
1749 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/** Environment for collect_stack_nodes_walker(). */
struct fix_stack_walker_info {
	const arch_env_t *aenv; /**< The arch env used to query node properties. */
1759 * Walker. Collect all stack modifying nodes.
1761 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1763 struct fix_stack_walker_info *info = data;
1765 if(arch_irn_is(info->aenv, irn, modify_sp))
1766 pset_insert_ptr(info->nodes, irn);
1769 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1771 dom_front_info_t *df;
1772 pset *stack_nodes = pset_new_ptr(16);
1773 struct fix_stack_walker_info info;
1775 info.nodes = stack_nodes;
1776 info.aenv = env->birg->main_env->arch_env;
1778 /* We need dominance frontiers for fix up */
1779 df = be_compute_dominance_frontiers(env->birg->irg);
1780 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
1781 pset_insert_ptr(stack_nodes, env->init_sp);
1782 be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1783 del_pset(stack_nodes);
1785 /* Liveness could have changed due to Phi nodes. */
1786 be_liveness(env->birg->irg);
1788 /* free these dominance frontiers */
1789 be_free_dominance_frontiers(df);
 * Translates the direction of an IncSP node (either be_stack_dir_shrink or
 * be_stack_dir_expand) into -1 or 1, respectively.
 * @param irn The IncSP node.
 * @return 1 if the IncSP expands the stack, -1 if it shrinks it.
1798 static int get_dir(ir_node *irn)
1800 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
	const arch_env_t *aenv = env->birg->main_env->arch_env;
	int omit_fp = env->call->flags.bits.try_omit_fp;
	/* Walk the schedule of the block, tracking the SP displacement. */
	sched_foreach(bl, irn) {
		If the node modifies the stack pointer by a constant offset,
		record that in the bias.
		if(be_is_IncSP(irn)) {
			int ofs = be_get_IncSP_offset(irn);
			int dir = get_dir(irn);
			/* BE_STACK_FRAME_SIZE is a placeholder; patch in the real frame size now. */
			if(ofs == BE_STACK_FRAME_SIZE) {
				ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
				be_set_IncSP_offset(irn, ofs);
		Else check, if the node relates to an entity on the stack frame.
		If so, set the true offset (including the bias) for that
		entity *ent = arch_get_frame_entity(aenv, irn);
		int offset = get_stack_entity_offset(env->frame, ent, bias);
		arch_set_frame_offset(aenv, irn, offset);
		DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1847 * A helper struct for the bias walker.
1850 be_abi_irg_t *env; /**< The ABI irg environment. */
1851 int start_block_bias; /**< The bias at the end of the start block. */
1852 ir_node *start_block; /**< The start block of the current graph. */
1856 * Block-Walker: fix all stack offsets
1858 static void stack_bias_walker(ir_node *bl, void *data)
1860 struct bias_walk *bw = data;
1861 if (bl != bw->start_block) {
1862 process_stack_bias(bw->env, bl, bw->start_block_bias);
/** Computes and applies the stack offsets for all frame entities in the irg. */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;
	struct bias_walk bw;
	stack_frame_compute_initial_offset(env->frame);
	// stack_layout_dump(stdout, env->frame);
	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
	/* fix the bias in all other blocks */
	bw.start_block = get_irg_start_block(irg);
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1883 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1885 assert(arch_register_type_is(reg, callee_save));
1886 assert(pmap_contains(abi->regs, (void *) reg));
1887 return pmap_get(abi->regs, (void *) reg);
1891 _____ _____ _ _ _ _ _ _
1892 |_ _| __ \| \ | | | | | | | | |
1893 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1894 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1895 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1896 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1898 for Phi nodes which are created due to stack modifying nodes
1899 such as IncSP, AddSP and SetSP.
1901 These Phis are always to be ignored by the reg alloc and are
1902 fixed on the SP register of the ISA.
static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
	const be_abi_irg_t *abi = get_abi_from_handler(handler);
	const void *res = NULL;
	/* Only Phi nodes inserted for stack fixup get the ABI's irn ops. */
	if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
		res = &abi->irn_ops;
1916 static void be_abi_limited(void *data, bitset_t *bs)
1918 be_abi_irg_t *abi = data;
1919 bitset_clear_all(bs);
1920 bitset_set(bs, abi->isa->sp->index);
static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
	be_abi_irg_t *abi = get_abi_from_ops(self);
	const arch_register_t *reg = abi->isa->sp;
	memset(req, 0, sizeof(req[0]));
	/* The output must be the stack pointer register (limited requirement). */
	if(pos == BE_OUT_POS(0)) {
		req->cls = reg->reg_class;
		req->type = arch_register_req_type_limited;
		req->limited = be_abi_limited;
		req->limited_env = abi;
	/* Inputs only require the stack pointer's register class. */
	else if(pos >= 0 && pos < get_irn_arity(irn)) {
		req->cls = reg->reg_class;
		req->type = arch_register_req_type_normal;
1945 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1949 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1951 const be_abi_irg_t *abi = get_abi_from_ops(self);
1952 return abi->isa->sp;
1955 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1957 return arch_irn_class_normal;
1960 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1962 return arch_irn_flags_ignore | arch_irn_flags_modify_sp;
1965 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1970 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
1974 static const arch_irn_ops_if_t abi_irn_ops = {
1975 abi_get_irn_reg_req,
1980 abi_get_frame_entity,
1984 static const arch_irn_handler_t abi_irn_handler = {