4 * @author Sebastian Hack
19 #include "irgraph_t.h"
22 #include "iredges_t.h"
25 #include "irprintf_t.h"
33 #include "besched_t.h"
35 #define MAX(x, y) ((x) > (y) ? (x) : (y))
36 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/** Describes how one call argument (or result) is transmitted. */
typedef struct _be_abi_call_arg_t {
	unsigned is_res : 1;    /**< 1: the call argument is a return value. 0: it's a call parameter. */
	unsigned in_reg : 1;    /**< 1: this argument is transmitted in registers. */
	unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */

	const arch_register_t *reg;  /**< The register used, valid iff in_reg is set. */

	unsigned space_before;       /**< Padding in bytes inserted before this argument on the stack. */
/** An ABI call object: flags, backend callbacks and per-argument info for one call type. */
struct _be_abi_call_t {
	be_abi_call_flags_t flags;      /**< ABI flags for this call (omit fp, store order, ...). */
	const be_abi_callbacks_t *cb;   /**< Backend-supplied ABI callbacks (prologue/epilogue etc.). */
	ir_type *between_type;          /**< The "between" type (return address etc.) for this call. */
58 #define N_FRAME_TYPES 3
61 * This type describes the stack layout.
62 * The stack is divided into 3 parts:
63 * - arg_type: A struct type describing the stack arguments and it's order.
64 * - between_type: A struct type describing the stack layout between arguments
 * - frame_type:   A class type describing the frame layout
typedef struct _be_stack_layout_t {
	ir_type *arg_type;                 /**< A type describing the stack argument layout. */
	ir_type *between_type;             /**< A type describing the "between" layout. */
	ir_type *frame_type;               /**< The frame type. */

	/* order[] is filled by stack_frame_init() depending on stack_dir. */
	ir_type *order[N_FRAME_TYPES];     /**< arg, between and frame types ordered. */

	int stack_dir;                     /**< -1 for decreasing, 1 for increasing. */
/** Per-irg ABI environment carried through all phases of ABI lowering. */
struct _be_abi_irg_t {
	be_stack_layout_t *frame;           /**< The stack frame model. */
	const be_irg_t *birg;               /**< The back end IRG. */
	const arch_isa_t *isa;              /**< The isa. */
	survive_dce_t *dce_survivor;        /**< Keeps tracked nodes alive across dead code elimination. */

	be_abi_call_t *call;                /**< The ABI call information. */
	ir_type *method_type;               /**< The type of the method of the IRG. */

	ir_node *init_sp;                   /**< The node representing the stack pointer
	                                         at the start of the function. */

	ir_node *reg_params;                /**< The reg params node. */
	pmap *regs;                         /**< A map of all callee-save and ignore regs to
	                                         their Projs to the RegParams node. */

	pset *stack_phis;                   /**< The set of all Phi nodes inserted due to
	                                         stack pointer modifying nodes. */

	int start_block_bias;               /**< The stack bias at the end of the start block. */

	void *cb;                           /**< ABI Callback self pointer. */

	pmap *keep_map;                     /**< mapping blocks to keep nodes. */
	pset *ignore_regs;                  /**< Additional registers which shall be ignored. */

	arch_irn_handler_t irn_handler;     /**< irn handler registered for the be_* nodes created here. */
	arch_irn_ops_t irn_ops;             /**< irn ops for those nodes (see get_abi_from_ops()). */
	DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
111 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
112 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
/* Forward declarations, since we need them in be_abi_introduce(). */
115 static const arch_irn_ops_if_t abi_irn_ops;
116 static const arch_irn_handler_t abi_irn_handler;
118 /* Flag: if set, try to omit the frame pointer if called by the backend */
122 _ ____ ___ ____ _ _ _ _
123 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
124 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
125 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
126 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
128 These callbacks are used by the backend to set the parameters
129 for a specific call type.
133 * Set compare function: compares two ABI call object arguments.
135 static int cmp_call_arg(const void *a, const void *b, size_t n)
137 const be_abi_call_arg_t *p = a, *q = b;
138 return !(p->is_res == q->is_res && p->pos == q->pos);
142 * Get or set an ABI call object argument.
144 * @param call the abi call
145 * @param is_res true for call results, false for call arguments
146 * @param pos position of the argument
147 * @param do_insert true if the argument is set, false if it's retrieved
149 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
151 be_abi_call_arg_t arg;
154 memset(&arg, 0, sizeof(arg));
158 hash = is_res * 128 + pos;
161 ? set_insert(call->params, &arg, sizeof(arg), hash)
162 : set_find(call->params, &arg, sizeof(arg), hash);
166 * Retrieve an ABI call object argument.
168 * @param call the ABI call object
169 * @param is_res true for call results, false for call arguments
170 * @param pos position of the argument
172 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
174 return get_or_set_call_arg(call, is_res, pos, 0);
177 /* Set the flags for a call. */
178 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
184 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
186 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
188 arg->alignment = alignment;
189 arg->space_before = space_before;
190 arg->space_after = space_after;
191 assert(alignment > 0 && "Alignment must be greater than 0");
194 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
196 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
201 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
203 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
208 /* Get the flags of a ABI call object. */
209 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
215 * Constructor for a new ABI call object.
217 * @return the new ABI call object
219 static be_abi_call_t *be_abi_call_new()
221 be_abi_call_t *call = xmalloc(sizeof(call[0]));
223 call->params = new_set(cmp_call_arg, 16);
226 call->flags.bits.try_omit_fp = be_omit_fp;
231 * Destructor for an ABI call object.
233 static void be_abi_call_free(be_abi_call_t *call)
235 del_set(call->params);
241 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
242 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
243 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
244 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
247 Handling of the stack frame. It is composed of three types:
248 1) The type of the arguments which are pushed on the stack.
249 2) The "between type" which consists of stuff the call of the
250 function pushes on the stack (like the return address and
251 the old base pointer for ia32).
252 3) The Firm frame type which consists of all local variables
/**
 * Computes the offset of a stack frame entity relative to the frame
 * pointer position, taking the given stack bias into account.
 * NOTE(review): the declarations of `index'/`i' and the final return are
 * not visible in this chunk; comments below cover the visible part only.
 */
static int get_stack_entity_offset(be_stack_layout_t *frame, entity *ent, int bias)
	ir_type *t = get_entity_owner(ent);      /* the frame sub-type owning the entity */
	int ofs = get_entity_offset_bytes(ent);  /* offset inside the owning type */

	/* Find the type the entity is contained in. */
	for(index = 0; index < N_FRAME_TYPES; ++index) {
		if(frame->order[index] == t)

	/* Add the size of all the types below the one of the entity to the entity's offset */
	for(i = 0; i < index; ++i)
		ofs += get_type_size_bytes(frame->order[i]);

	/* correct the offset by the initial position of the frame pointer */
	ofs -= frame->initial_offset;

	/* correct the offset with the current bias. */
283 * Retrieve the entity with given offset from a frame type.
285 static entity *search_ent_with_offset(ir_type *t, int offset)
289 for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
290 entity *ent = get_compound_member(t, i);
291 if(get_entity_offset_bytes(ent) == offset)
298 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
300 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
301 entity *ent = search_ent_with_offset(base, 0);
302 frame->initial_offset = 0;
303 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
304 return frame->initial_offset;
308 * Initializes the frame layout from parts
310 * @param frame the stack layout that will be initialized
311 * @param args the stack argument layout type
312 * @param between the between layout type
313 * @param locals the method frame type
314 * @param stack_dir the stack direction
316 * @return the initialized stack layout
318 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
319 ir_type *between, ir_type *locals, int stack_dir)
321 frame->arg_type = args;
322 frame->between_type = between;
323 frame->frame_type = locals;
324 frame->initial_offset = 0;
325 frame->stack_dir = stack_dir;
326 frame->order[1] = between;
329 frame->order[0] = args;
330 frame->order[2] = locals;
333 frame->order[0] = locals;
334 frame->order[2] = args;
/** Dumps the stack layout to file. */
static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
	ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
	/* print each of the three frame sub-types in stack order ... */
	for (j = 0; j < N_FRAME_TYPES; ++j) {
		ir_type *t = frame->order[j];

		ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
		/* ... and every member entity with its local and global offset */
		for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
			entity *ent = get_compound_member(t, i);
			ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
357 * Returns non-zero if the call argument at given position
358 * is transfered on the stack.
360 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
362 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
363 return arg && !arg->in_reg;
373 Adjustment of the calls inside a graph.
378 * Transform a call node.
379 * @param env The ABI environment for the current irg.
380 * @param irn The call node.
381 * @param curr_sp The stack pointer node to use.
382 * @return The stack pointer after the call.
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
	ir_graph *irg             = env->birg->irg;
	const arch_isa_t *isa     = env->birg->main_env->arch_env->isa;
	be_abi_call_t *call       = be_abi_call_new();
	ir_type *mt               = get_Call_type(irn);
	ir_node *call_ptr         = get_Call_ptr(irn);
	int n_params              = get_method_n_params(mt);
	ir_node *curr_mem         = get_Call_mem(irn);
	ir_node *bl               = get_nodes_block(irn);
	pset *results             = pset_new_ptr(8);
	pset *caller_save         = pset_new_ptr(8);
	int stack_dir             = arch_isa_stack_dir(isa);
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_mode *mach_mode        = sp->reg_class->mode;
	struct obstack *obst      = &env->obst;
	ir_node *no_mem           = get_irg_no_mem(irg);
	int no_alloc              = call->flags.bits.frame_is_setup_on_call;

	ir_node *res_proj = NULL;
	int curr_res_proj = pn_Call_max;
	const ir_edge_t *edge;

	/* Let the isa fill out the abi description for that call node. */
	arch_isa_get_call_abi(isa, mt, call);

	/* Insert code to put the stack arguments on the stack. */
	assert(get_Call_n_params(irn) == n_params);
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
			/* accumulate the stack space this argument needs */
			stack_size += arg->space_before;
			stack_size = round_up2(stack_size, arg->alignment);
			stack_size += get_type_size_bytes(get_method_param_type(mt, i));
			stack_size += arg->space_after;
			obstack_int_grow(obst, i);
	pos = obstack_finish(obst);

	/* Collect all arguments which are passed in registers. */
	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg && arg->in_reg) {
			obstack_int_grow(obst, i);
	low_args = obstack_finish(obst);

	/* If there are some parameters which shall be passed on the stack. */
		int do_seq = call->flags.bits.store_args_sequential && !no_alloc;

		/*
		 * Reverse the list of stack parameters if call arguments are from left to right.
		 * We must reverse them again when they are pushed (not stored) and the stack
		 * direction is downwards.
		 */
		if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
			for(i = 0; i < n_pos >> 1; ++i) {
				int other = n_pos - i - 1;

		/*
		 * If the stack is decreasing and we do not want to store sequentially,
		 * or someone else allocated the call frame
		 * we allocate as much space on the stack all parameters need, by
		 * moving the stack pointer along the stack's direction.
		 */
		if(stack_dir < 0 && !do_seq && !no_alloc) {
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);

		assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
		for(i = 0; i < n_pos; ++i) {
			be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
			ir_node *param         = get_Call_param(irn, p);
			ir_node *addr          = curr_sp;
			ir_type *param_type    = get_method_param_type(mt, p);
			int param_size         = get_type_size_bytes(param_type) + arg->space_after;

			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next must be incremented,
			 * and the memory value propagated.
			 */
				addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
					param_size + arg->space_before, be_stack_dir_expand);

				curr_ofs += arg->space_before;
				curr_ofs  = round_up2(curr_ofs, arg->alignment);

				/* Make the expression to compute the argument's offset. */
					addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
					addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);

			/* Insert a store for primitive arguments. */
			if (is_atomic_type(param_type)) {
				mem = new_r_Store(irg, bl, curr_mem, addr, param);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);

			/* Make a mem copy for compound arguments. */
				assert(mode_is_reference(get_irn_mode(param)));
				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);

			curr_ofs += param_size;

			obstack_ptr_grow(obst, mem);

		in = (ir_node **) obstack_finish(obst);

		/* We need the sync only, if we didn't build the stores sequentially. */
			curr_mem = new_r_Sync(irg, bl, n_pos, in);
		obstack_free(obst, in);

	/* Collect caller save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if(arch_register_type_is(reg, caller_save))
				pset_insert_ptr(caller_save, (void *) reg);

	/* search the greatest result proj number */

	/* TODO: what if the result is NOT used? Currently there is
	 * no way to detect this later, especially there is no way to
	 * see this in the proj numbers.
	 * While this is ok for the register allocator, it is bad for
	 * backends which need to change the be_Call further (x87 simulator
	 * for instance. However for this particular case the call_type is
	 */
	foreach_out_edge(irn, edge) {
		const ir_edge_t *res_edge;
		/* deliberately shadows the Call `irn': here it is a result Proj */
		ir_node *irn = get_edge_src_irn(edge);

		if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
			foreach_out_edge(irn, res_edge) {
				be_abi_call_arg_t *arg;
				ir_node *res = get_edge_src_irn(res_edge);

				assert(is_Proj(res));
				proj = get_Proj_proj(res);
				arg = get_call_arg(call, 1, proj);

				/*
					shift the proj number to the right, since we will drop the
					unspeakable Proj_T from the Call. Therefore, all real argument
					Proj numbers must be increased by pn_be_Call_first_res
				*/
				proj += pn_be_Call_first_res;
				set_Proj_proj(res, proj);
				obstack_ptr_grow(obst, res);

				if(proj > curr_res_proj)
					curr_res_proj = proj;
					/* this result lives in a register: that register is not clobbered */
					pset_remove_ptr(caller_save, arg->reg);
				//pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))

	obstack_ptr_grow(obst, NULL);
	res_projs = obstack_finish(obst);

	/* make the back end call node and set its register requirements. */
	for(i = 0; i < n_low_args; ++i)
		obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));

	in = obstack_finish(obst);

	/* calls to a known entity (SymConst address) become immediate calls */
	if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
		low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
			curr_res_proj + pset_count(caller_save), n_low_args, in,
		be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
		low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
			curr_res_proj + pset_count(caller_save), n_low_args, in,

	/*
		Set the register class of the call address to the same as the stack pointer's.
		That's probably buggy for some architectures.
	*/
	be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);

	/* Set the register classes and constraints of the Call parameters. */
	for(i = 0; i < n_low_args; ++i) {
		int index = low_args[i];
		be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
		assert(arg->reg != NULL);

		be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);

	/* Set the register constraints of the results. */
	for(i = 0; res_projs[i]; ++i) {
		ir_node *irn = res_projs[i];
		int proj = get_Proj_proj(irn);

		/* Correct Proj number since it has been adjusted! (see above) */
		const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);

		be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
	obstack_free(obst, in);
	exchange(irn, low_call);

	/* redirect the result projs to the lowered call instead of the Proj_T */
	for(i = 0; res_projs[i]; ++i)
		set_Proj_pred(res_projs[i], low_call);

	/* Make additional projs for the caller save registers
	   and the Keep node which keeps them alive. */
	if(pset_count(caller_save) > 0) {
		const arch_register_t *reg;

		for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
			ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);

			/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
			be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
			set_irn_link(proj, (void *) reg);
			obstack_ptr_grow(obst, proj);

		in = (ir_node **) obstack_finish(obst);
		keep = be_new_Keep(NULL, irg, bl, n, in);
		for(i = 0; i < n; ++i) {
			const arch_register_t *reg = get_irn_link(in[i]);
			be_node_set_reg_class(keep, i, reg->reg_class);
		obstack_free(obst, in);

	/* Clean up the stack. */
		ir_node *mem_proj = NULL;

		/* reuse an existing memory Proj of the call if there is one */
		foreach_out_edge(low_call, edge) {
			ir_node *irn = get_edge_src_irn(edge);
			if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {

			mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);

		/* Clean up the stack frame if we allocated it */
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);

	be_abi_call_free(call);
	obstack_free(obst, pos);

	del_pset(caller_save);
703 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
	if (get_Alloc_where(alloc) == stack_alloc) {
		ir_node *bl        = get_nodes_block(alloc);
		ir_graph *irg      = get_irn_irg(bl);
		ir_node *alloc_mem = NULL;
		ir_node *alloc_res = NULL;

		const ir_edge_t *edge;

		/* find the Alloc's memory and result Projs */
		foreach_out_edge(alloc, edge) {
			ir_node *irn = get_edge_src_irn(edge);

			assert(is_Proj(irn));
			switch(get_Proj_proj(irn)) {

		/* Beware: currently Alloc nodes without a result might happen,
		   only escape analysis kills them and this phase runs only for object
		   oriented source. We kill the Alloc here. */
		if (alloc_res == NULL) {
			exchange(alloc_mem, get_Alloc_mem(alloc));

		/* The stack pointer will be modified in an unknown manner.
		   We cannot omit it. */
		env->call->flags.bits.try_omit_fp = 0;
		new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));

		/* on a decreasing stack the new sp is the allocated area's address */
		exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);

		if(alloc_mem != NULL)
			exchange(alloc_mem, new_r_NoMem(irg));
757 * Walker for dependent_on().
758 * This function searches a node tgt recursively from a given node
759 * but is restricted to the given block.
760 * @return 1 if tgt was reachable from curr, 0 if not.
static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
	/* already visited in this traversal: no new path through here */
	if(get_irn_visited(curr) >= visited_nr)

	set_irn_visited(curr, visited_nr);
	/* the search is restricted to the block bl */
	if(get_nodes_block(curr) != bl)

	/* depth-first recursion over all operands */
	for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
		if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
785 * Check if a node is somehow data dependent on another one.
786 * both nodes must be in the same basic block.
787 * @param n1 The first node.
788 * @param n2 The second node.
789 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
791 static int dependent_on(ir_node *n1, ir_node *n2)
793 ir_node *bl = get_nodes_block(n1);
794 ir_graph *irg = get_irn_irg(bl);
795 long vis_nr = get_irg_visited(irg) + 1;
797 assert(bl == get_nodes_block(n2));
798 set_irg_visited(irg, vis_nr);
799 return check_dependence(n1, n2, bl, vis_nr);
802 static int cmp_call_dependecy(const void *c1, const void *c2)
804 ir_node *n1 = *(ir_node **) c1;
805 ir_node *n2 = *(ir_node **) c2;
808 Classical qsort() comparison function behavior:
809 0 if both elements are equal
810 1 if second is "smaller" that first
811 -1 if first is "smaller" that second
813 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
817 * Walker: links all Call nodes to the Block they are contained.
/* NOTE(review): a guard restricting this body to Call nodes is not visible
 * in this chunk; as shown, it runs for the walked node unconditionally. */
static void link_calls_in_block_walker(ir_node *irn, void *data)
	be_abi_irg_t *env = data;
	ir_node *bl = get_nodes_block(irn);
	void *save = get_irn_link(bl);

	/* a call was found, so the graph is no leaf procedure */
	env->call->flags.bits.irg_is_leaf = 0;

	/* prepend the node to the block's list threaded through the link fields */
	set_irn_link(irn, save);
	set_irn_link(bl, irn);
835 * Process all Call nodes inside a basic block.
836 * Note that the link field of the block must contain a linked list of all
837 * Call nodes inside the Block. We first order this list according to data dependency
838 * and that connect the calls together.
static void process_calls_in_block(ir_node *bl, void *data)
	be_abi_irg_t *env = data;
	ir_node *curr_sp = env->init_sp;

	/* collect the nodes linked to this block by link_calls_in_block_walker() */
	for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
		obstack_ptr_grow(&env->obst, irn);

	/* If there were call nodes in the block. */
		nodes = obstack_finish(&env->obst);

		/* order the call nodes according to data dependency */
		qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);

		/* walk back to front so the stack pointer is threaded in schedule order */
		for(i = n - 1; i >= 0; --i) {
			ir_node *irn = nodes[i];

			switch(get_irn_opcode(irn)) {
				curr_sp = adjust_call(env, irn, curr_sp);
				curr_sp = adjust_alloc(env, irn, curr_sp);
		obstack_free(&env->obst, nodes);

		/* Keep the last stack state in the block by tying it to Keep node */
			keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
			pmap_insert(env->keep_map, bl, keep);

	/* remember the block's final stack state for the epilogue code */
	set_irn_link(bl, curr_sp);
888 * Adjust all call nodes in the graph to the ABI conventions.
890 static void process_calls(be_abi_irg_t *env)
892 ir_graph *irg = env->birg->irg;
894 env->call->flags.bits.irg_is_leaf = 1;
895 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
896 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
899 static void collect_return_walker(ir_node *irn, void *data)
901 if(get_irn_opcode(irn) == iro_Return) {
902 struct obstack *obst = data;
903 obstack_ptr_grow(obst, irn);
/* Builds the function prologue: sets up stack and frame pointer. */
static ir_node *setup_frame(be_abi_irg_t *env)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	be_abi_call_flags_bits_t flags = env->call->flags.bits;
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *old_frame = get_irg_frame(irg);
	/* the sp/bp Projs produced by the RegParams node */
	ir_node *stack = pmap_get(env->regs, (void *) sp);
	ir_node *frame = pmap_get(env->regs, (void *) bp);

	int stack_nr = get_Proj_proj(stack);

	if(flags.try_omit_fp) {
		/* no frame pointer: simply grow the stack; addressing is sp-relative */
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);

		/* materialize the frame pointer as a copy of the stack pointer */
		frame = be_new_Copy(bp->reg_class, irg, bl, stack);

		be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);

			/* pin the copy to the bp register and mark it ignored by the allocator */
			be_set_constr_single_reg(frame, -1, bp);
			be_node_set_flags(frame, -1, arch_irn_flags_ignore);
			arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);

		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);

	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
	env->init_sp = stack;
	set_irg_frame(irg, frame);
	/* let all users of the old frame node use the new one */
	edges_reroute(old_frame, frame, irg);
/* Builds the epilogue for one Return: tears the stack frame down again. */
static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	ir_graph *irg = env->birg->irg;
	ir_node *ret_mem = get_Return_mem(ret);
	ir_node *frame = get_irg_frame(irg);
	ir_node *bl = get_nodes_block(ret);
	/* the block's final stack state, left by process_calls_in_block() */
	ir_node *stack = get_irn_link(bl);

	if(env->call->flags.bits.try_omit_fp) {
		/* no frame pointer: undo the stack growth of setup_frame() */
		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);

		/* restore the stack pointer from the frame pointer */
		stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
		be_set_constr_single_reg(stack, -1, sp);
		be_node_set_flags(stack, -1, arch_irn_flags_ignore);

	/* collect the values the Return must keep alive */
	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		ir_node *irn = ent->value;

			/* NOTE(review): stack/frame grow onto &env->obst while the
			   callee-save regs below grow onto the obst parameter --
			   verify these are the same obstack. */
			obstack_ptr_grow(&env->obst, stack);
			obstack_ptr_grow(&env->obst, frame);
		else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			obstack_ptr_grow(obst, irn);
988 * Computes the stack argument layout type.
989 * Changes a possibly allocated value param type by moving
990 * entities to the stack layout type.
992 * @param env the ABI environment
993 * @param call the current call ABI
994 * @param method_type the method type
996 * @return the stack argument layout type
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
	/* walk the parameters in stack layout order: direction depends on
	   calling convention and stack growth direction */
	int dir  = env->call->flags.bits.left_to_right ? 1 : -1;
	int inc  = env->birg->main_env->arch_env->isa->stack_dir * dir;
	int n    = get_method_n_params(method_type);
	int curr = inc > 0 ? 0 : n - 1;

	ir_type *val_param_tp = get_method_value_param_type(method_type);
	ident *id = get_entity_ident(get_irg_entity(env->birg->irg));

	res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
	for (i = 0; i < n; ++i, curr += inc) {
		ir_type *param_type = get_method_param_type(method_type, curr);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);

		if (arg->on_stack) {
				/* the entity was already created, move it to the param type */
				arg->stack_ent = get_method_value_param_ent(method_type, i);
				remove_struct_member(val_param_tp, arg->stack_ent);
				set_entity_owner(arg->stack_ent, res);
				add_struct_member(res, arg->stack_ent);
				/* must be automatic to set a fixed layout */
				set_entity_allocation(arg->stack_ent, allocation_automatic);

				snprintf(buf, sizeof(buf), "param_%d", i);
				arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
			/* lay the entity out at the next suitably aligned offset */
			ofs += arg->space_before;
			ofs = round_up2(ofs, arg->alignment);
			set_entity_offset_bytes(arg->stack_ent, ofs);
			/* NOTE(review): space_after is accumulated before the parameter
			   size here -- verify this ordering is intended. */
			ofs += arg->space_after;
			ofs += get_type_size_bytes(param_type);

	set_type_size_bytes(res, ofs);
	/* layout is complete and must not be recomputed */
	set_type_state(res, layout_fixed);
/* Creates one Perm per register class after the RegParams node to delimit it. */
static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
	struct obstack obst;

	obstack_init(&obst);

	/* Create a Perm after the RegParams node to delimit it. */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);

		/* collect this class' mapped nodes; remember each register in the link */
		for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			ir_node *irn = pmap_get(regs, (void *) reg);

			if(irn && !arch_register_type_is(reg, ignore)) {
				obstack_ptr_grow(&obst, irn);
				set_irn_link(irn, (void *) reg);

		obstack_ptr_grow(&obst, NULL);
		in = obstack_finish(&obst);

		perm = be_new_Perm(cls, irg, bl, n_regs, in);
		/* re-map each register and constrain the Perm's outputs */
		for(j = 0; j < n_regs; ++j) {
			ir_node *arg = in[j];
			arch_register_t *reg = get_irn_link(arg);

			pmap_insert(regs, reg, arg);
			be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);

		obstack_free(&obst, in);

	obstack_free(&obst, NULL);
1086 const arch_register_t *reg;
1090 static int cmp_regs(const void *a, const void *b)
1092 const reg_node_map_t *p = a;
1093 const reg_node_map_t *q = b;
1095 if(p->reg->reg_class == q->reg->reg_class)
1096 return p->reg->index - q->reg->index;
1098 return p->reg->reg_class - q->reg->reg_class;
1101 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1104 int n = pmap_count(reg_map);
1106 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1108 pmap_foreach(reg_map, ent) {
1109 res[i].reg = ent->key;
1110 res[i].irn = ent->value;
1114 qsort(res, n, sizeof(res[0]), cmp_regs);
1119 * Creates a barrier.
static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
	ir_graph *irg = env->birg->irg;
	int n_regs = pmap_count(regs);

	rm = reg_map_to_arr(&env->obst, regs);

	/* the barrier's ins: every mapped register node, then the memory */
	for(n = 0; n < n_regs; ++n)
		obstack_ptr_grow(&env->obst, rm[n].irn);

		obstack_ptr_grow(&env->obst, *mem);

	in = (ir_node **) obstack_finish(&env->obst);
	irn = be_new_Barrier(irg, bl, n, in);
	obstack_free(&env->obst, in);

	/* make one Proj per register and put it back into the map */
	for(n = 0; n < n_regs; ++n) {
		const arch_register_t *reg = rm[n].reg;

		int pos = BE_OUT_POS(n);

		proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
		be_node_set_reg_class(irn, n, reg->reg_class);
		/* NOTE(review): in_req presumably gates pinning the input to the
		   single register -- the guard is not visible in this chunk. */
			be_set_constr_single_reg(irn, n, reg);
		be_set_constr_single_reg(irn, pos, reg);
		be_node_set_reg_class(irn, pos, reg->reg_class);
		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);

		/* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
		if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
			flags |= arch_irn_flags_ignore;

		if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
			flags |= arch_irn_flags_modify_sp;

		be_node_set_flags(irn, pos, flags);

		pmap_insert(regs, (void *) reg, proj);

	/* the memory output of the barrier (last Proj, index n) */
	*mem = new_r_Proj(irg, bl, irn, mode_M, n);

	obstack_free(&env->obst, rm);
1179 * Creates a be_Return for a Return node.
1181 * @param @env the abi environment
1182 * @param irn the Return node or NULL if there was none
1183 * @param bl the block where the be_Retun should be placed
1184 * @param mem the current memory
1185 * @param n_res number of return results
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
	be_abi_call_t *call = env->call;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;

	pmap *reg_map = pmap_create();
	ir_node *keep = pmap_get(env->keep_map, bl);

	const arch_register_t **regs;

	/*
		get the valid stack node in this block.
		If we had a call in that block there is a Keep constructed by process_calls()
		which points to the last stack modification in that block. we'll use
		it then. Else we use the stack from the start block and let
		the ssa construction fix the usage.
	*/
	stack = be_abi_reg_map_get(env->regs, isa->sp);
		ir_node *bad = new_r_Bad(env->birg->irg);
		/* take the stack state out of the Keep and disconnect the Keep */
		stack = get_irn_n(keep, 0);
		set_nodes_block(keep, bad);
		set_irn_n(keep, 0, bad);
		// exchange(keep, new_r_Bad(env->birg->irg));

	/* Insert results for Return into the register map. */
	for(i = 0; i < n_res; ++i) {
		ir_node *res = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);

		assert(arg->in_reg && "return value must be passed in register");
		pmap_insert(reg_map, (void *) arg->reg, res);

	/* Add uses of the callee save registers. */
	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			pmap_insert(reg_map, ent->key, ent->value);

	be_abi_reg_map_set(reg_map, isa->sp, stack);

	/* Make the Epilogue node and call the arch's epilogue maker. */
	create_barrier(env, bl, &mem, reg_map, 1);
	call->cb->epilogue(env->cb, bl, &mem, reg_map);

	/*
		Maximum size of the in array for Return nodes is
		return args + callee save/ignore registers + memory + stack pointer
	*/
	in_max = pmap_count(reg_map) + n_res + 2;

	in   = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
	regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));

	in[1] = be_abi_reg_map_get(reg_map, isa->sp);

	/* clear SP entry, since it has already been grown. */
	pmap_insert(reg_map, (void *) isa->sp, NULL);
	/* grow the result values; each consumes its register map slot */
	for(i = 0; i < n_res; ++i) {
		ir_node *res = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);

		in[n] = be_abi_reg_map_get(reg_map, arg->reg);
		regs[n++] = arg->reg;

		/* Clear the map entry to mark the register as processed. */
		be_abi_reg_map_set(reg_map, arg->reg, NULL);

	/* grow the rest of the stuff. */
	pmap_foreach(reg_map, ent) {
			regs[n++] = ent->key;

	/* The in array for the new back end return is now ready. */
	ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);

	/* Set the register classes of the return's parameter accordingly. */
	for(i = 0; i < n; ++i)
		be_node_set_reg_class(ret, i, regs[i]->reg_class);

	/* Free the space of the Epilog's in array and the register <-> proj map. */
	obstack_free(&env->obst, in);
	pmap_destroy(reg_map);
/* Walker context for lower_frame_sels_walker(): accumulates all entities of
   the value-param type whose Sels were replaced by FrameAddr nodes. The list
   is linked intrusively through the entity link fields. */
1289 typedef struct lower_frame_sels_env_t {
1291 entity *value_param_list; /**< the list of all value param entities */
1292 } lower_frame_sels_env_t;
1295 * Walker: Replaces Sels of frame type and
1296 * value param type entities by FrameAddress.
/*
 * Walker: replaces every Sel off the frame pointer or the value-param base
 * by a be_FrameAddr node and records value-param entities in ctx.
 * NOTE(review): lines are elided in this view (e.g. the Sel check and the
 * exchange of irn with nw); comments describe only visible code.
 */
1298 static void lower_frame_sels_walker(ir_node *irn, void *data)
1300 lower_frame_sels_env_t *ctx = data;
1303 ir_graph *irg = current_ir_graph;
1304 ir_node *frame = get_irg_frame(irg);
1305 ir_node *param_base = get_irg_value_param_base(irg);
1306 ir_node *ptr = get_Sel_ptr(irn);
1308 if (ptr == frame || ptr == param_base) {
1309 be_abi_irg_t *env = ctx->env;
1310 entity *ent = get_Sel_entity(irn);
1311 ir_node *bl = get_nodes_block(irn);
1314 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
/* Selected off the value-param base: remember the entity so that a
   backing store can be created for it later (fix_address_of_parameter_access). */
1317 if (ptr == param_base) {
1318 set_entity_link(ent, ctx->value_param_list);
1319 ctx->value_param_list = ent;
1326 * Check if a value parameter is transmitted as a register.
1327 * This might happen if the address of a parameter is taken which is
1328 * transmitted in registers.
1330 * Note that on some architectures this case must be handled specially
1331 * because the place of the backing store is determined by their ABI.
1333 * In the default case we move the entity to the frame type and create
1334 * a backing store into the first block.
/*
 * For every value parameter whose address was taken but which is passed in a
 * register, create a backing store in the first block and move the entity
 * into the frame type (see the comment block above for the rationale).
 * NOTE(review): many lines are elided in this view (new_list init, the
 * in-register filter, first_bl assignment, loop closings); comments below
 * describe only visible code.
 */
1336 static void fix_address_of_parameter_access(be_abi_irg_t *env, entity *value_param_list) {
1337 be_abi_call_t *call = env->call;
1338 ir_graph *irg = env->birg->irg;
1339 entity *ent, *next_ent, *new_list;
1341 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
/* Pass 1: filter the entities that need a backing store into new_list.
   next_ent is cached because the entity link field is reused below. */
1344 for (ent = value_param_list; ent; ent = next_ent) {
1345 int i = get_struct_member_index(get_entity_owner(ent), ent);
1346 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1348 next_ent = get_entity_link(ent);
1350 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
1351 set_entity_link(ent, new_list);
1356 /* ok, change the graph */
1357 ir_node *start_bl = get_irg_start_block(irg);
1358 ir_node *first_bl = NULL;
1359 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1360 const ir_edge_t *edge;
1361 optimization_state_t state;
/* Find the (unique) successor block of the start block. */
1364 foreach_block_succ(start_bl, edge) {
1365 ir_node *succ = get_edge_src_irn(edge);
1366 if (start_bl != succ) {
1372 /* we had already removed critical edges, so the following
1373 assertion should be always true. */
1374 assert(get_Block_n_cfgpreds(first_bl) == 1);
1376 /* now create backing stores */
1377 frame = get_irg_frame(irg);
1378 imem = get_irg_initial_mem(irg);
/* Optimizations are disabled around nmem creation (state saved/restored)
   so the fresh Proj is not CSE'd away before edges are rerouted. */
1380 save_optimization_state(&state);
1382 nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
1383 restore_optimization_state(&state);
1385 /* reroute all edges to the new memory source */
1386 edges_reroute(imem, nmem, irg);
1390 args = get_irg_args(irg);
1391 args_bl = get_nodes_block(args);
/* Pass 2: for each collected entity build FrameAddr + Store chained
   through the memory values. */
1392 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1393 int i = get_struct_member_index(get_entity_owner(ent), ent);
1394 ir_type *tp = get_entity_type(ent);
1395 ir_mode *mode = get_type_mode(tp);
1398 /* address for the backing store */
1399 addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
1402 mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
1404 /* the backing store itself */
1405 store = new_r_Store(irg, first_bl, mem, addr,
1406 new_r_Proj(irg, args_bl, args, mode, i));
1408 /* the new memory Proj gets the last Proj from store */
1409 set_Proj_pred(nmem, store);
1410 set_Proj_proj(nmem, pn_Store_M);
1412 /* move all entities to the frame type */
1413 frame_tp = get_irg_frame_type(irg);
1414 offset = get_type_size_bytes(frame_tp);
1415 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1416 ir_type *tp = get_entity_type(ent);
1417 int align = get_type_alignment_bytes(tp);
/* Round offset up to the entity's alignment (the masking step is in an
   elided line; this adds align-1 before rounding). */
1419 offset += align - 1;
1421 set_entity_owner(ent, frame_tp);
1422 add_class_member(frame_tp, ent);
1423 /* must be automatic to set a fixed layout */
1424 set_entity_allocation(ent, allocation_automatic);
1425 set_entity_offset_bytes(ent, offset);
1426 offset += get_type_size_bytes(tp);
1428 set_type_size_bytes(frame_tp, offset);
1433 * Modify the irg itself and the frame type.
/*
 * The central ABI transformation of one graph: lowers frame Sels, computes
 * the stack frame layout, introduces the RegParams node with Projs for all
 * argument/callee-save/ignore registers, builds the prologue (IncSP +
 * barrier), replaces stack-passed parameters by StackParam/FrameAddr nodes
 * and turns all Returns into be_Returns.
 * NOTE(review): numerous lines are elided in this view; the comments below
 * only describe what the visible lines establish.
 */
1435 static void modify_irg(be_abi_irg_t *env)
1437 be_abi_call_t *call = env->call;
1438 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1439 const arch_register_t *sp = arch_isa_sp(isa);
1440 ir_graph *irg = env->birg->irg;
1441 ir_node *bl = get_irg_start_block(irg);
1442 ir_node *end = get_irg_end_block(irg);
1443 ir_node *no_mem = get_irg_no_mem(irg);
1444 ir_node *mem = get_irg_initial_mem(irg);
1445 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1446 pset *dont_save = pset_new_ptr(8);
1452 const arch_register_t *fp_reg;
1453 ir_node *frame_pointer;
1455 ir_node *reg_params_bl;
1458 const ir_edge_t *edge;
1459 ir_type *arg_type, *bet_type;
1460 lower_frame_sels_env_t ctx;
1462 bitset_t *used_proj_nr;
1463 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1465 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1467 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1469 ctx.value_param_list = NULL;
1470 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1472 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1473 env->regs = pmap_create();
1475 used_proj_nr = bitset_alloca(1024);
1476 n_params = get_method_n_params(method_type);
/* args[i] will hold the old Proj from Start for parameter i (filled below). */
1477 args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
1478 memset(args, 0, n_params * sizeof(args[0]));
1480 /* Check if a value parameter is transmitted as a register.
1481 * This might happen if the address of a parameter is taken which is
1482 * transmitted in registers.
1484 * Note that on some architectures this case must be handled specially
1485 * because the place of the backing store is determined by their ABI.
1487 * In the default case we move the entity to the frame type and create
1488 * a backing store into the first block.
1490 fix_address_of_parameter_access(env, ctx.value_param_list);
1492 /* Fill the argument vector */
1493 arg_tuple = get_irg_args(irg);
1494 foreach_out_edge(arg_tuple, edge) {
1495 ir_node *irn = get_edge_src_irn(edge);
1496 int nr = get_Proj_proj(irn);
1498 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1501 arg_type = compute_arg_type(env, call, method_type);
1502 bet_type = call->cb->get_between_type(env->cb);
1503 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1505 /* Count the register params and add them to the number of Projs for the RegParams node */
1506 for(i = 0; i < n_params; ++i) {
1507 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1508 if(arg->in_reg && args[i]) {
1509 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1510 assert(i == get_Proj_proj(args[i]));
1512 /* For now, associate the register with the old Proj from Start representing that argument. */
1513 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1514 bitset_set(used_proj_nr, i);
1515 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1519 /* Collect all callee-save registers */
1520 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1521 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1522 for(j = 0; j < cls->n_regs; ++j) {
1523 const arch_register_t *reg = &cls->regs[j];
1524 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1525 pmap_insert(env->regs, (void *) reg, NULL);
/* SP and (if present) BP always get entries so the RegParams node
   produces Projs for them too. */
1529 pmap_insert(env->regs, (void *) sp, NULL);
1530 pmap_insert(env->regs, (void *) isa->bp, NULL);
1531 reg_params_bl = get_irg_start_block(irg);
1532 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1535 * make proj nodes for the callee save registers.
1536 * memorize them, since Return nodes get those as inputs.
1538 * Note, that if a register corresponds to an argument, the regs map contains
1539 * the old Proj from start for that argument.
1542 rm = reg_map_to_arr(&env->obst, env->regs);
1543 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1544 arch_register_t *reg = (void *) rm[i].reg;
1545 ir_node *arg_proj = rm[i].irn;
/* If an old argument Proj exists, reuse its mode; else the class mode. */
1546 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1548 int pos = BE_OUT_POS((int) nr);
1554 bitset_set(used_proj_nr, nr);
1555 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1556 pmap_insert(env->regs, (void *) reg, proj);
1557 be_set_constr_single_reg(env->reg_params, pos, reg);
1558 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1561 * If the register is an ignore register,
1562 * the Proj for that register shall also be ignored during register allocation.
1564 if(arch_register_type_is(reg, ignore))
1565 flags |= arch_irn_flags_ignore;
1568 flags |= arch_irn_flags_modify_sp;
1570 be_node_set_flags(env->reg_params, pos, flags);
1572 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1574 obstack_free(&env->obst, rm);
1576 /* Generate the Prologue */
1577 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1579 /* do the stack allocation BEFORE the barrier, or spill code
1580 might be added before it */
1581 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1582 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
1583 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1585 barrier = create_barrier(env, bl, &mem, env->regs, 0);
1587 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1588 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1590 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1591 set_irg_frame(irg, frame_pointer);
1592 pset_insert_ptr(env->ignore_regs, fp_reg);
1594 /* Now, introduce stack param nodes for all parameters passed on the stack */
1595 for(i = 0; i < n_params; ++i) {
1596 ir_node *arg_proj = args[i];
1597 ir_node *repl = NULL;
1599 if(arg_proj != NULL) {
1600 be_abi_call_arg_t *arg;
1601 ir_type *param_type;
1602 int nr = get_Proj_proj(arg_proj);
/* NOTE(review): clamping nr to n_params (not n_params - 1) looks suspicious,
   but the elided context may guarantee nr < n_params here — confirm. */
1604 nr = MIN(nr, n_params);
1605 arg = get_call_arg(call, 0, nr);
1606 param_type = get_method_param_type(method_type, nr);
1609 repl = pmap_get(env->regs, (void *) arg->reg);
1612 else if(arg->on_stack) {
1613 /* For atomic parameters which are actually used, we create a StackParam node. */
1614 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1615 ir_mode *mode = get_type_mode(param_type);
1616 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1617 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1620 /* The stack parameter is not primitive (it is a struct or array),
1621 we thus will create a node representing the parameter's address
1624 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1628 assert(repl != NULL);
1629 edges_reroute(args[i], repl, irg);
1633 /* All Return nodes hang on the End node, so look for them there. */
1634 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
1635 ir_node *irn = get_Block_cfgpred(end, i);
1637 if (is_Return(irn)) {
1638 ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
1642 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return than,
1643 the code is dead and will never be executed. */
1645 del_pset(dont_save)
1646 obstack_free(&env->obst, args);
1650 * Walker: puts all Alloc(stack_alloc) on a obstack
/* Walker: pushes every stack Alloc node onto the environment's obstack
   (the caller later retrieves them as a pointer array). */
1652 static void collect_alloca_walker(ir_node *irn, void *data)
1654 be_abi_irg_t *env = data;
1655 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1656 obstack_ptr_grow(&env->obst, irn);
/*
 * Entry point: sets up a fresh be_abi_irg_t for the graph, queries the ISA's
 * calling convention, lowers calls and the graph (elided calls here), and
 * registers node pointers with the dead-node-elimination survivor set.
 * Ownership: the returned environment must be released with be_abi_free().
 * NOTE(review): some lines are elided in this view (e.g. env->birg
 * assignment, lowering/processing calls, the return statement).
 */
1659 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1661 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1662 ir_node *old_frame = get_irg_frame(birg->irg);
1663 ir_graph *irg = birg->irg;
1667 optimization_state_t state;
1669 obstack_init(&env->obst);
1671 env->isa = birg->main_env->arch_env->isa;
1672 env->method_type = get_entity_type(get_irg_entity(irg));
1673 env->call = be_abi_call_new();
1674 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1676 env->ignore_regs = pset_new_ptr_default();
1677 env->keep_map = pmap_create();
1678 env->dce_survivor = new_survive_dce();
1680 env->stack_phis = pset_new_ptr(16);
1681 /* Beware: later we replace this node by the real one, ensure it is not CSE'd
1682 to another Unknown or the stack pointer gets used */
1683 save_optimization_state(&state);
1685 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1686 restore_optimization_state(&state);
1687 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
1689 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1691 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1692 env->irn_ops.impl = &abi_irn_ops;
1694 /* Lower all call nodes in the IRG. */
1697 /* Process the IRG */
1700 /* We don't need the keep map anymore. */
1701 pmap_destroy(env->keep_map);
1703 /* reroute the stack origin of the calls to the true stack origin. */
1704 edges_reroute(dummy, env->init_sp, irg);
1705 edges_reroute(old_frame, get_irg_frame(irg), irg);
1707 /* Make some important node pointers survive the dead node elimination. */
1708 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1709 pmap_foreach(env->regs, ent)
1710 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1712 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
1714 env->call->cb->done(env->cb);
/* Releases everything be_abi_introduce() allocated: survivor set, psets,
   register map, the obstack, and pops the ABI irn handler again.
   NOTE(review): the final free(env) is presumably in an elided line. */
1719 void be_abi_free(be_abi_irg_t *env)
1721 free_survive_dce(env->dce_survivor);
1722 del_pset(env->stack_phis);
1723 del_pset(env->ignore_regs);
1724 pmap_destroy(env->regs);
1725 obstack_free(&env->obst, NULL);
1726 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
/* Sets the index bit in bs for every ignore register of class cls that was
   collected in abi->ignore_regs. */
1730 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
1732 arch_register_t *reg;
1734 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
1735 if(reg->reg_class == cls)
1736 bitset_set(bs, reg->index);
1743 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1744 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1745 | _| | |> < ___) | || (_| | (__| <
1746 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/* Walker context for collect_stack_nodes_walker(); also holds the node set
   being filled (its field is in an elided line — see info.nodes below). */
1750 struct fix_stack_walker_info {
1752 const arch_env_t *aenv;
1756 * Walker. Collect all stack modifying nodes.
/* Walker: inserts every node flagged modify_sp into info->nodes. */
1758 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1760 struct fix_stack_walker_info *info = data;
1762 if(arch_irn_is(info->aenv, irn, modify_sp))
1763 pset_insert_ptr(info->nodes, irn);
/*
 * Collects all stack-modifying nodes plus the initial SP and runs SSA
 * construction over them, so multiple stack modifications in different
 * blocks are merged via Phis (recorded in env->stack_phis for the ABI
 * irn handler). Recomputes liveness afterwards.
 */
1766 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1768 dom_front_info_t *df;
1769 pset *stack_nodes = pset_new_ptr(16);
1770 struct fix_stack_walker_info info;
1772 info.nodes = stack_nodes;
1773 info.aenv = env->birg->main_env->arch_env;
1775 /* We need dominance frontiers for fix up */
1776 df = be_compute_dominance_frontiers(env->birg->irg);
1777 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
1778 pset_insert_ptr(stack_nodes, env->init_sp);
1779 be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1780 del_pset(stack_nodes);
1782 /* Liveness could have changed due to Phi nodes. */
1783 be_liveness(env->birg->irg);
1785 /* free these dominance frontiers */
1786 be_free_dominance_frontiers(df);
1790 * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
1791 * into -1 or 1, respectively.
1792 * @param irn The node.
1793 * @return 1, if the direction of the IncSP was along, -1 if against.
/* Maps be_stack_dir_shrink -> -1 and any other direction -> +1
   (1 - 2*1 = -1 when shrinking, 1 - 2*0 = 1 otherwise). */
1795 static int get_dir(ir_node *irn)
1797 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
/*
 * Walks the schedule of block bl, accumulating the stack-pointer bias from
 * IncSP nodes and assigning true frame offsets (entity offset + bias) to
 * nodes that reference frame entities. The running bias is presumably
 * returned for propagation to successor blocks — the return is elided here.
 */
1800 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1802 const arch_env_t *aenv = env->birg->main_env->arch_env;
1803 int omit_fp = env->call->flags.bits.try_omit_fp;
1806 sched_foreach(bl, irn) {
1809 If the node modifies the stack pointer by a constant offset,
1810 record that in the bias.
1812 if(be_is_IncSP(irn)) {
1813 int ofs = be_get_IncSP_offset(irn);
1814 int dir = get_dir(irn);
/* The placeholder BE_STACK_FRAME_SIZE is patched here to the real,
   now-known frame size. */
1816 if(ofs == BE_STACK_FRAME_SIZE) {
1817 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1818 be_set_IncSP_offset(irn, ofs);
1826 Else check, if the node relates to an entity on the stack frame.
1827 If so, set the true offset (including the bias) for that
1831 entity *ent = arch_get_frame_entity(aenv, irn);
1833 int offset = get_stack_entity_offset(env->frame, ent, bias);
1834 arch_set_frame_offset(aenv, irn, offset);
1835 DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1844 * A helper struct for the bias walker.
/* Fields of the bias_walk helper (typedef head is in an elided line). */
1847 be_abi_irg_t *env; /**< The ABI irg environment. */
1848 int start_block_bias; /**< The bias at the end of the start block. */
1849 ir_node *start_block; /**< The start block of the current graph. */
1853 * Block-Walker: fix all stack offsets
/* Block-walker: applies process_stack_bias() to every block except the
   start block (which the caller already processed to seed the bias). */
1855 static void stack_bias_walker(ir_node *bl, void *data)
1857 struct bias_walk *bw = data;
1858 if (bl != bw->start_block) {
1859 process_stack_bias(bw->env, bl, bw->start_block_bias);
/*
 * Final stack-offset fixup: computes the frame's initial offset, processes
 * the start block to obtain its end-of-block bias, then walks all other
 * blocks applying that bias via stack_bias_walker().
 */
1863 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1865 ir_graph *irg = env->birg->irg;
1866 struct bias_walk bw;
1868 stack_frame_compute_initial_offset(env->frame);
1869 // stack_layout_dump(stdout, env->frame);
1871 /* Determine the stack bias at the end of the start block. */
1872 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1874 /* fix the bias in all other blocks */
1876 bw.start_block = get_irg_start_block(irg);
1877 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
/* Returns the Proj (of the RegParams node) holding callee-save register reg.
   Precondition: reg is callee-save and was registered in abi->regs. */
1880 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1882 assert(arch_register_type_is(reg, callee_save));
1883 assert(pmap_contains(abi->regs, (void *) reg));
1884 return pmap_get(abi->regs, (void *) reg);
1888 _____ _____ _ _ _ _ _ _
1889 |_ _| __ \| \ | | | | | | | | |
1890 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1891 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1892 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1893 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1895 for Phi nodes which are created due to stack modifying nodes
1896 such as IncSP, AddSP and SetSP.
1898 These Phis are always to be ignored by the reg alloc and are
1899 fixed on the SP register of the ISA.
/* irn-handler hook: claims only the Phi nodes this ABI pass created for
   stack modifications (abi->stack_phis); all other nodes yield NULL so the
   next handler is consulted. */
1902 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1904 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1905 const void *res = NULL;
1907 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
1908 res = &abi->irn_ops;
/* limited-callback: restricts the allowed register set to exactly the
   ISA's stack pointer. */
1913 static void be_abi_limited(void *data, bitset_t *bs)
1915 be_abi_irg_t *abi = data;
1916 bitset_clear_all(bs);
1917 bitset_set(bs, abi->isa->sp->index);
/*
 * Register requirement for a stack Phi: its (single) output is limited to
 * the stack pointer via be_abi_limited; its inputs only need the SP's
 * register class. Other positions leave req zeroed (no requirement).
 */
1920 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1922 be_abi_irg_t *abi = get_abi_from_ops(self);
1923 const arch_register_t *reg = abi->isa->sp;
1925 memset(req, 0, sizeof(req[0]));
1927 if(pos == BE_OUT_POS(0)) {
1928 req->cls = reg->reg_class;
1929 req->type = arch_register_req_type_limited;
1930 req->limited = be_abi_limited;
1931 req->limited_env = abi;
1934 else if(pos >= 0 && pos < get_irn_arity(irn)) {
1935 req->cls = reg->reg_class;
1936 req->type = arch_register_req_type_normal;
/* Intentionally a stub: stack Phis are pinned to the SP register, so an
   explicit register assignment is ignored (empty body is in elided lines). */
1942 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
/* Every node handled by these ops lives in the stack pointer register. */
1946 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1948 const be_abi_irg_t *abi = get_abi_from_ops(self);
1949 return abi->isa->sp;
/* Stack Phis are ordinary nodes as far as classification is concerned. */
1952 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1954 return arch_irn_class_normal;
/* Stack Phis must be ignored by the register allocator and are flagged as
   modifying the stack pointer. */
1957 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1959 return arch_irn_flags_ignore | arch_irn_flags_modify_sp;
/* Stack Phis reference no frame entity (body — presumably `return NULL;` —
   is in elided lines; confirm against the full source). */
1962 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
/* Intentionally a stub: stack Phis carry no frame entity, so there is no
   bias to apply (empty body is in elided lines). */
1967 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* vtable wiring the abi_* callbacks above into an arch_irn_ops_if_t;
   several entries (set_irn_reg, get_irn_reg, classify, get_flags, …) are
   in elided lines of this view. */
1971 static const arch_irn_ops_if_t abi_irn_ops = {
1972 abi_get_irn_reg_req,
1977 abi_get_frame_entity,
1981 static const arch_irn_handler_t abi_irn_handler = {