4 * @author Sebastian Hack
19 #include "irgraph_t.h"
22 #include "iredges_t.h"
25 #include "irprintf_t.h"
37 #include "besched_t.h"
/* One argument/result slot of an ABI call description; stored in a hash set
 * keyed by (is_res, pos) — see get_or_set_call_arg(). */
39 typedef struct _be_abi_call_arg_t {
40 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
41 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
42 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
45 const arch_register_t *reg; /**< The register used for in_reg arguments/results. */
48 unsigned space_before; /**< Padding (bytes) inserted before the argument on the stack. */
/* ABI description of one call site / method: flags, backend callbacks and
 * the set of argument/result descriptors. */
52 struct _be_abi_call_t {
53 be_abi_call_flags_t flags; /**< Flags describing ABI properties of this call. */
54 const be_abi_callbacks_t *cb; /**< Backend-supplied ABI callbacks. */
55 ir_type *between_type; /**< The "between" stack area type (return address etc., see frame handling below). */
57 const arch_register_class_t *cls_addr; /**< Register class for the call address, or NULL for the default. */
/* Per-irg environment built and used by the be_abi phase. */
60 struct _be_abi_irg_t {
62 be_stack_layout_t *frame; /**< The stack frame model. */
63 const be_irg_t *birg; /**< The back end IRG. */
64 const arch_isa_t *isa; /**< The isa. */
65 survive_dce_t *dce_survivor; /**< presumably keeps selected nodes alive across dead node elimination — TODO confirm */
67 be_abi_call_t *call; /**< The ABI call information. */
68 ir_type *method_type; /**< The type of the method of the IRG. */
70 ir_node *init_sp; /**< The node representing the stack pointer
71 at the start of the function. */
73 ir_node *start_barrier; /**< The barrier of the start block */
75 ir_node *reg_params; /**< The reg params node. */
76 pmap *regs; /**< A map of all callee-save and ignore regs to
77 their Projs to the RegParams node. */
79 pset *stack_phis; /**< The set of all Phi nodes inserted due to
80 stack pointer modifying nodes. */
82 int start_block_bias; /**< The stack bias at the end of the start block. */
84 void *cb; /**< ABI Callback self pointer. */
86 pmap *keep_map; /**< mapping blocks to keep nodes. */
87 pset *ignore_regs; /**< Additional registers which shall be ignored. */
89 arch_irn_handler_t irn_handler; /**< Embedded irn handler; recovered via get_abi_from_handler(). */
90 arch_irn_ops_t irn_ops; /**< Embedded irn ops; recovered via get_abi_from_ops(). */
91 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/* container_of idiom: recover the enclosing be_abi_irg_t from a pointer to
 * its embedded irn_handler / irn_ops member. */
94 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
95 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
97 /* Forward, since be need it in be_abi_introduce(). */
98 static const arch_irn_ops_if_t abi_irn_ops;
99 static const arch_irn_handler_t abi_irn_handler;
/* Heights information used by dependent_on(); created and freed in process_calls(). */
100 static heights_t *ir_heights;
102 /* Flag: if set, try to omit the frame pointer if called by the backend */
103 static int be_omit_fp = 1;
106 _ ____ ___ ____ _ _ _ _
107 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
108 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
109 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
110 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
112 These callbacks are used by the backend to set the parameters
113 for a specific call type.
117 * Set compare function: compares two ABI call object arguments.
/* Returns 0 iff both entries describe the same slot (same is_res and pos);
 * this follows the libfirm set compare convention (0 == equal). */
119 static int cmp_call_arg(const void *a, const void *b, size_t n)
121 const be_abi_call_arg_t *p = a, *q = b;
122 return !(p->is_res == q->is_res && p->pos == q->pos);
126 * Get or set an ABI call object argument.
128 * @param call the abi call
129 * @param is_res true for call results, false for call arguments
130 * @param pos position of the argument
131 * @param do_insert true if the argument is set, false if it's retrieved
133 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
135 be_abi_call_arg_t arg;
/* Start from an all-zero key; only is_res/pos matter for the set lookup. */
138 memset(&arg, 0, sizeof(arg));
/* Hash folds the result flag and the position into one bucket index. */
142 hash = is_res * 128 + pos;
145 ? set_insert(call->params, &arg, sizeof(arg), hash)
146 : set_find(call->params, &arg, sizeof(arg), hash);
150 * Retrieve an ABI call object argument.
152 * @param call the ABI call object
153 * @param is_res true for call results, false for call arguments
154 * @param pos position of the argument
156 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
/* Lookup-only wrapper around get_or_set_call_arg() (do_insert = 0). */
158 return get_or_set_call_arg(call, is_res, pos, 0);
161 /* Set the flags for a call. */
162 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
169 /* Set register class for call address */
170 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
172 call->cls_addr = cls;
/* Mark the parameter at arg_pos as passed on the stack, with layout info. */
176 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
178 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
180 arg->alignment = alignment;
181 arg->space_before = space_before;
182 arg->space_after = space_after;
/* NOTE(review): the assert runs only after alignment was already stored;
 * consider validating before use. */
183 assert(alignment > 0 && "Alignment must be greater than 0");
/* Mark the parameter at arg_pos as passed in the given register (is_res = 0). */
186 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
188 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Mark the result at arg_pos as returned in the given register (is_res = 1). */
193 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
195 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
200 /* Get the flags of a ABI call object. */
201 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
207 * Constructor for a new ABI call object.
209 * @return the new ABI call object
211 static be_abi_call_t *be_abi_call_new(void)
213 be_abi_call_t *call = xmalloc(sizeof(call[0]));
/* Argument/result descriptors live in a hash set keyed by (is_res, pos). */
216 call->params = new_set(cmp_call_arg, 16);
218 call->cls_addr = NULL;
/* Inherit the global frame-pointer-omission default. */
220 call->flags.bits.try_omit_fp = be_omit_fp;
226 * Destructor for an ABI call object.
228 static void be_abi_call_free(be_abi_call_t *call)
230 del_set(call->params);
236 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
237 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
238 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
239 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
242 Handling of the stack frame. It is composed of three types:
243 1) The type of the arguments which are pushed on the stack.
244 2) The "between type" which consists of stuff the call of the
245 function pushes on the stack (like the return address and
246 the old base pointer for ia32).
247 3) The Firm frame type which consists of all local variables
/* Compute the offset of entity ent within the stack frame model, relative to
 * the initial frame pointer position and corrected by the given bias. */
251 static int get_stack_entity_offset(be_stack_layout_t *frame, entity *ent, int bias)
253 ir_type *t = get_entity_owner(ent);
254 int ofs = get_entity_offset_bytes(ent);
258 /* Find the type the entity is contained in. */
259 for(index = 0; index < N_FRAME_TYPES; ++index) {
260 if(frame->order[index] == t)
264 /* Add the size of all the types below the one of the entity to the entity's offset */
265 for(i = 0; i < index; ++i)
266 ofs += get_type_size_bytes(frame->order[i]);
268 /* correct the offset by the initial position of the frame pointer */
269 ofs -= frame->initial_offset;
271 /* correct the offset with the current bias. */
278 * Retrieve the entity with given offset from a frame type.
280 static entity *search_ent_with_offset(ir_type *t, int offset)
/* Linear scan over the compound's members; returns the first exact match. */
284 for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
285 entity *ent = get_compound_member(t, i);
286 if(get_entity_offset_bytes(ent) == offset)
/* Compute and cache the frame's initial offset: the global offset of the
 * entity at byte 0 of the base type (between type for downward-growing
 * stacks, else the frame type); 0 if no such entity exists. */
293 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
295 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
296 entity *ent = search_ent_with_offset(base, 0);
298 frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;
300 return frame->initial_offset;
304 * Initializes the frame layout from parts
306 * @param frame the stack layout that will be initialized
307 * @param args the stack argument layout type
308 * @param between the between layout type
309 * @param locals the method frame type
310 * @param stack_dir the stack direction
311 * @param param_map an array mapping method argument positions to the stack argument type
313 * @return the initialized stack layout
315 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
316 ir_type *between, ir_type *locals, int stack_dir,
319 frame->arg_type = args;
320 frame->between_type = between;
321 frame->frame_type = locals;
322 frame->initial_offset = 0;
323 frame->stack_dir = stack_dir;
/* The between area always sits in the middle of the three frame areas. */
324 frame->order[1] = between;
325 frame->param_map = param_map;
/* Which of args/locals comes first depends on the stack direction; the
 * selecting condition is on an elided line — TODO confirm against full source. */
328 frame->order[0] = args;
329 frame->order[2] = locals;
332 frame->order[0] = locals;
333 frame->order[2] = args;
339 /** Dumps the stack layout to file. */
340 static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
344 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
/* Print each frame area in layout order, then each member entity with its
 * local (within the type) and global (within the frame) offset. */
345 for (j = 0; j < N_FRAME_TYPES; ++j) {
346 ir_type *t = frame->order[j];
348 ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
349 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
350 entity *ent = get_compound_member(t, i);
351 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
358 * Returns non-zero if the call argument at given position
359 * is transfered on the stack.
361 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
363 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
/* An argument record exists and is not register-allocated => on the stack. */
364 return arg && !arg->in_reg;
374 Adjustment of the calls inside a graph.
379 * Transform a call node.
380 * @param env The ABI environment for the current irg.
381 * @param irn The call node.
382 * @param curr_sp The stack pointer node to use.
383 * @return The stack pointer after the call.
/* Lower one Call node to a backend be_Call: allocate/store stack arguments,
 * renumber the result Projs past the generic Call proj numbers, add Projs and
 * a Keep for the caller-save registers, and release the call frame again.
 * Returns the stack pointer valid after the call. */
385 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
387 ir_graph *irg = env->birg->irg;
388 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
389 be_abi_call_t *call = be_abi_call_new();
390 ir_type *mt = get_Call_type(irn);
391 ir_node *call_ptr = get_Call_ptr(irn);
392 int n_params = get_method_n_params(mt);
393 ir_node *curr_mem = get_Call_mem(irn);
394 ir_node *bl = get_nodes_block(irn);
395 pset *results = pset_new_ptr(8);
396 pset *caller_save = pset_new_ptr(8);
398 int stack_dir = arch_isa_stack_dir(isa);
399 const arch_register_t *sp = arch_isa_sp(isa);
400 ir_mode *mach_mode = sp->reg_class->mode;
401 struct obstack *obst = &env->obst;
402 int no_alloc = call->flags.bits.frame_is_setup_on_call;
404 ir_node *res_proj = NULL;
/* Result projs are numbered past the generic Call proj range. */
405 int curr_res_proj = pn_Call_max;
412 const ir_edge_t *edge;
417 /* Let the isa fill out the abi description for that call node. */
418 arch_isa_get_call_abi(isa, mt, call);
420 /* Insert code to put the stack arguments on the stack. */
421 assert(get_Call_n_params(irn) == n_params);
422 for(i = 0; i < n_params; ++i) {
423 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
426 int arg_size = get_type_size_bytes(get_method_param_type(mt, i));
/* Account padding before, the value itself and padding after, each
 * rounded up to the argument's alignment. */
428 stack_size += round_up2(arg->space_before, arg->alignment);
429 stack_size += round_up2(arg_size, arg->alignment);
430 stack_size += round_up2(arg->space_after, arg->alignment);
431 obstack_int_grow(obst, i);
435 pos = obstack_finish(obst);
437 /* Collect all arguments which are passed in registers. */
438 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
439 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
440 if(arg && arg->in_reg) {
441 obstack_int_grow(obst, i);
445 low_args = obstack_finish(obst);
447 /* If there are some parameters which shall be passed on the stack. */
450 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
453 * Reverse list of stack parameters if call arguments are from left to right.
454 * We must them reverse again in they are pushed (not stored) and the stack
455 * direction is downwards.
457 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
458 for(i = 0; i < n_pos >> 1; ++i) {
459 int other = n_pos - i - 1;
467 * If the stack is decreasing and we do not want to store sequentially,
468 * or someone else allocated the call frame
469 * we allocate as much space on the stack all parameters need, by
470 * moving the stack pointer along the stack's direction.
472 if(stack_dir < 0 && !do_seq && !no_alloc) {
473 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
475 add_irn_dep(curr_sp, alloca_copy);
/* Memory chaining differs for sequential vs. batched stores (the selecting
 * condition is on an elided line). */
481 obstack_ptr_grow(obst, get_Call_mem(irn));
482 curr_mem = new_NoMem();
484 curr_mem = get_Call_mem(irn);
487 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
488 for(i = 0; i < n_pos; ++i) {
490 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
491 ir_node *param = get_Call_param(irn, p);
492 ir_node *addr = curr_sp;
494 ir_type *param_type = get_method_param_type(mt, p);
495 int param_size = get_type_size_bytes(param_type) + arg->space_after;
498 * If we wanted to build the arguments sequentially,
499 * the stack pointer for the next must be incremented,
500 * and the memory value propagated.
504 addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
506 add_irn_dep(curr_sp, alloca_copy);
509 add_irn_dep(curr_sp, curr_mem);
512 curr_ofs += arg->space_before;
513 curr_ofs = round_up2(curr_ofs, arg->alignment);
515 /* Make the expression to compute the argument's offset. */
517 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
518 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
522 /* Insert a store for primitive arguments. */
523 if (is_atomic_type(param_type)) {
525 store = new_r_Store(irg, bl, curr_mem, addr, param);
526 mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
529 /* Make a mem copy for compound arguments. */
533 assert(mode_is_reference(get_irn_mode(param)));
534 copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
535 mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
538 curr_ofs += param_size;
543 obstack_ptr_grow(obst, mem);
546 in = (ir_node **) obstack_finish(obst);
548 /* We need the sync only, if we didn't build the stores sequentially. */
551 curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);
553 curr_mem = get_Call_mem(irn);
556 obstack_free(obst, in);
559 /* Collect caller save registers */
560 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
562 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
563 for(j = 0; j < cls->n_regs; ++j) {
564 const arch_register_t *reg = arch_register_for_index(cls, j);
565 if(arch_register_type_is(reg, caller_save))
566 pset_insert_ptr(caller_save, (void *) reg);
570 /* search the greatest result proj number */
572 /* TODO: what if the result is NOT used? Currently there is
573 * no way to detect this later, especially there is no way to
574 * see this in the proj numbers.
575 * While this is ok for the register allocator, it is bad for
576 * backends which need to change the be_Call further (x87 simulator
577 * for instance. However for this particular case the call_type is
580 foreach_out_edge(irn, edge) {
581 const ir_edge_t *res_edge;
582 ir_node *irn = get_edge_src_irn(edge);
584 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
586 foreach_out_edge(irn, res_edge) {
588 be_abi_call_arg_t *arg;
589 ir_node *res = get_edge_src_irn(res_edge);
591 assert(is_Proj(res));
593 proj = get_Proj_proj(res);
594 arg = get_call_arg(call, 1, proj);
597 shift the proj number to the right, since we will drop the
598 unspeakable Proj_T from the Call. Therefore, all real argument
599 Proj numbers must be increased by pn_be_Call_first_res
601 proj += pn_be_Call_first_res;
602 set_Proj_proj(res, proj);
603 obstack_ptr_grow(obst, res);
605 if(proj > curr_res_proj)
606 curr_res_proj = proj;
/* Results returned in a register relieve that register from being saved. */
608 pset_remove_ptr(caller_save, arg->reg);
609 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
616 obstack_ptr_grow(obst, NULL);
617 res_projs = obstack_finish(obst);
619 /* make the back end call node and set its register requirements. */
620 for(i = 0; i < n_low_args; ++i)
621 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
623 in = obstack_finish(obst);
/* Calls with an immediate-capable target (SymConst) become direct calls;
 * otherwise the call address stays a regular operand. */
625 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
626 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
627 curr_res_proj + pset_count(caller_save), n_low_args, in,
629 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
633 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
634 curr_res_proj + pset_count(caller_save), n_low_args, in,
638 Set the register class of the call address to the same as the stack pointer's
639 if it's not set by the backend in the abi callback.
641 be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr ? call->cls_addr : sp->reg_class);
643 DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
645 /* Set the register classes and constraints of the Call parameters. */
646 for(i = 0; i < n_low_args; ++i) {
647 int index = low_args[i];
648 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
649 assert(arg->reg != NULL);
651 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
654 /* Set the register constraints of the results. */
655 for(i = 0; res_projs[i]; ++i) {
656 ir_node *irn = res_projs[i];
657 int proj = get_Proj_proj(irn);
659 /* Correct Proj number since it has been adjusted! (see above) */
660 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
663 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
665 obstack_free(obst, in);
666 exchange(irn, low_call);
668 /* redirect the result projs to the lowered call instead of the Proj_T */
669 for(i = 0; res_projs[i]; ++i)
670 set_Proj_pred(res_projs[i], low_call);
672 /* Make additional projs for the caller save registers
673 and the Keep node which keeps them alive. */
674 if(pset_count(caller_save) > 0) {
675 const arch_register_t *reg;
679 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
680 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
682 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
683 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
684 set_irn_link(proj, (void *) reg);
685 obstack_ptr_grow(obst, proj);
689 in = (ir_node **) obstack_finish(obst);
690 keep = be_new_Keep(NULL, irg, bl, n, in);
691 for(i = 0; i < n; ++i) {
692 const arch_register_t *reg = get_irn_link(in[i]);
693 be_node_set_reg_class(keep, i, reg->reg_class);
695 obstack_free(obst, in);
698 /* Clean up the stack. */
700 ir_node *mem_proj = NULL;
/* Reuse an existing memory Proj of the lowered call if one exists. */
702 foreach_out_edge(low_call, edge) {
703 ir_node *irn = get_edge_src_irn(edge);
704 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
711 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
712 keep_alive(mem_proj);
715 /* Clean up the stack frame if we allocated it */
717 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
718 add_irn_dep(curr_sp, mem_proj);
720 add_irn_dep(curr_sp, alloca_copy);
726 be_abi_call_free(call);
727 obstack_free(obst, pos);
729 del_pset(caller_save);
736 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
738 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
/* Only stack Allocs are lowered here; heap Allocs are left untouched. */
740 if (get_Alloc_where(alloc) == stack_alloc) {
741 ir_node *bl = get_nodes_block(alloc);
742 ir_graph *irg = get_irn_irg(bl);
743 ir_node *alloc_mem = NULL;
744 ir_node *alloc_res = NULL;
746 const ir_edge_t *edge;
/* Find the memory and result Projs of the Alloc among its users. */
752 foreach_out_edge(alloc, edge) {
753 ir_node *irn = get_edge_src_irn(edge);
755 assert(is_Proj(irn));
756 switch(get_Proj_proj(irn)) {
768 /* Beware: currently Alloc nodes without a result might happen,
769 only escape analysis kills them and this phase runs only for object
770 oriented source. We kill the Alloc here. */
771 if (alloc_res == NULL && alloc_mem) {
772 exchange(alloc_mem, get_Alloc_mem(alloc))
776 /* The stack pointer will be modified in an unknown manner.
777 We cannot omit it. */
778 env->call->flags.bits.try_omit_fp = 0;
/* Stack allocation becomes a backend AddSP on the current stack pointer. */
779 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
781 if(alloc_mem != NULL) {
785 addsp_mem = new_r_Proj(irg, bl, new_alloc, mode_M, pn_be_AddSP_M);
787 // We need to sync the output mem of the AddSP with the input mem
788 // edge into the alloc node
789 ins[0] = get_Alloc_mem(alloc);
791 sync = new_r_Sync(irg, bl, 2, ins);
793 exchange(alloc_mem, sync);
796 exchange(alloc, new_alloc);
798 /* fix projnum of alloca res */
799 set_Proj_proj(alloc_res, pn_be_AddSP_res);
801 addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
803 /* copy the address away, since it could be used after further stack pointer modifications. */
804 /* Let it point curr_sp just for the moment, I'll reroute it in a second. */
805 *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
807 /* Let all users of the Alloc() result now point to the copy. */
808 edges_reroute(alloc_res, copy, irg);
810 /* Rewire the copy appropriately. */
811 set_irn_n(copy, be_pos_Copy_op, addr);
820 * The Free is transformed into a back end free node and connected to the stack nodes.
822 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
/* Only stack Frees are lowered; heap Frees are left untouched. */
824 if (get_Free_where(free) == stack_alloc) {
825 ir_node *bl = get_nodes_block(free);
826 ir_graph *irg = get_irn_irg(bl);
827 ir_node *addsp, *mem, *res;
829 /* The stack pointer will be modified in an unknown manner.
830 We cannot omit it. */
831 env->call->flags.bits.try_omit_fp = 0;
/* Freeing stack space means moving the stack pointer back: a SubSP node. */
832 addsp = be_new_SubSP(env->isa->sp, irg, bl, curr_sp, get_Free_size(free));
834 mem = new_r_Proj(irg, bl, addsp, mode_M, pn_be_SubSP_M);
835 res = new_r_Proj(irg, bl, addsp, mode_P_data, pn_be_SubSP_res);
843 /* the following function is replaced by the usage of the heights module */
846 * Walker for dependent_on().
847 * This function searches a node tgt recursively from a given node
848 * but is restricted to the given block.
849 * @return 1 if tgt was reachable from curr, 0 if not.
851 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
/* Stop as soon as the walk leaves the block of interest. */
855 if (get_nodes_block(curr) != bl)
861 /* Phi functions stop the recursion inside a basic block */
862 if (! is_Phi(curr)) {
863 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
864 if (check_dependence(get_irn_n(curr, i), tgt, bl))
874 * Check if a node is somehow data dependent on another one.
875 * both nodes must be in the same basic block.
876 * @param n1 The first node.
877 * @param n2 The second node.
878 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
880 static int dependent_on(ir_node *n1, ir_node *n2)
882 ir_node *bl = get_nodes_block(n1);
884 assert(bl == get_nodes_block(n2));
/* Delegate to the precomputed heights info (set up in process_calls()). */
886 return heights_reachable_in_block(ir_heights, n1, n2);
887 //return check_dependence(n1, n2, bl);
/* qsort() comparator ordering call/alloc/free nodes by mutual data dependence
 * (the returned values are on elided lines; see the convention below). */
890 static int cmp_call_dependecy(const void *c1, const void *c2)
892 ir_node *n1 = *(ir_node **) c1;
893 ir_node *n2 = *(ir_node **) c2;
896 Classical qsort() comparison function behavior:
897 0 if both elements are equal
898 1 if second is "smaller" that first
899 -1 if first is "smaller" that second
901 if (dependent_on(n1, n2))
904 if (dependent_on(n2, n1))
911 * Walker: links all Call/alloc/Free nodes to the Block they are contained.
913 static void link_calls_in_block_walker(ir_node *irn, void *data)
915 opcode code = get_irn_opcode(irn);
/* Only Calls and stack-based Alloc/Free nodes are of interest here. */
917 if (code == iro_Call ||
918 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
919 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
920 be_abi_irg_t *env = data;
921 ir_node *bl = get_nodes_block(irn);
922 void *save = get_irn_link(bl);
/* A graph containing a Call is not a leaf function. */
924 if (code == iro_Call)
925 env->call->flags.bits.irg_is_leaf = 0;
/* Prepend irn to the block's singly linked list kept in the link fields. */
927 set_irn_link(irn, save);
928 set_irn_link(bl, irn);
934 * Process all Call nodes inside a basic block.
935 * Note that the link field of the block must contain a linked list of all
936 * Call nodes inside the Block. We first order this list according to data dependency
937 * and that connect the calls together.
939 static void process_calls_in_block(ir_node *bl, void *data)
941 be_abi_irg_t *env = data;
942 ir_node *curr_sp = env->init_sp;
/* Collect the linked Call/Alloc/Free nodes of this block (built by
 * link_calls_in_block_walker()) into an array on the obstack. */
946 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
947 obstack_ptr_grow(&env->obst, irn);
949 /* If there were call nodes in the block. */
953 ir_node *copy = NULL;
956 nodes = obstack_finish(&env->obst);
958 /* order the call nodes according to data dependency */
959 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* Walk the sorted list backwards and thread the stack pointer through
 * each lowered node. */
961 for(i = n - 1; i >= 0; --i) {
962 ir_node *irn = nodes[i];
964 DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
965 switch(get_irn_opcode(irn)) {
967 curr_sp = adjust_call(env, irn, curr_sp, copy);
/* NOTE(review): the '©' below looks like a mis-encoded '&copy'
 * (address-of copy) — confirm and repair against the full source. */
970 curr_sp = adjust_alloc(env, irn, curr_sp, ©);
973 curr_sp = adjust_free(env, irn, curr_sp);
980 obstack_free(&env->obst, nodes);
982 /* Keep the last stack state in the block by tying it to Keep node */
984 keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
985 pmap_insert(env->keep_map, bl, keep);
988 set_irn_link(bl, curr_sp);
989 } /* process_calls_in_block */
992 * Adjust all call nodes in the graph to the ABI conventions.
994 static void process_calls(be_abi_irg_t *env)
996 ir_graph *irg = env->birg->irg;
/* Assume a leaf function until the walker sees a Call
 * (cleared in link_calls_in_block_walker()). */
998 env->call->flags.bits.irg_is_leaf = 1;
999 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
/* Heights are needed by dependent_on() while sorting the calls per block. */
1001 ir_heights = heights_new(env->birg->irg);
1002 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
1003 heights_free(ir_heights);
/* Build the function prologue: expand the stack frame from the incoming stack
 * pointer and, unless omitted, materialize and pin the frame pointer, then
 * install the new frame node in the irg. */
1007 static ir_node *setup_frame(be_abi_irg_t *env)
1009 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1010 const arch_register_t *sp = isa->sp;
1011 const arch_register_t *bp = isa->bp;
1012 be_abi_call_flags_bits_t flags = env->call->flags.bits;
1013 ir_graph *irg = env->birg->irg;
1014 ir_node *bl = get_irg_start_block(irg);
1015 ir_node *no_mem = get_irg_no_mem(irg);
1016 ir_node *old_frame = get_irg_frame(irg);
1017 ir_node *stack = pmap_get(env->regs, (void *) sp);
1018 ir_node *frame = pmap_get(env->regs, (void *) bp);
1020 int stack_nr = get_Proj_proj(stack);
1022 if(flags.try_omit_fp) {
/* No frame pointer: just expand the frame from the stack pointer. */
1023 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
/* Keep a copy of the incoming stack pointer as frame pointer. */
1028 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
1030 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
1031 if(!flags.fp_free) {
/* Pin the frame pointer to the ISA's bp register. */
1032 be_set_constr_single_reg(frame, -1, bp);
1033 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
1034 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
1037 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
1040 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
1041 env->init_sp = stack;
1042 set_irg_frame(irg, frame);
/* Redirect all users of the old frame node to the new one. */
1043 edges_reroute(old_frame, frame, irg);
/* Build the epilogue before a Return: release the stack frame (IncSP or SetSP
 * depending on frame pointer usage) and collect the values (sp, fp,
 * callee-saves, ignores) that the Return must carry. */
1048 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
1050 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1051 const arch_register_t *sp = isa->sp;
1052 const arch_register_t *bp = isa->bp;
1053 ir_graph *irg = env->birg->irg;
1054 ir_node *ret_mem = get_Return_mem(ret);
1055 ir_node *frame = get_irg_frame(irg);
1056 ir_node *bl = get_nodes_block(ret);
/* The block's link field holds the last stack state (see process_calls_in_block). */
1057 ir_node *stack = get_irn_link(bl);
1061 if(env->call->flags.bits.try_omit_fp) {
/* Without a frame pointer the frame is released by shrinking the SP. */
1062 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
/* With a frame pointer the SP is simply reset from the frame node. */
1066 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
1067 be_set_constr_single_reg(stack, -1, sp);
1068 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
1071 pmap_foreach(env->regs, ent) {
1072 const arch_register_t *reg = ent->key;
1073 ir_node *irn = ent->value;
/* NOTE(review): sp/bp grow onto &env->obst while callee-saves grow onto the
 * obst parameter — confirm both refer to the same obstack in the full source. */
1076 obstack_ptr_grow(&env->obst, stack);
1078 obstack_ptr_grow(&env->obst, frame);
1079 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1080 obstack_ptr_grow(obst, irn);
1087 * Computes the stack argument layout type.
1088 * Changes a possibly allocated value param type by moving
1089 * entities to the stack layout type.
1091 * @param env the ABI environment
1092 * @param call the current call ABI
1093 * @param method_type the method type
1094 * @param param_map an array mapping method arguments to the stack layout type
1096 * @return the stack argument layout type
1098 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, entity ***param_map)
/* Visit parameters in stack layout order: the direction depends on both the
 * argument order convention and the stack growth direction. */
1100 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1101 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
1102 int n = get_method_n_params(method_type);
1103 int curr = inc > 0 ? 0 : n - 1;
1109 ir_type *val_param_tp = get_method_value_param_type(method_type);
1110 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1113 *param_map = map = obstack_alloc(&env->obst, n * sizeof(entity *));
/* The stack argument area is modeled as a struct named "<function>.arg_type". */
1114 res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
1115 for (i = 0; i < n; ++i, curr += inc) {
1116 ir_type *param_type = get_method_param_type(method_type, curr);
1117 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
/* NOTE(review): the value-param entity and map are indexed with the loop
 * counter i while the parameter is visited at position curr — confirm this
 * is the intended mapping. */
1120 if (arg->on_stack) {
1122 /* the entity was already created, move it to the param type */
1123 arg->stack_ent = get_method_value_param_ent(method_type, i);
1124 remove_struct_member(val_param_tp, arg->stack_ent);
1125 set_entity_owner(arg->stack_ent, res);
1126 add_struct_member(res, arg->stack_ent);
1127 /* must be automatic to set a fixed layout */
1128 set_entity_allocation(arg->stack_ent, allocation_automatic);
1131 snprintf(buf, sizeof(buf), "param_%d", i);
1132 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
/* Lay the entity out: padding before, align, place, then advance. */
1134 ofs += arg->space_before;
1135 ofs = round_up2(ofs, arg->alignment);
1136 set_entity_offset_bytes(arg->stack_ent, ofs);
1137 ofs += arg->space_after;
1138 ofs += get_type_size_bytes(param_type);
1139 map[i] = arg->stack_ent;
1142 set_type_size_bytes(res, ofs);
1143 set_type_state(res, layout_fixed);
/* Insert a Perm per register class after the RegParams node, covering all
 * mapped non-ignore registers, to delimit their live ranges. */
1148 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1151 struct obstack obst;
1153 obstack_init(&obst);
1155 /* Create a Perm after the RegParams node to delimit it. */
1156 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1157 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
/* Collect the Projs of all registers of this class that take part. */
1162 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1163 const arch_register_t *reg = &cls->regs[j];
1164 ir_node *irn = pmap_get(regs, (void *) reg);
/* Only registers that have a Proj and are not 'ignore' participate. */
1166 if(irn && !arch_register_type_is(reg, ignore)) {
1168 obstack_ptr_grow(&obst, irn);
/* Remember the register in the link field for the loop below. */
1169 set_irn_link(irn, (void *) reg);
1173 obstack_ptr_grow(&obst, NULL);
1174 in = obstack_finish(&obst);
1176 perm = be_new_Perm(cls, irg, bl, n_regs, in);
1177 for(j = 0; j < n_regs; ++j) {
1178 ir_node *arg = in[j];
1179 arch_register_t *reg = get_irn_link(arg);
/* NOTE(review): 'arg' is the Perm input here; a Proj of the Perm is
 * presumably created on elided lines before this insert — confirm. */
1180 pmap_insert(regs, reg, arg);
1181 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1184 obstack_free(&obst, in);
1187 obstack_free(&obst, NULL);
/* Pairs a register with the node currently assigned to it (sorting helper). */
1192 const arch_register_t *reg;
/* qsort comparator: order reg/node pairs by register class, then by index.
 * NOTE(review): the class comparison subtracts pointers to distinct objects,
 * which is formally unspecified — works as identity ordering in practice. */
1196 static int cmp_regs(const void *a, const void *b)
1198 const reg_node_map_t *p = a;
1199 const reg_node_map_t *q = b;
1201 if(p->reg->reg_class == q->reg->reg_class)
1202 return p->reg->index - q->reg->index;
1204 return p->reg->reg_class - q->reg->reg_class;
/* Flatten a register->node pmap into a sorted array allocated on obst. */
1207 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1210 int n = pmap_count(reg_map);
1212 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1214 pmap_foreach(reg_map, ent) {
1215 res[i].reg = ent->key;
1216 res[i].irn = ent->value;
1220 qsort(res, n, sizeof(res[0]), cmp_regs);
1225 * Creates a barrier.
/* Builds a be_Barrier over all registers in 'regs' plus the memory, recreates
 * a Proj per register behind it (updating 'regs' and '*mem' in place) and
 * propagates ignore/modify_sp flags across the barrier. */
1227 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1229 ir_graph *irg = env->birg->irg;
1230 int n_regs = pmap_count(regs);
/* Inputs of the barrier: one per mapped register, plus the memory. */
1236 rm = reg_map_to_arr(&env->obst, regs);
1238 for(n = 0; n < n_regs; ++n)
1239 obstack_ptr_grow(&env->obst, rm[n].irn);
1242 obstack_ptr_grow(&env->obst, *mem);
1246 in = (ir_node **) obstack_finish(&env->obst);
1247 irn = be_new_Barrier(irg, bl, n, in);
/* NOTE(review): 'in' is freed here but in[n] is still read in the loop
 * below — possible use-after-free; confirm against the full source. */
1248 obstack_free(&env->obst, in);
1250 for(n = 0; n < n_regs; ++n) {
1251 const arch_register_t *reg = rm[n].reg;
1253 int pos = BE_OUT_POS(n);
1256 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1257 be_node_set_reg_class(irn, n, reg->reg_class);
1259 be_set_constr_single_reg(irn, n, reg);
1260 be_set_constr_single_reg(irn, pos, reg);
1261 be_node_set_reg_class(irn, pos, reg->reg_class);
1262 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1264 /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
1265 if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
1266 flags |= arch_irn_flags_ignore;
1268 if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
1269 flags |= arch_irn_flags_modify_sp;
1271 be_node_set_flags(irn, pos, flags);
/* The barrier's Proj replaces the old node in the register map. */
1273 pmap_insert(regs, (void *) reg, proj);
1277 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1280 obstack_free(&env->obst, rm);
1285 * Creates a be_Return for a Return node.
1287 * @param env the abi environment
1288 * @param irn the Return node or NULL if there was none
1289 * @param bl the block where the be_Return should be placed
1290 * @param mem the current memory
1291 * @param n_res number of return results
/* NOTE(review): elided in this chunk: declarations of stack/i/n/in/in_max/
   ret and the pmap iterator, the mem/sp slots in[0]/in[1] setup around
   line 1354, several closing braces and the final `return ret;`. */
1293 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
1294 be_abi_call_t *call = env->call;
1295 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1297 pmap *reg_map = pmap_create();
1298 ir_node *keep = pmap_get(env->keep_map, bl);
1304 const arch_register_t **regs;
1308 get the valid stack node in this block.
1309 If we had a call in that block there is a Keep constructed by process_calls()
1310 which points to the last stack modification in that block. we'll use
1311 it then. Else we use the stack from the start block and let
1312 the ssa construction fix the usage.
1314 stack = be_abi_reg_map_get(env->regs, isa->sp);
/* Detach the Keep: take over its stack value, then dead-code it by
   rerouting its block and operand to Bad. */
1316 ir_node *bad = new_r_Bad(env->birg->irg);
1317 stack = get_irn_n(keep, 0);
1318 set_nodes_block(keep, bad);
1319 set_irn_n(keep, 0, bad);
1320 // exchange(keep, new_r_Bad(env->birg->irg));
1323 /* Insert results for Return into the register map. */
1324 for(i = 0; i < n_res; ++i) {
1325 ir_node *res = get_Return_res(irn, i);
1326 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1327 assert(arg->in_reg && "return value must be passed in register");
1328 pmap_insert(reg_map, (void *) arg->reg, res);
1331 /* Add uses of the callee save registers. */
1332 pmap_foreach(env->regs, ent) {
1333 const arch_register_t *reg = ent->key;
1334 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1335 pmap_insert(reg_map, ent->key, ent->value);
1338 be_abi_reg_map_set(reg_map, isa->sp, stack);
1340 /* Make the Epilogue node and call the arch's epilogue maker. */
1341 create_barrier(env, bl, &mem, reg_map, 1);
1342 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1345 Maximum size of the in array for Return nodes is
1346 return args + callee save/ignore registers + memory + stack pointer
1348 in_max = pmap_count(reg_map) + n_res + 2;
1350 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1351 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1354 in[1] = be_abi_reg_map_get(reg_map, isa->sp);
1359 /* clear SP entry, since it has already been grown. */
1360 pmap_insert(reg_map, (void *) isa->sp, NULL);
1361 for(i = 0; i < n_res; ++i) {
1362 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1364 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1365 regs[n++] = arg->reg;
1367 /* Clear the map entry to mark the register as processed. */
1368 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1371 /* grow the rest of the stuff. */
1372 pmap_foreach(reg_map, ent) {
1375 regs[n++] = ent->key;
1379 /* The in array for the new back end return is now ready. */
1380 ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
1382 /* Set the register classes of the return's parameter accordingly. */
1383 for(i = 0; i < n; ++i)
1385 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1387 /* Free the space of the Epilog's in array and the register <-> proj map. */
1388 obstack_free(&env->obst, in);
1389 pmap_destroy(reg_map);
/* Walker environment for lower_frame_sels_walker().
   NOTE(review): the `be_abi_irg_t *env` member (dereferenced by the
   walker as ctx->env) is elided from this chunk. */
1394 typedef struct lower_frame_sels_env_t {
1396 entity *value_param_list; /**< the list of all value param entities */
1397 } lower_frame_sels_env_t;
1400 * Walker: Replaces Sels of frame type and
1401 * value param type entities by FrameAddress.
/* NOTE(review): elided in this chunk: the is_Sel(irn) guard, the
   declaration of `nw` and the exchange(irn, nw) replacing the Sel. */
1403 static void lower_frame_sels_walker(ir_node *irn, void *data)
1405 lower_frame_sels_env_t *ctx = data;
1408 ir_graph *irg = current_ir_graph;
1409 ir_node *frame = get_irg_frame(irg);
1410 ir_node *param_base = get_irg_value_param_base(irg);
1411 ir_node *ptr = get_Sel_ptr(irn);
/* Only Sels based on the frame or the value-param base are rewritten. */
1413 if (ptr == frame || ptr == param_base) {
1414 be_abi_irg_t *env = ctx->env;
1415 entity *ent = get_Sel_entity(irn);
1416 ir_node *bl = get_nodes_block(irn);
1419 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
1422 /* check, if it's a param sel and if have not seen this entity immediately before */
/* Thread the seen value-param entities through their entity links. */
1423 if (ptr == param_base && ctx->value_param_list != ent) {
1424 set_entity_link(ent, ctx->value_param_list);
1425 ctx->value_param_list = ent;
1432 * Check if a value parameter is transmitted as a register.
1433 * This might happen if the address of an parameter is taken which is
1434 * transmitted in registers.
1436 * Note that on some architectures this case must be handled specially
1437 * because the place of the backing store is determined by their ABI.
1439 * In the default case we move the entity to the frame type and create
1440 * a backing store into the first block.
/* NOTE(review): elided in this chunk: initialization of new_list, the
   in_reg filter selecting which params need a backing store, the guard
   that skips the graph rewrite when new_list is empty, declarations of
   addr/frame_tp/offset, first_bl assignment inside the succ loop,
   alignment rounding, and closing braces. */
1442 static void fix_address_of_parameter_access(be_abi_irg_t *env, entity *value_param_list) {
1443 be_abi_call_t *call = env->call;
1444 ir_graph *irg = env->birg->irg;
1445 entity *ent, *next_ent, *new_list;
1447 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
/* Collect the value-param entities that need a backing store. */
1450 for (ent = value_param_list; ent; ent = next_ent) {
1451 int i = get_struct_member_index(get_entity_owner(ent), ent);
1452 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1454 next_ent = get_entity_link(ent);
1456 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
1457 set_entity_link(ent, new_list);
1462 /* ok, change the graph */
1463 ir_node *start_bl = get_irg_start_block(irg);
1464 ir_node *first_bl = NULL;
1465 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1466 const ir_edge_t *edge;
1467 optimization_state_t state;
/* Find the first real block after the start block. */
1470 foreach_block_succ(start_bl, edge) {
1471 ir_node *succ = get_edge_src_irn(edge);
1472 if (start_bl != succ) {
1478 /* we had already removed critical edges, so the following
1479 assertion should be always true. */
1480 assert(get_Block_n_cfgpreds(first_bl) == 1);
1482 /* now create backing stores */
1483 frame = get_irg_frame(irg);
1484 imem = get_irg_initial_mem(irg);
/* Build the replacement memory Proj with optimizations off so it is
   not CSE'd away before the reroute below. */
1486 save_optimization_state(&state);
1488 nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
1489 restore_optimization_state(&state);
1491 /* reroute all edges to the new memory source */
1492 edges_reroute(imem, nmem, irg);
1496 args = get_irg_args(irg);
1497 args_bl = get_nodes_block(args);
/* Emit one Store per collected parameter entity into the frame. */
1498 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1499 int i = get_struct_member_index(get_entity_owner(ent), ent);
1500 ir_type *tp = get_entity_type(ent);
1501 ir_mode *mode = get_type_mode(tp);
1504 /* address for the backing store */
1505 addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
1508 mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
1510 /* the backing store itself */
1511 store = new_r_Store(irg, first_bl, mem, addr,
1512 new_r_Proj(irg, args_bl, args, mode, i));
1514 /* the new memory Proj gets the last Proj from store */
1515 set_Proj_pred(nmem, store);
1516 set_Proj_proj(nmem, pn_Store_M);
1518 /* move all entities to the frame type */
1519 frame_tp = get_irg_frame_type(irg);
1520 offset = get_type_size_bytes(frame_tp);
1521 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1522 ir_type *tp = get_entity_type(ent);
1523 int align = get_type_alignment_bytes(tp);
1525 offset += align - 1;
1527 set_entity_owner(ent, frame_tp);
1528 add_class_member(frame_tp, ent);
1529 /* must be automatic to set a fixed layout */
1530 set_entity_allocation(ent, allocation_automatic);
1531 set_entity_offset_bytes(ent, offset);
1532 offset += get_type_size_bytes(tp);
1534 set_type_size_bytes(frame_tp, offset);
1539 * Modify the irg itself and the frame type.
/* Main ABI transformation of one graph: lowers frame Sels, builds the
   stack frame layout, creates the RegParams node with pinned Projs for
   all argument/callee-save/ignore registers, emits prologue + start
   barrier + IncSP, replaces stack-passed arguments by StackParam/
   FrameAddr nodes, and converts all Returns to be_Returns.
   NOTE(review): elided in this chunk: declarations of i/j/n/n_params/
   args/arg_tuple/param_map/rm/nr/proj/flags/barrier, storage of args[nr]
   in the arg-vector loop, Proj-number selection before line 1661,
   kill of the old arg tuple, and closing braces. */
1541 static void modify_irg(be_abi_irg_t *env)
1543 be_abi_call_t *call = env->call;
1544 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1545 const arch_register_t *sp = arch_isa_sp(isa);
1546 ir_graph *irg = env->birg->irg;
1547 ir_node *bl = get_irg_start_block(irg);
1548 ir_node *end = get_irg_end_block(irg);
1549 ir_node *mem = get_irg_initial_mem(irg);
1550 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1551 pset *dont_save = pset_new_ptr(8);
1557 const arch_register_t *fp_reg;
1558 ir_node *frame_pointer;
1560 ir_node *reg_params_bl;
1563 const ir_edge_t *edge;
1564 ir_type *arg_type, *bet_type;
1565 lower_frame_sels_env_t ctx;
1568 bitset_t *used_proj_nr;
1569 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1571 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1573 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1575 ctx.value_param_list = NULL;
1576 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1578 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1579 env->regs = pmap_create();
1581 used_proj_nr = bitset_alloca(1024);
1582 n_params = get_method_n_params(method_type);
1583 args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
1584 memset(args, 0, n_params * sizeof(args[0]));
1586 /* Check if a value parameter is transmitted as a register.
1587 * This might happen if the address of an parameter is taken which is
1588 * transmitted in registers.
1590 * Note that on some architectures this case must be handled specially
1591 * because the place of the backing store is determined by their ABI.
1593 * In the default case we move the entity to the frame type and create
1594 * a backing store into the first block.
1596 fix_address_of_parameter_access(env, ctx.value_param_list);
1598 /* Fill the argument vector */
1599 arg_tuple = get_irg_args(irg);
1600 foreach_out_edge(arg_tuple, edge) {
1601 ir_node *irn = get_edge_src_irn(edge);
1602 int nr = get_Proj_proj(irn);
1604 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
/* NOTE(review): mis-encoded character below — "¶m_map" is mojibake
   for "&param_map"; repair against the pristine file. */
1607 arg_type = compute_arg_type(env, call, method_type, ¶m_map);
1608 bet_type = call->cb->get_between_type(env->cb);
1609 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
1611 /* Count the register params and add them to the number of Projs for the RegParams node */
1612 for(i = 0; i < n_params; ++i) {
1613 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1614 if(arg->in_reg && args[i]) {
1615 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1616 assert(i == get_Proj_proj(args[i]));
1618 /* For now, associate the register with the old Proj from Start representing that argument. */
1619 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1620 bitset_set(used_proj_nr, i);
1621 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1625 /* Collect all callee-save registers */
1626 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1627 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1628 for(j = 0; j < cls->n_regs; ++j) {
1629 const arch_register_t *reg = &cls->regs[j];
1630 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1631 pmap_insert(env->regs, (void *) reg, NULL);
/* SP and BP always get entries, even if not callee-save by type. */
1635 pmap_insert(env->regs, (void *) sp, NULL);
1636 pmap_insert(env->regs, (void *) isa->bp, NULL);
1637 reg_params_bl = get_irg_start_block(irg);
1638 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1639 add_irn_dep(env->reg_params, get_irg_start(irg));
1642 * make proj nodes for the callee save registers.
1643 * memorize them, since Return nodes get those as inputs.
1645 * Note, that if a register corresponds to an argument, the regs map contains
1646 * the old Proj from start for that argument.
1649 rm = reg_map_to_arr(&env->obst, env->regs);
1650 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1651 arch_register_t *reg = (void *) rm[i].reg;
1652 ir_node *arg_proj = rm[i].irn;
1653 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1655 int pos = BE_OUT_POS((int) nr);
1661 bitset_set(used_proj_nr, nr);
1662 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1663 pmap_insert(env->regs, (void *) reg, proj);
1664 be_set_constr_single_reg(env->reg_params, pos, reg);
1665 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1668 * If the register is an ignore register,
1669 * The Proj for that register shall also be ignored during register allocation.
1671 if(arch_register_type_is(reg, ignore))
1672 flags |= arch_irn_flags_ignore;
1675 flags |= arch_irn_flags_modify_sp;
1677 be_node_set_flags(env->reg_params, pos, flags);
1679 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1681 obstack_free(&env->obst, rm);
1683 /* Generate the Prologue */
1684 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1686 /* do the stack allocation BEFORE the barrier, or spill code
1687 might be added before it */
1688 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1689 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
1690 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1692 env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
1694 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1695 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1697 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1698 set_irg_frame(irg, frame_pointer);
1699 pset_insert_ptr(env->ignore_regs, fp_reg);
1701 /* Now, introduce stack param nodes for all parameters passed on the stack */
1702 for(i = 0; i < n_params; ++i) {
1703 ir_node *arg_proj = args[i];
1704 ir_node *repl = NULL;
1706 if(arg_proj != NULL) {
1707 be_abi_call_arg_t *arg;
1708 ir_type *param_type;
1709 int nr = get_Proj_proj(arg_proj);
1711 nr = MIN(nr, n_params);
1712 arg = get_call_arg(call, 0, nr);
1713 param_type = get_method_param_type(method_type, nr);
/* Register argument: its replacement is the RegParams Proj. */
1716 repl = pmap_get(env->regs, (void *) arg->reg);
1719 else if(arg->on_stack) {
1720 /* For atomic parameters which are actually used, we create a StackParam node. */
1721 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1722 ir_mode *mode = get_type_mode(param_type);
1723 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1724 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1727 /* The stack parameter is not primitive (it is a struct or array),
1728 we thus will create a node representing the parameter's address
1731 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1735 assert(repl != NULL);
1736 edges_reroute(args[i], repl, irg);
1740 /* All Return nodes hang on the End node, so look for them there. */
1741 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
1742 ir_node *irn = get_Block_cfgpred(end, i);
1744 if (is_Return(irn)) {
1745 ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
1749 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return than,
1750 the code is dead and will never be executed. */
1752 del_pset(dont_save);
1753 obstack_free(&env->obst, args);
/**
 * Allocates and initializes the ABI environment for one backend graph:
 * sets up the call ABI, lowers calls, runs modify_irg(), reroutes the
 * dummy stack pointer and registers DCE survivors and the irn handler.
 * NOTE(review): elided in this chunk: the `env->birg = birg;` style
 * initialization (env->birg is used at line 1815), the declaration of
 * `dummy`, the process_calls()/modify_irg() invocations behind the
 * comments at lines 1791/1800, and the final `return env;`.
 */
1756 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1758 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1759 ir_node *old_frame = get_irg_frame(birg->irg);
1760 ir_graph *irg = birg->irg;
1764 optimization_state_t state;
1766 be_omit_fp = birg->main_env->options->omit_fp;
1768 obstack_init(&env->obst);
1770 env->isa = birg->main_env->arch_env->isa;
1771 env->method_type = get_entity_type(get_irg_entity(irg));
1772 env->call = be_abi_call_new();
1773 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1775 env->ignore_regs = pset_new_ptr_default();
1776 env->keep_map = pmap_create();
1777 env->dce_survivor = new_survive_dce();
1779 env->stack_phis = pset_new_ptr(16);
1780 /* Beware: later we replace this node by the real one, ensure it is not CSE'd
1781 to another Unknown or the stack pointer gets used */
1782 save_optimization_state(&state);
1784 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1785 restore_optimization_state(&state);
1786 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
1788 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1789 env->irn_ops.impl = &abi_irn_ops;
1791 /* Lower all call nodes in the IRG. */
1795 Beware: init backend abi call object after processing calls,
1796 otherwise some information might be not yet available.
1798 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1800 /* Process the IRG */
1803 /* We don't need the keep map anymore. */
1804 pmap_destroy(env->keep_map);
1806 /* reroute the stack origin of the calls to the true stack origin. */
1807 edges_reroute(dummy, env->init_sp, irg);
1808 edges_reroute(old_frame, get_irg_frame(irg), irg);
1810 /* Make some important node pointers survive the dead node elimination. */
1811 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1812 pmap_foreach(env->regs, ent)
1813 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1815 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
1817 env->call->cb->done(env->cb);
1822 void be_abi_free(be_abi_irg_t *env)
1824 free_survive_dce(env->dce_survivor);
1825 del_pset(env->stack_phis);
1826 del_pset(env->ignore_regs);
1827 pmap_destroy(env->regs);
1828 obstack_free(&env->obst, NULL);
1829 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1833 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
1835 arch_register_t *reg;
1837 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
1838 if(reg->reg_class == cls)
1839 bitset_set(bs, reg->index);
1842 /* Returns the stack layout from an ABI environment.
        NOTE(review): the function body is elided in this chunk; given
        the be_stack_layout_t *frame member it presumably returns
        abi->frame -- confirm against the full file. */
1843 const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
1850 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1851 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1852 | _| | |> < ___) | || (_| | (__| <
1853 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
1857 struct fix_stack_walker_info {
1859 const arch_env_t *aenv;
1863 * Walker. Collect all stack modifying nodes.
/* NOTE(review): four interior lines are elided here (likely the opening
   brace and blank/comment lines); code kept verbatim. */
1865 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1867 struct fix_stack_walker_info *info = data;
1872 if (arch_irn_is(info->aenv, irn, modify_sp)) {
/* Stack modifiers must yield a single data value, never memory/tuple. */
1873 assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T);
1874 pset_insert_ptr(info->nodes, irn);
1878 void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
1880 dom_front_info_t *df;
1881 pset *stack_nodes = pset_new_ptr(16);
1882 struct fix_stack_walker_info info;
1884 info.nodes = stack_nodes;
1885 info.aenv = env->birg->main_env->arch_env;
1887 /* We need dominance frontiers for fix up */
1888 df = be_compute_dominance_frontiers(env->birg->irg);
1889 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
1890 pset_insert_ptr(stack_nodes, env->init_sp);
1891 be_ssa_constr_set_phis(df, lv, stack_nodes, env->stack_phis);
1892 del_pset(stack_nodes);
1894 /* free these dominance frontiers */
1895 be_free_dominance_frontiers(df);
/* Walks the schedule of one block, assigning every frame-entity node its
   real offset (entity offset + incoming bias) and materializing the
   symbolic IncSP frame-size constants; returns the bias at block end.
   NOTE(review): elided in this chunk: the `irn` declaration, the guard
   around the entity handling, the `omit_fp` use, the `bias += ofs;`
   accumulation and the final `return bias;`. */
1898 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1900 const arch_env_t *arch_env = env->birg->main_env->arch_env;
1901 int omit_fp = env->call->flags.bits.try_omit_fp;
1904 sched_foreach(bl, irn) {
1907 Check, if the node relates to an entity on the stack frame.
1908 If so, set the true offset (including the bias) for that
1911 entity *ent = arch_get_frame_entity(arch_env, irn);
1913 int offset = get_stack_entity_offset(env->frame, ent, bias);
1914 arch_set_frame_offset(arch_env, irn, offset);
1915 DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
1919 If the node modifies the stack pointer by a constant offset,
1920 record that in the bias.
1922 if(arch_irn_is(arch_env, irn, modify_sp)) {
1923 int ofs = arch_get_sp_bias(arch_env, irn);
/* Resolve the symbolic expand/shrink markers to the real frame size. */
1925 if(be_is_IncSP(irn)) {
1926 if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
1927 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1928 be_set_IncSP_offset(irn, ofs);
1929 } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
1930 ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1931 be_set_IncSP_offset(irn, ofs);
1944 * A helper struct for the bias walker.
/* NOTE(review): the `struct bias_walk {` opener and closing brace are
   elided from this chunk; members kept verbatim. */
1947 be_abi_irg_t *env; /**< The ABI irg environment. */
1948 int start_block_bias; /**< The bias at the end of the start block. */
1949 ir_node *start_block; /**< The start block of the current graph. */
1953 * Block-Walker: fix all stack offsets
1955 static void stack_bias_walker(ir_node *bl, void *data)
1957 struct bias_walk *bw = data;
1958 if (bl != bw->start_block) {
1959 process_stack_bias(bw->env, bl, bw->start_block_bias);
1963 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1965 ir_graph *irg = env->birg->irg;
1966 struct bias_walk bw;
1968 stack_frame_compute_initial_offset(env->frame);
1969 // stack_layout_dump(stdout, env->frame);
1971 /* Determine the stack bias at the end of the start block. */
1972 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1974 /* fix the bias is all other blocks */
1976 bw.start_block = get_irg_start_block(irg);
1977 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1980 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1982 assert(arch_register_type_is(reg, callee_save));
1983 assert(pmap_contains(abi->regs, (void *) reg));
1984 return pmap_get(abi->regs, (void *) reg);
1987 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1989 assert(arch_register_type_is(reg, ignore));
1990 assert(pmap_contains(abi->regs, (void *) reg));
1991 return pmap_get(abi->regs, (void *) reg);
1994 ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi)
1996 return abi->start_barrier;
2000 _____ _____ _ _ _ _ _ _
2001 |_ _| __ \| \ | | | | | | | | |
2002 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
2003 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
2004 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
2005 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
2007 for Phi nodes which are created due to stack modifying nodes
2008 such as IncSP, AddSP and SetSP.
2010 These Phis are always to be ignored by the reg alloc and are
2011 fixed on the SP register of the ISA.
2014 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
2016 const be_abi_irg_t *abi = get_abi_from_handler(handler);
2017 const void *res = NULL;
2019 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
2020 res = &abi->irn_ops;
2025 static void be_abi_limited(void *data, bitset_t *bs)
2027 be_abi_irg_t *abi = data;
2028 bitset_clear_all(bs);
2029 bitset_set(bs, abi->isa->sp->index);
2032 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
2034 be_abi_irg_t *abi = get_abi_from_ops(self);
2035 const arch_register_t *reg = abi->isa->sp;
2037 memset(req, 0, sizeof(req[0]));
2039 if(pos == BE_OUT_POS(0)) {
2040 req->cls = reg->reg_class;
2041 req->type = arch_register_req_type_limited;
2042 req->limited = be_abi_limited;
2043 req->limited_env = abi;
2046 else if(pos >= 0 && pos < get_irn_arity(irn)) {
2047 req->cls = reg->reg_class;
2048 req->type = arch_register_req_type_normal;
/* Register setter for stack-fix Phis.
   NOTE(review): the body is elided in this chunk; since these Phis are
   fixed on the SP register (see abi_get_irn_reg), this is presumably an
   intentional no-op -- confirm against the full file. */
2054 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
2058 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
2060 const be_abi_irg_t *abi = get_abi_from_ops(self);
2061 return abi->isa->sp;
2064 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
2066 return arch_irn_class_normal;
2069 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
2071 return arch_irn_flags_ignore | arch_irn_flags_modify_sp;
/* Frame-entity / frame-offset / SP-bias callbacks for stack-fix Phis.
   NOTE(review): all four bodies are elided in this chunk; they are
   presumably trivial (return NULL, two no-ops, return 0) since such
   Phis carry no frame entity -- confirm against the full file. */
2074 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
2079 static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent)
2083 static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias)
2087 static int abi_get_sp_bias(const void *self, const ir_node *irn)
/* Vtable wiring the stack-fix Phi callbacks into the arch interface.
   NOTE(review): several initializer entries (set_irn_reg, get_irn_reg,
   classify, get_flags, get_sp_bias) and the closing brace are elided
   from this chunk. */
2092 static const arch_irn_ops_if_t abi_irn_ops = {
2093 abi_get_irn_reg_req,
2098 abi_get_frame_entity,
2099 abi_set_frame_entity,
2100 abi_set_frame_offset,
2102 NULL, /* get_inverse */
2103 NULL, /* get_op_estimated_cost */
2104 NULL, /* possible_memory_operand */
2105 NULL, /* perform_memory_operand */
/* Handler routing stack-fix Phis to abi_irn_ops (via abi_get_irn_ops);
   pushed in be_abi_introduce(), popped in be_abi_free().
   NOTE(review): the initializer body is elided from this chunk. */
2108 static const arch_irn_handler_t abi_irn_handler = {
2113 * Returns non-zero if the ABI has omitted the frame pointer in
2114 * the current graph.
/* Reads the try_omit_fp flag recorded on the ABI call description. */
2116 int be_abi_omit_fp(const be_abi_irg_t *abi) {
2117 return abi->call->flags.bits.try_omit_fp;