4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
30 #include "raw_bitset.h"
37 #include "besched_t.h"
/**
 * Description of a single ABI call argument or result.
 * Instances are stored in the call's parameter set, keyed by
 * (is_res, pos) — see cmp_call_arg() / get_or_set_call_arg().
 * (Fragmentary view: further fields such as pos/alignment/space_after
 * are used below but not visible here.)
 */
typedef struct _be_abi_call_arg_t {
	unsigned is_res   : 1;  /**< 1: the call argument is a return value. 0: it's a call parameter. */
	unsigned in_reg   : 1;  /**< 1: this argument is transmitted in registers. */
	unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */

	const arch_register_t *reg;  /**< Register carrying this argument/result (used when passed in a register). */
	unsigned space_before;       /**< Padding in bytes placed before this argument on the stack. */
/**
 * An ABI call description: flags, backend callbacks and per-argument
 * information. (Fragmentary view: more fields, e.g. the params set,
 * are used below but not visible here.)
 */
struct _be_abi_call_t {
	be_abi_call_flags_t flags;              /**< Flags describing the call (see be_abi_call_set_flags()). */
	const be_abi_callbacks_t *cb;           /**< ABI callbacks, set together with the flags. */
	ir_type *between_type;                  /**< The "between type": data the call pushes between frame and arguments. */

	const arch_register_class_t *cls_addr;  /**< Register class used for the call address. */
/** Per-irg ABI environment, threaded through all lowering phases below. */
struct _be_abi_irg_t {
	be_stack_layout_t *frame;    /**< The stack frame model. */
	be_irg_t *birg;              /**< The back end IRG. */
	const arch_isa_t *isa;       /**< The isa. */
	survive_dce_t *dce_survivor; /**< Bookkeeping so nodes survive dead code elimination — NOTE(review): inferred from type name. */

	be_abi_call_t *call;         /**< The ABI call information. */
	ir_type *method_type;        /**< The type of the method of the IRG. */

	ir_node *init_sp;            /**< The node representing the stack pointer
	                                  at the start of the function. */

	ir_node *start_barrier;      /**< The barrier of the start block */

	ir_node *reg_params;         /**< The reg params node. */
	pmap *regs;                  /**< A map of all callee-save and ignore regs to
	                                  their Projs to the RegParams node. */

	int start_block_bias;        /**< The stack bias at the end of the start block. */

	void *cb;                    /**< ABI Callback self pointer. */

	pmap *keep_map;              /**< mapping blocks to keep nodes. */
	pset *ignore_regs;           /**< Additional registers which shall be ignored. */

	ir_node **calls;             /**< flexible array containing all be_Call nodes */

	arch_register_req_t sp_req;     /**< Register requirement for the stack pointer — NOTE(review): inferred from name. */
	arch_register_req_t sp_cls_req; /**< Register-class-only requirement for the stack pointer — NOTE(review): inferred from name. */

	DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/* Recover the enclosing be_abi_irg_t from its embedded handler/ops member. */
#define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
#define get_abi_from_ops(ptr)     firm_container_of(ptr, be_abi_irg_t, irn_ops)

/* Forward declarations, since we need them in be_abi_introduce(). */
static const arch_irn_ops_if_t abi_irn_ops;
static const arch_irn_handler_t abi_irn_handler;

/* Heights of the current graph; used to order calls by data dependency
   (see dependent_on() / process_calls()). */
static heights_t *ir_heights;

/* Flag: if set, try to omit the frame pointer if called by the backend */
static int be_omit_fp = 1;
110 _ ____ ___ ____ _ _ _ _
111 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
112 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
113 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
114 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
116 These callbacks are used by the backend to set the parameters
117 for a specific call type.
121 * Set compare function: compares two ABI call object arguments.
123 static int cmp_call_arg(const void *a, const void *b, size_t n)
125 const be_abi_call_arg_t *p = a, *q = b;
126 return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Get or set an ABI call object argument.
 *
 * @param call      the abi call
 * @param is_res    true for call results, false for call arguments
 * @param pos       position of the argument
 * @param do_insert true if the argument is set, false if it's retrieved
 *
 * @return the argument descriptor; NULL when retrieving an argument
 *         that was never set (cf. the NULL check in is_on_stack())
 */
static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
	be_abi_call_arg_t arg;

	/* zero the template so unset fields are in a defined state */
	memset(&arg, 0, sizeof(arg));

	/* cheap hash over the (is_res, pos) key used by cmp_call_arg() */
	hash = is_res * 128 + pos;

		? set_insert(call->params, &arg, sizeof(arg), hash)
		: set_find(call->params, &arg, sizeof(arg), hash);
/**
 * Retrieve an ABI call object argument.
 *
 * @param call   the ABI call object
 * @param is_res true for call results, false for call arguments
 * @param pos    position of the argument
 *
 * @return the argument descriptor, or NULL if it was never set
 */
static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
	/* do_insert = 0: pure lookup, never inserts */
	return get_or_set_call_arg(call, is_res, pos, 0);
/**
 * Set the flags for a call.
 *
 * @param call  the ABI call object
 * @param flags the flags describing the call
 * @param cb    the ABI callbacks to associate with the call
 */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/**
 * Set the register class used for the call address.
 *
 * @param call the ABI call object
 * @param cls  the register class for the call address
 */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
	call->cls_addr = cls;
180 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
182 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
184 arg->alignment = alignment;
185 arg->space_before = space_before;
186 arg->space_after = space_after;
187 assert(alignment > 0 && "Alignment must be greater than 0");
/**
 * Record that call parameter @p arg_pos is passed in register @p reg.
 */
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/**
 * Record that call result @p arg_pos is returned in register @p reg.
 */
void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/** Get the flags of an ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/**
 * Constructor for a new ABI call object.
 *
 * @param cls_addr the register class to use for the call address
 * @return the new ABI call object
 */
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
	be_abi_call_t *call = xmalloc(sizeof(call[0]));

	/* 16: initial size hint for the per-argument set */
	call->params = new_set(cmp_call_arg, 16);

	call->cls_addr = cls_addr;

	/* inherit the global frame pointer omission policy */
	call->flags.bits.try_omit_fp = be_omit_fp;
/**
 * Destructor for an ABI call object.
 */
static void be_abi_call_free(be_abi_call_t *call)
	del_set(call->params);
240 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
241 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
242 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
243 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
246 Handling of the stack frame. It is composed of three types:
247 1) The type of the arguments which are pushed on the stack.
248 2) The "between type" which consists of stuff the call of the
249 function pushes on the stack (like the return address and
250 the old base pointer for ia32).
251 3) The Firm frame type which consists of all local variables
/**
 * Computes the offset of a frame entity within the overall stack layout,
 * accumulating the sizes of all frame types ordered below the entity's
 * owner and correcting by the initial frame pointer position and a bias.
 *
 * @param frame the stack frame layout
 * @param ent   the frame entity
 * @param bias  the current stack bias to apply
 * @return the resulting offset of the entity
 */
static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent, int bias)
	ir_type *t = get_entity_owner(ent);
	int ofs = get_entity_offset(ent);

	/* Find the type the entity is contained in. */
	for(index = 0; index < N_FRAME_TYPES; ++index) {
		if(frame->order[index] == t)

	/* Add the size of all the types below the one of the entity to the entity's offset */
	for(i = 0; i < index; ++i)
		ofs += get_type_size_bytes(frame->order[i]);

	/* correct the offset by the initial position of the frame pointer */
	ofs -= frame->initial_offset;

	/* correct the offset with the current bias. */
/**
 * Retrieve the entity with given offset from a frame type.
 *
 * @param t      the compound type to search
 * @param offset the byte offset to look for
 * @return the first member entity at that offset, or NULL if none matches
 *         (callers check for NULL, see stack_frame_compute_initial_offset())
 */
static ir_entity *search_ent_with_offset(ir_type *t, int offset)
	/* linear scan over all members of the compound type */
	for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
		ir_entity *ent = get_compound_member(t, i);
		if(get_entity_offset(ent) == offset)
/**
 * Computes and caches the initial offset of the stack frame: the layout
 * offset of the entity at offset 0 in the base type (the between type for
 * downwards-growing stacks, the frame type otherwise).
 */
static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
	ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
	ir_entity *ent = search_ent_with_offset(base, 0);

	/* cache the result; 0 if the base type has no entity at offset 0 */
	frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;

	return frame->initial_offset;
/**
 * Initializes the frame layout from parts
 *
 * @param frame     the stack layout that will be initialized
 * @param args      the stack argument layout type
 * @param between   the between layout type
 * @param locals    the method frame type
 * @param stack_dir the stack direction
 * @param param_map an array mapping method argument positions to the stack argument type
 *
 * @return the initialized stack layout
 */
static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
                                           ir_type *between, ir_type *locals, int stack_dir,
                                           ir_entity *param_map[])
	frame->arg_type = args;
	frame->between_type = between;
	frame->frame_type = locals;
	frame->initial_offset = 0;
	frame->stack_dir = stack_dir;
	/* the between type always sits between arguments and locals */
	frame->order[1] = between;
	frame->param_map = param_map;

	/* one branch of the (elided) stack-direction check: args first */
	frame->order[0] = args;
	frame->order[2] = locals;

	/* the other branch: locals first */
	frame->order[0] = locals;
	frame->order[2] = args;
/** Dumps the stack layout to file. */
static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
	ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
	/* print every frame type in layout order, with all member entities */
	for (j = 0; j < N_FRAME_TYPES; ++j) {
		ir_type *t = frame->order[j];

		ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
		for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
			ir_entity *ent = get_compound_member(t, i);
			/* "int ofs": offset inside the owning type; "glob ofs": offset in the whole layout */
			ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
362 * Returns non-zero if the call argument at given position
363 * is transfered on the stack.
365 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
367 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
368 return arg && !arg->in_reg;
378 Adjustment of the calls inside a graph.
383 * Transform a call node.
384 * @param env The ABI environment for the current irg.
385 * @param irn The call node.
386 * @param curr_sp The stack pointer node to use.
387 * @return The stack pointer after the call.
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
	ir_graph *irg = env->birg->irg;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	ir_type *mt = get_Call_type(irn);
	ir_node *call_ptr = get_Call_ptr(irn);
	int n_params = get_method_n_params(mt);
	ir_node *curr_mem = get_Call_mem(irn);
	ir_node *bl = get_nodes_block(irn);
	pset *results = pset_new_ptr(8);
	pset *caller_save = pset_new_ptr(8);
	pset *states = pset_new_ptr(2);
	int stack_dir = arch_isa_stack_dir(isa);
	const arch_register_t *sp = arch_isa_sp(isa);
	be_abi_call_t *call = be_abi_call_new(sp->reg_class);
	ir_mode *mach_mode = sp->reg_class->mode;
	struct obstack *obst = &env->obst;
	int no_alloc = call->flags.bits.frame_is_setup_on_call;

	ir_node *res_proj = NULL;
	int curr_res_proj = pn_Call_max;

	const arch_register_t *reg;
	const ir_edge_t *edge;

	/* Let the isa fill out the abi description for that call node. */
	arch_isa_get_call_abi(isa, mt, call);

	/* Insert code to put the stack arguments on the stack. */
	assert(get_Call_n_params(irn) == n_params);
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
			/* accumulate padding + value size, each rounded up to the
			   argument's alignment; remember the position on the obstack */
			int arg_size = get_type_size_bytes(get_method_param_type(mt, i));

			stack_size += round_up2(arg->space_before, arg->alignment);
			stack_size += round_up2(arg_size, arg->alignment);
			stack_size += round_up2(arg->space_after, arg->alignment);
			obstack_int_grow(obst, i);
	pos = obstack_finish(obst);

	/* Collect all arguments which are passed in registers. */
	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg && arg->in_reg) {
			obstack_int_grow(obst, i);
	low_args = obstack_finish(obst);

	/* If there are some parameters which shall be passed on the stack. */
		int do_seq = call->flags.bits.store_args_sequential && !no_alloc;

		/*
		 * Reverse list of stack parameters if call arguments are from left to right.
		 * We must reverse them again if they are pushed (not stored) and the stack
		 * direction is downwards.
		 */
		if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
			for (i = 0; i < n_pos >> 1; ++i) {
				int other = n_pos - i - 1;

		/*
		 * If the stack is decreasing and we do not want to store sequentially,
		 * or someone else allocated the call frame
		 * we allocate as much space on the stack all parameters need, by
		 * moving the stack pointer along the stack's direction.
		 */
		if(stack_dir < 0 && !do_seq && !no_alloc) {
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
				add_irn_dep(curr_sp, alloca_copy);

			obstack_ptr_grow(obst, get_Call_mem(irn));
			curr_mem = new_NoMem();

			curr_mem = get_Call_mem(irn);

		/* store every stack argument at its offset */
		for(i = 0; i < n_pos; ++i) {
			be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
			ir_node *param = get_Call_param(irn, p);
			ir_node *addr = curr_sp;

			ir_type *param_type = get_method_param_type(mt, p);
			int param_size = get_type_size_bytes(param_type) + arg->space_after;

			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next must be incremented,
			 * and the memory value propagated.
			 */
				addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
					add_irn_dep(curr_sp, alloca_copy);

					add_irn_dep(curr_sp, curr_mem);

				curr_ofs += arg->space_before;
				curr_ofs = round_up2(curr_ofs, arg->alignment);

				/* Make the expression to compute the argument's offset. */
					ir_mode *constmode = mach_mode;
					if(mode_is_reference(mach_mode)) {

					addr = new_r_Const_long(irg, bl, constmode, curr_ofs);
					addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);

			/* Insert a store for primitive arguments. */
			if (is_atomic_type(param_type)) {
				store = new_r_Store(irg, bl, curr_mem, addr, param);
				mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);

			/* Make a mem copy for compound arguments. */
				assert(mode_is_reference(get_irn_mode(param)));
				copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
				mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);

			curr_ofs += param_size;

			obstack_ptr_grow(obst, mem);

		in = (ir_node **) obstack_finish(obst);

		/* We need the sync only, if we didn't build the stores sequentially. */
			curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);

			curr_mem = get_Call_mem(irn);

		obstack_free(obst, in);

	/* Collect caller save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if(arch_register_type_is(reg, caller_save)) {
				pset_insert_ptr(caller_save, (void *) reg);
			/* state registers are treated as caller save as well */
			if(arch_register_type_is(reg, state)) {
				pset_insert_ptr(caller_save, (void*) reg);
				pset_insert_ptr(states, (void*) reg);

	/* search the greatest result proj number */

	/* TODO: what if the result is NOT used? Currently there is
	 * no way to detect this later, especially there is no way to
	 * see this in the proj numbers.
	 * While this is ok for the register allocator, it is bad for
	 * backends which need to change the be_Call further (x87 simulator
	 * for instance. However for this particular case the call_type is
	 */
	foreach_out_edge(irn, edge) {
		const ir_edge_t *res_edge;
		ir_node *irn = get_edge_src_irn(edge);

		if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
			foreach_out_edge(irn, res_edge) {
				be_abi_call_arg_t *arg;
				ir_node *res = get_edge_src_irn(res_edge);

				assert(is_Proj(res));

				proj = get_Proj_proj(res);
				arg = get_call_arg(call, 1, proj);

				/*
					shift the proj number to the right, since we will drop the
					unspeakable Proj_T from the Call. Therefore, all real argument
					Proj numbers must be increased by pn_be_Call_first_res
				*/
				proj += pn_be_Call_first_res;
				set_Proj_proj(res, proj);
				obstack_ptr_grow(obst, res);

				if(proj > curr_res_proj)
					curr_res_proj = proj;
					/* drop the result register from the caller save set */
					pset_remove_ptr(caller_save, arg->reg);

				//pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))

	/* NULL-terminate the collected result Projs */
	obstack_ptr_grow(obst, NULL);
	res_projs = obstack_finish(obst);

	/* make the back end call node and set its register requirements. */
	for(i = 0; i < n_low_args; ++i) {
		obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));

	/* state registers become additional call inputs */
	foreach_pset(states, reg) {
		const arch_register_class_t *cls = arch_register_get_class(reg);
			ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
			ir_fprintf(stderr, "Adding %+F\n", regnode);
		ir_node *regnode = new_rd_Unknown(irg, arch_register_class_mode(cls));
		obstack_ptr_grow(obst, regnode);
	count = n_low_args + pset_count(states);

	in = obstack_finish(obst);

	/* a direct call if the address is an immediate SymConst */
	if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
		low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
		                       curr_res_proj + pset_count(caller_save), count,
		                       in, get_Call_type(irn));
		be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));

		low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
		                       curr_res_proj + pset_count(caller_save),
		                       count, in, get_Call_type(irn));

	ARR_APP1(ir_node*, env->calls, low_call);

	/*
		Set the register class of the call address to
		the backend provided class (default: stack pointer class)
	*/
	be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr);

	DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));

	/* Set the register classes and constraints of the Call parameters. */
	for(i = 0; i < n_low_args; ++i) {
		int index = low_args[i];
		be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
		assert(arg->reg != NULL);

		be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);

	/* Set the register constraints of the results. */
	for (i = 0; res_projs[i]; ++i) {
		int pn = get_Proj_proj(res_projs[i]);

		/* Correct Proj number since it has been adjusted! (see above) */
		const be_abi_call_arg_t *arg = get_call_arg(call, 1, pn - pn_Call_max);

		/* Matze: we need the information about the real mode for later
		 * transforms (signed/unsigned compares, stores...), so leave the fixup
		 * for the backend transform phase... */
			const arch_register_class_t *cls = arch_register_get_class(arg->reg);
			ir_mode *mode = arch_register_class_mode(cls);
			set_irn_mode(irn, mode);

		be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
	obstack_free(obst, in);
	exchange(irn, low_call);

	/* redirect the result projs to the lowered call instead of the Proj_T */
	for (i = 0; res_projs[i]; ++i)
		set_Proj_pred(res_projs[i], low_call);

	/* set the now unnecessary projT to bad */
	if(res_proj != NULL) {
		be_kill_node(res_proj);

	/* Make additional projs for the caller save registers
	   and the Keep node which keeps them alive. */
	if (pset_count(caller_save) > 0) {
		const arch_register_t *reg;

		for (reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
			ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);

			/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
			be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);

			/* a call can produce ignore registers, in this case set the flag and register for the Proj */
			if (arch_register_type_is(reg, ignore)) {
				arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
				be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj), arch_irn_flags_ignore);

			set_irn_link(proj, (void *) reg);
			obstack_ptr_grow(obst, proj);

		/* create the Keep for the caller save registers */
		in = (ir_node **) obstack_finish(obst);
		keep = be_new_Keep(NULL, irg, bl, n, in);
		for (i = 0; i < n; ++i) {
			const arch_register_t *reg = get_irn_link(in[i]);
			be_node_set_reg_class(keep, i, reg->reg_class);
		obstack_free(obst, in);

	/* Clean up the stack. */
		ir_node *mem_proj = NULL;

		/* reuse an existing memory Proj of the call if there is one */
		foreach_out_edge(low_call, edge) {
			ir_node *irn = get_edge_src_irn(edge);
			if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {

			mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
			keep_alive(mem_proj);

		/* Clean up the stack frame if we allocated it */
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
			add_irn_dep(curr_sp, mem_proj);
				add_irn_dep(curr_sp, alloca_copy);

	be_abi_call_free(call);
	obstack_free(obst, pos);
	del_pset(caller_save);
/**
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 *
 * @param env         the ABI environment
 * @param alloc       the Alloc node to transform
 * @param curr_sp     the current stack pointer node
 * @param result_copy out: receives a Copy of the allocated address that
 *                    survives later stack pointer modifications
 * @return the new current stack pointer
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
	const ir_edge_t *edge;

	/* only stack allocations are handled here */
	if (get_Alloc_where(alloc) != stack_alloc) {

	block = get_nodes_block(alloc);
	irg = get_irn_irg(block);

	type = get_Alloc_type(alloc);

	/* find the result/memory Projs of the Alloc */
	foreach_out_edge(alloc, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		assert(is_Proj(irn));
		switch(get_Proj_proj(irn)) {

	/* Beware: currently Alloc nodes without a result might happen,
	   only escape analysis kills them and this phase runs only for object
	   oriented source. We kill the Alloc here. */
	if (alloc_res == NULL && alloc_mem) {
		exchange(alloc_mem, get_Alloc_mem(alloc));

	/* we might need to multiply the size with the element size */
	if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
		tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
		ir_node *cnst = new_rd_Const(NULL, irg, block, mode_Iu, tv);
		ir_node *mul = new_rd_Mul(NULL, irg, block, get_Alloc_size(alloc),
		size = get_Alloc_size(alloc);

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit it. */
	env->call->flags.bits.try_omit_fp = 0;
	new_alloc = be_new_AddSP(env->isa->sp, irg, block, curr_sp, size);

	if(alloc_mem != NULL) {

		addsp_mem = new_r_Proj(irg, block, new_alloc, mode_M, pn_be_AddSP_M);

		// We need to sync the output mem of the AddSP with the input mem
		// edge into the alloc node
		ins[0] = get_Alloc_mem(alloc);
		sync = new_r_Sync(irg, block, 2, ins);
		exchange(alloc_mem, sync);

	exchange(alloc, new_alloc);

	/* fix projnum of alloca res */
	set_Proj_proj(alloc_res, pn_be_AddSP_res);

	addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;

	/* copy the address away, since it could be used after further stack pointer modifications. */
	/* Let it point curr_sp just for the moment, I'll reroute it in a second. */
	*result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, block, curr_sp);

	/* Let all users of the Alloc() result now point to the copy. */
	edges_reroute(alloc_res, copy, irg);

	/* Rewire the copy appropriately. */
	set_irn_n(copy, be_pos_Copy_op, addr);
/**
 * The Free is transformed into a back end free node and connected to the stack nodes.
 *
 * @param env     the ABI environment
 * @param free    the Free node to transform
 * @param curr_sp the current stack pointer node
 * @return the new current stack pointer
 */
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
	ir_node *subsp, *mem, *res, *size, *sync;

	/* only stack allocations are handled here */
	if (get_Free_where(free) != stack_alloc) {

	block = get_nodes_block(free);
	irg = get_irn_irg(block);
	type = get_Free_type(free);

	/* we might need to multiply the size with the element size */
	if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
		tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
		ir_node *cnst = new_rd_Const(NULL, irg, block, mode_Iu, tv);
		ir_node *mul = new_rd_Mul(NULL, irg, block, get_Free_size(free),
		size = get_Free_size(free);

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit it. */
	env->call->flags.bits.try_omit_fp = 0;
	subsp = be_new_SubSP(env->isa->sp, irg, block, curr_sp, size);

	mem = new_r_Proj(irg, block, subsp, mode_M, pn_be_SubSP_M);
	res = new_r_Proj(irg, block, subsp, mode_P_data, pn_be_SubSP_res);

	/* we need to sync the memory */
	in[0] = get_Free_mem(free);

	sync = new_r_Sync(irg, block, 2, in);

	/* and make the SubSP dependent on the former memory */
	add_irn_dep(subsp, get_Free_mem(free));

	exchange(free, sync);
/* the following function is replaced by the usage of the heights module */

/**
 * Walker for dependent_on().
 * This function searches a node tgt recursively from a given node
 * but is restricted to the given block.
 * @return 1 if tgt was reachable from curr, 0 if not.
 */
static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
	/* never leave the basic block */
	if (get_nodes_block(curr) != bl)

	/* Phi functions stop the recursion inside a basic block */
	if (! is_Phi(curr)) {
		/* recurse into all operands */
		for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
			if (check_dependence(get_irn_n(curr, i), tgt, bl))
978 * Check if a node is somehow data dependent on another one.
979 * both nodes must be in the same basic block.
980 * @param n1 The first node.
981 * @param n2 The second node.
982 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
984 static int dependent_on(ir_node *n1, ir_node *n2)
986 assert(get_nodes_block(n1) == get_nodes_block(n2));
988 return heights_reachable_in_block(ir_heights, n1, n2);
/**
 * qsort comparator ordering two call nodes by their mutual data
 * dependency (see dependent_on()); used by process_calls_in_block().
 */
static int cmp_call_dependecy(const void *c1, const void *c2)
	ir_node *n1 = *(ir_node **) c1;
	ir_node *n2 = *(ir_node **) c2;

	/*
		Classical qsort() comparison function behavior:
		0  if both elements are equal
		1  if second is "smaller" that first
		-1 if first is "smaller" that second
	*/
	if (dependent_on(n1, n2))

	if (dependent_on(n2, n1))
/**
 * Walker: links all Call/alloc/Free nodes to the Block they are contained.
 * Also clears the irg_is_leaf flag as soon as a Call is seen.
 */
static void link_calls_in_block_walker(ir_node *irn, void *data)
	ir_opcode code = get_irn_opcode(irn);

	/* only Calls and stack Alloc/Free nodes are of interest */
	if (code == iro_Call ||
			(code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
			(code == iro_Free && get_Free_where(irn) == stack_alloc)) {
		be_abi_irg_t *env = data;
		ir_node *bl = get_nodes_block(irn);
		void *save = get_irn_link(bl);

		/* a graph containing a Call is no leaf */
		if (code == iro_Call)
			env->call->flags.bits.irg_is_leaf = 0;

		/* prepend the node to the block's linked list (head in the block's link) */
		set_irn_link(irn, save);
		set_irn_link(bl, irn);
/**
 * Process all Call nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the Block. We first order this list according to data dependency
 * and that connect the calls together.
 */
static void process_calls_in_block(ir_node *bl, void *data)
	be_abi_irg_t *env = data;
	ir_node *curr_sp = env->init_sp;

	/* collect the Call/Alloc/Free nodes linked to this block */
	for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
		obstack_ptr_grow(&env->obst, irn);

	/* If there were call nodes in the block. */
		ir_node *copy = NULL;

		nodes = obstack_finish(&env->obst);

		/* order the call nodes according to data dependency */
		qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);

		/* walk the sorted list back to front, threading the stack pointer
		   through the adjusted nodes */
		for(i = n - 1; i >= 0; --i) {
			ir_node *irn = nodes[i];

			DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
			switch(get_irn_opcode(irn)) {
				curr_sp = adjust_call(env, irn, curr_sp, copy);
				curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
				curr_sp = adjust_free(env, irn, curr_sp);

		obstack_free(&env->obst, nodes);

		/* Keep the last stack state in the block by tying it to Keep node */
			keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
			pmap_insert(env->keep_map, bl, keep);

	/* publish the final stack state of this block in the block's link */
	set_irn_link(bl, curr_sp);
} /* process_calls_in_block */
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;

	/* assume a leaf graph until the walker finds a Call */
	env->call->flags.bits.irg_is_leaf = 1;
	irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);

	/* heights are needed to order calls by data dependency */
	ir_heights = heights_new(env->birg->irg);
	irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
	heights_free(ir_heights);
/**
 * Sets up the stack frame in the start block: expands the stack pointer
 * and, unless frame pointer omission is requested, establishes a frame
 * pointer as a Copy of the stack pointer pinned to the bp register.
 */
static ir_node *setup_frame(be_abi_irg_t *env)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	be_abi_call_flags_bits_t flags = env->call->flags.bits;
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *old_frame = get_irg_frame(irg);
	ir_node *stack = pmap_get(env->regs, (void *) sp);
	ir_node *frame = pmap_get(env->regs, (void *) bp);

	int stack_nr = get_Proj_proj(stack);

	if(flags.try_omit_fp) {
		/* no frame pointer: just expand the stack frame */
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);

		/* materialize the frame pointer as a copy of the stack pointer */
		frame = be_new_Copy(bp->reg_class, irg, bl, stack);

		/* the frame pointer must never be spilled */
		be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
		if(!flags.fp_free) {
			/* pin the copy to the bp register */
			be_set_constr_single_reg(frame, -1, bp);
			be_node_set_flags(frame, -1, arch_irn_flags_ignore);
			arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);

		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);

	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
	env->init_sp = stack;
	set_irg_frame(irg, frame);
	/* let all users of the old frame use the new one */
	edges_reroute(old_frame, frame, irg);
/**
 * Cleans up the stack frame at a Return: shrinks the stack frame (or
 * restores the stack pointer from the frame pointer) and collects the
 * nodes that must be live at the Return.
 */
static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	ir_graph *irg = env->birg->irg;
	ir_node *ret_mem = get_Return_mem(ret);
	ir_node *frame = get_irg_frame(irg);
	ir_node *bl = get_nodes_block(ret);
	ir_node *stack = get_irn_link(bl);

	if(env->call->flags.bits.try_omit_fp) {
		/* no frame pointer: undo the frame expansion */
		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);

		/* restore the stack pointer from the frame pointer */
		stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
		be_set_constr_single_reg(stack, -1, sp);
		be_node_set_flags(stack, -1, arch_irn_flags_ignore);

	/* collect everything that must be live at the Return */
	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		ir_node *irn = ent->value;

		/* NOTE(review): the following grow onto &env->obst while the last
		   one uses the obst parameter — verify both refer to the same obstack */
		obstack_ptr_grow(&env->obst, stack);

		obstack_ptr_grow(&env->obst, frame);
		else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			obstack_ptr_grow(obst, irn);
/**
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param env         the ABI environment
 * @param call        the current call ABI
 * @param method_type the method type
 * @param param_map   an array mapping method arguments to the stack layout type
 *
 * @return the stack argument layout type
 */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map)
	/* walk the parameters in stack layout order (direction depends on
	   argument order and stack direction) */
	int dir = env->call->flags.bits.left_to_right ? 1 : -1;
	int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
	int n = get_method_n_params(method_type);
	int curr = inc > 0 ? 0 : n - 1;

	ir_type *val_param_tp = get_method_value_param_type(method_type);
	ident *id = get_entity_ident(get_irg_entity(env->birg->irg));

	*param_map = map = obstack_alloc(&env->obst, n * sizeof(ir_entity *));
	res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
	for (i = 0; i < n; ++i, curr += inc) {
		ir_type *param_type = get_method_param_type(method_type, curr);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);

		/* only stack arguments get an entity in the layout type */
		if (arg->on_stack) {
				/* the entity was already created, move it to the param type */
				arg->stack_ent = get_method_value_param_ent(method_type, i);
				remove_struct_member(val_param_tp, arg->stack_ent);
				set_entity_owner(arg->stack_ent, res);
				add_struct_member(res, arg->stack_ent);
				/* must be automatic to set a fixed layout */
				set_entity_allocation(arg->stack_ent, allocation_automatic);

				/* otherwise create a fresh entity for the parameter */
				snprintf(buf, sizeof(buf), "param_%d", i);
				arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);

			/* advance the running offset, honoring padding and alignment.
			   NOTE(review): space_after is added before the parameter size
			   here — verify against the stack_size computation in adjust_call */
			ofs += arg->space_before;
			ofs = round_up2(ofs, arg->alignment);
			set_entity_offset(arg->stack_ent, ofs);
			ofs += arg->space_after;
			ofs += get_type_size_bytes(param_type);
			map[i] = arg->stack_ent;

	/* the final offset is the total size; the layout is now fixed */
	set_type_size_bytes(res, ofs);
	set_type_state(res, layout_fixed);
/**
 * Creates a Perm node per register class after the RegParams node to
 * delimit it, updating the register map and pinning each Perm output to
 * its register.
 */
static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
	struct obstack obst;

	obstack_init(&obst);

	/* Create a Perm after the RegParams node to delimit it. */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);

		/* collect all non-ignore registers of this class that have a node */
		for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			ir_node *irn = pmap_get(regs, (void *) reg);

			if(irn && !arch_register_type_is(reg, ignore)) {
				obstack_ptr_grow(&obst, irn);
				/* remember the register in the link field for the loop below */
				set_irn_link(irn, (void *) reg);

		/* NULL-terminate the operand list */
		obstack_ptr_grow(&obst, NULL);
		in = obstack_finish(&obst);

		perm = be_new_Perm(cls, irg, bl, n_regs, in);
		/* re-register the nodes and constrain the Perm outputs */
		for(j = 0; j < n_regs; ++j) {
			ir_node *arg = in[j];
			arch_register_t *reg = get_irn_link(arg);

			pmap_insert(regs, reg, arg);
			be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);

		obstack_free(&obst, in);

	obstack_free(&obst, NULL);
1293 const arch_register_t *reg;
1297 static int cmp_regs(const void *a, const void *b)
1299 const reg_node_map_t *p = a;
1300 const reg_node_map_t *q = b;
1302 if(p->reg->reg_class == q->reg->reg_class)
1303 return p->reg->index - q->reg->index;
1305 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flattens a register->node pmap into an array sorted by register class
 * and index (see cmp_regs), giving a deterministic order independent of
 * the pmap's internal iteration order.
 *
 * @param obst     obstack the result array is allocated on
 * @param reg_map  the register -> node map
 */
static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
	int n = pmap_count(reg_map);
	reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
	pmap_foreach(reg_map, ent) {
		res[i].reg = ent->key;
		res[i].irn = ent->value;
	/* sort for a stable, deterministic ordering */
	qsort(res, n, sizeof(res[0]), cmp_regs);
/**
 * Creates a barrier.
 *
 * A be_Barrier takes all live registers plus the current memory as inputs
 * and routes each of them through as a Proj pinned to its register; this
 * separates allocation regions (e.g. prologue/epilogue from the body).
 *
 * @param env     the ABI irg environment
 * @param bl      the block to place the barrier in
 * @param mem     in/out: current memory; replaced by the barrier's memory Proj
 * @param regs    map register -> node; updated to the barrier's Projs
 * @param in_req  non-zero: additionally constrain the barrier's inputs
 */
static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
	ir_graph *irg = env->birg->irg;
	int n_regs = pmap_count(regs);
	rm = reg_map_to_arr(&env->obst, regs);
	for(n = 0; n < n_regs; ++n)
		obstack_ptr_grow(&env->obst, rm[n].irn);
	/* the memory input comes last */
	obstack_ptr_grow(&env->obst, *mem);
	in = (ir_node **) obstack_finish(&env->obst);
	irn = be_new_Barrier(irg, bl, n, in);
	obstack_free(&env->obst, in);
	for(n = 0; n < n_regs; ++n) {
		const arch_register_t *reg = rm[n].reg;
		int pos = BE_OUT_POS(n);
		proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
		be_node_set_reg_class(irn, n, reg->reg_class);
		/* constrain input (for in_req barriers) and output to the register */
		be_set_constr_single_reg(irn, n, reg);
		be_set_constr_single_reg(irn, pos, reg);
		be_node_set_reg_class(irn, pos, reg->reg_class);
		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
		/* if the proj projects an ignore register or a node which is set to ignore, propagate this property. */
		if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
			flags |= arch_irn_flags_ignore;
		if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
			flags |= arch_irn_flags_modify_sp;
		be_node_set_flags(irn, pos, flags);
		pmap_insert(regs, (void *) reg, proj);
	/* the last Proj delivers the barrier's memory */
	*mem = new_r_Proj(irg, bl, irn, mode_M, n);
	obstack_free(&env->obst, rm);
/**
 * Creates a be_Return for a Return node.
 *
 * @param env the abi environment
 * @param irn the Return node or NULL if there was none
 * @param bl the block where the be_Return should be placed
 * @param mem the current memory
 * @param n_res number of return results
 */
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
	be_abi_call_t *call = env->call;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	pmap *reg_map = pmap_create();
	ir_node *keep = pmap_get(env->keep_map, bl);
	const arch_register_t **regs;
	/*
		get the valid stack node in this block.
		If we had a call in that block there is a Keep constructed by process_calls()
		which points to the last stack modification in that block. we'll use
		it then. Else we use the stack from the start block and let
		the ssa construction fix the usage.
	*/
	stack = be_abi_reg_map_get(env->regs, isa->sp);
		/* detach the Keep; its first input is the last stack state */
		ir_node *bad = new_r_Bad(env->birg->irg);
		stack = get_irn_n(keep, 0);
		set_nodes_block(keep, bad);
		set_irn_n(keep, 0, bad);
		// exchange(keep, new_r_Bad(env->birg->irg));
	/* Insert results for Return into the register map. */
	for(i = 0; i < n_res; ++i) {
		ir_node *res = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
		assert(arg->in_reg && "return value must be passed in register");
		pmap_insert(reg_map, (void *) arg->reg, res);
	/* Add uses of the callee save registers. */
	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			pmap_insert(reg_map, ent->key, ent->value);
	be_abi_reg_map_set(reg_map, isa->sp, stack);
	/* Make the Epilogue node and call the arch's epilogue maker. */
	create_barrier(env, bl, &mem, reg_map, 1);
	call->cb->epilogue(env->cb, bl, &mem, reg_map);
	/*
		Maximum size of the in array for Return nodes is
		return args + callee save/ignore registers + memory + stack pointer
	*/
	in_max = pmap_count(reg_map) + n_res + 2;
	in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
	regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
	/* the stack pointer is the second fixed input */
	in[1] = be_abi_reg_map_get(reg_map, isa->sp);
	/* clear SP entry, since it has already been grown. */
	pmap_insert(reg_map, (void *) isa->sp, NULL);
	for(i = 0; i < n_res; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
		in[n] = be_abi_reg_map_get(reg_map, arg->reg);
		regs[n++] = arg->reg;
		/* Clear the map entry to mark the register as processed. */
		be_abi_reg_map_set(reg_map, arg->reg, NULL);
	/* grow the rest of the stuff. */
	pmap_foreach(reg_map, ent) {
		regs[n++] = ent->key;
	/* The in array for the new back end return is now ready. */
	ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
	/* Set the register classes of the return's parameter accordingly. */
	for(i = 0; i < n; ++i)
		be_node_set_reg_class(ret, i, regs[i]->reg_class);
	/* Free the space of the Epilog's in array and the register <-> proj map. */
	obstack_free(&env->obst, in);
	pmap_destroy(reg_map);
/** Walker environment for lower_frame_sels_walker(). */
typedef struct lower_frame_sels_env_t {
	ir_entity *value_param_list; /**< the list of all value param entities */
} lower_frame_sels_env_t;
/**
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddress.
 * Additionally links all value-param entities into ctx->value_param_list.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
	lower_frame_sels_env_t *ctx = data;
	ir_graph *irg = current_ir_graph;
	ir_node *frame = get_irg_frame(irg);
	ir_node *param_base = get_irg_value_param_base(irg);
	ir_node *ptr = get_Sel_ptr(irn);
	if (ptr == frame || ptr == param_base) {
		be_abi_irg_t *env = ctx->env;
		ir_entity *ent = get_Sel_entity(irn);
		ir_node *bl = get_nodes_block(irn);
		/* replace the Sel by a FrameAddr in the stack pointer's class */
		nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
		/* check, if it's a param sel and if have not seen this entity immediately before */
		if (ptr == param_base && ctx->value_param_list != ent) {
			set_entity_link(ent, ctx->value_param_list);
			ctx->value_param_list = ent;
/**
 * Check if a value parameter is transmitted as a register.
 * This might happen if the address of an parameter is taken which is
 * transmitted in registers.
 *
 * Note that on some architectures this case must be handled specially
 * because the place of the backing store is determined by their ABI.
 *
 * In the default case we move the entity to the frame type and create
 * a backing store into the first block.
 */
static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_entity *value_param_list) {
	be_abi_call_t *call = env->call;
	ir_graph *irg = env->birg->irg;
	ir_entity *ent, *next_ent, *new_list;
	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
	/* collect all value-param entities whose argument is register-passed */
	for (ent = value_param_list; ent; ent = next_ent) {
		int i = get_struct_member_index(get_entity_owner(ent), ent);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		next_ent = get_entity_link(ent);
		DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
		set_entity_link(ent, new_list);
	/* ok, change the graph */
		ir_node *start_bl = get_irg_start_block(irg);
		ir_node *first_bl = NULL;
		ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
		const ir_edge_t *edge;
		optimization_state_t state;
		/* find the first real successor block of the start block */
		foreach_block_succ(start_bl, edge) {
			ir_node *succ = get_edge_src_irn(edge);
			if (start_bl != succ) {
		/* we had already removed critical edges, so the following
		   assertion should be always true. */
		assert(get_Block_n_cfgpreds(first_bl) == 1);
		/* now create backing stores */
		frame = get_irg_frame(irg);
		imem = get_irg_initial_mem(irg);
		save_optimization_state(&state);
		nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
		restore_optimization_state(&state);
		/* reroute all edges to the new memory source */
		edges_reroute(imem, nmem, irg);
		args = get_irg_args(irg);
		args_bl = get_nodes_block(args);
		for (ent = new_list; ent; ent = get_entity_link(ent)) {
			int i = get_struct_member_index(get_entity_owner(ent), ent);
			ir_type *tp = get_entity_type(ent);
			ir_mode *mode = get_type_mode(tp);
			/* address for the backing store */
			addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
			/* chain the stores through memory Projs */
			mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
			/* the backing store itself */
			store = new_r_Store(irg, first_bl, mem, addr,
			                    new_r_Proj(irg, args_bl, args, mode, i));
		/* the new memory Proj gets the last Proj from store */
		set_Proj_pred(nmem, store);
		set_Proj_proj(nmem, pn_Store_M);
		/* move all entities to the frame type */
		frame_tp = get_irg_frame_type(irg);
		offset = get_type_size_bytes(frame_tp);
		for (ent = new_list; ent; ent = get_entity_link(ent)) {
			ir_type *tp = get_entity_type(ent);
			int align = get_type_alignment_bytes(tp);
			/* NOTE(review): offset rounding appears to assume the masking
			   step is in an elided line — confirm against full source */
			offset += align - 1;
			set_entity_owner(ent, frame_tp);
			add_class_member(frame_tp, ent);
			/* must be automatic to set a fixed layout */
			set_entity_allocation(ent, allocation_automatic);
			set_entity_offset(ent, offset);
			offset += get_type_size_bytes(tp);
		set_type_size_bytes(frame_tp, offset);
/**
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
 */
static void fix_start_block(ir_node *block, void *env) {
	ir_node *start_block;
	/* we processed the start block, return */
	irg = get_irn_irg(block);
	start_block = get_irg_start_block(irg);
	for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(block, i);
		ir_node *pred_block = get_nodes_block(pred);
		/* ok, we are in the block, having start as cfg predecessor */
		if (pred_block == start_block) {
			/* replace the exec Proj edge by a real Jmp */
			ir_node *jump = new_r_Jmp(irg, pred_block);
			set_Block_cfgpred(block, i, jump);
/**
 * Modify the irg itself and the frame type:
 * lowers frame Sels, builds the stack argument type, creates the
 * RegParams/Barrier/IncSP prologue machinery, replaces the argument
 * Projs by register Projs or StackParam/FrameAddr nodes and turns all
 * Return nodes into be_Returns.
 */
static void modify_irg(be_abi_irg_t *env)
	be_abi_call_t *call = env->call;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *end = get_irg_end_block(irg);
	ir_node *mem = get_irg_initial_mem(irg);
	ir_type *method_type = get_entity_type(get_irg_entity(irg));
	pset *dont_save = pset_new_ptr(8);
	const arch_register_t *fp_reg;
	ir_node *frame_pointer;
	ir_node *reg_params_bl;
	ir_node *value_param_base;
	const ir_edge_t *edge;
	ir_type *arg_type, *bet_type;
	lower_frame_sels_env_t ctx;
	ir_entity **param_map;
	bitset_t *used_proj_nr;
	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
	/* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
	ctx.value_param_list = NULL;
	irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
	/* value_param_base anchor is not needed anymore now */
	value_param_base = get_irg_value_param_base(irg);
	be_kill_node(value_param_base);
	set_irg_value_param_base(irg, new_r_Bad(irg));
	env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
	env->regs = pmap_create();
	used_proj_nr = bitset_alloca(1024);
	n_params = get_method_n_params(method_type);
	args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
	memset(args, 0, n_params * sizeof(args[0]));
	/* Check if a value parameter is transmitted as a register.
	 * This might happen if the address of an parameter is taken which is
	 * transmitted in registers.
	 *
	 * Note that on some architectures this case must be handled specially
	 * because the place of the backing store is determined by their ABI.
	 *
	 * In the default case we move the entity to the frame type and create
	 * a backing store into the first block.
	 */
	fix_address_of_parameter_access(env, ctx.value_param_list);
	/* Fill the argument vector */
	arg_tuple = get_irg_args(irg);
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		int nr = get_Proj_proj(irn);
		DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
	arg_type = compute_arg_type(env, call, method_type, &param_map);
	bet_type = call->cb->get_between_type(env->cb);
	stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
	/* Count the register params and add them to the number of Projs for the RegParams node */
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg->in_reg && args[i]) {
			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
			assert(i == get_Proj_proj(args[i]));
			/* For now, associate the register with the old Proj from Start representing that argument. */
			pmap_insert(env->regs, (void *) arg->reg, args[i]);
			bitset_set(used_proj_nr, i);
			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
	/* Collect all callee-save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			if(arch_register_type_is(reg, callee_save) ||
					arch_register_type_is(reg, state)) {
				pmap_insert(env->regs, (void *) reg, NULL);
	/* the stack and frame pointer always take part in the RegParams */
	pmap_insert(env->regs, (void *) sp, NULL);
	pmap_insert(env->regs, (void *) isa->bp, NULL);
	reg_params_bl = get_irg_start_block(irg);
	env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
	add_irn_dep(env->reg_params, get_irg_start(irg));
	/*
	 * make proj nodes for the callee save registers.
	 * memorize them, since Return nodes get those as inputs.
	 *
	 * Note, that if a register corresponds to an argument, the regs map contains
	 * the old Proj from start for that argument.
	 */
	rm = reg_map_to_arr(&env->obst, env->regs);
	for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
		arch_register_t *reg = (void *) rm[i].reg;
		ir_mode *mode = reg->reg_class->mode;
		int pos = BE_OUT_POS((int) nr);
		bitset_set(used_proj_nr, nr);
		proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
		pmap_insert(env->regs, (void *) reg, proj);
		be_set_constr_single_reg(env->reg_params, pos, reg);
		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
		/*
		 * If the register is an ignore register,
		 * The Proj for that register shall also be ignored during register allocation.
		 */
		if(arch_register_type_is(reg, ignore))
			flags |= arch_irn_flags_ignore;
		flags |= arch_irn_flags_modify_sp;
		be_node_set_flags(env->reg_params, pos, flags);
		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
	obstack_free(&env->obst, rm);
	/* Generate the Prologue */
	fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
	/* do the stack allocation BEFORE the barrier, or spill code
	   might be added before it */
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
	be_abi_reg_map_set(env->regs, sp, env->init_sp);
	env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
	set_irg_frame(irg, frame_pointer);
	pset_insert_ptr(env->ignore_regs, fp_reg);
	set_irg_initial_mem(irg, mem);
	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for(i = 0; i < n_params; ++i) {
		ir_node *arg_proj = args[i];
		ir_node *repl = NULL;
		if(arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			ir_type *param_type;
			int nr = get_Proj_proj(arg_proj);
			nr = MIN(nr, n_params);
			arg = get_call_arg(call, 0, nr);
			param_type = get_method_param_type(method_type, nr);
			/* register argument: re-use the Proj recorded above */
			repl = pmap_get(env->regs, (void *) arg->reg);
			else if(arg->on_stack) {
				/* For atomic parameters which are actually used, we create a StackParam node. */
				if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
					ir_mode *mode = get_type_mode(param_type);
					const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
					repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
				/* The stack parameter is not primitive (it is a struct or array),
				   we thus will create a node representing the parameter's address
				 */
					repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
			assert(repl != NULL);
			exchange(args[i], repl);
	/* the arg proj is not needed anymore now */
	assert(get_irn_n_edges(arg_tuple) == 0);
	be_kill_node(arg_tuple);
	set_irg_args(irg, new_rd_Bad(irg));
	/* All Return nodes hang on the End node, so look for them there. */
	for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
		ir_node *irn = get_Block_cfgpred(end, i);
		if (is_Return(irn)) {
			ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
	/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
	   the code is dead and will never be executed. */
	del_pset(dont_save);
	obstack_free(&env->obst, args);
	/* handle start block here (place a jump in the block) */
	irg_block_walk_graph(irg, fix_start_block, NULL, &temp);
/** Fix the state inputs of calls that still hang on unknowns */
void fix_call_state_inputs(be_abi_irg_t *env)
	const arch_isa_t *isa = env->isa;
	const arch_register_t **stateregs = NEW_ARR_F(const arch_register_t*, 0);
	/* Collect all state registers of all register classes */
	n = arch_isa_get_n_reg_class(isa);
	for(i = 0; i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if(arch_register_type_is(reg, state)) {
				ARR_APP1(arch_register_t*, stateregs, reg);
	/* rewire every recorded be_Call to the real state register nodes */
	n = ARR_LEN(env->calls);
	n_states = ARR_LEN(stateregs);
	for(i = 0; i < n; ++i) {
		ir_node *call = env->calls[i];
		arity = get_irn_arity(call);
		/* the statereg inputs are the last n inputs of the calls */
		for(s = 0; s < n_states; ++s) {
			int inp = arity - n_states + s;
			const arch_register_t *reg = stateregs[s];
			ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
			set_irn_n(call, inp, regnode);
/**
 * Introduces the ABI on the given backend irg: sets up the ABI
 * environment, lowers all Call nodes, runs modify_irg() related
 * processing and fixes the call state inputs.
 *
 * @param birg  the backend irg
 * @return the newly allocated ABI environment (freed by be_abi_free())
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	be_abi_irg_t *env = xmalloc(sizeof(env[0]));
	ir_node *old_frame = get_irg_frame(birg->irg);
	ir_graph *irg = birg->irg;
	optimization_state_t state;
	unsigned *limited_bitset;
	be_omit_fp = birg->main_env->options->omit_fp;
	obstack_init(&env->obst);
	env->isa = birg->main_env->arch_env->isa;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call = be_abi_call_new(env->isa->sp->reg_class);
	arch_isa_get_call_abi(env->isa, env->method_type, env->call);
	env->ignore_regs = pset_new_ptr_default();
	env->keep_map = pmap_create();
	env->dce_survivor = new_survive_dce();
	/* the stack pointer requirement is limited to exactly the SP register */
	env->sp_req.type = arch_register_req_type_limited;
	env->sp_req.cls = arch_register_get_class(env->isa->sp);
	limited_bitset = rbitset_obstack_alloc(&env->obst, env->sp_req.cls->n_regs);
	rbitset_set(limited_bitset, arch_register_get_index(env->isa->sp));
	env->sp_req.limited = limited_bitset;
	env->sp_cls_req.type = arch_register_req_type_normal;
	env->sp_cls_req.cls = arch_register_get_class(env->isa->sp);
	/* Beware: later we replace this node by the real one, ensure it is not CSE'd
	   to another Unknown or the stack pointer gets used */
	save_optimization_state(&state);
	env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
	restore_optimization_state(&state);
	FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
	env->irn_ops.impl = &abi_irn_ops;
	env->calls = NEW_ARR_F(ir_node*, 0);
	/* Lower all call nodes in the IRG. */
	/*
		Beware: init backend abi call object after processing calls,
		otherwise some information might be not yet available.
	*/
	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
	/* Process the IRG */
	/* fix call inputs for state registers */
	fix_call_state_inputs(env);
	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);
	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));
	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	pmap_foreach(env->regs, ent) {
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
	env->call->cb->done(env->cb);
/** Releases all resources held by the given ABI environment. */
void be_abi_free(be_abi_irg_t *env)
	free_survive_dce(env->dce_survivor);
	del_pset(env->ignore_regs);
	pmap_destroy(env->regs);
	/* the obstack owns frame, args and the limited bitsets */
	obstack_free(&env->obst, NULL);
	arch_env_pop_irn_handler(env->birg->main_env->arch_env);
2047 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
2049 arch_register_t *reg;
2051 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
2052 if(reg->reg_class == cls)
2053 bitset_set(bs, reg->index);
/* Returns the stack layout from an abi environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
2064 | ___(_)_ __ / ___|| |_ __ _ ___| | __
2065 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
2066 | _| | |> < ___) | || (_| | (__| <
2067 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/** Walker environment for collect_stack_nodes_walker(). */
typedef struct fix_stack_walker_env_t {
	const arch_env_t *arch_env; /**< The architecture environment. */
} fix_stack_walker_env_t;
/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
	fix_stack_walker_env_t *env = data;
	if (arch_irn_is(env->arch_env, node, modify_sp)) {
		/* a stack modifier must yield a data value, not memory or a tuple */
		assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
		ARR_APP1(ir_node*, env->nodes, node);
/**
 * Collects all stack-modifying nodes of the graph, rebuilds SSA form for
 * the stack pointer over them and pins the resulting Phis to the SP
 * register.
 *
 * @param env the ABI irg environment
 * @param lv  liveness information (may be updated by SSA construction)
 */
void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
	be_irg_t *birg = env->birg;
	fix_stack_walker_env_t walker_env;
	walker_env.nodes = NEW_ARR_F(ir_node*, 0);
	walker_env.arch_env = birg->main_env->arch_env;
	be_assure_dom_front(birg);
	irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
	/* reconstruct SSA form for the collected stack definitions */
	phis = be_ssa_construction(
		be_get_birg_dom_front(birg),
		be_get_birg_liveness(birg),
		ARR_LEN(walker_env.nodes), walker_env.nodes,
	/* set register requirements for stack phis */
	for(i = 0; i < ARR_LEN(phis); ++i) {
		ir_node *phi = phis[i];
		be_set_phi_reg_req(walker_env.arch_env, phi, &env->sp_req);
		be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
		arch_set_irn_register(walker_env.arch_env, phi, env->isa->sp);
	DEL_ARR_F(walker_env.nodes);
/**
 * Walks over the schedule of a block, sets the final offsets of all
 * frame entities referenced there and folds the stack pointer
 * modifications into the running bias.
 *
 * @param env   the ABI irg environment
 * @param bl    the block to process
 * @param bias  the stack bias at the entry of the block
 * @return the stack bias at the end of the block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
	const arch_env_t *arch_env = env->birg->main_env->arch_env;
	int omit_fp = env->call->flags.bits.try_omit_fp;
	sched_foreach(bl, irn) {
		/*
			Check, if the node relates to an entity on the stack frame.
			If so, set the true offset (including the bias) for that
		*/
		ir_entity *ent = arch_get_frame_entity(arch_env, irn);
		int offset = get_stack_entity_offset(env->frame, ent, bias);
		arch_set_frame_offset(arch_env, irn, offset);
		DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
		/*
			If the node modifies the stack pointer by a constant offset,
			record that in the bias.
		*/
		if(arch_irn_is(arch_env, irn, modify_sp)) {
			int ofs = arch_get_sp_bias(arch_env, irn);
			if(be_is_IncSP(irn)) {
				/* replace the placeholder sizes by the real frame size */
				if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
					ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
					be_set_IncSP_offset(irn, ofs);
				} else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
					ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
					be_set_IncSP_offset(irn, ofs);
/**
 * A helper struct for the bias walker.
 */
	be_abi_irg_t *env; /**< The ABI irg environment. */
	int start_block_bias; /**< The bias at the end of the start block. */
	ir_node *start_block; /**< The start block of the current graph. */
2180 * Block-Walker: fix all stack offsets
2182 static void stack_bias_walker(ir_node *bl, void *data)
2184 struct bias_walk *bw = data;
2185 if (bl != bw->start_block) {
2186 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Computes the initial frame offsets and then fixes the stack bias in
 * every block of the graph.
 *
 * @param env the ABI irg environment
 */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;
	struct bias_walk bw;
	stack_frame_compute_initial_offset(env->frame);
	// stack_layout_dump(stdout, env->frame);
	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
	/* fix the bias in all other blocks */
	bw.start_block = get_irg_start_block(irg);
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
2207 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2209 assert(arch_register_type_is(reg, callee_save));
2210 assert(pmap_contains(abi->regs, (void *) reg));
2211 return pmap_get(abi->regs, (void *) reg);
2214 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2216 assert(arch_register_type_is(reg, ignore));
2217 assert(pmap_contains(abi->regs, (void *) reg));
2218 return pmap_get(abi->regs, (void *) reg);
/** Returns the barrier node created at the end of the start block. */
ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi)
	return abi->start_barrier;
2228 _____ _____ _ _ _ _ _ _
2229 |_ _| __ \| \ | | | | | | | | |
2230 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
2231 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
2232 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
2233 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
2235 for Phi nodes which are created due to stack modifying nodes
2236 such as IncSP, AddSP and SetSP.
2238 These Phis are always to be ignored by the reg alloc and are
2239 fixed on the SP register of the ISA.
/**
 * Handler callback: returns the ABI irn_ops for Phis created for stack
 * nodes, NULL for every other node.
 */
static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
	const be_abi_irg_t *abi = get_abi_from_handler(handler);
	const void *res = NULL;
	/* only Phis recorded in stack_phis get the special ops */
	if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
		res = &abi->irn_ops;
2254 const arch_register_req_t *abi_get_irn_reg_req(const void *self,
2255 const ir_node *irn, int pos)
2257 be_abi_irg_t *abi = get_abi_from_ops(self);
2259 if(pos == BE_OUT_POS(0)) {
2260 return &abi->sp_req;
2261 } else if(pos >= 0 && pos < get_irn_arity(irn)) {
2262 return &abi->sp_cls_req;
2265 return arch_no_register_req;
/** Register setter callback of the irn_ops for stack Phis. */
static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
2272 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
2274 const be_abi_irg_t *abi = get_abi_from_ops(self);
2275 return abi->isa->sp;
/** Classify callback: stack Phis are ordinary (normal) nodes. */
static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
	return arch_irn_class_normal;
/** Flags callback: stack Phis are ignored by the allocator and modify the SP. */
static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
	return arch_irn_flags_ignore | arch_irn_flags_modify_sp;
/** Frame entity callback of the irn_ops for stack Phis. */
static ir_entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
/** Frame entity setter callback of the irn_ops for stack Phis. */
static void abi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
/** Frame offset setter callback of the irn_ops for stack Phis. */
static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias)
/** Stack bias callback of the irn_ops for stack Phis. */
static int abi_get_sp_bias(const void *self, const ir_node *irn)
/** The irn_ops interface used for stack Phi nodes. */
static const arch_irn_ops_if_t abi_irn_ops = {
	abi_get_irn_reg_req,
	abi_get_frame_entity,
	abi_set_frame_entity,
	abi_set_frame_offset,
	NULL, /* get_inverse */
	NULL, /* get_op_estimated_cost */
	NULL, /* possible_memory_operand */
	NULL, /* perform_memory_operand */
/** The handler routing stack Phis to abi_irn_ops (see abi_get_irn_ops). */
static const arch_irn_handler_t abi_irn_handler = {
/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi) {
	return abi->call->flags.bits.try_omit_fp;