4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
30 #include "raw_bitset.h"
37 #include "besched_t.h"
41 typedef struct _be_abi_call_arg_t {
42 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
43 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
44 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
47 const arch_register_t *reg;
50 unsigned space_before;
54 struct _be_abi_call_t {
55 be_abi_call_flags_t flags;
56 const be_abi_callbacks_t *cb;
57 ir_type *between_type;
59 const arch_register_class_t *cls_addr;
62 struct _be_abi_irg_t {
64 be_stack_layout_t *frame; /**< The stack frame model. */
65 be_irg_t *birg; /**< The back end IRG. */
66 const arch_isa_t *isa; /**< The isa. */
67 survive_dce_t *dce_survivor;
69 be_abi_call_t *call; /**< The ABI call information. */
70 ir_type *method_type; /**< The type of the method of the IRG. */
72 ir_node *init_sp; /**< The node representing the stack pointer
73 at the start of the function. */
75 ir_node *start_barrier; /**< The barrier of the start block */
77 ir_node *reg_params; /**< The reg params node. */
78 pmap *regs; /**< A map of all callee-save and ignore regs to
79 their Projs to the RegParams node. */
81 int start_block_bias; /**< The stack bias at the end of the start block. */
83 void *cb; /**< ABI Callback self pointer. */
85 pmap *keep_map; /**< mapping blocks to keep nodes. */
86 pset *ignore_regs; /**< Additional registers which shall be ignored. */
88 ir_node **calls; /**< flexible array containing all be_Call nodes */
90 arch_register_req_t sp_req;
91 arch_register_req_t sp_cls_req;
93 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/** Dominance/height information, only valid during process_calls(). */
static heights_t *ir_heights;

/* Flag: if set, try to omit the frame pointer if called by the backend */
static int be_omit_fp = 1;
102 _ ____ ___ ____ _ _ _ _
103 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
104 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
105 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
106 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
108 These callbacks are used by the backend to set the parameters
109 for a specific call type.
113 * Set compare function: compares two ABI call object arguments.
115 static int cmp_call_arg(const void *a, const void *b, size_t n)
117 const be_abi_call_arg_t *p = a, *q = b;
118 return !(p->is_res == q->is_res && p->pos == q->pos);
122 * Get or set an ABI call object argument.
124 * @param call the abi call
125 * @param is_res true for call results, false for call arguments
126 * @param pos position of the argument
127 * @param do_insert true if the argument is set, false if it's retrieved
129 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
131 be_abi_call_arg_t arg;
134 memset(&arg, 0, sizeof(arg));
138 hash = is_res * 128 + pos;
141 ? set_insert(call->params, &arg, sizeof(arg), hash)
142 : set_find(call->params, &arg, sizeof(arg), hash);
146 * Retrieve an ABI call object argument.
148 * @param call the ABI call object
149 * @param is_res true for call results, false for call arguments
150 * @param pos position of the argument
152 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
154 return get_or_set_call_arg(call, is_res, pos, 0);
157 /* Set the flags for a call. */
158 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
165 /* Set register class for call address */
166 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
168 call->cls_addr = cls;
172 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
174 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
176 arg->alignment = alignment;
177 arg->space_before = space_before;
178 arg->space_after = space_after;
179 assert(alignment > 0 && "Alignment must be greater than 0");
182 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
184 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
189 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
191 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
196 /* Get the flags of a ABI call object. */
197 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
203 * Constructor for a new ABI call object.
205 * @return the new ABI call object
207 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
209 be_abi_call_t *call = xmalloc(sizeof(call[0]));
212 call->params = new_set(cmp_call_arg, 16);
214 call->cls_addr = cls_addr;
216 call->flags.bits.try_omit_fp = be_omit_fp;
222 * Destructor for an ABI call object.
224 static void be_abi_call_free(be_abi_call_t *call)
226 del_set(call->params);
232 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
233 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
234 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
235 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
238 Handling of the stack frame. It is composed of three types:
239 1) The type of the arguments which are pushed on the stack.
240 2) The "between type" which consists of stuff the call of the
241 function pushes on the stack (like the return address and
242 the old base pointer for ia32).
243 3) The Firm frame type which consists of all local variables
247 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent, int bias)
249 ir_type *t = get_entity_owner(ent);
250 int ofs = get_entity_offset(ent);
254 /* Find the type the entity is contained in. */
255 for(index = 0; index < N_FRAME_TYPES; ++index) {
256 if(frame->order[index] == t)
260 /* Add the size of all the types below the one of the entity to the entity's offset */
261 for(i = 0; i < index; ++i)
262 ofs += get_type_size_bytes(frame->order[i]);
264 /* correct the offset by the initial position of the frame pointer */
265 ofs -= frame->initial_offset;
267 /* correct the offset with the current bias. */
274 * Retrieve the entity with given offset from a frame type.
276 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
280 for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
281 ir_entity *ent = get_compound_member(t, i);
282 if(get_entity_offset(ent) == offset)
289 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
291 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
292 ir_entity *ent = search_ent_with_offset(base, 0);
294 frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;
296 return frame->initial_offset;
300 * Initializes the frame layout from parts
302 * @param frame the stack layout that will be initialized
303 * @param args the stack argument layout type
304 * @param between the between layout type
305 * @param locals the method frame type
306 * @param stack_dir the stack direction
307 * @param param_map an array mapping method argument positions to the stack argument type
309 * @return the initialized stack layout
311 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
312 ir_type *between, ir_type *locals, int stack_dir,
313 ir_entity *param_map[])
315 frame->arg_type = args;
316 frame->between_type = between;
317 frame->frame_type = locals;
318 frame->initial_offset = 0;
319 frame->stack_dir = stack_dir;
320 frame->order[1] = between;
321 frame->param_map = param_map;
324 frame->order[0] = args;
325 frame->order[2] = locals;
328 frame->order[0] = locals;
329 frame->order[2] = args;
335 /** Dumps the stack layout to file. */
336 static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
340 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
341 for (j = 0; j < N_FRAME_TYPES; ++j) {
342 ir_type *t = frame->order[j];
344 ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
345 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
346 ir_entity *ent = get_compound_member(t, i);
347 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
354 * Returns non-zero if the call argument at given position
355 * is transfered on the stack.
357 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
359 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
360 return arg && !arg->in_reg;
370 Adjustment of the calls inside a graph.
375 * Transform a call node.
376 * @param env The ABI environment for the current irg.
377 * @param irn The call node.
378 * @param curr_sp The stack pointer node to use.
379 * @return The stack pointer after the call.
381 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
383 ir_graph *irg = env->birg->irg;
384 const arch_env_t *arch_env = env->birg->main_env->arch_env;
385 const arch_isa_t *isa = arch_env->isa;
386 ir_type *mt = get_Call_type(irn);
387 ir_node *call_ptr = get_Call_ptr(irn);
388 int n_params = get_method_n_params(mt);
389 ir_node *curr_mem = get_Call_mem(irn);
390 ir_node *bl = get_nodes_block(irn);
391 pset *results = pset_new_ptr(8);
392 pset *caller_save = pset_new_ptr(8);
393 pset *states = pset_new_ptr(2);
395 int stack_dir = arch_isa_stack_dir(isa);
396 const arch_register_t *sp = arch_isa_sp(isa);
397 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
398 ir_mode *mach_mode = sp->reg_class->mode;
399 struct obstack *obst = &env->obst;
400 int no_alloc = call->flags.bits.frame_is_setup_on_call;
402 ir_node *res_proj = NULL;
403 int curr_res_proj = pn_Call_max;
411 const arch_register_t *reg;
412 const ir_edge_t *edge;
417 /* Let the isa fill out the abi description for that call node. */
418 arch_isa_get_call_abi(isa, mt, call);
420 /* Insert code to put the stack arguments on the stack. */
421 assert(get_Call_n_params(irn) == n_params);
422 for(i = 0; i < n_params; ++i) {
423 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
426 int arg_size = get_type_size_bytes(get_method_param_type(mt, i));
428 stack_size += round_up2(arg->space_before, arg->alignment);
429 stack_size += round_up2(arg_size, arg->alignment);
430 stack_size += round_up2(arg->space_after, arg->alignment);
431 obstack_int_grow(obst, i);
435 pos = obstack_finish(obst);
437 /* Collect all arguments which are passed in registers. */
438 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
439 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
440 if(arg && arg->in_reg) {
441 obstack_int_grow(obst, i);
445 low_args = obstack_finish(obst);
447 /* If there are some parameters which shall be passed on the stack. */
450 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
453 * Reverse list of stack parameters if call arguments are from left to right.
454 * We must them reverse again if they are pushed (not stored) and the stack
455 * direction is downwards.
457 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
458 for (i = 0; i < n_pos >> 1; ++i) {
459 int other = n_pos - i - 1;
467 * If the stack is decreasing and we do not want to store sequentially,
468 * or someone else allocated the call frame
469 * we allocate as much space on the stack all parameters need, by
470 * moving the stack pointer along the stack's direction.
472 if(stack_dir < 0 && !do_seq && !no_alloc) {
473 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
475 add_irn_dep(curr_sp, alloca_copy);
481 obstack_ptr_grow(obst, get_Call_mem(irn));
482 curr_mem = new_NoMem();
484 curr_mem = get_Call_mem(irn);
487 for(i = 0; i < n_pos; ++i) {
489 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
490 ir_node *param = get_Call_param(irn, p);
491 ir_node *addr = curr_sp;
493 ir_type *param_type = get_method_param_type(mt, p);
494 int param_size = get_type_size_bytes(param_type) + arg->space_after;
497 * If we wanted to build the arguments sequentially,
498 * the stack pointer for the next must be incremented,
499 * and the memory value propagated.
503 addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
505 add_irn_dep(curr_sp, alloca_copy);
508 add_irn_dep(curr_sp, curr_mem);
511 curr_ofs += arg->space_before;
512 curr_ofs = round_up2(curr_ofs, arg->alignment);
514 /* Make the expression to compute the argument's offset. */
516 ir_mode *constmode = mach_mode;
517 if(mode_is_reference(mach_mode)) {
520 addr = new_r_Const_long(irg, bl, constmode, curr_ofs);
521 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
525 /* Insert a store for primitive arguments. */
526 if (is_atomic_type(param_type)) {
528 store = new_r_Store(irg, bl, curr_mem, addr, param);
529 mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
532 /* Make a mem copy for compound arguments. */
536 assert(mode_is_reference(get_irn_mode(param)));
537 copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
538 mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
541 curr_ofs += param_size;
546 obstack_ptr_grow(obst, mem);
549 in = (ir_node **) obstack_finish(obst);
551 /* We need the sync only, if we didn't build the stores sequentially. */
554 curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);
556 curr_mem = get_Call_mem(irn);
559 obstack_free(obst, in);
562 /* Collect caller save registers */
563 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
565 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
566 for(j = 0; j < cls->n_regs; ++j) {
567 const arch_register_t *reg = arch_register_for_index(cls, j);
568 if(arch_register_type_is(reg, caller_save)) {
569 pset_insert_ptr(caller_save, (void *) reg);
571 if(arch_register_type_is(reg, state)) {
572 pset_insert_ptr(caller_save, (void*) reg);
573 pset_insert_ptr(states, (void*) reg);
578 /* search the greatest result proj number */
580 /* TODO: what if the result is NOT used? Currently there is
581 * no way to detect this later, especially there is no way to
582 * see this in the proj numbers.
583 * While this is ok for the register allocator, it is bad for
584 * backends which need to change the be_Call further (x87 simulator
585 * for instance. However for this particular case the call_type is
588 foreach_out_edge(irn, edge) {
589 const ir_edge_t *res_edge;
590 ir_node *irn = get_edge_src_irn(edge);
592 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
594 foreach_out_edge(irn, res_edge) {
596 be_abi_call_arg_t *arg;
597 ir_node *res = get_edge_src_irn(res_edge);
599 assert(is_Proj(res));
601 proj = get_Proj_proj(res);
602 arg = get_call_arg(call, 1, proj);
605 shift the proj number to the right, since we will drop the
606 unspeakable Proj_T from the Call. Therefore, all real argument
607 Proj numbers must be increased by pn_be_Call_first_res
609 proj += pn_be_Call_first_res;
610 set_Proj_proj(res, proj);
611 obstack_ptr_grow(obst, res);
613 if(proj > curr_res_proj)
614 curr_res_proj = proj;
616 pset_remove_ptr(caller_save, arg->reg);
617 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
624 obstack_ptr_grow(obst, NULL);
625 res_projs = obstack_finish(obst);
627 /* make the back end call node and set its register requirements. */
628 for(i = 0; i < n_low_args; ++i) {
629 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
631 foreach_pset(states, reg) {
632 const arch_register_class_t *cls = arch_register_get_class(reg);
634 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
635 ir_fprintf(stderr, "Adding %+F\n", regnode);
637 ir_node *regnode = new_rd_Unknown(irg, arch_register_class_mode(cls));
638 obstack_ptr_grow(obst, regnode);
640 count = n_low_args + pset_count(states);
642 in = obstack_finish(obst);
644 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
645 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
647 curr_res_proj + pset_count(caller_save), count,
648 in, get_Call_type(irn));
649 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
651 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
653 curr_res_proj + pset_count(caller_save),
654 count, in, get_Call_type(irn));
656 ARR_APP1(ir_node*, env->calls, low_call);
659 Set the register class of the call address to
660 the backend provided class (default: stack pointer class)
662 be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr);
664 DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
666 /* Set the register classes and constraints of the Call parameters. */
667 for(i = 0; i < n_low_args; ++i) {
668 int index = low_args[i];
669 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
670 assert(arg->reg != NULL);
672 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
675 /* Set the register constraints of the results. */
676 for (i = 0; res_projs[i]; ++i) {
677 int pn = get_Proj_proj(res_projs[i]);
679 /* Correct Proj number since it has been adjusted! (see above) */
680 const be_abi_call_arg_t *arg = get_call_arg(call, 1, pn - pn_Call_max);
682 /* Matze: we need the information about the real mode for later
683 * transforms (signed/unsigend compares, stores...), so leave the fixup
684 * for the backend transform phase... */
687 const arch_register_class_t *cls = arch_register_get_class(arg->reg);
688 ir_mode *mode = arch_register_class_mode(cls);
689 set_irn_mode(irn, mode);
693 be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
694 arch_set_irn_register(arch_env, res_projs[i], arg->reg);
696 obstack_free(obst, in);
697 exchange(irn, low_call);
699 /* redirect the result projs to the lowered call instead of the Proj_T */
700 for (i = 0; res_projs[i]; ++i)
701 set_Proj_pred(res_projs[i], low_call);
703 /* set the now unnecessary projT to bad */
704 if(res_proj != NULL) {
705 be_kill_node(res_proj);
708 /* Make additional projs for the caller save registers
709 and the Keep node which keeps them alive. */
710 if (pset_count(caller_save) > 0) {
711 const arch_register_t *reg;
715 for (reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
716 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
718 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
719 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
721 /* a call can produce ignore registers, in this case set the flag and register for the Proj */
722 if (arch_register_type_is(reg, ignore)) {
723 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
724 be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj), arch_irn_flags_ignore);
727 set_irn_link(proj, (void *) reg);
728 obstack_ptr_grow(obst, proj);
732 /* create the Keep for the caller save registers */
733 in = (ir_node **) obstack_finish(obst);
734 keep = be_new_Keep(NULL, irg, bl, n, in);
735 for (i = 0; i < n; ++i) {
736 const arch_register_t *reg = get_irn_link(in[i]);
737 be_node_set_reg_class(keep, i, reg->reg_class);
739 obstack_free(obst, in);
742 /* Clean up the stack. */
744 ir_node *mem_proj = NULL;
746 foreach_out_edge(low_call, edge) {
747 ir_node *irn = get_edge_src_irn(edge);
748 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
755 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
756 keep_alive(mem_proj);
759 /* Clean up the stack frame if we allocated it */
761 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
762 add_irn_dep(curr_sp, mem_proj);
764 add_irn_dep(curr_sp, alloca_copy);
770 be_abi_call_free(call);
771 obstack_free(obst, pos);
774 del_pset(caller_save);
781 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
783 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
791 const ir_edge_t *edge;
798 if (get_Alloc_where(alloc) != stack_alloc) {
803 block = get_nodes_block(alloc);
804 irg = get_irn_irg(block);
807 type = get_Alloc_type(alloc);
809 foreach_out_edge(alloc, edge) {
810 ir_node *irn = get_edge_src_irn(edge);
812 assert(is_Proj(irn));
813 switch(get_Proj_proj(irn)) {
825 /* Beware: currently Alloc nodes without a result might happen,
826 only escape analysis kills them and this phase runs only for object
827 oriented source. We kill the Alloc here. */
828 if (alloc_res == NULL && alloc_mem) {
829 exchange(alloc_mem, get_Alloc_mem(alloc));
833 /* we might need to multiply the size with the element size */
834 if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
835 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
836 ir_node *cnst = new_rd_Const(NULL, irg, block, mode_Iu, tv);
837 ir_node *mul = new_rd_Mul(NULL, irg, block, get_Alloc_size(alloc),
841 size = get_Alloc_size(alloc);
844 /* The stack pointer will be modified in an unknown manner.
845 We cannot omit it. */
846 env->call->flags.bits.try_omit_fp = 0;
847 new_alloc = be_new_AddSP(env->isa->sp, irg, block, curr_sp, size);
849 if(alloc_mem != NULL) {
853 addsp_mem = new_r_Proj(irg, block, new_alloc, mode_M, pn_be_AddSP_M);
855 // We need to sync the output mem of the AddSP with the input mem
856 // edge into the alloc node
857 ins[0] = get_Alloc_mem(alloc);
859 sync = new_r_Sync(irg, block, 2, ins);
861 exchange(alloc_mem, sync);
864 exchange(alloc, new_alloc);
866 /* fix projnum of alloca res */
867 set_Proj_proj(alloc_res, pn_be_AddSP_res);
869 addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
871 /* copy the address away, since it could be used after further stack pointer modifications. */
872 /* Let it point curr_sp just for the moment, I'll reroute it in a second. */
873 *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, block, curr_sp);
875 /* Let all users of the Alloc() result now point to the copy. */
876 edges_reroute(alloc_res, copy, irg);
878 /* Rewire the copy appropriately. */
879 set_irn_n(copy, be_pos_Copy_op, addr);
888 * The Free is transformed into a back end free node and connected to the stack nodes.
890 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
894 ir_node *subsp, *mem, *res, *size, *sync;
898 if (get_Free_where(free) != stack_alloc) {
903 block = get_nodes_block(free);
904 irg = get_irn_irg(block);
905 type = get_Free_type(free);
907 /* we might need to multiply the size with the element size */
908 if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
909 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
910 ir_node *cnst = new_rd_Const(NULL, irg, block, mode_Iu, tv);
911 ir_node *mul = new_rd_Mul(NULL, irg, block, get_Free_size(free),
915 size = get_Free_size(free);
918 /* The stack pointer will be modified in an unknown manner.
919 We cannot omit it. */
920 env->call->flags.bits.try_omit_fp = 0;
921 subsp = be_new_SubSP(env->isa->sp, irg, block, curr_sp, size);
923 mem = new_r_Proj(irg, block, subsp, mode_M, pn_be_SubSP_M);
924 res = new_r_Proj(irg, block, subsp, mode_P_data, pn_be_SubSP_res);
926 /* we need to sync the memory */
927 in[0] = get_Free_mem(free);
929 sync = new_r_Sync(irg, block, 2, in);
931 /* and make the AddSP dependent on the former memory */
932 add_irn_dep(subsp, get_Free_mem(free));
935 exchange(free, sync);
941 /* the following function is replaced by the usage of the heights module */
944 * Walker for dependent_on().
945 * This function searches a node tgt recursively from a given node
946 * but is restricted to the given block.
947 * @return 1 if tgt was reachable from curr, 0 if not.
949 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
953 if (get_nodes_block(curr) != bl)
959 /* Phi functions stop the recursion inside a basic block */
960 if (! is_Phi(curr)) {
961 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
962 if (check_dependence(get_irn_n(curr, i), tgt, bl))
972 * Check if a node is somehow data dependent on another one.
973 * both nodes must be in the same basic block.
974 * @param n1 The first node.
975 * @param n2 The second node.
976 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
978 static int dependent_on(ir_node *n1, ir_node *n2)
980 assert(get_nodes_block(n1) == get_nodes_block(n2));
982 return heights_reachable_in_block(ir_heights, n1, n2);
985 static int cmp_call_dependecy(const void *c1, const void *c2)
987 ir_node *n1 = *(ir_node **) c1;
988 ir_node *n2 = *(ir_node **) c2;
991 Classical qsort() comparison function behavior:
992 0 if both elements are equal
993 1 if second is "smaller" that first
994 -1 if first is "smaller" that second
996 if (dependent_on(n1, n2))
999 if (dependent_on(n2, n1))
1006 * Walker: links all Call/alloc/Free nodes to the Block they are contained.
1008 static void link_calls_in_block_walker(ir_node *irn, void *data)
1010 ir_opcode code = get_irn_opcode(irn);
1012 if (code == iro_Call ||
1013 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1014 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1015 be_abi_irg_t *env = data;
1016 ir_node *bl = get_nodes_block(irn);
1017 void *save = get_irn_link(bl);
1019 if (code == iro_Call)
1020 env->call->flags.bits.irg_is_leaf = 0;
1022 set_irn_link(irn, save);
1023 set_irn_link(bl, irn);
1029 * Process all Call nodes inside a basic block.
1030 * Note that the link field of the block must contain a linked list of all
1031 * Call nodes inside the Block. We first order this list according to data dependency
1032 * and that connect the calls together.
1034 static void process_calls_in_block(ir_node *bl, void *data)
1036 be_abi_irg_t *env = data;
1037 ir_node *curr_sp = env->init_sp;
1041 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
1042 obstack_ptr_grow(&env->obst, irn);
1044 /* If there were call nodes in the block. */
1048 ir_node *copy = NULL;
1051 nodes = obstack_finish(&env->obst);
1053 /* order the call nodes according to data dependency */
1054 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
1056 for(i = n - 1; i >= 0; --i) {
1057 ir_node *irn = nodes[i];
1059 DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1060 switch(get_irn_opcode(irn)) {
1062 curr_sp = adjust_call(env, irn, curr_sp, copy);
1065 curr_sp = adjust_alloc(env, irn, curr_sp, ©);
1068 curr_sp = adjust_free(env, irn, curr_sp);
1075 obstack_free(&env->obst, nodes);
1077 /* Keep the last stack state in the block by tying it to Keep node */
1079 keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
1080 pmap_insert(env->keep_map, bl, keep);
1083 set_irn_link(bl, curr_sp);
1084 } /* process_calls_in_block */
1087 * Adjust all call nodes in the graph to the ABI conventions.
1089 static void process_calls(be_abi_irg_t *env)
1091 ir_graph *irg = env->birg->irg;
1093 env->call->flags.bits.irg_is_leaf = 1;
1094 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
1096 ir_heights = heights_new(env->birg->irg);
1097 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
1098 heights_free(ir_heights);
1102 static ir_node *setup_frame(be_abi_irg_t *env)
1104 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1105 const arch_register_t *sp = isa->sp;
1106 const arch_register_t *bp = isa->bp;
1107 be_abi_call_flags_bits_t flags = env->call->flags.bits;
1108 ir_graph *irg = env->birg->irg;
1109 ir_node *bl = get_irg_start_block(irg);
1110 ir_node *no_mem = get_irg_no_mem(irg);
1111 ir_node *old_frame = get_irg_frame(irg);
1112 ir_node *stack = pmap_get(env->regs, (void *) sp);
1113 ir_node *frame = pmap_get(env->regs, (void *) bp);
1115 int stack_nr = get_Proj_proj(stack);
1117 if(flags.try_omit_fp) {
1118 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
1123 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
1125 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
1126 if(!flags.fp_free) {
1127 be_set_constr_single_reg(frame, -1, bp);
1128 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
1129 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
1132 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
1135 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
1136 env->init_sp = stack;
1137 set_irg_frame(irg, frame);
1138 edges_reroute(old_frame, frame, irg);
1143 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
1145 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1146 const arch_register_t *sp = isa->sp;
1147 const arch_register_t *bp = isa->bp;
1148 ir_graph *irg = env->birg->irg;
1149 ir_node *ret_mem = get_Return_mem(ret);
1150 ir_node *frame = get_irg_frame(irg);
1151 ir_node *bl = get_nodes_block(ret);
1152 ir_node *stack = get_irn_link(bl);
1156 if(env->call->flags.bits.try_omit_fp) {
1157 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
1161 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
1162 be_set_constr_single_reg(stack, -1, sp);
1163 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
1166 pmap_foreach(env->regs, ent) {
1167 const arch_register_t *reg = ent->key;
1168 ir_node *irn = ent->value;
1171 obstack_ptr_grow(&env->obst, stack);
1173 obstack_ptr_grow(&env->obst, frame);
1174 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1175 obstack_ptr_grow(obst, irn);
1182 * Computes the stack argument layout type.
1183 * Changes a possibly allocated value param type by moving
1184 * entities to the stack layout type.
1186 * @param env the ABI environment
1187 * @param call the current call ABI
1188 * @param method_type the method type
1189 * @param param_map an array mapping method arguments to the stack layout type
1191 * @return the stack argument layout type
1193 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map)
1195 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1196 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
1197 int n = get_method_n_params(method_type);
1198 int curr = inc > 0 ? 0 : n - 1;
1204 ir_type *val_param_tp = get_method_value_param_type(method_type);
1205 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1208 *param_map = map = obstack_alloc(&env->obst, n * sizeof(ir_entity *));
1209 res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
1210 for (i = 0; i < n; ++i, curr += inc) {
1211 ir_type *param_type = get_method_param_type(method_type, curr);
1212 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1215 if (arg->on_stack) {
1217 /* the entity was already created, move it to the param type */
1218 arg->stack_ent = get_method_value_param_ent(method_type, i);
1219 remove_struct_member(val_param_tp, arg->stack_ent);
1220 set_entity_owner(arg->stack_ent, res);
1221 add_struct_member(res, arg->stack_ent);
1222 /* must be automatic to set a fixed layout */
1223 set_entity_allocation(arg->stack_ent, allocation_automatic);
1226 snprintf(buf, sizeof(buf), "param_%d", i);
1227 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1229 ofs += arg->space_before;
1230 ofs = round_up2(ofs, arg->alignment);
1231 set_entity_offset(arg->stack_ent, ofs);
1232 ofs += arg->space_after;
1233 ofs += get_type_size_bytes(param_type);
1234 map[i] = arg->stack_ent;
1237 set_type_size_bytes(res, ofs);
1238 set_type_state(res, layout_fixed);
/**
 * Insert a Perm node after the RegParams node for every register class,
 * permuting all registers of the class that have a Proj in @p regs and are
 * not flagged 'ignore'. The regs map is updated in place so each register
 * afterwards maps to the corresponding Perm operand/result.
 */
1243 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1246 struct obstack obst;
1248 obstack_init(&obst);
1250 /* Create a Perm after the RegParams node to delimit it. */
1251 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1252 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
/* collect the Projs of this class that take part in the Perm */
1257 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1258 const arch_register_t *reg = &cls->regs[j];
1259 ir_node *irn = pmap_get(regs, (void *) reg);
1261 if(irn && !arch_register_type_is(reg, ignore)) {
1263 obstack_ptr_grow(&obst, irn);
/* remember the register on the node for the rewiring loop below */
1264 set_irn_link(irn, (void *) reg);
1268 obstack_ptr_grow(&obst, NULL);
1269 in = obstack_finish(&obst);
1271 perm = be_new_Perm(cls, irg, bl, n_regs, in);
/* re-associate each register with its Perm and pin the output register */
1272 for(j = 0; j < n_regs; ++j) {
1273 ir_node *arg = in[j];
1274 arch_register_t *reg = get_irn_link(arg);
1275 pmap_insert(regs, reg, arg);
1276 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1279 obstack_free(&obst, in);
1282 obstack_free(&obst, NULL);
1287 const arch_register_t *reg;
/**
 * qsort() comparator for reg_node_map_t entries: orders by register class
 * first, then by register index within the class, giving a deterministic
 * ordering independent of pmap iteration order.
 */
1291 static int cmp_regs(const void *a, const void *b)
1293 const reg_node_map_t *p = a;
1294 const reg_node_map_t *q = b;
1296 if(p->reg->reg_class == q->reg->reg_class)
1297 return p->reg->index - q->reg->index;
/* NOTE(review): difference of reg_class pointers narrowed to int — relies
 * on the class descriptors living in one array; verify on 64-bit hosts. */
1299 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flatten a register->node pmap into an array allocated on @p obst,
 * sorted with cmp_regs() for deterministic order.
 * Ownership: the caller releases the array via the obstack.
 */
1302 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1305 int n = pmap_count(reg_map);
1307 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1309 pmap_foreach(reg_map, ent) {
1310 res[i].reg = ent->key;
1311 res[i].irn = ent->value;
/* sort so callers see registers in a stable class/index order */
1315 qsort(res, n, sizeof(res[0]), cmp_regs);
1320 * Creates a barrier.
/* A be_Barrier takes all currently live register values (plus the memory)
 * as inputs and reproduces them as register-constrained Projs, preventing
 * code motion across it. @p regs maps registers to their current value
 * nodes and is updated in place to the barrier's Projs; *mem is replaced
 * by the barrier's memory Proj.
 * @param in_req if non-zero, the barrier's inputs are constrained too. */
1322 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1324 ir_graph *irg = env->birg->irg;
1325 int n_regs = pmap_count(regs);
/* gather register values in deterministic order; memory goes last */
1331 rm = reg_map_to_arr(&env->obst, regs);
1333 for(n = 0; n < n_regs; ++n)
1334 obstack_ptr_grow(&env->obst, rm[n].irn);
1337 obstack_ptr_grow(&env->obst, *mem);
1341 in = (ir_node **) obstack_finish(&env->obst);
1342 irn = be_new_Barrier(irg, bl, n, in);
1343 obstack_free(&env->obst, in);
1345 for(n = 0; n < n_regs; ++n) {
1346 const arch_register_t *reg = rm[n].reg;
1348 int pos = BE_OUT_POS(n);
1351 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1352 be_node_set_reg_class(irn, n, reg->reg_class);
1354 be_set_constr_single_reg(irn, n, reg);
1355 be_set_constr_single_reg(irn, pos, reg);
1356 be_node_set_reg_class(irn, pos, reg->reg_class);
1357 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1359 /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
1360 if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
1361 flags |= arch_irn_flags_ignore;
1363 if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
1364 flags |= arch_irn_flags_modify_sp;
1366 be_node_set_flags(irn, pos, flags);
/* subsequent lookups of this register yield the barrier's Proj */
1368 pmap_insert(regs, (void *) reg, proj);
/* the last output (index n == n_regs) is the new memory */
1372 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1375 obstack_free(&env->obst, rm);
1380 * Creates a be_Return for a Return node.
1382 * @param env the abi environment
1383 * @param irn the Return node or NULL if there was none
1384 * @param bl the block where the be_Return should be placed
1385 * @param mem the current memory
1386 * @param n_res number of return results
1388 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
1389 be_abi_call_t *call = env->call;
1390 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1392 pmap *reg_map = pmap_create();
1393 ir_node *keep = pmap_get(env->keep_map, bl);
1399 const arch_register_t **regs;
1403 get the valid stack node in this block.
1404 If we had a call in that block there is a Keep constructed by process_calls()
1405 which points to the last stack modification in that block. we'll use
1406 it then. Else we use the stack from the start block and let
1407 the ssa construction fix the usage.
1409 stack = be_abi_reg_map_get(env->regs, isa->sp);
/* detach the Keep: its stack input becomes our stack, the Keep is parked
 * on a Bad block/input instead of being exchanged (see commented call) */
1411 ir_node *bad = new_r_Bad(env->birg->irg);
1412 stack = get_irn_n(keep, 0);
1413 set_nodes_block(keep, bad);
1414 set_irn_n(keep, 0, bad);
1415 // exchange(keep, new_r_Bad(env->birg->irg));
1418 /* Insert results for Return into the register map. */
1419 for(i = 0; i < n_res; ++i) {
1420 ir_node *res = get_Return_res(irn, i);
1421 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1422 assert(arg->in_reg && "return value must be passed in register");
1423 pmap_insert(reg_map, (void *) arg->reg, res);
1426 /* Add uses of the callee save registers. */
1427 pmap_foreach(env->regs, ent) {
1428 const arch_register_t *reg = ent->key;
1429 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1430 pmap_insert(reg_map, ent->key, ent->value);
1433 be_abi_reg_map_set(reg_map, isa->sp, stack);
1435 /* Make the Epilogue node and call the arch's epilogue maker. */
1436 create_barrier(env, bl, &mem, reg_map, 1);
1437 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1440 Maximum size of the in array for Return nodes is
1441 return args + callee save/ignore registers + memory + stack pointer
1443 in_max = pmap_count(reg_map) + n_res + 2;
1445 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1446 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
/* in[0] is the memory (set in a line not visible here), in[1] the SP */
1449 in[1] = be_abi_reg_map_get(reg_map, isa->sp);
1454 /* clear SP entry, since it has already been grown. */
1455 pmap_insert(reg_map, (void *) isa->sp, NULL);
1456 for(i = 0; i < n_res; ++i) {
1457 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1459 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1460 regs[n++] = arg->reg;
1462 /* Clear the map entry to mark the register as processed. */
1463 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1466 /* grow the rest of the stuff. */
1467 pmap_foreach(reg_map, ent) {
1470 regs[n++] = ent->key;
1474 /* The in array for the new back end return is now ready. */
1475 ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
1477 /* Set the register classes of the return's parameter accordingly. */
1478 for(i = 0; i < n; ++i)
1480 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1482 /* Free the space of the Epilog's in array and the register <-> proj map. */
1483 obstack_free(&env->obst, in);
1484 pmap_destroy(reg_map);
/** Walker environment for lower_frame_sels_walker(). */
1489 typedef struct lower_frame_sels_env_t {
1491 ir_entity *value_param_list; /**< the list of all value param entities */
1492 } lower_frame_sels_env_t;
1495 * Walker: Replaces Sels of frame type and
1496 * value param type entities by FrameAddress.
/* Additionally records every value-parameter entity whose address was
 * taken into ctx->value_param_list (via the entity link field). */
1498 static void lower_frame_sels_walker(ir_node *irn, void *data)
1500 lower_frame_sels_env_t *ctx = data;
1503 ir_graph *irg = current_ir_graph;
1504 ir_node *frame = get_irg_frame(irg);
1505 ir_node *param_base = get_irg_value_param_base(irg);
1506 ir_node *ptr = get_Sel_ptr(irn);
1508 if (ptr == frame || ptr == param_base) {
1509 be_abi_irg_t *env = ctx->env;
1510 ir_entity *ent = get_Sel_entity(irn);
1511 ir_node *bl = get_nodes_block(irn);
1514 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
1517 /* check if it's a param Sel and if we have not seen this entity immediately before */
1518 if (ptr == param_base && ctx->value_param_list != ent) {
1519 set_entity_link(ent, ctx->value_param_list);
1520 ctx->value_param_list = ent;
1527 * Check if a value parameter is transmitted as a register.
1528 * This might happen if the address of an parameter is taken which is
1529 * transmitted in registers.
1531 * Note that on some architectures this case must be handled specially
1532 * because the place of the backing store is determined by their ABI.
1534 * In the default case we move the entity to the frame type and create
1535 * a backing store into the first block.
1537 static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_entity *value_param_list) {
1538 be_abi_call_t *call = env->call;
1539 ir_graph *irg = env->birg->irg;
1540 ir_entity *ent, *next_ent, *new_list;
1542 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
/* filter: collect (into new_list) the entities that need a backing store */
1545 for (ent = value_param_list; ent; ent = next_ent) {
1546 int i = get_struct_member_index(get_entity_owner(ent), ent);
1547 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1549 next_ent = get_entity_link(ent);
1551 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
1552 set_entity_link(ent, new_list);
1557 /* ok, change the graph */
1558 ir_node *start_bl = get_irg_start_block(irg);
1559 ir_node *first_bl = NULL;
1560 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1561 const ir_edge_t *edge;
1562 optimization_state_t state;
/* find the one block directly following the start block */
1565 foreach_block_succ(start_bl, edge) {
1566 ir_node *succ = get_edge_src_irn(edge);
1567 if (start_bl != succ) {
1573 /* we had already removed critical edges, so the following
1574 assertion should be always true. */
1575 assert(get_Block_n_cfgpreds(first_bl) == 1);
1577 /* now create backing stores */
1578 frame = get_irg_frame(irg);
1579 imem = get_irg_initial_mem(irg);
/* optimizations disabled around the Proj creation so it is not CSE'd */
1581 save_optimization_state(&state);
1583 nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
1584 restore_optimization_state(&state);
1586 /* reroute all edges to the new memory source */
1587 edges_reroute(imem, nmem, irg);
1591 args = get_irg_args(irg);
1592 args_bl = get_nodes_block(args);
/* chain one Store per collected entity into the first block */
1593 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1594 int i = get_struct_member_index(get_entity_owner(ent), ent);
1595 ir_type *tp = get_entity_type(ent);
1596 ir_mode *mode = get_type_mode(tp);
1599 /* address for the backing store */
1600 addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
1603 mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
1605 /* the backing store itself */
1606 store = new_r_Store(irg, first_bl, mem, addr,
1607 new_r_Proj(irg, args_bl, args, mode, i));
1609 /* the new memory Proj gets the last Proj from store */
1610 set_Proj_pred(nmem, store);
1611 set_Proj_proj(nmem, pn_Store_M);
1613 /* move all entities to the frame type */
1614 frame_tp = get_irg_frame_type(irg);
1615 offset = get_type_size_bytes(frame_tp);
1616 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1617 ir_type *tp = get_entity_type(ent);
1618 int align = get_type_alignment_bytes(tp);
/* round the running offset up to the entity's alignment */
1620 offset += align - 1;
1622 set_entity_owner(ent, frame_tp);
1623 add_class_member(frame_tp, ent);
1624 /* must be automatic to set a fixed layout */
1625 set_entity_allocation(ent, allocation_automatic);
1626 set_entity_offset(ent, offset);
1627 offset += get_type_size_bytes(tp);
1629 set_type_size_bytes(frame_tp, offset);
1634 * The start block has no jump, instead it has an initial exec Proj.
1635 * The backend wants to handle all blocks the same way, so we replace
1636 * the out cfg edge with a real jump.
/* Block walker: once the block that is the start block's CFG successor is
 * found, its predecessor coming from the start block is replaced by a Jmp. */
1638 static void fix_start_block(ir_node *block, void *env) {
1641 ir_node *start_block;
1644 /* we processed the start block, return */
1648 irg = get_irn_irg(block);
1649 start_block = get_irg_start_block(irg);
1651 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1652 ir_node *pred = get_Block_cfgpred(block, i);
1653 ir_node *pred_block = get_nodes_block(pred);
1655 /* ok, we are in the block, having start as cfg predecessor */
1656 if (pred_block == start_block) {
1657 ir_node *jump = new_r_Jmp(irg, pred_block);
1658 set_Block_cfgpred(block, i, jump);
1665 * Modify the irg itself and the frame type.
/* Main ABI transformation for one graph: lowers frame Sels, creates backing
 * stores for address-taken register parameters, builds the argument type,
 * introduces the RegParams node with Projs for all callee-save/ignore
 * registers, runs the prologue callback, inserts the start barrier, replaces
 * stack-passed parameters by StackParam/FrameAddr nodes and rewrites all
 * Return nodes into be_Return nodes. */
1667 static void modify_irg(be_abi_irg_t *env)
1669 be_abi_call_t *call = env->call;
1670 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1671 const arch_register_t *sp = arch_isa_sp(isa);
1672 ir_graph *irg = env->birg->irg;
1673 ir_node *bl = get_irg_start_block(irg);
1674 ir_node *end = get_irg_end_block(irg);
1675 ir_node *mem = get_irg_initial_mem(irg);
1676 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1677 pset *dont_save = pset_new_ptr(8);
1683 const arch_register_t *fp_reg;
1684 ir_node *frame_pointer;
1686 ir_node *reg_params_bl;
1689 ir_node *value_param_base;
1690 const ir_edge_t *edge;
1691 ir_type *arg_type, *bet_type;
1692 lower_frame_sels_env_t ctx;
1693 ir_entity **param_map;
1695 bitset_t *used_proj_nr;
1696 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1698 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1700 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1702 ctx.value_param_list = NULL;
1703 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1705 /* value_param_base anchor is not needed anymore now */
1706 value_param_base = get_irg_value_param_base(irg);
1707 be_kill_node(value_param_base);
1708 set_irg_value_param_base(irg, new_r_Bad(irg));
1710 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1711 env->regs = pmap_create();
1713 used_proj_nr = bitset_alloca(1024);
1714 n_params = get_method_n_params(method_type);
1715 args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
1716 memset(args, 0, n_params * sizeof(args[0]));
1718 /* Check if a value parameter is transmitted as a register.
1719 * This might happen if the address of an parameter is taken which is
1720 * transmitted in registers.
1722 * Note that on some architectures this case must be handled specially
1723 * because the place of the backing store is determined by their ABI.
1725 * In the default case we move the entity to the frame type and create
1726 * a backing store into the first block.
1728 fix_address_of_parameter_access(env, ctx.value_param_list);
1730 /* Fill the argument vector */
1731 arg_tuple = get_irg_args(irg);
1732 foreach_out_edge(arg_tuple, edge) {
1733 ir_node *irn = get_edge_src_irn(edge);
1734 int nr = get_Proj_proj(irn);
1736 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
/* NOTE(review): '¶m_map' below is a mojibake for '&param_map'
 * ("&para" was collapsed into the paragraph sign) — restore it. */
1739 arg_type = compute_arg_type(env, call, method_type, ¶m_map);
1740 bet_type = call->cb->get_between_type(env->cb);
1741 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
1743 /* Count the register params and add them to the number of Projs for the RegParams node */
1744 for(i = 0; i < n_params; ++i) {
1745 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1746 if(arg->in_reg && args[i]) {
1747 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1748 assert(i == get_Proj_proj(args[i]));
1750 /* For now, associate the register with the old Proj from Start representing that argument. */
1751 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1752 bitset_set(used_proj_nr, i);
1753 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1757 /* Collect all callee-save registers */
1758 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1759 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1760 for(j = 0; j < cls->n_regs; ++j) {
1761 const arch_register_t *reg = &cls->regs[j];
1762 if(arch_register_type_is(reg, callee_save) ||
1763 arch_register_type_is(reg, state)) {
1764 pmap_insert(env->regs, (void *) reg, NULL);
/* stack and frame pointer always get a RegParams Proj */
1769 pmap_insert(env->regs, (void *) sp, NULL);
1770 pmap_insert(env->regs, (void *) isa->bp, NULL);
1771 reg_params_bl = get_irg_start_block(irg);
1772 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1773 add_irn_dep(env->reg_params, get_irg_start(irg));
1776 * make proj nodes for the callee save registers.
1777 * memorize them, since Return nodes get those as inputs.
1779 * Note, that if a register corresponds to an argument, the regs map contains
1780 * the old Proj from start for that argument.
1783 rm = reg_map_to_arr(&env->obst, env->regs);
1784 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1785 arch_register_t *reg = (void *) rm[i].reg;
1786 ir_mode *mode = reg->reg_class->mode;
1788 int pos = BE_OUT_POS((int) nr);
1794 bitset_set(used_proj_nr, nr);
1795 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1796 pmap_insert(env->regs, (void *) reg, proj);
1797 be_set_constr_single_reg(env->reg_params, pos, reg);
1798 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1801 * If the register is an ignore register,
1802 * The Proj for that register shall also be ignored during register allocation.
1804 if(arch_register_type_is(reg, ignore))
1805 flags |= arch_irn_flags_ignore;
1808 flags |= arch_irn_flags_modify_sp;
1810 be_node_set_flags(env->reg_params, pos, flags);
1812 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1814 obstack_free(&env->obst, rm);
1816 /* Generate the Prologue */
1817 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1819 /* do the stack allocation BEFORE the barrier, or spill code
1820 might be added before it */
1821 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1822 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
1823 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1825 env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
1827 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1828 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1830 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1831 set_irg_frame(irg, frame_pointer);
1832 pset_insert_ptr(env->ignore_regs, fp_reg);
1834 set_irg_initial_mem(irg, mem);
1836 /* Now, introduce stack param nodes for all parameters passed on the stack */
1837 for(i = 0; i < n_params; ++i) {
1838 ir_node *arg_proj = args[i];
1839 ir_node *repl = NULL;
1841 if(arg_proj != NULL) {
1842 be_abi_call_arg_t *arg;
1843 ir_type *param_type;
1844 int nr = get_Proj_proj(arg_proj);
/* NOTE(review): clamping with MIN(nr, n_params) permits nr == n_params,
 * which is one past the last valid parameter index — verify intent. */
1846 nr = MIN(nr, n_params);
1847 arg = get_call_arg(call, 0, nr);
1848 param_type = get_method_param_type(method_type, nr);
1851 repl = pmap_get(env->regs, (void *) arg->reg);
1854 else if(arg->on_stack) {
1855 /* For atomic parameters which are actually used, we create a StackParam node. */
1856 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1857 ir_mode *mode = get_type_mode(param_type);
1858 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1859 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1862 /* The stack parameter is not primitive (it is a struct or array),
1863 we thus will create a node representing the parameter's address
1866 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1870 assert(repl != NULL);
1871 exchange(args[i], repl);
1875 /* the arg proj is not needed anymore now */
1876 assert(get_irn_n_edges(arg_tuple) == 0);
1877 be_kill_node(arg_tuple);
1878 set_irg_args(irg, new_rd_Bad(irg));
1880 /* All Return nodes hang on the End node, so look for them there. */
1881 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
1882 ir_node *irn = get_Block_cfgpred(end, i);
1884 if (is_Return(irn)) {
1885 ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
1889 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
1890 the code is dead and will never be executed. */
1892 del_pset(dont_save);
1893 obstack_free(&env->obst, args);
1895 /* handle start block here (place a jump in the block) */
1897 irg_block_walk_graph(irg, fix_start_block, NULL, &temp);
1900 /** Fix the state inputs of calls that still hang on unknowns */
1902 void fix_call_state_inputs(be_abi_irg_t *env)
1904 const arch_isa_t *isa = env->isa;
1906 const arch_register_t **stateregs = NEW_ARR_F(const arch_register_t*, 0);
1908 /* Collect all state registers */
1909 n = arch_isa_get_n_reg_class(isa);
1910 for(i = 0; i < n; ++i) {
1912 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1913 for(j = 0; j < cls->n_regs; ++j) {
1914 const arch_register_t *reg = arch_register_for_index(cls, j);
1915 if(arch_register_type_is(reg, state)) {
/* NOTE(review): the ARR_APP1 element type drops the const qualifier */
1916 ARR_APP1(arch_register_t*, stateregs, reg);
1921 n = ARR_LEN(env->calls);
1922 n_states = ARR_LEN(stateregs);
/* rewire every recorded be_Call: replace its trailing state inputs with
 * the current value nodes of the state registers */
1923 for(i = 0; i < n; ++i) {
1925 ir_node *call = env->calls[i];
1927 arity = get_irn_arity(call);
1929 /* the statereg inputs are the last n inputs of the calls */
1930 for(s = 0; s < n_states; ++s) {
1931 int inp = arity - n_states + s;
1932 const arch_register_t *reg = stateregs[s];
1933 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
1935 set_irn_n(call, inp, regnode);
/**
 * Create and apply the ABI transformation for one backend graph.
 * Sets up the environment (call abstraction, stack-pointer register
 * requirements, debug module), lowers calls, runs modify_irg() (not visible
 * in this excerpt), patches call state inputs, and registers the nodes that
 * must survive dead-node elimination. Returns the new ABI environment;
 * ownership passes to the caller (freed with be_abi_free()).
 */
1940 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1942 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1943 ir_node *old_frame = get_irg_frame(birg->irg);
1944 ir_graph *irg = birg->irg;
1948 optimization_state_t state;
1949 unsigned *limited_bitset;
1951 be_omit_fp = birg->main_env->options->omit_fp;
1953 obstack_init(&env->obst);
1955 env->isa = birg->main_env->arch_env->isa;
1956 env->method_type = get_entity_type(get_irg_entity(irg));
1957 env->call = be_abi_call_new(env->isa->sp->reg_class);
1958 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1960 env->ignore_regs = pset_new_ptr_default();
1961 env->keep_map = pmap_create();
1962 env->dce_survivor = new_survive_dce();
/* the SP may only live in the single stack-pointer register */
1965 env->sp_req.type = arch_register_req_type_limited;
1966 env->sp_req.cls = arch_register_get_class(env->isa->sp);
1967 limited_bitset = rbitset_obstack_alloc(&env->obst, env->sp_req.cls->n_regs);
1968 rbitset_set(limited_bitset, arch_register_get_index(env->isa->sp));
1969 env->sp_req.limited = limited_bitset;
1971 env->sp_cls_req.type = arch_register_req_type_normal;
1972 env->sp_cls_req.cls = arch_register_get_class(env->isa->sp);
1974 /* Beware: later we replace this node by the real one, ensure it is not CSE'd
1975 to another Unknown or the stack pointer gets used */
1976 save_optimization_state(&state);
1978 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1979 restore_optimization_state(&state);
1980 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
1982 env->calls = NEW_ARR_F(ir_node*, 0);
1984 /* Lower all call nodes in the IRG. */
1988 Beware: init backend abi call object after processing calls,
1989 otherwise some information might be not yet available.
1991 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1993 /* Process the IRG */
1996 /* fix call inputs for state registers */
1997 fix_call_state_inputs(env);
1999 /* We don't need the keep map anymore. */
2000 pmap_destroy(env->keep_map);
2002 /* calls array is not needed anymore */
2003 DEL_ARR_F(env->calls);
2005 /* reroute the stack origin of the calls to the true stack origin. */
2006 exchange(dummy, env->init_sp);
2007 exchange(old_frame, get_irg_frame(irg));
2009 /* Make some important node pointers survive the dead node elimination. */
2010 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
2011 pmap_foreach(env->regs, ent) {
2012 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
2015 env->call->cb->done(env->cb);
/** Release all resources held by an ABI environment created by
 *  be_abi_introduce(). */
2020 void be_abi_free(be_abi_irg_t *env)
2022 free_survive_dce(env->dce_survivor)
2023 del_pset(env->ignore_regs);
2024 pmap_destroy(env->regs);
2025 obstack_free(&env->obst, NULL);
/** Set a bit in @p bs for every additionally-ignored register of class
 *  @p cls (the FP register etc. collected in abi->ignore_regs). */
2029 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
2031 arch_register_t *reg;
2033 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
2034 if(reg->reg_class == cls)
2035 bitset_set(bs, reg->index);
2038 /* Returns the stack layout from a abi environment. */
2039 const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
2046 | ___(_)_ __ / ___|| |_ __ _ ___| | __
2047 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
2048 | _| | |> < ___) | || (_| | (__| <
2049 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/** Flexible array of nodes (libFirm ARR_F array). */
2053 typedef ir_node **node_array;
/** Environment for collect_stack_nodes_walker(): per-state-register node
 *  lists run parallel to state_regs. */
2055 typedef struct fix_stack_walker_env_t {
2056 node_array sp_nodes;
2057 node_array *state_nodes;
2058 const arch_register_t **state_regs;
2059 const arch_env_t *arch_env;
2060 } fix_stack_walker_env_t;
2063 * Walker. Collect all stack modifying nodes.
/* Also gathers, per state register, every node currently assigned to that
 * register into env->state_nodes[i]. */
2065 static void collect_stack_nodes_walker(ir_node *node, void *data)
2067 fix_stack_walker_env_t *env = data;
2072 if (arch_irn_is(env->arch_env, node, modify_sp)) {
/* a modify_sp node must produce a plain data value, not memory/tuple */
2073 assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
2074 ARR_APP1(ir_node*, env->sp_nodes, node);
2077 if(ARR_LEN(env->state_nodes) > 0) {
2079 const arch_register_t *reg = arch_get_irn_register(env->arch_env, node);
2081 n = ARR_LEN(env->state_nodes);
2082 for(i = 0; i < n; ++i) {
2083 if(reg == env->state_regs[i]) {
2084 ARR_APP1(ir_node*, env->state_nodes[i], node);
/**
 * Rebuild SSA form for all stack-pointer-modifying nodes and for all state
 * registers: collect the definitions with a graph walk, run SSA construction
 * over them, and constrain/assign the resulting Phis.
 */
2090 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
2094 be_irg_t *birg = env->birg;
2095 fix_stack_walker_env_t walker_env;
2098 walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
2099 walker_env.arch_env = birg->main_env->arch_env;
2100 walker_env.state_nodes = NEW_ARR_F(node_array, 0);
2101 walker_env.state_regs = NEW_ARR_F(const arch_register_t*, 0);
2102 isa = walker_env.arch_env->isa;
2104 /* collect all state registers */
2105 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
2106 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
2107 int j, n_regs = cls->n_regs;
2109 for(j = 0; j < n_regs; ++j) {
2110 const arch_register_t *reg = arch_register_for_index(cls, j);
2111 if(arch_register_type_is(reg, state)) {
2112 node_array arr = NEW_ARR_F(ir_node*, 0);
2113 ARR_APP1(node_array, walker_env.state_nodes, arr);
2114 ARR_APP1(const arch_register_t*, walker_env.state_regs, reg);
2119 irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
2121 be_assure_dom_front(birg);
2122 phis = be_ssa_construction(
2123 be_get_birg_dom_front(birg),
2124 be_get_birg_liveness(birg),
2126 ARR_LEN(walker_env.sp_nodes), walker_env.sp_nodes,
2129 /* set register requirements for stack phis */
2130 for(i = 0; i < ARR_LEN(phis); ++i) {
2131 ir_node *phi = phis[i];
2132 be_set_phi_reg_req(walker_env.arch_env, phi, &env->sp_req);
2133 be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
2134 arch_set_irn_register(walker_env.arch_env, phi, env->isa->sp);
2138 DEL_ARR_F(walker_env.sp_nodes);
2140 n = ARR_LEN(walker_env.state_nodes);
2141 for(i = 0; i < n; ++i) {
2142 const arch_register_t *reg = walker_env.state_regs[i];
2143 node_array nodes = walker_env.state_nodes[i];
2144 ir_node *initial_value = be_abi_reg_map_get(env->regs, reg);
2146 phis = be_ssa_construction(
2147 be_get_birg_dom_front(birg),
2148 be_get_birg_liveness(birg),
2150 ARR_LEN(nodes), nodes,
2153 /* set registers for the phis */
/* NOTE(review): this inner loop reuses the outer loop index 'i', which
 * clobbers the state-register iteration whenever phis are created —
 * likely a bug (inner loop should use its own index); verify upstream. */
2154 for(i = 0; i < ARR_LEN(phis); ++i) {
2155 ir_node *phi = phis[i];
2156 be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore);
2157 arch_set_irn_register(walker_env.arch_env, phi, reg);
/**
 * Walk one block's schedule and materialize stack offsets: patch the frame
 * offset of every node that references a frame entity (adding the incoming
 * @p bias) and resolve the symbolic IncSP expand/shrink sizes to the real
 * frame size. Returns the stack bias at the end of the block.
 */
2164 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
2166 const arch_env_t *arch_env = env->birg->main_env->arch_env;
2167 int omit_fp = env->call->flags.bits.try_omit_fp;
2170 sched_foreach(bl, irn) {
2173 Check, if the node relates to an entity on the stack frame.
2174 If so, set the true offset (including the bias) for that
2177 ir_entity *ent = arch_get_frame_entity(arch_env, irn);
2179 int offset = get_stack_entity_offset(env->frame, ent, bias);
2180 arch_set_frame_offset(arch_env, irn, offset);
2181 DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
2185 If the node modifies the stack pointer by a constant offset,
2186 record that in the bias.
2188 if(arch_irn_is(arch_env, irn, modify_sp)) {
2189 int ofs = arch_get_sp_bias(arch_env, irn);
2191 if(be_is_IncSP(irn)) {
/* replace the symbolic placeholders by the concrete frame size */
2192 if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
2193 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
2194 be_set_IncSP_offset(irn, ofs);
2195 } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
2196 ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
2197 be_set_IncSP_offset(irn, ofs);
2210 * A helper struct for the bias walker.
2213 be_abi_irg_t *env; /**< The ABI irg environment. */
2214 int start_block_bias; /**< The bias at the end of the start block. */
2215 ir_node *start_block; /**< The start block of the current graph. */
2219 * Block-Walker: fix all stack offsets
/* The start block was already processed by the caller, so it is skipped. */
2221 static void stack_bias_walker(ir_node *bl, void *data)
2223 struct bias_walk *bw = data;
2224 if (bl != bw->start_block) {
2225 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Fix the stack bias of the whole graph: compute the frame's initial
 * offset, process the start block first to obtain its end-of-block bias,
 * then propagate that bias through all remaining blocks.
 * NOTE(review): every non-start block is processed with the start block's
 * bias — presumably correct only if no other block changes the bias across
 * its boundary; verify against process_stack_bias' contract.
 */
2229 void be_abi_fix_stack_bias(be_abi_irg_t *env)
2231 ir_graph *irg = env->birg->irg;
2232 struct bias_walk bw;
2234 stack_frame_compute_initial_offset(env->frame);
2235 // stack_layout_dump(stdout, env->frame);
2237 /* Determine the stack bias at the end of the start block. */
2238 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
2240 /* fix the bias is all other blocks */
2242 bw.start_block = get_irg_start_block(irg);
2243 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
/** Return the node currently holding the given callee-save register
 *  (must be registered in abi->regs). */
2246 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2248 assert(arch_register_type_is(reg, callee_save));
2249 assert(pmap_contains(abi->regs, (void *) reg));
2250 return pmap_get(abi->regs, (void *) reg);
/** Return the node currently holding the given ignore register
 *  (must be registered in abi->regs). */
2253 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2255 assert(arch_register_type_is(reg, ignore));
2256 assert(pmap_contains(abi->regs, (void *) reg));
2257 return pmap_get(abi->regs, (void *) reg);
/** Return the start-block barrier node created by modify_irg(). */
2260 ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi)
2262 return abi->start_barrier;
2266 * Returns non-zero if the ABI has omitted the frame pointer in
2267 * the current graph.
2269 int be_abi_omit_fp(const be_abi_irg_t *abi) {
2270 return abi->call->flags.bits.try_omit_fp;