 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 * @brief       Backend ABI implementation.
 * @author      Sebastian Hack, Michael Beck

#include "irgraph_t.h"
#include "iredges_t.h"
#include "irprintf_t.h"
#include "raw_bitset.h"
#include "bessaconstr.h"

typedef struct _be_abi_call_arg_t {
    unsigned is_res   : 1;  /**< 1: the call argument is a return value. 0: it's a call parameter. */
    unsigned in_reg   : 1;  /**< 1: this argument is transmitted in registers. */
    unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */

    const arch_register_t *reg;
    unsigned alignment;     /**< stack alignment */
    unsigned space_before;  /**< allocate space before */
    unsigned space_after;   /**< allocate space after */
struct _be_abi_call_t {
    be_abi_call_flags_t flags;  /**< Flags describing the ABI behavior on calls */
    int                 pop;    /**< number of bytes the stack frame is shrunk by the callee on return. */
    const be_abi_callbacks_t *cb;
    ir_type *between_type;
    const arch_register_class_t *cls_addr; /**< register class of the call address */
/**
 * The ABI information for the current birg.
 */
struct _be_abi_irg_t {
    be_irg_t         *birg;         /**< The back end IRG. */
    const arch_env_t *arch_env;
    survive_dce_t    *dce_survivor;

    be_abi_call_t    *call;         /**< The ABI call information. */
    ir_type          *method_type;  /**< The type of the method of the IRG. */

    ir_node          *init_sp;      /**< The node representing the stack pointer
                                         at the start of the function. */

    ir_node          *start;        /**< The be_Start params node. */
    pmap             *regs;         /**< A map of all callee-save and ignore regs to
                                         their Projs to the RegParams node. */

    int               start_block_bias; /**< The stack bias at the end of the start block. */

    void             *cb;           /**< ABI Callback self pointer. */

    pmap             *keep_map;     /**< mapping blocks to keep nodes. */
    pset             *ignore_regs;  /**< Additional registers which shall be ignored. */

    ir_node         **calls;        /**< flexible array containing all be_Call nodes */

    arch_register_req_t *sp_req;

    be_stack_layout_t frame;        /**< The stack frame model. */

    DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */

static heights_t *ir_heights;

/** Flag: if set, try to omit the frame pointer in all routines. */
static int be_omit_fp = 1;

/** Flag: if set, try to omit the frame pointer in leaf routines only. */
static int be_omit_leaf_fp = 1;
/*
     _    ____ ___    ____      _ _ _                _
    / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
   / _ \ |  _ \| |  | |   / _` | | | '_ \ / _` |/ __| |/ / __|
  / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
 /_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/

  These callbacks are used by the backend to set the parameters
  for a specific call type.
*/
/**
 * Set compare function: compares two ABI call object arguments.
 */
static int cmp_call_arg(const void *a, const void *b, size_t n)
    const be_abi_call_arg_t *p = a, *q = b;

    return !(p->is_res == q->is_res && p->pos == q->pos);

/**
 * Get an ABI call object argument.
 *
 * @param call   the abi call
 * @param is_res true for call results, false for call arguments
 * @param pos    position of the argument
 */
static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
    be_abi_call_arg_t arg;

    memset(&arg, 0, sizeof(arg));

    hash = is_res * 128 + pos;

    return set_find(call->params, &arg, sizeof(arg), hash);
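
/* Illustrative note (not in the original sources): the parameter set is keyed
 * by the pair (is_res, pos), folded into one hash value.  For example, the
 * result at position 3 hashes to 1 * 128 + 3 = 131, while the third input
 * argument hashes to 0 * 128 + 3 = 3.  Results and parameters therefore never
 * collide as long as a call has fewer than 128 positions, which holds for
 * every ABI handled here. */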
/**
 * Set an ABI call object argument.
 *
 * @param call   the abi call
 * @param is_res true for call results, false for call arguments
 * @param pos    position of the argument
 */
static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
    be_abi_call_arg_t arg;

    memset(&arg, 0, sizeof(arg));

    hash = is_res * 128 + pos;

    return set_insert(call->params, &arg, sizeof(arg), hash);
/* Set the flags for a call. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)

/* Sets the number of bytes the stack frame is shrunk by the callee on return. */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)

/* Set the register class for the call address. */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
    call->cls_addr = cls;

void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
    be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);

    arg->load_mode    = load_mode;
    arg->alignment    = alignment;
    arg->space_before = space_before;
    arg->space_after  = space_after;
    assert(alignment > 0 && "Alignment must be greater than 0");
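
/* Sketch (assumption, not from the original file): a backend's
 * arch_env_get_call_abi() implementation would describe each argument with
 * the be_abi_call_param_*() setters in this section, e.g. for a hypothetical
 * target passing the first argument in a register and the second on the
 * stack with word alignment:
 *
 *     be_abi_call_param_reg(call, 0, &my_gp_regs[0]);      // my_gp_regs is hypothetical
 *     be_abi_call_param_stack(call, 1, mode_Iu, 4, 0, 0);  // 4-byte aligned, no padding
 */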
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
    be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);

void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
    be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);

/* Get the flags of an ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)

/**
 * Constructor for a new ABI call object.
 *
 * @param cls_addr register class of the call address
 *
 * @return the new ABI call object
 */
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
    be_abi_call_t *call = XMALLOCZ(be_abi_call_t);

    call->params   = new_set(cmp_call_arg, 16);
    call->cls_addr = cls_addr;

    call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;

/**
 * Destructor for an ABI call object.
 */
static void be_abi_call_free(be_abi_call_t *call)
    del_set(call->params);
/*
 |  ___| __ __ _ _ __ ___   ___  | | | | __ _ _ __   __| | (_)_ __   __ _
 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
 |  _|| |  | (_| | | | | | |  __/ |  _  | (_| | | | | (_| | | | | | | (_| |
 |_|  |_|   \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |

  Handling of the stack frame. It is composed of three types:
  1) The type of the arguments which are pushed on the stack.
  2) The "between type" which consists of stuff the call of the
     function pushes on the stack (like the return address and
     the old base pointer for ia32).
  3) The Firm frame type which consists of all local variables
     and the spills.
*/
static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
    ir_type *t   = get_entity_owner(ent);
    int      ofs = get_entity_offset(ent);

    /* Find the type the entity is contained in. */
    for (index = 0; index < N_FRAME_TYPES; ++index) {
        if (frame->order[index] == t)
        /* Add the size of all the types below the one of the entity to the entity's offset. */
        ofs += get_type_size_bytes(frame->order[index]);

    /* correct the offset by the initial position of the frame pointer */
    ofs -= frame->initial_offset;

    /* correct the offset with the current bias. */

/**
 * Retrieve the entity with given offset from a frame type.
 */
static ir_entity *search_ent_with_offset(ir_type *t, int offset)
    for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
        ir_entity *ent = get_compound_member(t, i);
        if (get_entity_offset(ent) == offset)

static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
    ir_type   *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
    ir_entity *ent  = search_ent_with_offset(base, 0);

    frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;

    return frame->initial_offset;
/**
 * Initializes the frame layout from parts.
 *
 * @param frame     the stack layout that will be initialized
 * @param args      the stack argument layout type
 * @param between   the between layout type
 * @param locals    the method frame type
 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
 * @param param_map an array mapping method argument positions to the stack argument type
 *
 * @return the initialized stack layout
 */
static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
                                           ir_type *between, ir_type *locals, int stack_dir,
                                           ir_entity *param_map[])
    frame->arg_type       = args;
    frame->between_type   = between;
    frame->frame_type     = locals;
    frame->initial_offset = 0;
    frame->initial_bias   = 0;
    frame->stack_dir      = stack_dir;
    frame->order[1]       = between;
    frame->param_map      = param_map;

        frame->order[0] = args;
        frame->order[2] = locals;
        /* typical decreasing stack: locals have the
         * lowest addresses, arguments the highest */
        frame->order[0] = locals;
        frame->order[2] = args;
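
/* Illustrative picture (assuming the typical decreasing stack, stack_dir < 0):
 *
 *   high addresses   +------------------+
 *                    | arg_type         |  order[2]
 *                    +------------------+
 *                    | between_type     |  order[1] (return address, old FP, ...)
 *                    +------------------+
 *                    | frame_type       |  order[0] (local variables)
 *   low addresses    +------------------+  <- stack grows downwards
 */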
/** Dumps the stack layout to file. */
static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
    ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
    for (j = 0; j < N_FRAME_TYPES; ++j) {
        ir_type *t = frame->order[j];

        ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
        for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
            ir_entity *ent = get_compound_member(t, i);
            ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
/**
 * Returns non-zero if the call argument at the given position
 * is transferred on the stack.
 */
static inline int is_on_stack(be_abi_call_t *call, int pos)
    be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
    return arg && !arg->in_reg;
/*
  Adjustment of the calls inside a graph.
*/

/**
 * Transform a call node into a be_Call node.
 *
 * @param env     The ABI environment for the current irg.
 * @param irn     The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
    ir_graph         *irg      = env->birg->irg;
    const arch_env_t *arch_env = env->birg->main_env->arch_env;
    ir_type          *call_tp  = get_Call_type(irn);
    ir_node          *call_ptr = get_Call_ptr(irn);
    int               n_params = get_method_n_params(call_tp);
    ir_node          *curr_mem = get_Call_mem(irn);
    ir_node          *bl       = get_nodes_block(irn);
    int               stack_dir = arch_env->stack_dir;
    const arch_register_t *sp  = arch_env->sp;
    be_abi_call_t    *call     = be_abi_call_new(sp->reg_class);
    ir_mode          *mach_mode = sp->reg_class->mode;
    struct obstack   *obst     = &env->obst;
    int               no_alloc = call->flags.bits.frame_is_setup_on_call;
    int               n_res    = get_method_n_ress(call_tp);
    int               do_seq   = call->flags.bits.store_args_sequential && !no_alloc;

    ir_node *res_proj       = NULL;
    int      n_reg_params   = 0;
    int      n_stack_params = 0;

    pset_new_t          destroyed_regs, states;
    pset_new_iterator_t iter;

    int n_reg_results = 0;
    const arch_register_t *reg;
    const ir_edge_t *edge;

    int *stack_param_idx;
    int  i, n, destroy_all_regs;
    pset_new_init(&destroyed_regs);
    pset_new_init(&states);

    /* Let the isa fill out the abi description for that call node. */
    arch_env_get_call_abi(arch_env, call_tp, call);

    /* Insert code to put the stack arguments on the stack. */
    assert(get_Call_n_params(irn) == n_params);
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
        int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));

        stack_size += round_up2(arg->space_before, arg->alignment);
        stack_size += round_up2(arg_size, arg->alignment);
        stack_size += round_up2(arg->space_after, arg->alignment);
        obstack_int_grow(obst, i);

    stack_param_idx = obstack_finish(obst);

    /* Collect all arguments which are passed in registers. */
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
        if (arg && arg->in_reg) {
            obstack_int_grow(obst, i);

    reg_param_idxs = obstack_finish(obst);

    /*
     * If the stack is decreasing and we do not want to store sequentially,
     * or someone else allocated the call frame,
     * we allocate as much space on the stack as all parameters need, by
     * moving the stack pointer along the stack's direction.
     *
     * Note: we also have to do this for stack_size == 0, because we may have
     * to adjust stack alignment for the call.
     */
    if (stack_dir < 0 && !do_seq && !no_alloc) {
        curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);

    dbgi = get_irn_dbg_info(irn);
    /* If there are some parameters which shall be passed on the stack. */
    if (n_stack_params > 0) {

        /*
         * Reverse the list of stack parameters if call arguments are from left to right.
         * We must reverse them again if they are pushed (not stored) and the stack
         * direction is downwards.
         */
        if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
            for (i = 0; i < n_stack_params >> 1; ++i) {
                int other = n_stack_params - i - 1;
                int tmp   = stack_param_idx[i];
                stack_param_idx[i]     = stack_param_idx[other];
                stack_param_idx[other] = tmp;
        curr_mem = get_Call_mem(irn);
            obstack_ptr_grow(obst, curr_mem);

        for (i = 0; i < n_stack_params; ++i) {
            int p = stack_param_idx[i];
            be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
            ir_node *param         = get_Call_param(irn, p);
            ir_node *addr          = curr_sp;
            ir_type *param_type    = get_method_param_type(call_tp, p);
            int      param_size    = get_type_size_bytes(param_type) + arg->space_after;

            /*
             * If we wanted to build the arguments sequentially,
             * the stack pointer for the next one must be incremented,
             * and the memory value propagated.
             */
                addr = curr_sp = be_new_IncSP(sp, bl, curr_sp, param_size + arg->space_before, 0);
                add_irn_dep(curr_sp, curr_mem);

                curr_ofs += arg->space_before;
                curr_ofs  = round_up2(curr_ofs, arg->alignment);

                /* Make the expression to compute the argument's offset. */
                    ir_mode *constmode = mach_mode;
                    if (mode_is_reference(mach_mode)) {

                    addr = new_r_Const_long(irg, constmode, curr_ofs);
                    addr = new_r_Add(bl, curr_sp, addr, mach_mode);
            /* Insert a store for primitive arguments. */
            if (is_atomic_type(param_type)) {
                ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
                store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
                mem   = new_r_Proj(bl, store, mode_M, pn_Store_M);

            /* Make a mem copy for compound arguments. */
                assert(mode_is_reference(get_irn_mode(param)));
                copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
                mem  = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);

            curr_ofs += param_size;

            obstack_ptr_grow(obst, mem);

        in = (ir_node **) obstack_finish(obst);

        /* We need the Sync only if we didn't build the stores sequentially. */
        if (n_stack_params >= 1) {
            curr_mem = new_r_Sync(bl, n_stack_params + 1, in);
            curr_mem = get_Call_mem(irn);

        obstack_free(obst, in);
    /* check for the return_twice property */
    destroy_all_regs = 0;
    if (is_SymConst_addr_ent(call_ptr)) {
        ir_entity *ent = get_SymConst_entity(call_ptr);

        if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
            destroy_all_regs = 1;

        ir_type *call_tp = get_Call_type(irn);

        if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
            destroy_all_regs = 1;

    /* Put caller-save registers into the destroyed set and state registers into the states set. */
    for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = arch_register_for_index(cls, j);

            if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
                if (! arch_register_type_is(reg, ignore))
                    pset_new_insert(&destroyed_regs, (void *) reg);

            if (arch_register_type_is(reg, state)) {
                pset_new_insert(&destroyed_regs, (void*) reg);
                pset_new_insert(&states, (void*) reg);

    if (destroy_all_regs) {
        /* even if destroy_all_regs is set, neither SP nor FP may be destroyed (else bad things will happen) */
        pset_new_remove(&destroyed_regs, arch_env->sp);
        pset_new_remove(&destroyed_regs, arch_env->bp);
    /* search the largest result proj number */
    res_projs = ALLOCANZ(ir_node*, n_res);

    foreach_out_edge(irn, edge) {
        const ir_edge_t *res_edge;
        ir_node *irn = get_edge_src_irn(edge);

        if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)

        foreach_out_edge(irn, res_edge) {
            ir_node *res = get_edge_src_irn(res_edge);

            assert(is_Proj(res));

            proj = get_Proj_proj(res);
            assert(proj < n_res);
            assert(res_projs[proj] == NULL);
            res_projs[proj] = res;

    /** TODO: this is not correct for cases where return values are passed
     * on the stack, but no known ABI does this currently...
     */
    n_reg_results = n_res;
    /* make the back end call node and set its register requirements. */
    for (i = 0; i < n_reg_params; ++i) {
        obstack_ptr_grow(obst, get_Call_param(irn, reg_param_idxs[i]));

    /* add state register ins */
    foreach_pset_new(&states, reg, iter) {
        const arch_register_class_t *cls = arch_register_get_class(reg);
#if 0
        ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
        ir_fprintf(stderr, "Adding %+F\n", regnode);
#endif
        ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
        obstack_ptr_grow(obst, regnode);

    n_ins = n_reg_params + pset_new_size(&states);

    in = obstack_finish(obst);

    /* ins collected, build the call */
    if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
        low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
                               n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
                               n_ins, in, get_Call_type(irn));
        be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));

        low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
                               n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
                               n_ins, in, get_Call_type(irn));
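
    /* Note (added for clarity): the out count passed to be_new_Call() above is
     * one Proj per register result plus one Proj per destroyed register, on
     * top of the fixed Projs numbered below pn_be_Call_first_res (such as the
     * memory and stack pointer results used further down). */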
    be_Call_set_pop(low_call, call->pop);

    /* put the call into the list of all calls for later processing */
    ARR_APP1(ir_node *, env->calls, low_call);

    /* create new stack pointer */
    curr_sp = new_r_Proj(bl, low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
    be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
                                 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
    arch_set_irn_register(curr_sp, sp);

    /* now handle results */
    for (i = 0; i < n_res; ++i) {
        ir_node           *proj = res_projs[i];
        be_abi_call_arg_t *arg  = get_call_arg(call, 1, i);

        /* return values on the stack are not supported yet */

        /*
            shift the proj number to the right, since we will drop the
            unspeakable Proj_T from the Call. Therefore, all real argument
            Proj numbers must be increased by pn_be_Call_first_res
        */
        pn = i + pn_be_Call_first_res;

            ir_type *res_type = get_method_res_type(call_tp, i);
            ir_mode *mode     = get_type_mode(res_type);
            proj = new_r_Proj(bl, low_call, mode, pn);

            set_Proj_pred(proj, low_call);
            set_Proj_proj(proj, pn);

            pset_new_remove(&destroyed_regs, arg->reg);

    /*
        Set the register class of the call address to
        the backend-provided class (default: stack pointer class)
    */
    be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
    DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));

    /* Set the register classes and constraints of the Call parameters. */
    for (i = 0; i < n_reg_params; ++i) {
        int index = reg_param_idxs[i];
        be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
        assert(arg->reg != NULL);

        be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,

    /* Set the register constraints of the results. */
    for (i = 0; i < n_res; ++i) {
        ir_node *proj = res_projs[i];
        const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
        int pn = get_Proj_proj(proj);

        be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
        arch_set_irn_register(proj, arg->reg);

    obstack_free(obst, in);
    exchange(irn, low_call);

    /* kill the ProjT node */
    if (res_proj != NULL) {
    /* Make additional projs for the caller-save registers
       and the Keep node which keeps them alive. */
        const arch_register_t *reg;

        int curr_res_proj = pn_be_Call_first_res + n_reg_results;
        pset_new_iterator_t iter;

        /* also keep the stack pointer */
        set_irn_link(curr_sp, (void*) sp);
        obstack_ptr_grow(obst, curr_sp);

        foreach_pset_new(&destroyed_regs, reg, iter) {
            ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj);

            /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
            be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
            arch_set_irn_register(proj, reg);

            set_irn_link(proj, (void*) reg);
            obstack_ptr_grow(obst, proj);
        for (i = 0; i < n_reg_results; ++i) {
            ir_node *proj = res_projs[i];
            const arch_register_t *reg = arch_get_irn_register(proj);
            set_irn_link(proj, (void*) reg);
            obstack_ptr_grow(obst, proj);

        /* create the Keep for the caller-save registers */
        in   = (ir_node **) obstack_finish(obst);
        keep = be_new_Keep(bl, n, in);
        for (i = 0; i < n; ++i) {
            const arch_register_t *reg = get_irn_link(in[i]);
            be_node_set_reg_class_in(keep, i, reg->reg_class);
        obstack_free(obst, in);

    /* Clean up the stack. */
    assert(stack_size >= call->pop);
    stack_size -= call->pop;

    if (stack_size > 0) {
        ir_node *mem_proj = NULL;

        foreach_out_edge(low_call, edge) {
            ir_node *irn = get_edge_src_irn(edge);
            if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {

            mem_proj = new_r_Proj(bl, low_call, mode_M, pn_be_Call_M_regular);
            keep_alive(mem_proj);

    /* Clean up the stack frame or revert alignment fixes if we allocated it */
        curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);

    be_abi_call_free(call);
    obstack_free(obst, stack_param_idx);

    pset_new_destroy(&states);
    pset_new_destroy(&destroyed_regs);
/**
 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
 *
 * @param alignment the minimum stack alignment
 * @param size      the node containing the non-aligned size
 * @param block     the block where new nodes are allocated on
 * @param dbg       debug info for new nodes
 *
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
                                  ir_node *block, dbg_info *dbg)
    if (stack_alignment > 1) {
        assert(is_po2(stack_alignment));

        mode = get_irn_mode(size);
        tv   = new_tarval_from_long(stack_alignment-1, mode);
        irg  = get_Block_irg(block);
        mask = new_r_Const(irg, tv);
        size = new_rd_Add(dbg, block, size, mask, mode);

        tv   = new_tarval_from_long(-(long)stack_alignment, mode);
        mask = new_r_Const(irg, tv);
        size = new_rd_And(dbg, block, size, mask, mode);
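
/* Worked example (illustrative): with stack_alignment == 16 and a dynamic
 * size of 20 bytes, the Add/And sequence above computes
 *     (20 + 15) & ~15 == 35 & ~15 == 32,
 * i.e. the size is rounded up to the next multiple of the alignment.  This
 * only works because stack_alignment is a power of two (see the assert). */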
/**
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
    const ir_edge_t *edge;
    ir_node *new_alloc, *size, *addr, *ins[2];
    unsigned stack_alignment;

    assert(get_Alloc_where(alloc) == stack_alloc);

    block = get_nodes_block(alloc);
    irg   = get_Block_irg(block);
    type  = get_Alloc_type(alloc);

    foreach_out_edge(alloc, edge) {
        ir_node *irn = get_edge_src_irn(edge);

        assert(is_Proj(irn));
        switch (get_Proj_proj(irn)) {

    /* Beware: currently Alloc nodes without a result can occur; only escape
       analysis removes them, and it runs only for object-oriented source.
       We kill such an Alloc here. */
    if (alloc_res == NULL && alloc_mem) {
        exchange(alloc_mem, get_Alloc_mem(alloc));

    dbg  = get_irn_dbg_info(alloc);
    size = get_Alloc_size(alloc);

    /* we might need to multiply the size with the element size */
    if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
        ir_mode *mode = get_irn_mode(size);
        tarval  *tv   = new_tarval_from_long(get_type_size_bytes(type),
        ir_node *cnst = new_rd_Const(dbg, irg, tv);
        size = new_rd_Mul(dbg, block, size, cnst, mode);

    /* The stack pointer will be modified in an unknown manner.
       We cannot omit the frame pointer then. */
    env->call->flags.bits.try_omit_fp = 0;
    stack_alignment = 1 << env->arch_env->stack_alignment;
    size      = adjust_alloc_size(stack_alignment, size, block, dbg);
    new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
    set_irn_dbg_info(new_alloc, dbg);

    if (alloc_mem != NULL) {
        addsp_mem = new_r_Proj(block, new_alloc, mode_M, pn_be_AddSP_M);

        /* We need to sync the output mem of the AddSP with the input mem
           edge into the alloc node. */
        ins[0] = get_Alloc_mem(alloc);
        sync   = new_r_Sync(block, 2, ins);
        exchange(alloc_mem, sync);

    exchange(alloc, new_alloc);

    /* fix projnum of alloca res */
    set_Proj_proj(alloc_res, pn_be_AddSP_res);

    curr_sp = new_r_Proj(block, new_alloc, get_irn_mode(curr_sp),
/**
 * The Free is transformed into a back end free node and connected to the stack nodes.
 */
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
    ir_node *subsp, *mem, *res, *size, *sync;
    unsigned stack_alignment;

    assert(get_Free_where(free) == stack_alloc);

    block   = get_nodes_block(free);
    irg     = get_irn_irg(block);
    type    = get_Free_type(free);
    sp_mode = env->arch_env->sp->reg_class->mode;
    dbg     = get_irn_dbg_info(free);

    /* we might need to multiply the size with the element size */
    if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
        tarval  *tv   = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
        ir_node *cnst = new_rd_Const(dbg, irg, tv);
        ir_node *mul  = new_rd_Mul(dbg, block, get_Free_size(free),

        size = get_Free_size(free);

    stack_alignment = 1 << env->arch_env->stack_alignment;
    size = adjust_alloc_size(stack_alignment, size, block, dbg);

    /* The stack pointer will be modified in an unknown manner.
       We cannot omit the frame pointer then. */
    env->call->flags.bits.try_omit_fp = 0;
    subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
    set_irn_dbg_info(subsp, dbg);

    mem = new_r_Proj(block, subsp, mode_M, pn_be_SubSP_M);
    res = new_r_Proj(block, subsp, sp_mode, pn_be_SubSP_sp);

    /* we need to sync the memory */
    in[0] = get_Free_mem(free);
    sync  = new_r_Sync(block, 2, in);

    /* and make the SubSP dependent on the former memory */
    add_irn_dep(subsp, get_Free_mem(free));

    exchange(free, sync);
/* the following function is replaced by the usage of the heights module */

/**
 * Walker for dependent_on().
 * This function searches for the node tgt recursively, starting from curr,
 * but is restricted to the given block.
 * @return 1 if tgt was reachable from curr, 0 if not.
 */
static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
    if (get_nodes_block(curr) != bl)

    /* Phi functions stop the recursion inside a basic block */
    if (! is_Phi(curr)) {
        for (i = 0, n = get_irn_arity(curr); i < n; ++i) {
            if (check_dependence(get_irn_n(curr, i), tgt, bl))

/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(ir_node *n1, ir_node *n2)
    assert(get_nodes_block(n1) == get_nodes_block(n2));

    return heights_reachable_in_block(ir_heights, n1, n2);
static int cmp_call_dependency(const void *c1, const void *c2)
    ir_node *n1 = *(ir_node **) c1;
    ir_node *n2 = *(ir_node **) c2;

    /*
        Classical qsort() comparison function behavior:
         0 if both elements are equal
         1 if second is "smaller" than first
        -1 if first is "smaller" than second
    */
    if (dependent_on(n1, n2))

    if (dependent_on(n2, n1))

    /* The nodes have no depth order, but we need a total order because qsort()
       is not stable. */
    return get_irn_idx(n1) - get_irn_idx(n2);
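
/* Example (added for clarity): if call B uses a result of call A inside the
 * same block, dependent_on(B, A) is 1 and the comparison above orders the two
 * accordingly; since process_ops_in_block() below walks the sorted array
 * backwards, A ends up being processed (and wired to the stack pointer)
 * before B. */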
/**
 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
 * Clears the irg_is_leaf flag if a Call is detected.
 */
static void link_ops_in_block_walker(ir_node *irn, void *data)
    be_abi_irg_t *env  = data;
    ir_opcode     code = get_irn_opcode(irn);

    if (code == iro_Call ||
        (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
        (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
        ir_node *bl   = get_nodes_block(irn);
        void    *save = get_irn_link(bl);

        if (code == iro_Call)
            env->call->flags.bits.irg_is_leaf = 0;

        set_irn_link(irn, save);
        set_irn_link(bl, irn);

    if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
        ir_node *param = get_Builtin_param(irn, 0);
        tarval  *tv    = get_Const_tarval(param);
        unsigned long value = get_tarval_long(tv);

        /* use ebp, so the climbframe algo works... */
        env->call->flags.bits.try_omit_fp = 0;
/**
 * Process all Call/Alloc/Free nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the Block. We first order this list according to data
 * dependency and then connect the calls together.
 */
static void process_ops_in_block(ir_node *bl, void *data)
    be_abi_irg_t *env     = data;
    ir_node      *curr_sp = env->init_sp;

    for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
        obstack_ptr_grow(&env->obst, irn);

    /* If there were call nodes in the block. */
        nodes = obstack_finish(&env->obst);

        /* order the call nodes according to data dependency */
        qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);

        for (i = n - 1; i >= 0; --i) {
            ir_node *irn = nodes[i];

            DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
            switch (get_irn_opcode(irn)) {

                /* The stack pointer will be modified due to a call. */
                env->call->flags.bits.try_omit_fp = 0;
                curr_sp = adjust_call(env, irn, curr_sp);

                if (get_Alloc_where(irn) == stack_alloc)
                    curr_sp = adjust_alloc(env, irn, curr_sp);

                if (get_Free_where(irn) == stack_alloc)
                    curr_sp = adjust_free(env, irn, curr_sp);

                panic("invalid call");

        obstack_free(&env->obst, nodes);

    /* Keep the last stack state in the block by tying it to a Keep node;
     * the Proj from calls is already kept. */
    if (curr_sp != env->init_sp &&
        !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
        keep = be_new_Keep(bl, 1, nodes);
        pmap_insert(env->keep_map, bl, keep);

    set_irn_link(bl, curr_sp);
}  /* process_ops_in_block */
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(be_abi_irg_t *env)
    ir_graph *irg = env->birg->irg;

    env->call->flags.bits.irg_is_leaf = 1;
    irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);

    ir_heights = heights_new(env->birg->irg);
    irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
    heights_free(ir_heights);
/**
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param env          the ABI environment
 * @param call         the current call ABI
 * @param method_type  the method type
 * @param val_param_tp the value parameter type, will be destroyed
 * @param param_map    an array mapping method arguments to the stack layout type
 *
 * @return the stack argument layout type
 */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
                                 ir_type *method_type, ir_type *val_param_tp,
                                 ir_entity ***param_map)
    int dir  = env->call->flags.bits.left_to_right ? 1 : -1;
    int inc  = env->birg->main_env->arch_env->stack_dir * dir;
    int n    = get_method_n_params(method_type);
    int curr = inc > 0 ? 0 : n - 1;

    ident *id = get_entity_ident(get_irg_entity(env->birg->irg));

    *param_map = map = OALLOCN(&env->obst, ir_entity*, n);
    res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
    for (i = 0; i < n; ++i, curr += inc) {
        ir_type           *param_type = get_method_param_type(method_type, curr);
        be_abi_call_arg_t *arg        = get_call_arg(call, 0, curr);

        if (arg->on_stack) {
            if (val_param_tp != NULL) {
                /* the entity was already created, create a copy in the param type */
                ir_entity *val_ent = get_method_value_param_ent(method_type, i);
                arg->stack_ent = copy_entity_own(val_ent, res);
                set_entity_link(val_ent, arg->stack_ent);
                set_entity_link(arg->stack_ent, NULL);
                /* must be automatic to set a fixed layout */
                set_entity_allocation(arg->stack_ent, allocation_automatic);

                /* create a new entity */
                snprintf(buf, sizeof(buf), "param_%d", i);
                arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);

            ofs += arg->space_before;
            ofs  = round_up2(ofs, arg->alignment);
            set_entity_offset(arg->stack_ent, ofs);
            ofs += arg->space_after;
            ofs += get_type_size_bytes(param_type);
            map[i] = arg->stack_ent;

    set_type_size_bytes(res, ofs);
    set_type_state(res, layout_fixed);
    const arch_register_t *reg;

static int cmp_regs(const void *a, const void *b)
    const reg_node_map_t *p = a;
    const reg_node_map_t *q = b;

    if (p->reg->reg_class == q->reg->reg_class)
        return p->reg->index - q->reg->index;
    return p->reg->reg_class - q->reg->reg_class;

static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
    int n = pmap_count(reg_map);
    reg_node_map_t *res = OALLOCN(obst, reg_node_map_t, n);

    foreach_pmap(reg_map, ent) {
        res[i].reg = ent->key;
        res[i].irn = ent->value;

    qsort(res, n, sizeof(res[0]), cmp_regs);
/**
 * Creates a barrier.
 */
static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
    int n_regs = pmap_count(regs);

    rm = reg_map_to_arr(&env->obst, regs);

    for (n = 0; n < n_regs; ++n)
        obstack_ptr_grow(&env->obst, rm[n].irn);

        obstack_ptr_grow(&env->obst, *mem);

    in  = (ir_node **) obstack_finish(&env->obst);
    irn = be_new_Barrier(bl, n, in);
    obstack_free(&env->obst, in);

    for (n = 0; n < n_regs; ++n) {
        ir_node               *pred     = rm[n].irn;
        const arch_register_t *reg      = rm[n].reg;
        arch_register_type_t   add_type = 0;

        const backend_info_t *info;

        /* stupid workaround for now... as not all nodes report register
         * requirements. */
        info = be_get_info(skip_Proj(pred));
        if (info != NULL && info->out_infos != NULL) {
            const arch_register_req_t *ireq = arch_get_register_req_out(pred);
            if (ireq->type & arch_register_req_type_ignore)
                add_type |= arch_register_req_type_ignore;
            if (ireq->type & arch_register_req_type_produces_sp)
                add_type |= arch_register_req_type_produces_sp;

        proj = new_r_Proj(bl, irn, get_irn_mode(pred), n);
        be_node_set_reg_class_in(irn, n, reg->reg_class);

        be_set_constr_single_reg_in(irn, n, reg, 0);
        be_set_constr_single_reg_out(irn, n, reg, add_type);
        arch_set_irn_register(proj, reg);

        pmap_insert(regs, (void *) reg, proj);

    *mem = new_r_Proj(bl, irn, mode_M, n);

    obstack_free(&env->obst, rm);
/**
 * Creates a be_Return for a Return node.
 *
 * @param env   the abi environment
 * @param irn   the Return node or NULL if there was none
 * @param bl    the block where the be_Return should be placed
 * @param mem   the current memory
 * @param n_res number of return results
 */
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
                                 ir_node *mem, int n_res)
    be_abi_call_t    *call     = env->call;
    const arch_env_t *arch_env = env->birg->main_env->arch_env;

    pmap    *reg_map = pmap_create();
    ir_node *keep    = pmap_get(env->keep_map, bl);

    const arch_register_t **regs;

    /*
        get the valid stack node in this block.
        If we had a call in that block there is a Keep constructed by
        process_calls() which points to the last stack modification in that
        block. We'll use it then. Otherwise we use the stack from the start
        block and let the SSA construction fix the usage.
    */
    stack = be_abi_reg_map_get(env->regs, arch_env->sp);
        stack = get_irn_n(keep, 0);

        remove_End_keepalive(get_irg_end(env->birg->irg), keep);
    /* Insert results for Return into the register map. */
    for (i = 0; i < n_res; ++i) {
        ir_node *res           = get_Return_res(irn, i);
        be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
        assert(arg->in_reg && "return value must be passed in register");
        pmap_insert(reg_map, (void *) arg->reg, res);

    /* Add uses of the callee-save registers. */
    foreach_pmap(env->regs, ent) {
        const arch_register_t *reg = ent->key;
        if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
            pmap_insert(reg_map, ent->key, ent->value);

    be_abi_reg_map_set(reg_map, arch_env->sp, stack);

    /* Make the Epilogue node and call the arch's epilogue maker. */
    create_barrier(env, bl, &mem, reg_map, 1);
    call->cb->epilogue(env->cb, bl, &mem, reg_map);

    /*
        Maximum size of the in array for Return nodes is
        return args + callee save/ignore registers + memory + stack pointer
    */
    in_max = pmap_count(reg_map) + n_res + 2;

    in   = OALLOCN(&env->obst, ir_node*,               in_max);
    regs = OALLOCN(&env->obst, arch_register_t const*, in_max);

    in[1]   = be_abi_reg_map_get(reg_map, arch_env->sp);
    regs[1] = arch_env->sp;

    /* clear SP entry, since it has already been grown. */
    pmap_insert(reg_map, (void *) arch_env->sp, NULL);
    for (i = 0; i < n_res; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 1, i);

        in[n]     = be_abi_reg_map_get(reg_map, arg->reg);
        regs[n++] = arg->reg;

        /* Clear the map entry to mark the register as processed. */
        be_abi_reg_map_set(reg_map, arg->reg, NULL);
    /* grow the rest of the stuff. */
    foreach_pmap(reg_map, ent) {
        regs[n++] = ent->key;

    /* The in array for the new back end return is now ready. */
    dbgi = get_irn_dbg_info(irn);

    /* we have to pop the shadow parameter in case of struct returns */
    ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);

    /* Set the register classes of the return's parameters accordingly. */
    for (i = 0; i < n; ++i) {
        if (regs[i] == NULL)

        be_node_set_reg_class_in(ret, i, regs[i]->reg_class);

    /* Free the space of the Epilog's in array and the register <-> proj map. */
    obstack_free(&env->obst, in);
    pmap_destroy(reg_map);
typedef struct ent_pos_pair ent_pos_pair;
struct ent_pos_pair {
    ir_entity    *ent;   /**< a value param entity */
    int           pos;   /**< its parameter number */
    ent_pos_pair *next;  /**< for linking */

typedef struct lower_frame_sels_env_t {
    ent_pos_pair *value_param_list;          /**< the list of all value param entities */
    ir_node      *frame;                     /**< the current frame */
    const arch_register_class_t *sp_class;   /**< register class of the stack pointer */
    const arch_register_class_t *link_class; /**< register class of the link pointer */
    ir_type      *value_tp;                  /**< the value type if any */
    ir_type      *frame_tp;                  /**< the frame type */
    int           static_link_pos;           /**< argument number of the hidden static link */
} lower_frame_sels_env_t;

/**
 * Return an entity from the backend for a value param entity.
 *
 * @param ent a value param type entity
 * @param ctx context
 */
1537 ir_entity *argument_ent = get_entity_link(ent);
1539 if (argument_ent == NULL) {
1540 /* we have NO argument entity yet: This is bad, as we will
1541 * need one for backing store.
1544 ir_type *frame_tp = ctx->frame_tp;
1545 unsigned offset = get_type_size_bytes(frame_tp);
1546 ir_type *tp = get_entity_type(ent);
1547 unsigned align = get_type_alignment_bytes(tp);
1549 offset += align - 1;
1550 offset &= ~(align - 1);
1552 argument_ent = copy_entity_own(ent, frame_tp);
1554 /* must be automatic to set a fixed layout */
1555 set_entity_allocation(argument_ent, allocation_automatic);
1556 set_entity_offset(argument_ent, offset);
1557 offset += get_type_size_bytes(tp);
1559 set_type_size_bytes(frame_tp, offset);
1560 set_entity_link(ent, argument_ent);
1562 return argument_ent;
/**
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddress.
 * Links all used entities.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
    lower_frame_sels_env_t *ctx = data;

    ir_node *ptr = get_Sel_ptr(irn);

    if (ptr == ctx->frame) {
        ir_entity *ent = get_Sel_entity(irn);
        ir_node   *bl  = get_nodes_block(irn);

        int is_value_param = 0;

        if (get_entity_owner(ent) == ctx->value_tp) {

            /* replace by its copy from the argument type */
            pos = get_struct_member_index(ctx->value_tp, ent);
            ent = get_argument_entity(ent, ctx);

        nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);

        /* check if it's a param Sel and if we have not seen this entity before */
        if (is_value_param && get_entity_link(ent) == NULL) {

            ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);

            set_entity_link(ent, ctx->value_param_list);
/**
 * Check if a value parameter is transmitted as a register.
 * This might happen if the address of a parameter is taken which is
 * transmitted in registers.
 *
 * Note that on some architectures this case must be handled specially
 * because the place of the backing store is determined by their ABI.
 *
 * In the default case we move the entity to the frame type and create
 * a backing store into the first block.
 */
static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
    be_abi_call_t *call = env->call;
    ir_graph      *irg  = env->birg->irg;
    ent_pos_pair  *entry, *new_list;

    int i, n = ARR_LEN(value_param_list);
    DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)

    for (i = 0; i < n; ++i) {
        int                pos = value_param_list[i].pos;
        be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);

        DBG((dbg, LEVEL_2, "\targ #%d needs backing store\n", pos));
        value_param_list[i].next = new_list;
        new_list = &value_param_list[i];
    if (new_list != NULL) {
        /* ok, change the graph */
        ir_node *start_bl = get_irg_start_block(irg);
        ir_node *first_bl = NULL;
        ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
        const ir_edge_t *edge;
        optimization_state_t state;

        foreach_block_succ(start_bl, edge) {
            first_bl = get_edge_src_irn(edge);

        assert(first_bl && first_bl != start_bl);
        /* we already removed critical edges, so the following
           assertion should always be true. */
        assert(get_Block_n_cfgpreds(first_bl) == 1);

        /* now create backing stores */
        frame = get_irg_frame(irg);
        imem  = get_irg_initial_mem(irg);

        save_optimization_state(&state);
        nmem = new_r_Proj(start_bl, get_irg_start(irg), mode_M, pn_Start_M);
        restore_optimization_state(&state);

        /* reroute all edges to the new memory source */
        edges_reroute(imem, nmem, irg);
1667 /* reroute all edges to the new memory source */
1668 edges_reroute(imem, nmem, irg);
1672 args = get_irg_args(irg);
1673 args_bl = get_nodes_block(args);
1674 for (entry = new_list; entry != NULL; entry = entry->next) {
1676 ir_type *tp = get_entity_type(entry->ent);
1677 ir_mode *mode = get_type_mode(tp);
1680 /* address for the backing store */
1681 addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
1684 mem = new_r_Proj(first_bl, store, mode_M, pn_Store_M);
1686 /* the backing store itself */
1687 store = new_r_Store(first_bl, mem, addr,
1688 new_r_Proj(args_bl, args, mode, i), 0);
1690 /* the new memory Proj gets the last Proj from store */
1691 set_Proj_pred(nmem, store);
1692 set_Proj_proj(nmem, pn_Store_M);
        /* move all entities to the frame type */
        frame_tp = get_irg_frame_type(irg);
        offset   = get_type_size_bytes(frame_tp);

        /* we will add new entities: set the layout to undefined */
        assert(get_type_state(frame_tp) == layout_fixed);
        set_type_state(frame_tp, layout_undefined);
        for (entry = new_list; entry != NULL; entry = entry->next) {
            ir_entity *ent = entry->ent;

            /* If the entity is still on the argument type, move it to the
               frame type.
               This happens if the value_param type was built due to compound
               params. */
            if (get_entity_owner(ent) != frame_tp) {
                ir_type  *tp    = get_entity_type(ent);
                unsigned  align = get_type_alignment_bytes(tp);

                offset += align - 1;
                offset &= ~(align - 1);
                set_entity_owner(ent, frame_tp);
                add_class_member(frame_tp, ent);

                /* must be automatic to set a fixed layout */
                set_entity_allocation(ent, allocation_automatic);
                set_entity_offset(ent, offset);
                offset += get_type_size_bytes(tp);

        set_type_size_bytes(frame_tp, offset);
        /* fix the layout again */
        set_type_state(frame_tp, layout_fixed);
/**
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
 */
static void fix_start_block(ir_graph *irg)
    ir_node *initial_X   = get_irg_initial_exec(irg);
    ir_node *start_block = get_irg_start_block(irg);
    const ir_edge_t *edge;

    assert(is_Proj(initial_X));

    foreach_out_edge(initial_X, edge) {
        ir_node *block = get_edge_src_irn(edge);

        if (is_Anchor(block))
        if (block != start_block) {
            ir_node *jmp = new_r_Jmp(start_block);
            set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
            set_irg_initial_exec(irg, jmp);

    panic("Initial exec has no follow block in %+F", irg);
/**
 * Update the entities of Sels that access the outer value parameters.
 */
static void update_outer_frame_sels(ir_node *irn, void *env) {
    lower_frame_sels_env_t *ctx = env;

    ptr = get_Sel_ptr(irn);
    if (! is_arg_Proj(ptr))
    if (get_Proj_proj(ptr) != ctx->static_link_pos)
    ent = get_Sel_entity(irn);

    if (get_entity_owner(ent) == ctx->value_tp) {
        /* replace by its copy from the argument type */
        pos = get_struct_member_index(ctx->value_tp, ent);
        ent = get_argument_entity(ent, ctx);
        set_Sel_entity(irn, ent);

        /* check if we have not seen this entity before */
        if (get_entity_link(ent) == NULL) {

            ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);

            set_entity_link(ent, ctx->value_param_list);
/**
 * Fix access to outer local variables.
 */
static void fix_outer_variable_access(be_abi_irg_t *env,
                                      lower_frame_sels_env_t *ctx)
    for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
        ir_entity *ent = get_class_member(ctx->frame_tp, i);

        if (! is_method_entity(ent))
        if (get_entity_peculiarity(ent) == peculiarity_description)

        /*
         * FIXME: find the number of the static link parameter;
         * for now we assume 0 here.
         */
        ctx->static_link_pos = 0;

        irg = get_entity_irg(ent);
        irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
/**
 * Modify the irg itself and the frame type.
 */
static void modify_irg(be_abi_irg_t *env)
    be_abi_call_t         *call     = env->call;
    const arch_env_t      *arch_env = env->birg->main_env->arch_env;
    const arch_register_t *sp       = arch_env->sp;
    ir_graph              *irg      = env->birg->irg;

    ir_node *new_mem_proj;

    ir_type *method_type = get_entity_type(get_irg_entity(irg));

    unsigned frame_size;

    const arch_register_t *fp_reg;
    ir_node *frame_pointer;

    const ir_edge_t *edge;
    ir_type *arg_type, *bet_type, *tp;
    lower_frame_sels_env_t ctx;
    ir_entity **param_map;

    DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)

    DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));

    /* Must fetch memory here, otherwise the start Barrier gets the wrong
     * memory, which leads to loops in the DAG. */
    old_mem = get_irg_initial_mem(irg);
    irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);

    /* set the links of all frame entities to NULL; we use them
       to detect if an entity is already linked in the value_param_list */
    tp = get_method_value_param_type(method_type);

        /* clear the links of the clone type, let the
           original entities point to their clones */
        for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
            ir_entity *mem = get_struct_member(tp, i);
            set_entity_link(mem, NULL);

    arg_type = compute_arg_type(env, call, method_type, tp, &param_map);

    /* Convert the Sel nodes in the irg to frame addr nodes: */
    ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
    ctx.frame            = get_irg_frame(irg);
    ctx.sp_class         = env->arch_env->sp->reg_class;
    ctx.link_class       = env->arch_env->link_class;
    ctx.frame_tp         = get_irg_frame_type(irg);
    /* we will possibly add new entities to the frame: set the layout to undefined */
    assert(get_type_state(ctx.frame_tp) == layout_fixed);
    set_type_state(ctx.frame_tp, layout_undefined);

    irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);

    /* fix the frame type layout again */
    set_type_state(ctx.frame_tp, layout_fixed);

    /* align the stack frame to 4 bytes */
    frame_size = get_type_size_bytes(ctx.frame_tp);
    if (frame_size % 4 != 0) {
        set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
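
    /* Worked example (illustrative): a frame of 10 bytes is padded to
     * 10 + 4 - (10 % 4) == 12 bytes; frames whose size is already a
     * multiple of four are left untouched by the check above. */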
    env->regs = pmap_create();

    n_params = get_method_n_params(method_type);
    args     = OALLOCNZ(&env->obst, ir_node*, n_params);

    /*
     * for inner functions we must now fix access to outer frame entities.
     */
    fix_outer_variable_access(env, &ctx);

    /* Check if a value parameter is transmitted as a register.
     * This might happen if the address of a parameter is taken which is
     * transmitted in registers.
     *
     * Note that on some architectures this case must be handled specially
     * because the place of the backing store is determined by their ABI.
     *
     * In the default case we move the entity to the frame type and create
     * a backing store into the first block.
     */
    fix_address_of_parameter_access(env, ctx.value_param_list);

    DEL_ARR_F(ctx.value_param_list);
    irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
    /* Fill the argument vector */
    arg_tuple = get_irg_args(irg);
    foreach_out_edge(arg_tuple, edge) {
        ir_node *irn = get_edge_src_irn(edge);
        if (! is_Anchor(irn)) {
            int nr = get_Proj_proj(irn);

            DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));

    bet_type = call->cb->get_between_type(env->cb);
    stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);

    /* Count the register params and add them to the number of Projs for the RegParams node */
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
        if (arg->in_reg && args[i]) {
            assert(arg->reg != sp && "cannot use stack pointer as parameter register");
            assert(i == get_Proj_proj(args[i]));

            /* For now, associate the register with the old Proj from Start representing that argument. */
            pmap_insert(env->regs, (void *) arg->reg, args[i]);
            DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));

    /* Collect all callee-save registers */
    for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = &cls->regs[j];
            if (arch_register_type_is(reg, callee_save) ||
                arch_register_type_is(reg, state)) {
                pmap_insert(env->regs, (void *) reg, NULL);

    /* handle start block here (place a jump in the block) */
    fix_start_block(irg);

    pmap_insert(env->regs, (void *) sp, NULL);
    pmap_insert(env->regs, (void *) arch_env->bp, NULL);
    start_bl   = get_irg_start_block(irg);
    env->start = be_new_Start(start_bl, pmap_count(env->regs) + 1);
    /*
     * make proj nodes for the callee-save registers.
     * memorize them, since Return nodes get those as inputs.
     *
     * Note: if a register corresponds to an argument, the regs map contains
     * the old Proj from start for that argument.
     */
    rm = reg_map_to_arr(&env->obst, env->regs);
    for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
        arch_register_t *reg  = (void *) rm[i].reg;
        ir_mode         *mode = reg->reg_class->mode;

        arch_register_req_type_t add_type = 0;

            add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;

        proj = new_r_Proj(start_bl, env->start, mode, nr + 1);
        pmap_insert(env->regs, (void *) reg, proj);
        be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
        arch_set_irn_register(proj, reg);

        DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));

    obstack_free(&env->obst, rm);
    /* create a new initial memory proj */
    assert(is_Proj(old_mem));
    arch_set_out_register_req(env->start, 0, arch_no_register_req);
    new_mem_proj = new_r_Proj(start_bl, env->start, mode_M, 0);

    set_irg_initial_mem(irg, mem);

    /* Generate the Prologue */
    fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);

    /* do the stack allocation BEFORE the barrier, or spill code
       might be added before it */
    env->init_sp = be_abi_reg_map_get(env->regs, sp);
    env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
    be_abi_reg_map_set(env->regs, sp, env->init_sp);

    create_barrier(env, start_bl, &mem, env->regs, 0);

    env->init_sp = be_abi_reg_map_get(env->regs, sp);
    arch_set_irn_register(env->init_sp, sp);

    frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
    set_irg_frame(irg, frame_pointer);
    pset_insert_ptr(env->ignore_regs, fp_reg);

    /* rewire old mem users to new mem */
    exchange(old_mem, mem);

    set_irg_initial_mem(irg, mem);
    /* Now, introduce stack param nodes for all parameters passed on the stack */
    for (i = 0; i < n_params; ++i) {
        ir_node *arg_proj = args[i];
        ir_node *repl     = NULL;

        if (arg_proj != NULL) {
            be_abi_call_arg_t *arg;
            ir_type *param_type;
            int nr = get_Proj_proj(arg_proj);

            nr         = MIN(nr, n_params);
            arg        = get_call_arg(call, 0, nr);
            param_type = get_method_param_type(method_type, nr);

                repl = pmap_get(env->regs, (void *) arg->reg);
            } else if (arg->on_stack) {
                ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);

                /* For atomic parameters which are actually used, we create a Load node. */
                if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
                    ir_mode *mode      = get_type_mode(param_type);
                    ir_mode *load_mode = arg->load_mode;

                    ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
                    repl = new_r_Proj(start_bl, load, load_mode, pn_Load_res);

                    if (mode != load_mode) {
                        repl = new_r_Conv(start_bl, repl, mode);

                    /* The stack parameter is not primitive (it is a struct or array);
                     * we thus create a node representing the parameter's address */
            assert(repl != NULL);

            /* Beware: the mode of the register parameters is always the mode of
             * the register class, which may be wrong. Add Convs then. */
            mode = get_irn_mode(args[i]);
            if (mode != get_irn_mode(repl)) {
                repl = new_r_Conv(get_nodes_block(repl), repl, mode);

            exchange(args[i], repl);
    /* the arg proj is not needed anymore now and should only be used by the anchor */
    assert(get_irn_n_edges(arg_tuple) == 1);
    kill_node(arg_tuple);
    set_irg_args(irg, new_r_Bad(irg));

    /* All Return nodes hang on the End node, so look for them there. */
    end = get_irg_end_block(irg);
    for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
        ir_node *irn = get_Block_cfgpred(end, i);

        if (is_Return(irn)) {
            ir_node *blk = get_nodes_block(irn);
            ir_node *mem = get_Return_mem(irn);
            ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));

    /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return
       then; the code is dead and will never be executed. */

    obstack_free(&env->obst, args);
/**
 * Fix the state inputs of calls that still hang on unknowns.
 */
static void fix_call_state_inputs(be_abi_irg_t *env)
{
    const arch_env_t *arch_env = env->arch_env;
    int i, n, n_states;
    arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);

    /* Collect the state registers of all register classes */
    n = arch_env_get_n_reg_class(arch_env);
    for (i = 0; i < n; ++i) {
        unsigned j;
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = arch_register_for_index(cls, j);
            if (arch_register_type_is(reg, state)) {
                ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
            }
        }
    }

    n        = ARR_LEN(env->calls);
    n_states = ARR_LEN(stateregs);
    for (i = 0; i < n; ++i) {
        int s, arity;
        ir_node *call = env->calls[i];

        arity = get_irn_arity(call);

        /* the state reg inputs are the last n inputs of the calls */
        for (s = 0; s < n_states; ++s) {
            int inp = arity - n_states + s;
            const arch_register_t *reg = stateregs[s];
            ir_node *regnode = be_abi_reg_map_get(env->regs, reg);

            set_irn_n(call, inp, regnode);
        }
    }

    DEL_ARR_F(stateregs);
}
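/*
 * For illustration: assume the architecture exposes a single state register
 * (say, a floating point control register). Every be_Call then carries it as
 * its last input, which up to this point was still an Unknown; the loop above
 * replaces that input with the register's Proj from env->regs, so the value
 * is properly threaded through the call.
 */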
/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
    ir_type   *type   = get_entity_type(method);
    ident     *old_id = get_entity_ld_ident(method);
    ident     *id     = id_mangle3("L", old_id, "$stub");
    ir_type   *parent = be->pic_trampolines_type;
    ir_entity *ent    = new_entity(parent, old_id, type);
    set_entity_ld_ident(ent, id);
    set_entity_visibility(ent, visibility_local);
    set_entity_variability(ent, variability_uninitialized);

    return ent;
}
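/*
 * Example (hypothetical name): a method with linker ident "foo" gets a stub
 * entity whose linker ident is "Lfoo$stub", placed in the special
 * pic_trampolines_type segment.
 */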
/**
 * Returns the trampoline entity for the given method.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
    ir_entity *result = pmap_get(env->ent_trampoline_map, method);
    if (result == NULL) {
        result = create_trampoline(env, method);
        pmap_insert(env->ent_trampoline_map, method, result);
    }

    return result;
}
/**
 * Create a pic symbol entity (an indirection slot) for the given entity.
 */
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
    ident     *old_id = get_entity_ld_ident(entity);
    ident     *id     = id_mangle3("L", old_id, "$non_lazy_ptr");
    ir_type   *e_type = get_entity_type(entity);
    ir_type   *type   = new_type_pointer(id, e_type, mode_P_data);
    ir_type   *parent = be->pic_symbols_type;
    ir_entity *ent    = new_entity(parent, old_id, type);
    set_entity_ld_ident(ent, id);
    set_entity_visibility(ent, visibility_local);
    set_entity_variability(ent, variability_uninitialized);

    return ent;
}
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
    ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
    if (result == NULL) {
        result = create_pic_symbol(env, entity);
        pmap_insert(env->ent_pic_symbol_map, entity, result);
    }

    return result;
}
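/*
 * Example (hypothetical name): an entity "foo" gets an indirection slot with
 * linker ident "Lfoo$non_lazy_ptr"; the naming mirrors the Mach-O non-lazy
 * symbol pointer convention.
 */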
/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
    return get_entity_visibility(entity) != visibility_external_allocated;
}
/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
    ir_graph      *irg;
    ir_node       *pic_base;
    ir_node       *add;
    ir_node       *block;
    ir_node       *unknown;
    ir_mode       *mode;
    ir_node       *load;
    ir_node       *load_res;
    be_abi_irg_t  *env = data;
    int            arity, i;
    be_main_env_t *be  = env->birg->main_env;

    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        dbg_info  *dbgi;
        ir_node   *pred = get_irn_n(node, i);
        ir_entity *entity;
        ir_entity *pic_symbol;
        ir_node   *pic_symconst;

        if (!is_SymConst(pred))
            continue;

        entity = get_SymConst_entity(pred);
        block  = get_nodes_block(pred);
        irg    = get_irn_irg(pred);

        /* calls can jump to relative addresses, so we can directly jump to
           the (relatively) known call address or the trampoline */
        if (i == 1 && is_Call(node)) {
            ir_entity *trampoline;
            ir_node   *trampoline_const;

            if (can_address_relative(entity))
                continue;

            dbgi             = get_irn_dbg_info(pred);
            trampoline       = get_trampoline(be, entity);
            trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
                                                        trampoline, NULL);
            set_irn_n(node, i, trampoline_const);
            continue;
        }

        /* everything else is accessed relative to EIP */
        mode     = get_irn_mode(pred);
        unknown  = new_r_Unknown(irg, mode);
        pic_base = arch_code_generator_get_pic_base(env->birg->cg);

        /* all ok now for locally constructed stuff */
        if (can_address_relative(entity)) {
            ir_node *add = new_r_Add(block, pic_base, pred, mode);

            /* make sure the walker doesn't visit this add again */
            mark_irn_visited(add);
            set_irn_n(node, i, add);
            continue;
        }

        /* get entry from pic symbol segment */
        dbgi         = get_irn_dbg_info(pred);
        pic_symbol   = get_pic_symbol(be, entity);
        pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
                                                pic_symbol, NULL);
        add = new_r_Add(block, pic_base, pic_symconst, mode);
        mark_irn_visited(add);

        /* we need an extra indirection for global data outside our current
           module. The loads are always safe and can therefore float
           and need no memory input */
        load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
        load_res = new_r_Proj(block, load, mode, pn_Load_res);

        set_irn_n(node, i, load_res);
    }
}
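/*
 * In summary, for a SymConst referencing a non-local entity "foo"
 * (hypothetical name) the walker produces
 *
 *     SymConst(foo)  ==>  Proj_res(Load(Add(pic_base, SymConst(Lfoo$non_lazy_ptr))))
 *
 * while a locally allocated entity only needs Add(pic_base, SymConst(foo)),
 * and non-local call targets are redirected to their trampoline stubs.
 */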
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
    be_abi_irg_t *env       = XMALLOC(be_abi_irg_t);
    ir_node      *old_frame = get_irg_frame(birg->irg);
    ir_graph     *irg       = birg->irg;

    pmap_entry *ent;
    ir_node *dummy;
    optimization_state_t state;
    unsigned *limited_bitset;
    arch_register_req_t *sp_req;

    be_omit_fp      = birg->main_env->options->omit_fp;
    be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;

    obstack_init(&env->obst);

    env->birg        = birg;
    env->arch_env    = birg->main_env->arch_env;
    env->method_type = get_entity_type(get_irg_entity(irg));
    env->call        = be_abi_call_new(env->arch_env->sp->reg_class);
    arch_env_get_call_abi(env->arch_env, env->method_type, env->call);

    env->ignore_regs  = pset_new_ptr_default();
    env->keep_map     = pmap_create();
    env->dce_survivor = new_survive_dce();

    sp_req      = OALLOCZ(&env->obst, arch_register_req_t);
    env->sp_req = sp_req;

    sp_req->type = arch_register_req_type_limited
                 | arch_register_req_type_produces_sp;
    sp_req->cls  = arch_register_get_class(env->arch_env->sp);

    limited_bitset = rbitset_obstack_alloc(&env->obst, sp_req->cls->n_regs);
    rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
    sp_req->limited = limited_bitset;
    if (env->arch_env->sp->type & arch_register_type_ignore) {
        sp_req->type |= arch_register_req_type_ignore;
    }

    /* Beware: later we replace this node by the real one, ensure it is not CSE'd
       to another Unknown or the stack pointer gets used */
    save_optimization_state(&state);
    set_optimize(0);
    env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
    restore_optimization_state(&state);

    FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");

    env->calls = NEW_ARR_F(ir_node*, 0);

    if (birg->main_env->options->pic) {
        irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
    }

    /* Lower all call nodes in the IRG. */
    process_calls(env);

    /*
       Beware: init the backend abi call object only after processing calls,
       otherwise some information might not be available yet.
    */
    env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);

    /* Process the IRG */
    modify_irg(env);

    /* fix call inputs for state registers */
    fix_call_state_inputs(env);

    /* We don't need the keep map anymore. */
    pmap_destroy(env->keep_map);
    env->keep_map = NULL;

    /* calls array is not needed anymore */
    DEL_ARR_F(env->calls);
    env->calls = NULL;

    /* reroute the stack origin of the calls to the true stack origin. */
    exchange(dummy, env->init_sp);
    exchange(old_frame, get_irg_frame(irg));

    /* Make some important node pointers survive the dead node elimination. */
    survive_dce_register_irn(env->dce_survivor, &env->init_sp);
    foreach_pmap(env->regs, ent) {
        survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
    }

    env->call->cb->done(env->cb);

    return env;
}
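/*
 * Typical driver sequence (sketch; the actual call sites live in the backend
 * main driver, not in this file):
 *
 *     be_abi_irg_t *abi = be_abi_introduce(birg);
 *     ... code selection, scheduling, register allocation ...
 *     be_abi_fix_stack_nodes(abi);   // rebuild SSA form of the stack pointer
 *     be_abi_fix_stack_bias(abi);    // turn frame entities into real offsets
 *     be_abi_free(abi);
 */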
void be_abi_free(be_abi_irg_t *env)
{
    be_abi_call_free(env->call);
    free_survive_dce(env->dce_survivor);
    del_pset(env->ignore_regs);
    pmap_destroy(env->regs);
    obstack_free(&env->obst, NULL);
    free(env);
}
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
    arch_register_t *reg;

    for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
        if (reg->reg_class == cls)
            bitset_set(bs, reg->index);
}
void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
    unsigned         i;
    arch_register_t *reg;

    for (i = 0; i < cls->n_regs; ++i) {
        if (arch_register_type_is(&cls->regs[i], ignore))
            continue;

        rbitset_set(raw_bitset, i);
    }

    for (reg = pset_first(abi->ignore_regs); reg != NULL;
         reg = pset_next(abi->ignore_regs)) {
        if (reg->reg_class != cls)
            continue;

        rbitset_clear(raw_bitset, reg->index);
    }
}
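/*
 * Example (hypothetical register class): for a class of eight registers where
 * r3 has the ignore type and r4 is in abi->ignore_regs, the resulting raw
 * bitset has bits 0, 1, 2, 5, 6 and 7 set -- exactly the registers of the
 * class that remain allocatable.
 */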
/* Returns the stack layout from an ABI environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
{
    return &abi->frame;
}
/*
  _____ _        ____  _             _
 |  ___(_)_  __ / ___|| |_ __ _  ___| | __
 | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
 |  _| | |>  <   ___) | || (_| | (__|   <
 |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\

*/
typedef ir_node **node_array;

typedef struct fix_stack_walker_env_t {
    node_array sp_nodes;
} fix_stack_walker_env_t;
/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
    ir_node                   *insn = node;
    fix_stack_walker_env_t    *env  = data;
    const arch_register_req_t *req;

    if (is_Proj(node)) {
        insn = get_Proj_pred(node);
    }

    if (arch_irn_get_n_outs(insn) == 0)
        return;

    req = arch_get_register_req_out(node);
    if (! (req->type & arch_register_req_type_produces_sp))
        return;

    ARR_APP1(ir_node*, env->sp_nodes, node);
}
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
{
    be_ssa_construction_env_t senv;
    int i, len;
    ir_node **phis;
    be_irg_t *birg = env->birg;
    be_lv_t  *lv   = be_get_birg_liveness(birg);
    fix_stack_walker_env_t walker_env;

    walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

    irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);

    /* nothing to be done if we didn't find any node; in fact we mustn't
     * continue, as for endless loops the incsp might have had no users and
     * is bad now. */
    len = ARR_LEN(walker_env.sp_nodes);
    if (len == 0) {
        DEL_ARR_F(walker_env.sp_nodes);
        return;
    }

    be_ssa_construction_init(&senv, birg);
    be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
                                   ARR_LEN(walker_env.sp_nodes));
    be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
                                        ARR_LEN(walker_env.sp_nodes));

    if (lv != NULL) {
        len = ARR_LEN(walker_env.sp_nodes);
        for (i = 0; i < len; ++i) {
            be_liveness_update(lv, walker_env.sp_nodes[i]);
        }
        be_ssa_construction_update_liveness_phis(&senv, lv);
    }

    phis = be_ssa_construction_get_new_phis(&senv);

    /* set register requirements for stack phis */
    len = ARR_LEN(phis);
    for (i = 0; i < len; ++i) {
        ir_node *phi = phis[i];
        be_set_phi_reg_req(phi, env->sp_req);
        arch_set_irn_register(phi, env->arch_env->sp);
    }
    be_ssa_construction_destroy(&senv);

    DEL_ARR_F(walker_env.sp_nodes);
}
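/*
 * Example: an IncSP in each arm of an if/else leaves two definitions of the
 * stack pointer. The SSA construction above inserts a Phi at the join block;
 * the loop over the new phis then pins that Phi to the sp register via
 * env->sp_req, so register allocation cannot assign it anywhere else.
 */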
/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param env        the ABI environment
 * @param bl         the block to process
 * @param real_bias  the bias value
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
    int      omit_fp     = env->call->flags.bits.try_omit_fp;
    int      wanted_bias = real_bias;
    ir_node *irn;

    sched_foreach(bl, irn) {
        int ofs;

        /*
           Check, if the node relates to an entity on the stack frame.
           If so, set the true offset (including the bias) for that
           node.
         */
        ir_entity *ent = arch_get_frame_entity(irn);
        if (ent != NULL) {
            int bias   = omit_fp ? real_bias : 0;
            int offset = get_stack_entity_offset(&env->frame, ent, bias);
            arch_set_frame_offset(irn, offset);
            DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
                 ent, offset, bias));
        }

        /*
         * If the node modifies the stack pointer by a constant offset,
         * record that in the bias.
         */
        ofs = arch_get_sp_bias(irn);

        if (be_is_IncSP(irn)) {
            /* fill in real stack frame size */
            if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
                ir_type *frame_type = get_irg_frame_type(env->birg->irg);
                ofs = (int) get_type_size_bytes(frame_type);
                be_set_IncSP_offset(irn, ofs);
            } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
                ir_type *frame_type = get_irg_frame_type(env->birg->irg);
                ofs = - (int) get_type_size_bytes(frame_type);
                be_set_IncSP_offset(irn, ofs);
            }

            if (be_get_IncSP_align(irn)) {
                /* patch IncSP to produce an aligned stack pointer */
                ir_type *between_type = env->frame.between_type;
                int      between_size = get_type_size_bytes(between_type);
                int      alignment    = 1 << env->arch_env->stack_alignment;
                int      delta        = (real_bias + ofs + between_size) & (alignment - 1);

                if (delta > 0) {
                    be_set_IncSP_offset(irn, ofs + alignment - delta);
                    real_bias += alignment - delta;
                }
            } else {
                /* adjust so real_bias corresponds with wanted_bias */
                int delta = wanted_bias - real_bias;
                if (delta != 0) {
                    be_set_IncSP_offset(irn, ofs + delta);
                    real_bias += delta;
                }
            }
        }

        real_bias   += ofs;
        wanted_bias += ofs;
    }

    assert(real_bias == wanted_bias);
    return real_bias;
}
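/*
 * Worked example for the alignment patch above (illustrative numbers):
 * with a 16 byte stack alignment (stack_alignment == 4), real_bias == 4,
 * ofs == 20 and between_size == 4 we get
 *     delta = (4 + 20 + 4) & 15 = 12,
 * so the IncSP offset is patched to 20 + (16 - 12) = 24 and real_bias grows
 * by the 4 padding bytes, leaving the stack pointer 16 byte aligned.
 */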
/**
 * A helper struct for the bias walker.
 */
struct bias_walk {
    be_abi_irg_t *env;              /**< The ABI irg environment. */
    int          start_block_bias;  /**< The bias at the end of the start block. */
    int          between_size;      /**< The size of the between type. */
    ir_node      *start_block;      /**< The start block of the current graph. */
};
/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
    struct bias_walk *bw = data;
    if (bl != bw->start_block) {
        process_stack_bias(bw->env, bl, bw->start_block_bias);
    }
}
/**
 * Walker: finally lower all Sels of outer frame or parameter
 * entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx) {
    be_abi_irg_t *env = ctx;
    ir_node      *ptr;
    ir_entity    *ent;
    ir_type      *owner;

    if (! is_Sel(sel))
        return;

    ent   = get_Sel_entity(sel);
    owner = get_entity_owner(ent);
    ptr   = get_Sel_ptr(sel);

    if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
        /* found access to outer frame or arguments */
        int offset = get_stack_entity_offset(&env->frame, ent, 0);

        if (offset != 0) {
            ir_node  *bl        = get_nodes_block(sel);
            dbg_info *dbgi      = get_irn_dbg_info(sel);
            ir_mode  *mode      = get_irn_mode(sel);
            ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
            ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);

            ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
        }
        exchange(sel, ptr);
    }
}
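/*
 * Example (hypothetical offset): a Sel of an outer-frame entity placed at
 * offset 8 becomes Add(outer_frame_ptr, Const(8)); for an entity at offset 0
 * the Sel is simply replaced by the frame pointer itself.
 */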
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
    ir_graph *irg = env->birg->irg;
    ir_type  *frame_tp;
    int       i;
    struct bias_walk bw;

    stack_frame_compute_initial_offset(&env->frame);
    // stack_layout_dump(stdout, frame);

    /* Determine the stack bias at the end of the start block. */
    bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
    bw.between_size     = get_type_size_bytes(env->frame.between_type);

    /* fix the bias in all other blocks */
    bw.env = env;
    bw.start_block = get_irg_start_block(irg);
    irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

    /* now fix inner functions: these still have Sel nodes to outer
       frame and parameter entities */
    frame_tp = get_irg_frame_type(irg);
    for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
        ir_entity *ent = get_class_member(frame_tp, i);

        if (is_method_entity(ent) && get_entity_peculiarity(ent) != peculiarity_description) {
            ir_graph *irg = get_entity_irg(ent);

            irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
        }
    }
}
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
    assert(arch_register_type_is(reg, callee_save));
    assert(pmap_contains(abi->regs, (void *) reg));
    return pmap_get(abi->regs, (void *) reg);
}

ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
    assert(arch_register_type_is(reg, ignore));
    assert(pmap_contains(abi->regs, (void *) reg));
    return pmap_get(abi->regs, (void *) reg);
}
/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi)
{
    return abi->call->flags.bits.try_omit_fp;
}