 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 *
 * @brief   Backend ABI implementation.
 * @author  Sebastian Hack, Michael Beck
#include "irgraph_t.h"
#include "iredges_t.h"
#include "irprintf_t.h"
#include "raw_bitset.h"
#include "bessaconstr.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef struct _be_abi_call_arg_t {
    unsigned is_res   : 1;  /**< 1: the call argument is a return value. 0: it's a call parameter. */
    unsigned in_reg   : 1;  /**< 1: this argument is transmitted in registers. */
    unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */
    unsigned callee   : 1;  /**< 1: we are the callee. 0: we are the caller. */

    const arch_register_t *reg;
    unsigned alignment;     /**< stack alignment */
    unsigned space_before;  /**< allocate space before */
    unsigned space_after;   /**< allocate space after */
struct _be_abi_call_t {
    be_abi_call_flags_t flags;  /**< Flags describing the ABI behavior on calls */
    int                 pop;    /**< number of bytes the stack frame is shrunk by the callee on return. */
    const be_abi_callbacks_t *cb;
    ir_type *between_type;
    const arch_register_class_t *cls_addr; /**< register class of the call address */

 * The ABI information for the current birg.
struct _be_abi_irg_t {
    be_irg_t *birg;                 /**< The back end IRG. */
    const arch_env_t *arch_env;
    survive_dce_t *dce_survivor;
    be_abi_call_t *call;            /**< The ABI call information. */
    ir_type *method_type;           /**< The type of the method of the IRG. */
    ir_node *init_sp;               /**< The node representing the stack pointer
                                         at the start of the function. */
    ir_node *start;                 /**< The be_Start params node. */
    pmap *regs;                     /**< A map of all callee-save and ignore regs to
                                         their Projs to the RegParams node. */
    int start_block_bias;           /**< The stack bias at the end of the start block. */
    void *cb;                       /**< ABI Callback self pointer. */
    pmap *keep_map;                 /**< mapping blocks to keep nodes. */
    pset *ignore_regs;              /**< Additional registers which shall be ignored. */
    ir_node **calls;                /**< flexible array containing all be_Call nodes */
    arch_register_req_t *sp_req;
    be_stack_layout_t frame;        /**< The stack frame model. */
static heights_t *ir_heights;

/** Flag: if set, try to omit the frame pointer in all routines. */
static int be_omit_fp = 1;

/** Flag: if set, try to omit the frame pointer in leaf routines only. */
static int be_omit_leaf_fp = 1;
    _    ____ ___    ____      _ _ _                _
   / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
  / _ \ |  _ \| |  | |   / _` | | | '_ \ / _` |/ __| |/ / __|
 / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
/_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/

    These callbacks are used by the backend to set the parameters
    for a specific call type.
 * Set compare function: compares two ABI call object arguments.
static int cmp_call_arg(const void *a, const void *b, size_t n)
    const be_abi_call_arg_t *p = a, *q = b;

    return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);

 * Get an ABI call object argument.
 *
 * @param call   the abi call
 * @param is_res true for call results, false for call arguments
 * @param pos    position of the argument
 * @param callee context type - whether we are the callee or the caller
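 *
 * Illustrative usage (an assumption drawn from this signature, not a quote
 * from the original sources): the first result as seen by the callee would
 * be fetched as
 *     get_call_arg(call, 1, 0, 1);
 * cmp_call_arg() above makes (is_res, pos, callee) the equality key.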
static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
    be_abi_call_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    hash = is_res * 128 + pos;

    return set_find(call->params, &arg, sizeof(arg), hash);
 * Set an ABI call object argument.
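 *
 * Note (inferred from the code below, not stated in the original sources):
 * with ABI_CONTEXT_BOTH both branches fire, so the argument is remembered
 * twice, once for the callee view and once for the caller view. A backend
 * would typically register a stack parameter for both sides like
 *     be_abi_call_param_stack(call, 0, mode_Iu, 4, 0, 0, ABI_CONTEXT_BOTH);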
static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
    unsigned hash = arg->is_res * 128 + arg->pos;

    if (context & ABI_CONTEXT_CALLEE) {
        set_insert(call->params, arg, sizeof(*arg), hash);
    if (context & ABI_CONTEXT_CALLER) {
        set_insert(call->params, arg, sizeof(*arg), hash);
/* Set the flags for a call. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)

/* Sets the number of bytes the stack frame is shrunk by the callee on return. */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)

/* Set the register class for the call address. */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
    call->cls_addr = cls;

void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
                             ir_mode *load_mode, unsigned alignment,
                             unsigned space_before, unsigned space_after,
                             be_abi_context_t context)
    be_abi_call_arg_t arg;
    memset(&arg, 0, sizeof(arg));
    assert(alignment > 0 && "Alignment must be greater than 0");
    arg.load_mode    = load_mode;
    arg.alignment    = alignment;
    arg.space_before = space_before;
    arg.space_after  = space_after;

    remember_call_arg(&arg, call, context);
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
    be_abi_call_arg_t arg;
    memset(&arg, 0, sizeof(arg));

    remember_call_arg(&arg, call, context);

void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
    be_abi_call_arg_t arg;
    memset(&arg, 0, sizeof(arg));

    remember_call_arg(&arg, call, context);

/* Get the flags of an ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)

 * Constructor for a new ABI call object.
 *
 * @param cls_addr register class of the call address
 *
 * @return the new ABI call object
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
    be_abi_call_t *call = XMALLOCZ(be_abi_call_t);

    call->params   = new_set(cmp_call_arg, 16);
    call->cls_addr = cls_addr;
    call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
 * Destructor for an ABI call object.
static void be_abi_call_free(be_abi_call_t *call)
    del_set(call->params);
 |  ___| __ __ _ _ __ ___   ___  | | | | __ _ _ __   __| | (_)_ __   __ _
 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
 |  _|| |  | (_| | | | | | |  __/ |  _  | (_| | | | | (_| | | | | | | (_| |
 |_|  |_|   \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |

    Handling of the stack frame. It is composed of three types:
    1) The type of the arguments which are pushed on the stack.
    2) The "between type" which consists of stuff the call of the
       function pushes on the stack (like the return address and
       the old base pointer for ia32).
    3) The Firm frame type which consists of all local variables
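/*
 * An illustrative picture (an assumption for a typical decreasing stack as
 * on ia32, matching stack_frame_init() below, which sets order[0] = locals,
 * order[1] = between, order[2] = args):
 *
 *     high addresses
 *     +------------------+
 *     |    stack args    |   arg_type      (order[2])
 *     +------------------+
 *     |  return address, |
 *     |   old base ptr   |   between_type  (order[1])
 *     +------------------+
 *     |  local variables |   frame_type    (order[0])
 *     +------------------+
 *     low addresses
 */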
static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
    ir_type *t = get_entity_owner(ent);
    int ofs = get_entity_offset(ent);

    /* Find the type the entity is contained in. */
    for (index = 0; index < N_FRAME_TYPES; ++index) {
        if (frame->order[index] == t)
        /* Add the size of all the types below the one of the entity to the entity's offset. */
        ofs += get_type_size_bytes(frame->order[index]);

    /* correct the offset by the initial position of the frame pointer */
    ofs -= frame->initial_offset;

    /* correct the offset with the current bias. */
 * Retrieve the entity with given offset from a frame type.
static ir_entity *search_ent_with_offset(ir_type *t, int offset)
    for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
        ir_entity *ent = get_compound_member(t, i);
        if (get_entity_offset(ent) == offset)

static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
    ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
    ir_entity *ent = search_ent_with_offset(base, 0);

        frame->initial_offset
            = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
        frame->initial_offset = get_stack_entity_offset(frame, ent, 0);

    return frame->initial_offset;
 * Initializes the frame layout from parts.
 *
 * @param frame     the stack layout that will be initialized
 * @param args      the stack argument layout type
 * @param between   the between layout type
 * @param locals    the method frame type
 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
 * @param param_map an array mapping method argument positions to the stack argument type
 *
 * @return the initialized stack layout
static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
                                           ir_type *between, ir_type *locals, int stack_dir,
                                           ir_entity *param_map[])
    frame->arg_type       = args;
    frame->between_type   = between;
    frame->frame_type     = locals;
    frame->initial_offset = 0;
    frame->initial_bias   = 0;
    frame->stack_dir      = stack_dir;
    frame->order[1]       = between;
    frame->param_map      = param_map;

        frame->order[0] = args;
        frame->order[2] = locals;

        /* typical decreasing stack: locals have the
         * lowest addresses, arguments the highest */
        frame->order[0] = locals;
        frame->order[2] = args;
    Adjustment of the calls inside a graph.

 * Transform a call node into a be_Call node.
 *
 * @param env     The ABI environment for the current irg.
 * @param irn     The call node.
 * @param curr_sp The stack pointer node to use.
 * @return        The stack pointer after the call.
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
    ir_graph *irg = env->birg->irg;
    const arch_env_t *arch_env = env->birg->main_env->arch_env;
    ir_type *call_tp = get_Call_type(irn);
    ir_node *call_ptr = get_Call_ptr(irn);
    int n_params = get_method_n_params(call_tp);
    ir_node *curr_mem = get_Call_mem(irn);
    ir_node *bl = get_nodes_block(irn);
    int stack_dir = arch_env->stack_dir;
    const arch_register_t *sp = arch_env->sp;
    be_abi_call_t *call = be_abi_call_new(sp->reg_class);
    ir_mode *mach_mode = sp->reg_class->mode;
    struct obstack *obst = be_get_birg_obst(irg);
    int no_alloc = call->flags.bits.frame_is_setup_on_call;
    int n_res = get_method_n_ress(call_tp);
    int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
    ir_node *res_proj = NULL;
    int n_reg_params = 0;
    int n_stack_params = 0;
    pset_new_t destroyed_regs, states;
    pset_new_iterator_t iter;
    int n_reg_results = 0;
    const arch_register_t *reg;
    const ir_edge_t *edge;
    int *stack_param_idx;
    int i, n, destroy_all_regs;

    pset_new_init(&destroyed_regs);
    pset_new_init(&states);
    /* Let the isa fill out the abi description for that call node. */
    arch_env_get_call_abi(arch_env, call_tp, call);

    /* Insert code to put the stack arguments on the stack. */
    assert(get_Call_n_params(irn) == n_params);
    assert(obstack_object_size(obst) == 0);
    stack_param_idx = ALLOCAN(int, n_params);
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
        int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));

        stack_size += round_up2(arg->space_before, arg->alignment);
        stack_size += round_up2(arg_size, arg->alignment);
        stack_size += round_up2(arg->space_after, arg->alignment);

        stack_param_idx[n_stack_params++] = i;
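        /* Worked example (illustrative numbers): a 5-byte argument with
         * alignment 4 and no space_before/space_after contributes
         * round_up2(0, 4) + round_up2(5, 4) + round_up2(0, 4) = 0 + 8 + 0
         * bytes to stack_size. */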
    /* Collect all arguments which are passed in registers. */
    reg_param_idxs = ALLOCAN(int, n_params);
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
        if (arg && arg->in_reg) {
            reg_param_idxs[n_reg_params++] = i;

     * If the stack is decreasing, we do not want to store sequentially,
     * and nobody else allocated the call frame, we allocate as much
     * space on the stack as all parameters need, by
     * moving the stack pointer along the stack's direction.
     *
     * Note: we also have to do this for stack_size == 0, because we may have
     * to adjust stack alignment for the call.
    if (stack_dir < 0 && !do_seq && !no_alloc) {
        curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
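        /* Sketch of the effect (illustrative numbers): with stack_size == 24
         * on a decreasing stack, this single IncSP reserves the whole 24-byte
         * outgoing argument area before any argument Store is emitted. */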
    dbgi = get_irn_dbg_info(irn);
    /* If there are some parameters which shall be passed on the stack. */
    if (n_stack_params > 0) {
        ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);

         * Reverse the list of stack parameters if call arguments are from left to right.
         * We must reverse them again if they are pushed (not stored) and the stack
         * direction is downwards.
        if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
            for (i = 0; i < n_stack_params >> 1; ++i) {
                int other = n_stack_params - i - 1;
                int tmp   = stack_param_idx[i];
                stack_param_idx[i]     = stack_param_idx[other];
                stack_param_idx[other] = tmp;
        curr_mem = get_Call_mem(irn);
            in[n_in++] = curr_mem;

        for (i = 0; i < n_stack_params; ++i) {
            int p = stack_param_idx[i];
            be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
            ir_node *param = get_Call_param(irn, p);
            ir_node *addr = curr_sp;
            ir_type *param_type = get_method_param_type(call_tp, p);
            int param_size = get_type_size_bytes(param_type) + arg->space_after;

             * If we want to build the arguments sequentially,
             * the stack pointer for the next one must be incremented,
             * and the memory value propagated.
                addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
                                              param_size + arg->space_before, 0);
                add_irn_dep(curr_sp, curr_mem);

                curr_ofs += arg->space_before;
                curr_ofs = round_up2(curr_ofs, arg->alignment);

                /* Make the expression to compute the argument's offset. */
                ir_mode *constmode = mach_mode;
                if (mode_is_reference(mach_mode)) {
                addr = new_r_Const_long(irg, constmode, curr_ofs);
                addr = new_r_Add(bl, curr_sp, addr, mach_mode);

            /* Insert a store for primitive arguments. */
            if (is_atomic_type(param_type)) {
                ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
                store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
                mem = new_r_Proj(store, mode_M, pn_Store_M);

            /* Make a mem copy for compound arguments. */
                assert(mode_is_reference(get_irn_mode(param)));
                copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
                mem = new_r_Proj(copy, mode_M, pn_CopyB_M_regular);

            curr_ofs += param_size;
    /* We need the Sync only if we didn't build the stores sequentially. */
        if (n_stack_params >= 1) {
            curr_mem = new_r_Sync(bl, n_in, in);
            curr_mem = get_Call_mem(irn);

    /* check for the return_twice property */
    destroy_all_regs = 0;
    if (is_SymConst_addr_ent(call_ptr)) {
        ir_entity *ent = get_SymConst_entity(call_ptr);

        if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
            destroy_all_regs = 1;
        ir_type *call_tp = get_Call_type(irn);

        if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
            destroy_all_regs = 1;
    /* Put caller-save registers into the destroyed set and state registers into the states set. */
    for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = arch_register_for_index(cls, j);

            if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
                if (! arch_register_type_is(reg, ignore))
                    pset_new_insert(&destroyed_regs, (void *) reg);
            if (arch_register_type_is(reg, state)) {
                pset_new_insert(&destroyed_regs, (void*) reg);
                pset_new_insert(&states, (void*) reg);

    if (destroy_all_regs) {
        /* even if "destroy all" is specified, neither SP nor FP is destroyed (else bad things would happen) */
        pset_new_remove(&destroyed_regs, arch_env->sp);
        pset_new_remove(&destroyed_regs, arch_env->bp);
    /* search the largest result proj number */
    res_projs = ALLOCANZ(ir_node*, n_res);

    foreach_out_edge(irn, edge) {
        const ir_edge_t *res_edge;
        ir_node *irn = get_edge_src_irn(edge);

        if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)

        foreach_out_edge(irn, res_edge) {
            ir_node *res = get_edge_src_irn(res_edge);

            assert(is_Proj(res));

            proj = get_Proj_proj(res);
            assert(proj < n_res);
            assert(res_projs[proj] == NULL);
            res_projs[proj] = res;

    /** TODO: this is not correct for cases where return values are passed
     * on the stack, but no known ABI does this currently...
    n_reg_results = n_res;

    assert(obstack_object_size(obst) == 0);
    in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
    /* make the back end call node and set its register requirements. */
    for (i = 0; i < n_reg_params; ++i) {
        in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);

    /* add state registers ins */
    foreach_pset_new(&states, reg, iter) {
        const arch_register_class_t *cls = arch_register_get_class(reg);
        ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
        ir_fprintf(stderr, "Adding %+F\n", regnode);
        ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
        in[n_ins++] = regnode;
    assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));

    /* ins collected, build the call */
    if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
        low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
                               n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
                               n_ins, in, get_Call_type(irn));
        be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
        low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
                               n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
                               n_ins, in, get_Call_type(irn));
    be_Call_set_pop(low_call, call->pop);

    /* put the call into the list of all calls for later processing */
    ARR_APP1(ir_node *, env->calls, low_call);

    /* create new stack pointer */
    curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
    be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
                                 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
    arch_set_irn_register(curr_sp, sp);
    /* now handle results */
    for (i = 0; i < n_res; ++i) {
        ir_node *proj = res_projs[i];
        be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);

        /* return values on the stack are not supported yet */

            shift the proj number to the right, since we will drop the
            unspeakable Proj_T from the Call. Therefore, all real argument
            Proj numbers must be increased by pn_be_Call_first_res
        pn = i + pn_be_Call_first_res;

            ir_type *res_type = get_method_res_type(call_tp, i);
            ir_mode *mode = get_type_mode(res_type);
            proj = new_r_Proj(low_call, mode, pn);
            set_Proj_pred(proj, low_call);
            set_Proj_proj(proj, pn);

            pset_new_remove(&destroyed_regs, arg->reg);
        Set the register class of the call address to
        the backend provided class (default: stack pointer class)
    be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);

    DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));

    /* Set the register classes and constraints of the Call parameters. */
    for (i = 0; i < n_reg_params; ++i) {
        int index = reg_param_idxs[i];
        be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
        assert(arg->reg != NULL);

        be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,

    /* Set the register constraints of the results. */
    for (i = 0; i < n_res; ++i) {
        ir_node *proj = res_projs[i];
        const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
        int pn = get_Proj_proj(proj);

        be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
        arch_set_irn_register(proj, arg->reg);
    exchange(irn, low_call);

    /* kill the ProjT node */
    if (res_proj != NULL) {
    /* Make additional projs for the caller save registers
       and the Keep node which keeps them alive. */
        const arch_register_t *reg;
        int curr_res_proj = pn_be_Call_first_res + n_reg_results;
        pset_new_iterator_t iter;

        n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
        in = ALLOCAN(ir_node *, n_ins);

        /* also keep the stack pointer */
        set_irn_link(curr_sp, (void*) sp);

        foreach_pset_new(&destroyed_regs, reg, iter) {
            ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);

            /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
            be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
            arch_set_irn_register(proj, reg);

            set_irn_link(proj, (void*) reg);

        for (i = 0; i < n_reg_results; ++i) {
            ir_node *proj = res_projs[i];
            const arch_register_t *reg = arch_get_irn_register(proj);
            set_irn_link(proj, (void*) reg);

        /* create the Keep for the caller save registers */
        keep = be_new_Keep(bl, n, in);
        for (i = 0; i < n; ++i) {
            const arch_register_t *reg = get_irn_link(in[i]);
            be_node_set_reg_class_in(keep, i, reg->reg_class);
    /* Clean up the stack. */
    assert(stack_size >= call->pop);
    stack_size -= call->pop;

    if (stack_size > 0) {
        ir_node *mem_proj = NULL;

        foreach_out_edge(low_call, edge) {
            ir_node *irn = get_edge_src_irn(edge);
            if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {

            mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
            keep_alive(mem_proj);

    /* Clean up the stack frame or revert alignment fixes if we allocated it */
        curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);

    be_abi_call_free(call);

    pset_new_destroy(&states);
    pset_new_destroy(&destroyed_regs);
 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
 *
 * @param stack_alignment the minimum stack alignment
 * @param size            the node containing the non-aligned size
 * @param block           the block where the new nodes are allocated
 * @param dbg             debug info for new nodes
 *
 * @return a node representing the aligned size
static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
                                  ir_node *block, dbg_info *dbg)
    if (stack_alignment > 1) {
        assert(is_po2(stack_alignment));

        mode = get_irn_mode(size);
        tv = new_tarval_from_long(stack_alignment-1, mode);
        irg = get_Block_irg(block);
        mask = new_r_Const(irg, tv);
        size = new_rd_Add(dbg, block, size, mask, mode);

        tv = new_tarval_from_long(-(long)stack_alignment, mode);
        mask = new_r_Const(irg, tv);
        size = new_rd_And(dbg, block, size, mask, mode);
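        /* Worked example (illustrative): with stack_alignment == 16 the
         * Add/And pair computes (size + 15) & -16, so a requested size of
         * 25 becomes (25 + 15) & ~15 = 32, the next multiple of 16. */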
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
    const ir_edge_t *edge;
    unsigned stack_alignment;

    assert(get_Alloc_where(alloc) == stack_alloc);

    block = get_nodes_block(alloc);
    irg = get_Block_irg(block);
    type = get_Alloc_type(alloc);

    foreach_out_edge(alloc, edge) {
        ir_node *irn = get_edge_src_irn(edge);

        assert(is_Proj(irn));
        switch (get_Proj_proj(irn)) {

    /* Beware: currently Alloc nodes without a result might happen;
       only escape analysis kills them, and that phase runs only for object-
       oriented source. We kill the Alloc here. */
    if (alloc_res == NULL && alloc_mem) {
        exchange(alloc_mem, get_Alloc_mem(alloc));

    dbg = get_irn_dbg_info(alloc);
    count = get_Alloc_count(alloc);

    /* we might need to multiply the count with the element size */
    if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
        ir_mode *mode = get_irn_mode(count);
        tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
        ir_node *cnst = new_rd_Const(dbg, irg, tv);
        size = new_rd_Mul(dbg, block, count, cnst, mode);
    /* The stack pointer will be modified in an unknown manner.
       We cannot omit the frame pointer then. */
    env->call->flags.bits.try_omit_fp = 0;

    stack_alignment = 1 << env->arch_env->stack_alignment;
    size = adjust_alloc_size(stack_alignment, size, block, dbg);
    new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
    set_irn_dbg_info(new_alloc, dbg);

    if (alloc_mem != NULL) {
        addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);

        /* We need to sync the output mem of the AddSP with the input mem
           edge into the alloc node. */
        ins[0] = get_Alloc_mem(alloc);
        sync = new_r_Sync(block, 2, ins);

        exchange(alloc_mem, sync);

    exchange(alloc, new_alloc);

    /* fix projnum of alloca res */
    set_Proj_proj(alloc_res, pn_be_AddSP_res);

    curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
 * The Free is transformed into a back end free node and connected to the stack nodes.
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
    ir_node *subsp, *mem, *res, *size, *sync;
    unsigned stack_alignment;

    assert(get_Free_where(free) == stack_alloc);

    block = get_nodes_block(free);
    irg = get_irn_irg(block);
    type = get_Free_type(free);
    sp_mode = env->arch_env->sp->reg_class->mode;
    dbg = get_irn_dbg_info(free);

    /* we might need to multiply the size with the element size */
    if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
        tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
        ir_node *cnst = new_rd_Const(dbg, irg, tv);
        ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
        size = get_Free_size(free);

    stack_alignment = 1 << env->arch_env->stack_alignment;
    size = adjust_alloc_size(stack_alignment, size, block, dbg);

    /* The stack pointer will be modified in an unknown manner.
       We cannot omit the frame pointer then. */
    env->call->flags.bits.try_omit_fp = 0;
    subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
    set_irn_dbg_info(subsp, dbg);

    mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
    res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);

    /* we need to sync the memory */
    in[0] = get_Free_mem(free);
    sync = new_r_Sync(block, 2, in);

    /* and make the SubSP dependent on the former memory */
    add_irn_dep(subsp, get_Free_mem(free));

    exchange(free, sync);
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
static int dependent_on(ir_node *n1, ir_node *n2)
    assert(get_nodes_block(n1) == get_nodes_block(n2));

    return heights_reachable_in_block(ir_heights, n1, n2);

static int cmp_call_dependency(const void *c1, const void *c2)
    ir_node *n1 = *(ir_node **) c1;
    ir_node *n2 = *(ir_node **) c2;

        Classical qsort() comparison function behavior:
        0  if both elements are equal
        1  if second is "smaller" than first
        -1 if first is "smaller" than second
    if (dependent_on(n1, n2))

    if (dependent_on(n2, n1))

    /* The nodes have no depth order, but we need a total order because qsort()
    return get_irn_idx(n1) - get_irn_idx(n2);
 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
 * Clears the irg_is_leaf flag if a Call is detected.
static void link_ops_in_block_walker(ir_node *irn, void *data)
    be_abi_irg_t *env = data;
    ir_opcode code = get_irn_opcode(irn);

    if (code == iro_Call ||
        (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
        (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
        ir_node *bl = get_nodes_block(irn);
        void *save = get_irn_link(bl);

        if (code == iro_Call)
            env->call->flags.bits.irg_is_leaf = 0;

        set_irn_link(irn, save);
        set_irn_link(bl, irn);

    if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
        ir_node *param = get_Builtin_param(irn, 0);
        tarval *tv = get_Const_tarval(param);
        unsigned long value = get_tarval_long(tv);
        /* use ebp, so the climbframe algo works... */
            env->call->flags.bits.try_omit_fp = 0;
 * Process all Call/Alloc/Free nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the Block. We first order this list according to data dependency
 * and then connect the calls together.
static void process_ops_in_block(ir_node *bl, void *data)
    be_abi_irg_t *env = data;
    ir_node *curr_sp = env->init_sp;

    for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {

    nodes = ALLOCAN(ir_node*, n_nodes);
    for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {

    /* If there were call nodes in the block. */
        /* order the call nodes according to data dependency */
        qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);

        for (i = n_nodes - 1; i >= 0; --i) {
            ir_node *irn = nodes[i];

            DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
            switch (get_irn_opcode(irn)) {
                /* The stack pointer will be modified due to a call. */
                env->call->flags.bits.try_omit_fp = 0;
                curr_sp = adjust_call(env, irn, curr_sp);
                if (get_Alloc_where(irn) == stack_alloc)
                    curr_sp = adjust_alloc(env, irn, curr_sp);
                if (get_Free_where(irn) == stack_alloc)
                    curr_sp = adjust_free(env, irn, curr_sp);
                panic("invalid call");

    /* Keep the last stack state in the block by tying it to a Keep node;
     * the Proj from calls is already kept */
    if (curr_sp != env->init_sp &&
        !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
        keep = be_new_Keep(bl, 1, nodes);
        pmap_insert(env->keep_map, bl, keep);

    set_irn_link(bl, curr_sp);
 * Adjust all call nodes in the graph to the ABI conventions.
static void process_calls(be_abi_irg_t *env)
    ir_graph *irg = env->birg->irg;

    env->call->flags.bits.irg_is_leaf = 1;
    irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);

    ir_heights = heights_new(env->birg->irg);
    irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
    heights_free(ir_heights);
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param env          the ABI environment
 * @param call         the current call ABI
 * @param method_type  the method type
 * @param val_param_tp the value parameter type, will be destroyed
 * @param param_map    an array mapping method arguments to the stack layout type
 *
 * @return the stack argument layout type
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
                                 ir_type *method_type, ir_type *val_param_tp,
                                 ir_entity ***param_map)
    int dir = env->call->flags.bits.left_to_right ? 1 : -1;
    int inc = env->birg->main_env->arch_env->stack_dir * dir;
    int n = get_method_n_params(method_type);
    int curr = inc > 0 ? 0 : n - 1;
    struct obstack *obst = be_get_birg_obst(env->irg);
    ident *id = get_entity_ident(get_irg_entity(env->birg->irg));

    *param_map = map = OALLOCN(obst, ir_entity*, n);
    res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
    for (i = 0; i < n; ++i, curr += inc) {
        ir_type *param_type = get_method_param_type(method_type, curr);
        be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);

        if (arg->on_stack) {
            if (val_param_tp != NULL) {
                /* the entity was already created, create a copy in the param type */
                ir_entity *val_ent = get_method_value_param_ent(method_type, i);
                arg->stack_ent = copy_entity_own(val_ent, res);
                set_entity_link(val_ent, arg->stack_ent);
                set_entity_link(arg->stack_ent, NULL);
                /* create a new entity */
                snprintf(buf, sizeof(buf), "param_%d", i);
                arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
            ofs += arg->space_before;
            ofs = round_up2(ofs, arg->alignment);
            set_entity_offset(arg->stack_ent, ofs);
            ofs += arg->space_after;
            ofs += get_type_size_bytes(param_type);
            map[i] = arg->stack_ent;
    set_type_size_bytes(res, ofs);
    set_type_state(res, layout_fixed);
    const arch_register_t *reg;

static int cmp_regs(const void *a, const void *b)
    const reg_node_map_t *p = a;
    const reg_node_map_t *q = b;

    if (p->reg->reg_class == q->reg->reg_class)
        return p->reg->index - q->reg->index;
    return p->reg->reg_class - q->reg->reg_class;

static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
    int n = pmap_count(reg_map);

    foreach_pmap(reg_map, ent) {
        res[i].reg = ent->key;
        res[i].irn = ent->value;

    qsort(res, n, sizeof(res[0]), cmp_regs);
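/* Why the qsort above: pmap iteration order is unspecified, so the map is
 * flattened and sorted (by register class, then register index) to give
 * callers such as create_barrier() a deterministic input order. A sketch
 * of the intended use, assuming a populated pmap regs:
 *
 *     reg_node_map_t *rm = ALLOCAN(reg_node_map_t, pmap_count(regs));
 *     reg_map_to_arr(rm, regs);    // rm[0..n-1] is now stably ordered
 */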
 * Creates a barrier.
static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
    int n_regs = pmap_count(regs);

    in = ALLOCAN(ir_node*, n_regs+1);
    rm = ALLOCAN(reg_node_map_t, n_regs);
    reg_map_to_arr(rm, regs);
    for (n = 0; n < n_regs; ++n) {

    irn = be_new_Barrier(bl, n, in);

    for (n = 0; n < n_regs; ++n) {
        ir_node *pred = rm[n].irn;
        const arch_register_t *reg = rm[n].reg;
        arch_register_type_t add_type = 0;
        const backend_info_t *info;

        /* stupid workaround for now... as not all nodes report register
        info = be_get_info(skip_Proj(pred));
        if (info != NULL && info->out_infos != NULL) {
            const arch_register_req_t *ireq = arch_get_register_req_out(pred);
            if (ireq->type & arch_register_req_type_ignore)
                add_type |= arch_register_req_type_ignore;
            if (ireq->type & arch_register_req_type_produces_sp)
                add_type |= arch_register_req_type_produces_sp;

        proj = new_r_Proj(irn, get_irn_mode(pred), n);
        be_node_set_reg_class_in(irn, n, reg->reg_class);
        be_set_constr_single_reg_in(irn, n, reg, 0);
        be_set_constr_single_reg_out(irn, n, reg, add_type);
        arch_set_irn_register(proj, reg);

        pmap_insert(regs, (void *) reg, proj);

    *mem = new_r_Proj(irn, mode_M, n);
 * Creates a be_Return for a Return node.
 *
 * @param env   the abi environment
 * @param irn   the Return node or NULL if there was none
 * @param bl    the block where the be_Return should be placed
 * @param mem   the current memory
 * @param n_res number of return results
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
                                 ir_node *mem, int n_res)
    be_abi_call_t *call = env->call;
    const arch_env_t *arch_env = env->birg->main_env->arch_env;
    pmap *reg_map = pmap_create();
    ir_node *keep = pmap_get(env->keep_map, bl);
    const arch_register_t **regs;

        Get the valid stack node in this block.
        If we had a call in that block there is a Keep constructed by process_calls()
        which points to the last stack modification in that block. We'll use
        it then. Otherwise we use the stack from the start block and let
        the SSA construction fix the usage.
    stack = be_abi_reg_map_get(env->regs, arch_env->sp);
        stack = get_irn_n(keep, 0);
        remove_End_keepalive(get_irg_end(env->birg->irg), keep);
    /* Insert results for Return into the register map. */
    for (i = 0; i < n_res; ++i) {
        ir_node *res = get_Return_res(irn, i);
        be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
        assert(arg->in_reg && "return value must be passed in register");
        pmap_insert(reg_map, (void *) arg->reg, res);

    /* Add uses of the callee save registers. */
    foreach_pmap(env->regs, ent) {
        const arch_register_t *reg = ent->key;
        if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
            pmap_insert(reg_map, ent->key, ent->value);

    be_abi_reg_map_set(reg_map, arch_env->sp, stack);

    /* Make the Epilogue node and call the arch's epilogue maker. */
    create_barrier(bl, &mem, reg_map, 1);
    call->cb->epilogue(env->cb, bl, &mem, reg_map);

        Maximum size of the in array for Return nodes is
        return args + callee save/ignore registers + memory + stack pointer
    in_max = pmap_count(reg_map) + n_res + 2;

    in = ALLOCAN(ir_node*, in_max);
    regs = ALLOCAN(arch_register_t const*, in_max);

    in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
    regs[1] = arch_env->sp;

    /* clear SP entry, since it has already been grown. */
    pmap_insert(reg_map, (void *) arch_env->sp, NULL);
    for (i = 0; i < n_res; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);

        in[n] = be_abi_reg_map_get(reg_map, arg->reg);
        regs[n++] = arg->reg;

        /* Clear the map entry to mark the register as processed. */
        be_abi_reg_map_set(reg_map, arg->reg, NULL);

    /* grow the rest of the stuff. */
    foreach_pmap(reg_map, ent) {
            regs[n++] = ent->key;

    /* The in array for the new back end return is now ready. */
    dbgi = get_irn_dbg_info(irn);
    /* we have to pop the shadow parameter in case of struct returns */
    ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);

    /* Set the register classes of the return's parameters accordingly. */
    for (i = 0; i < n; ++i) {
        if (regs[i] == NULL)

        be_node_set_reg_class_in(ret, i, regs[i]->reg_class);

    /* Free the space of the Epilog's in array and the register <-> proj map. */
    pmap_destroy(reg_map);
typedef struct ent_pos_pair ent_pos_pair;
struct ent_pos_pair {
    ir_entity    *ent;   /**< a value param entity */
    int           pos;   /**< its parameter number */
    ent_pos_pair *next;  /**< for linking */

typedef struct lower_frame_sels_env_t {
    ent_pos_pair *value_param_list;          /**< the list of all value param entities */
    ir_node      *frame;                     /**< the current frame */
    const arch_register_class_t *sp_class;   /**< register class of the stack pointer */
    const arch_register_class_t *link_class; /**< register class of the link pointer */
    ir_type      *value_tp;                  /**< the value type if any */
    ir_type      *frame_tp;                  /**< the frame type */
    int           static_link_pos;           /**< argument number of the hidden static link */
} lower_frame_sels_env_t;
 * Return an entity from the backend for a value param entity.
 *
 * @param ent a value param type entity
 * @param ctx context
static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
    ir_entity *argument_ent = get_entity_link(ent);

    if (argument_ent == NULL) {
        /* we have NO argument entity yet: This is bad, as we will
         * need one for the backing store.
        ir_type *frame_tp = ctx->frame_tp;
        unsigned offset = get_type_size_bytes(frame_tp);
        ir_type *tp = get_entity_type(ent);
        unsigned align = get_type_alignment_bytes(tp);

        offset += align - 1;
        offset &= ~(align - 1);

        argument_ent = copy_entity_own(ent, frame_tp);

        /* must be automatic to set a fixed layout */
        set_entity_offset(argument_ent, offset);
        offset += get_type_size_bytes(tp);

        set_type_size_bytes(frame_tp, offset);
        set_entity_link(ent, argument_ent);

    return argument_ent;
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddress.
 * Links all used entities.
static void lower_frame_sels_walker(ir_node *irn, void *data)
    lower_frame_sels_env_t *ctx = data;

        ir_node *ptr = get_Sel_ptr(irn);

        if (ptr == ctx->frame) {
            ir_entity *ent = get_Sel_entity(irn);
            ir_node *bl = get_nodes_block(irn);
            int is_value_param = 0;

            if (get_entity_owner(ent) == ctx->value_tp) {
                /* replace by its copy from the argument type */
                pos = get_struct_member_index(ctx->value_tp, ent);
                ent = get_argument_entity(ent, ctx);

            nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);

            /* check if it's a param Sel and if we have not seen this entity before */
            if (is_value_param && get_entity_link(ent) == NULL) {
                ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
                set_entity_link(ent, ctx->value_param_list);
 * Check if a value parameter is transmitted as a register.
 * This might happen if the address of a parameter is taken which is
 * transmitted in registers.
 *
 * Note that on some architectures this case must be handled specially
 * because the place of the backing store is determined by their ABI.
 *
 * In the default case we move the entity to the frame type and create
 * a backing store into the first block.
static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
    be_abi_call_t *call = env->call;
    ir_graph *irg = env->birg->irg;
    ent_pos_pair *entry, *new_list;
    int i, n = ARR_LEN(value_param_list);

    for (i = 0; i < n; ++i) {
        int pos = value_param_list[i].pos;
        be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);

            DBG((dbg, LEVEL_2, "\targ #%d needs a backing store\n", pos));
            value_param_list[i].next = new_list;
            new_list = &value_param_list[i];
    if (new_list != NULL) {
        /* ok, change the graph */
        ir_node *start_bl = get_irg_start_block(irg);
        ir_node *first_bl = get_first_block_succ(start_bl);
        ir_node *frame, *imem, *nmem, *store, *mem, *args;
        optimization_state_t state;

        assert(first_bl && first_bl != start_bl);
        /* we have already removed critical edges, so the following
           assertion should always be true. */
        assert(get_Block_n_cfgpreds(first_bl) == 1);

        /* now create backing stores */
        frame = get_irg_frame(irg);
        imem = get_irg_initial_mem(irg);

        save_optimization_state(&state);
        nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
        restore_optimization_state(&state);

        /* reroute all edges to the new memory source */
        edges_reroute(imem, nmem, irg);

        args = get_irg_args(irg);
        for (entry = new_list; entry != NULL; entry = entry->next) {
            ir_type *tp = get_entity_type(entry->ent);
            ir_mode *mode = get_type_mode(tp);

            /* address for the backing store */
            addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);

                mem = new_r_Proj(store, mode_M, pn_Store_M);

            /* the backing store itself */
            store = new_r_Store(first_bl, mem, addr,
                                new_r_Proj(args, mode, i), 0);
        /* the new memory Proj gets the last Proj from store */
        set_Proj_pred(nmem, store);
        set_Proj_proj(nmem, pn_Store_M);

    /* move all entities to the frame type */
    frame_tp = get_irg_frame_type(irg);
    offset = get_type_size_bytes(frame_tp);

    /* we will add new entities: set the layout to undefined */
    assert(get_type_state(frame_tp) == layout_fixed);
    set_type_state(frame_tp, layout_undefined);
    for (entry = new_list; entry != NULL; entry = entry->next) {
        ir_entity *ent = entry->ent;

        /* If the entity is still on the argument type, move it to the
         * frame type. This happens if the value_param type was built
         * due to compound params.
        if (get_entity_owner(ent) != frame_tp) {
            ir_type *tp = get_entity_type(ent);
            unsigned align = get_type_alignment_bytes(tp);

            offset += align - 1;
            offset &= ~(align - 1);
            set_entity_owner(ent, frame_tp);

            /* must be automatic to set a fixed layout */
            set_entity_offset(ent, offset);
            offset += get_type_size_bytes(tp);

    set_type_size_bytes(frame_tp, offset);

    /* fix the layout again */
    set_type_state(frame_tp, layout_fixed);
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
static void fix_start_block(ir_graph *irg)
    ir_node *initial_X = get_irg_initial_exec(irg);
    ir_node *start_block = get_irg_start_block(irg);
    const ir_edge_t *edge;

    assert(is_Proj(initial_X));

    foreach_out_edge(initial_X, edge) {
        ir_node *block = get_edge_src_irn(edge);

        if (is_Anchor(block))
        if (block != start_block) {
            ir_node *jmp = new_r_Jmp(start_block);
            set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
            set_irg_initial_exec(irg, jmp);

    panic("Initial exec has no follow block in %+F", irg);
 * Update the entity of Sels that refer to the outer value parameters.
static void update_outer_frame_sels(ir_node *irn, void *env)
    lower_frame_sels_env_t *ctx = env;

    ptr = get_Sel_ptr(irn);
    if (! is_arg_Proj(ptr))
    if (get_Proj_proj(ptr) != ctx->static_link_pos)

    ent = get_Sel_entity(irn);
    if (get_entity_owner(ent) == ctx->value_tp) {
        /* replace by its copy from the argument type */
        pos = get_struct_member_index(ctx->value_tp, ent);
        ent = get_argument_entity(ent, ctx);
        set_Sel_entity(irn, ent);

        /* check if we have not seen this entity before */
        if (get_entity_link(ent) == NULL) {
            ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
            set_entity_link(ent, ctx->value_param_list);
 * Fix access to outer local variables.
static void fix_outer_variable_access(be_abi_irg_t *env,
                                      lower_frame_sels_env_t *ctx)
    for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
        ir_entity *ent = get_class_member(ctx->frame_tp, i);

        if (! is_method_entity(ent))

        irg = get_entity_irg(ent);

         * FIXME: find the number of the static link parameter;
         * for now we assume 0 here.
        ctx->static_link_pos = 0;

        irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
 * Modify the irg itself and the frame type.
static void modify_irg(be_abi_irg_t *env)
    be_abi_call_t *call = env->call;
    const arch_env_t *arch_env = env->birg->main_env->arch_env;
    const arch_register_t *sp = arch_env->sp;
    ir_graph *irg = env->birg->irg;
    ir_node *new_mem_proj;
    ir_type *method_type = get_entity_type(get_irg_entity(irg));
    struct obstack *obst = be_get_birg_obst(irg);
    unsigned frame_size;
    const arch_register_t *fp_reg;
    ir_node *frame_pointer;
    const ir_edge_t *edge;
    ir_type *arg_type, *bet_type, *tp;
    lower_frame_sels_env_t ctx;
    ir_entity **param_map;

    DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));

    /* Must fetch memory here, otherwise the start Barrier gets the wrong
     * memory, which leads to loops in the DAG. */
    old_mem = get_irg_initial_mem(irg);

    irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);

    /* set the links of all frame entities to NULL; we use this to detect
       if an entity is already linked in the value_param_list */
    tp = get_method_value_param_type(method_type);

        /* clear the links of the clone type, let the
           original entities point to their clones */
        for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
            ir_entity *mem = get_struct_member(tp, i);
            set_entity_link(mem, NULL);

    arg_type = compute_arg_type(env, call, method_type, tp, &param_map);
    /* Convert the Sel nodes in the irg to frame addr nodes: */
    ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
    ctx.frame            = get_irg_frame(irg);
    ctx.sp_class         = env->arch_env->sp->reg_class;
    ctx.link_class       = env->arch_env->link_class;
    ctx.frame_tp         = get_irg_frame_type(irg);

    /* layout the stackframe now */
    if (get_type_state(ctx.frame_tp) == layout_undefined) {
        default_layout_compound_type(ctx.frame_tp);

    /* we will possibly add new entities to the frame: set the layout to undefined */
    assert(get_type_state(ctx.frame_tp) == layout_fixed);
    set_type_state(ctx.frame_tp, layout_undefined);

    irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);

    /* fix the frame type layout again */
    set_type_state(ctx.frame_tp, layout_fixed);
    /* align the stack frame to 4 bytes */
    frame_size = get_type_size_bytes(ctx.frame_tp);
    if (frame_size % 4 != 0) {
        set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
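        /* Worked example (illustrative): a 13-byte frame becomes
         * 13 + 4 - (13 % 4) = 16 bytes; sizes already divisible by 4
         * are left alone by the guard above. */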
    env->regs = pmap_create();

    n_params = get_method_n_params(method_type);
    args = OALLOCNZ(obst, ir_node*, n_params);

     * for inner functions we must now fix access to outer frame entities.
    fix_outer_variable_access(env, &ctx);

    /* Check if a value parameter is transmitted as a register.
     * This might happen if the address of a parameter is taken which is
     * transmitted in registers.
     *
     * Note that on some architectures this case must be handled specially
     * because the place of the backing store is determined by their ABI.
     *
     * In the default case we move the entity to the frame type and create
     * a backing store into the first block.
    fix_address_of_parameter_access(env, ctx.value_param_list);

    DEL_ARR_F(ctx.value_param_list);
    irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);

    /* Fill the argument vector */
    arg_tuple = get_irg_args(irg);
    foreach_out_edge(arg_tuple, edge) {
        ir_node *irn = get_edge_src_irn(edge);
        if (! is_Anchor(irn)) {
            int nr = get_Proj_proj(irn);

            DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));

    bet_type = call->cb->get_between_type(env->cb);
    stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
    /* Count the register params and add them to the number of Projs for the RegParams node */
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
        if (arg->in_reg && args[i]) {
            assert(arg->reg != sp && "cannot use stack pointer as parameter register");
            assert(i == get_Proj_proj(args[i]));

            /* For now, associate the register with the old Proj from Start representing that argument. */
            pmap_insert(env->regs, (void *) arg->reg, args[i]);
            DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));

    /* Collect all callee-save registers */
    for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = &cls->regs[j];
            if (arch_register_type_is(reg, callee_save) ||
                arch_register_type_is(reg, state)) {
                pmap_insert(env->regs, (void *) reg, NULL);

    /* handle start block here (place a jump in the block) */
    fix_start_block(irg);

    pmap_insert(env->regs, (void *) sp, NULL);
    pmap_insert(env->regs, (void *) arch_env->bp, NULL);
    start_bl = get_irg_start_block(irg);
    env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);

     * make Proj nodes for the callee save registers.
     * Memorize them, since Return nodes get those as inputs.
     *
     * Note that if a register corresponds to an argument, the regs map contains
     * the old Proj from Start for that argument.
    rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
    reg_map_to_arr(rm, env->regs);
    for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
        arch_register_t *reg = (void *) rm[i].reg;
        ir_mode *mode = reg->reg_class->mode;
        arch_register_req_type_t add_type = 0;

            add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;

        proj = new_r_Proj(env->start, mode, nr + 1);
        pmap_insert(env->regs, (void *) reg, proj);
        be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
        arch_set_irn_register(proj, reg);

        DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));

    /* create a new initial memory proj */
    assert(is_Proj(old_mem));
    arch_set_out_register_req(env->start, 0, arch_no_register_req);
    new_mem_proj = new_r_Proj(env->start, mode_M, 0);
    set_irg_initial_mem(irg, mem);

    /* Generate the Prologue */
    fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);

    /* do the stack allocation BEFORE the barrier, or spill code
       might be added before it */
    env->init_sp = be_abi_reg_map_get(env->regs, sp);
    env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
    be_abi_reg_map_set(env->regs, sp, env->init_sp);

    create_barrier(start_bl, &mem, env->regs, 0);

    env->init_sp = be_abi_reg_map_get(env->regs, sp);
    arch_set_irn_register(env->init_sp, sp);

    frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
    set_irg_frame(irg, frame_pointer);
    pset_insert_ptr(env->ignore_regs, fp_reg);

    /* rewire old mem users to new mem */
    exchange(old_mem, mem);

    /* keep the mem (for functions with an endless loop = no return) */
    set_irg_initial_mem(irg, mem);
    /* Now, introduce stack param nodes for all parameters passed on the stack */
    for (i = 0; i < n_params; ++i) {
        ir_node *arg_proj = args[i];
        ir_node *repl = NULL;

        if (arg_proj != NULL) {
            be_abi_call_arg_t *arg;
            ir_type *param_type;
            int nr = get_Proj_proj(arg_proj);

            nr = MIN(nr, n_params);
            arg = get_call_arg(call, 0, nr, 1);
            param_type = get_method_param_type(method_type, nr);

                repl = pmap_get(env->regs, (void *) arg->reg);
            } else if (arg->on_stack) {
                ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);

                /* For atomic parameters which are actually used, we create a Load node. */
                if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
                    ir_mode *mode = get_type_mode(param_type);
                    ir_mode *load_mode = arg->load_mode;

                    ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
                    repl = new_r_Proj(load, load_mode, pn_Load_res);

                    if (mode != load_mode) {
                        repl = new_r_Conv(start_bl, repl, mode);

                    /* The stack parameter is not primitive (it is a struct or array),
                     * so we will create a node representing the parameter's address
            assert(repl != NULL);

            /* Beware: the mode of the register parameters is always the mode of the register class,
               which may be wrong. Add Convs then. */
            mode = get_irn_mode(args[i]);
            if (mode != get_irn_mode(repl)) {
                repl = new_r_Conv(get_nodes_block(repl), repl, mode);
            exchange(args[i], repl);

    /* the arg proj is not needed anymore now and should only be used by the anchor */
    assert(get_irn_n_edges(arg_tuple) == 1);
    kill_node(arg_tuple);
    set_irg_args(irg, new_r_Bad(irg));
    /* All Return nodes hang on the End node, so look for them there. */
    end = get_irg_end_block(irg);
    for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
        ir_node *irn = get_Block_cfgpred(end, i);

        if (is_Return(irn)) {
            ir_node *blk = get_nodes_block(irn);
            ir_node *mem = get_Return_mem(irn);
            ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));

    /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then;
       the code is dead and will never be executed. */
/** Fix the state inputs of calls that still hang on unknowns */
static void fix_call_state_inputs(be_abi_irg_t *env)
    const arch_env_t *arch_env = env->arch_env;
    arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);

    /* Collect the state registers */
    n = arch_env_get_n_reg_class(arch_env);
    for (i = 0; i < n; ++i) {
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = arch_register_for_index(cls, j);
            if (arch_register_type_is(reg, state)) {
                ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);

    n = ARR_LEN(env->calls);
    n_states = ARR_LEN(stateregs);
    for (i = 0; i < n; ++i) {
        ir_node *call = env->calls[i];

        arity = get_irn_arity(call);

        /* the state reg inputs are the last n inputs of the calls */
        for (s = 0; s < n_states; ++s) {
            int inp = arity - n_states + s;
            const arch_register_t *reg = stateregs[s];
            ir_node *regnode = be_abi_reg_map_get(env->regs, reg);

            set_irn_n(call, inp, regnode);

    DEL_ARR_F(stateregs);
/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
	ir_type   *type   = get_entity_type(method);
	ident     *old_id = get_entity_ld_ident(method);
	ident     *id     = id_mangle3("", old_id, "$stub");
	ir_type   *parent = be->pic_trampolines_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

/**
 * Returns the trampoline entity for the given method.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
	ir_entity *result = pmap_get(env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
	}

	return result;
}

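/**
 * Create an indirection entity (a pointer to the original entity) in the
 * PIC symbols segment for the given entity.
 */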
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *old_id = get_entity_ld_ident(entity);
	ident     *id     = id_mangle3("", old_id, "$non_lazy_ptr");
	ir_type   *e_type = get_entity_type(entity);
	ir_type   *type   = new_type_pointer(e_type);
	ir_type   *parent = be->pic_symbols_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

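/**
 * Returns the PIC symbol entity for the given entity, creating it on
 * first use.
 */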
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
	ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
	}

	return result;
}

/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
	return get_entity_visibility(entity) != ir_visibility_external
	    && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}

/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
	ir_graph      *irg;
	ir_node       *pic_base;
	ir_node       *add;
	ir_node       *block;
	ir_mode       *mode;
	ir_node       *load;
	ir_node       *load_res;
	be_abi_irg_t  *env = data;
	int            arity, i;
	be_main_env_t *be = env->birg->main_env;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		dbg_info  *dbgi;
		ir_node   *pred = get_irn_n(node, i);
		ir_entity *entity;
		ir_entity *pic_symbol;
		ir_node   *pic_symconst;

		if (!is_SymConst(pred))
			continue;

		entity = get_SymConst_entity(pred);
		block  = get_nodes_block(pred);
		irg    = get_irn_irg(pred);

		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node   *trampoline_const;

			if (can_address_relative(entity))
				continue;

			dbgi             = get_irn_dbg_info(pred);
			trampoline       = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			                                            trampoline, NULL);
			set_irn_n(node, i, trampoline_const);
			continue;
		}

		/* everything else is accessed relative to EIP */
		mode     = get_irn_mode(pred);
		pic_base = arch_code_generator_get_pic_base(env->birg->cg);

		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			ir_node *add = new_r_Add(block, pic_base, pred, mode);

			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
			continue;
		}

		/* get entry from pic symbol segment */
		dbgi         = get_irn_dbg_info(pred);
		pic_symbol   = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		                                        pic_symbol, NULL);
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);

		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
		load_res = new_r_Proj(load, mode, pn_Load_res);

		set_irn_n(node, i, load_res);
	}
}

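/**
 * Builds the ABI information for a graph: lowers its call nodes, adapts
 * the graph to the calling conventions and fixes the state register
 * inputs of calls.
 */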
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
	be_abi_irg_t   *env       = XMALLOC(be_abi_irg_t);
	ir_node        *old_frame = get_irg_frame(birg->irg);
	ir_graph       *irg       = birg->irg;
	struct obstack *obst      = be_get_birg_obst(irg);

	pmap_entry          *ent;
	ir_node             *dummy;
	unsigned            *limited_bitset;
	arch_register_req_t *sp_req;

	be_omit_fp      = birg->main_env->options->omit_fp;
	be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;

	env->birg        = birg;
	env->arch_env    = birg->main_env->arch_env;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call        = be_abi_call_new(env->arch_env->sp->reg_class);
	arch_env_get_call_abi(env->arch_env, env->method_type, env->call);

	env->ignore_regs  = pset_new_ptr_default();
	env->keep_map     = pmap_create();
	env->dce_survivor = new_survive_dce();

	sp_req      = OALLOCZ(obst, arch_register_req_t);
	env->sp_req = sp_req;

	sp_req->type = arch_register_req_type_limited
	             | arch_register_req_type_produces_sp;
	sp_req->cls  = arch_register_get_class(env->arch_env->sp);

	limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
	rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
	sp_req->limited = limited_bitset;
	if (env->arch_env->sp->type & arch_register_type_ignore) {
		sp_req->type |= arch_register_req_type_ignore;
	}

	env->init_sp = dummy = new_r_Dummy(irg, env->arch_env->sp->reg_class->mode);

	env->calls = NEW_ARR_F(ir_node*, 0);

	if (birg->main_env->options->pic) {
		irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
	}

	/* Lower all call nodes in the IRG. */
	process_calls(env);

	/* Beware: init the backend abi call object after processing calls,
	 * otherwise some information might not be available yet. */
	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);

	/* Process the IRG */
	modify_irg(env);

	/* fix call inputs for state registers */
	fix_call_state_inputs(env);

	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	env->keep_map = NULL;

	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);
	env->calls = NULL;

	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));

	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	foreach_pmap(env->regs, ent) {
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	}

	env->call->cb->done(env->cb);
	env->cb = NULL;
	return env;
}

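/**
 * Frees all resources of the given ABI environment.
 */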
void be_abi_free(be_abi_irg_t *env)
{
	be_abi_call_free(env->call);
	free_survive_dce(env->dce_survivor);
	del_pset(env->ignore_regs);
	pmap_destroy(env->regs);
	free(env);
}

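/**
 * Sets the bits of all additionally ignored registers of the given
 * register class in the bitset bs.
 */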
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
	arch_register_t *reg;

	for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
		if (reg->reg_class == cls)
			bitset_set(bs, reg->index);
}

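/**
 * Sets the bits of all freely usable registers of the given class in
 * raw_bitset: registers ignored by the architecture or additionally
 * ignored by the ABI are left cleared.
 */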
void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
	unsigned         i;
	arch_register_t *reg;

	for (i = 0; i < cls->n_regs; ++i) {
		if (arch_register_type_is(&cls->regs[i], ignore))
			continue;

		rbitset_set(raw_bitset, i);
	}

	for (reg = pset_first(abi->ignore_regs); reg != NULL;
	     reg = pset_next(abi->ignore_regs)) {
		if (reg->reg_class != cls)
			continue;

		rbitset_clear(raw_bitset, reg->index);
	}
}

/* Returns the stack layout from an ABI environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
{
	return &abi->frame;
}

/*
  _____ _        ____  _             _
 |  ___(_)_  __ / ___|| |_ __ _  ___| | __
 | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
 |  _| | |>  <   ___) | || (_| | (__|   <
 |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\

*/

typedef ir_node **node_array;

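/** Environment for the walker that collects all stack-pointer producing nodes. */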
typedef struct fix_stack_walker_env_t {
	node_array sp_nodes;
} fix_stack_walker_env_t;

/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
	ir_node                   *insn = node;
	fix_stack_walker_env_t    *env  = data;
	const arch_register_req_t *req;

	if (is_Proj(node)) {
		insn = get_Proj_pred(node);
	}

	if (arch_irn_get_n_outs(insn) == 0)
		return;
	if (get_irn_mode(node) == mode_T)
		return;

	req = arch_get_register_req_out(node);
	if (! (req->type & arch_register_req_type_produces_sp))
		return;

	ARR_APP1(ir_node*, env->sp_nodes, node);
}

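/**
 * Collects all nodes that produce the stack pointer, rebuilds SSA form
 * for them and pins the newly created Phis to the stack pointer register.
 */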
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
{
	be_ssa_construction_env_t senv;
	int i, len;
	ir_node **phis;
	be_irg_t *birg = env->birg;
	be_lv_t  *lv   = be_get_birg_liveness(birg);
	fix_stack_walker_env_t walker_env;

	walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

	irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);

	/* nothing to be done if we didn't find any node, in fact we mustn't
	 * continue, as for endless loops incsp might have had no users and is bad
	 * now.
	 */
	len = ARR_LEN(walker_env.sp_nodes);
	if (len == 0) {
		DEL_ARR_F(walker_env.sp_nodes);
		return;
	}

	be_ssa_construction_init(&senv, birg);
	be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
	                               ARR_LEN(walker_env.sp_nodes));
	be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
	                                    ARR_LEN(walker_env.sp_nodes));

	if (lv != NULL) {
		len = ARR_LEN(walker_env.sp_nodes);
		for (i = 0; i < len; ++i) {
			be_liveness_update(lv, walker_env.sp_nodes[i]);
		}
		be_ssa_construction_update_liveness_phis(&senv, lv);
	}

	phis = be_ssa_construction_get_new_phis(&senv);

	/* set register requirements for stack phis */
	len = ARR_LEN(phis);
	for (i = 0; i < len; ++i) {
		ir_node *phi = phis[i];
		be_set_phi_reg_req(phi, env->sp_req);
		arch_set_irn_register(phi, env->arch_env->sp);
	}
	be_ssa_construction_destroy(&senv);

	DEL_ARR_F(walker_env.sp_nodes);
}

/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param env        the ABI environment
 * @param bl         the block to process
 * @param real_bias  the bias value
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
	int      omit_fp     = env->call->flags.bits.try_omit_fp;
	ir_node *irn;
	int      wanted_bias = real_bias;

	sched_foreach(bl, irn) {
		int ofs;

		/* Check if the node relates to an entity on the stack frame.
		 * If so, set the true offset (including the bias) for that
		 * node. */
		ir_entity *ent = arch_get_frame_entity(irn);
		if (ent != NULL) {
			int bias   = omit_fp ? real_bias : 0;
			int offset = get_stack_entity_offset(&env->frame, ent, bias);
			arch_set_frame_offset(irn, offset);
			DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
			     ent, offset, bias));
		}

		/* If the node modifies the stack pointer by a constant offset,
		 * record that in the bias. */
		ofs = arch_get_sp_bias(irn);

		if (be_is_IncSP(irn)) {
			/* fill in real stack frame size */
			if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
				ir_type *frame_type = get_irg_frame_type(env->birg->irg);
				ofs = (int) get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
				ir_type *frame_type = get_irg_frame_type(env->birg->irg);
				ofs = - (int)get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			}
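
			/* Round the stack pointer up to the next alignment boundary if
			 * requested. Worked example (hypothetical numbers): with
			 * stack_alignment = 4 the alignment is 1 << 4 = 16 bytes; if
			 * real_bias + ofs + between_size = 20, then delta = 20 & 15 = 4
			 * and the IncSP offset grows by 16 - 4 = 12 bytes, making the
			 * new total of 32 a multiple of 16 again. */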
			if (be_get_IncSP_align(irn)) {
				/* patch IncSP to produce an aligned stack pointer */
				ir_type *between_type = env->frame.between_type;
				int      between_size = get_type_size_bytes(between_type);
				int      alignment    = 1 << env->arch_env->stack_alignment;
				int      delta        = (real_bias + ofs + between_size) & (alignment - 1);

				assert(ofs >= 0);
				if (delta > 0) {
					be_set_IncSP_offset(irn, ofs + alignment - delta);
					real_bias += alignment - delta;
				}
			} else {
				/* adjust so real_bias corresponds with wanted_bias */
				int delta = wanted_bias - real_bias;

				assert(delta <= 0);
				if (delta != 0) {
					be_set_IncSP_offset(irn, ofs + delta);
					real_bias += delta;
				}
			}
		}

		real_bias   += ofs;
		wanted_bias += ofs;
	}

	assert(real_bias == wanted_bias);
	return real_bias;
}

/**
 * A helper struct for the bias walker.
 */
struct bias_walk {
	be_abi_irg_t *env;              /**< The ABI irg environment. */
	int          start_block_bias;  /**< The bias at the end of the start block. */
	int          between_size;      /**< The size of the between type. */
	ir_node      *start_block;      /**< The start block of the current graph. */
};

/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
	struct bias_walk *bw = data;
	if (bl != bw->start_block) {
		process_stack_bias(bw->env, bl, bw->start_block_bias);
	}
}

/**
 * Walker: finally lower all Sels of outer frame or parameter
 * entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx)
{
	be_abi_irg_t *env = ctx;
	ir_node      *ptr;
	ir_entity    *ent;
	ir_type      *owner;

	if (! is_Sel(sel))
		return;

	ent   = get_Sel_entity(sel);
	owner = get_entity_owner(ent);
	ptr   = get_Sel_ptr(sel);

	if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
		/* found access to outer frame or arguments */
		int offset = get_stack_entity_offset(&env->frame, ent, 0);

		if (offset != 0) {
			ir_node  *bl        = get_nodes_block(sel);
			dbg_info *dbgi      = get_irn_dbg_info(sel);
			ir_mode  *mode      = get_irn_mode(sel);
			ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);

			ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
		}
		exchange(sel, ptr);
	}
}

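/**
 * Computes the initial offset of the stack frame and fixes the stack bias
 * in all blocks of the graph; afterwards the Sels of inner functions that
 * still reference outer frame or parameter entities are lowered.
 */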
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
	ir_graph *irg = env->birg->irg;
	ir_type  *frame_tp;
	int       i;
	struct bias_walk bw;

	stack_frame_compute_initial_offset(&env->frame);
	/* stack_layout_dump(stdout, frame); */

	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
	bw.between_size     = get_type_size_bytes(env->frame.between_type);

	/* fix the bias in all other blocks */
	bw.env = env;
	bw.start_block = get_irg_start_block(irg);
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

	/* now fix inner functions: these still have Sel nodes to outer
	   frame and parameter entities */
	frame_tp = get_irg_frame_type(irg);
	for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
		ir_entity *ent = get_class_member(frame_tp, i);
		ir_graph  *irg = get_entity_irg(ent);

		if (irg != NULL) {
			irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
		}
	}
}

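/**
 * Returns the node that holds the value of the given callee-save register
 * in the current graph.
 */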
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, callee_save));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}

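/**
 * Returns the node that holds the value of the given ignore register in
 * the current graph.
 */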
ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, ignore));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}

/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi)
{
	return abi->call->flags.bits.try_omit_fp;
}

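/** Module constructor: registers the debug module of the backend ABI. */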
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
void be_init_abi(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}