2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
33 #include "irgraph_t.h"
36 #include "iredges_t.h"
39 #include "irprintf_t.h"
46 #include "raw_bitset.h"
57 #include "bessaconstr.h"
60 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
62 typedef struct _be_abi_call_arg_t {
63 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
64 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
65 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
66 unsigned callee : 1; /**< 1: someone called us (we are the callee). 0: we call another function (we are the caller). */
69 const arch_register_t *reg;
72 unsigned alignment; /**< stack alignment */
73 unsigned space_before; /**< allocate space before */
74 unsigned space_after; /**< allocate space after */
77 struct _be_abi_call_t {
78 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
79 int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
80 const be_abi_callbacks_t *cb;
81 ir_type *between_type;
83 const arch_register_class_t *cls_addr; /**< register class of the call address */
87 * The ABI information for the current birg.
89 struct _be_abi_irg_t {
90 be_irg_t *birg; /**< The back end IRG. */
92 const arch_env_t *arch_env;
93 survive_dce_t *dce_survivor;
95 be_abi_call_t *call; /**< The ABI call information. */
96 ir_type *method_type; /**< The type of the method of the IRG. */
98 ir_node *init_sp; /**< The node representing the stack pointer
99 at the start of the function. */
101 ir_node *start; /**< The be_Start params node. */
102 pmap *regs; /**< A map of all callee-save and ignore regs to
103 their Projs to the RegParams node. */
105 int start_block_bias; /**< The stack bias at the end of the start block. */
107 void *cb; /**< ABI Callback self pointer. */
109 pmap *keep_map; /**< mapping blocks to keep nodes. */
110 pset *ignore_regs; /**< Additional registers which shall be ignored. */
112 ir_node **calls; /**< flexible array containing all be_Call nodes */
114 arch_register_req_t *sp_req;
116 be_stack_layout_t frame; /**< The stack frame model. */
119 static heights_t *ir_heights;
121 /** Flag: if set, try to omit the frame pointer in all routines. */
122 static int be_omit_fp = 1;
124 /** Flag: if set, try to omit the frame pointer in leaf routines only. */
125 static int be_omit_leaf_fp = 1;
128       _    ____ ___    ____      _ _ _                _
129      / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
130     / _ \ |  _ \| |  | |  / _` | | | '_ \ / _` |/ __| |/ / __|
131    / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
132   /_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
134 These callbacks are used by the backend to set the parameters
135 for a specific call type.
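/*
 * A minimal usage sketch (not part of this file): a backend's get_call_abi
 * callback would describe its calling convention roughly like this. The
 * register set (my_param_regs, my_result_reg) and the "first two parameters
 * in registers" rule are hypothetical, and ABI_CONTEXT_BOTH is assumed to be
 * the combined callee/caller context from beabi.h:
 *
 *   static void my_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
 *   {
 *       int i, n = get_method_n_params(method_type);
 *       for (i = 0; i < n; ++i) {
 *           if (i < 2)
 *               be_abi_call_param_reg(abi, i, my_param_regs[i], ABI_CONTEXT_BOTH);
 *           else
 *               be_abi_call_param_stack(abi, i, mode_Iu, 4, 0, 0, ABI_CONTEXT_BOTH);
 *       }
 *       if (get_method_n_ress(method_type) > 0)
 *           be_abi_call_res_reg(abi, 0, my_result_reg, ABI_CONTEXT_BOTH);
 *   }
 */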
139 * Set compare function: compares two ABI call object arguments, returning 0 iff they are equal.
141 static int cmp_call_arg(const void *a, const void *b, size_t n)
143 const be_abi_call_arg_t *p = a, *q = b;
145 return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
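/* Note: like a qsort() comparator, this must return 0 iff both arguments are
 * equal. Since the hash used below (is_res * 128 + pos) ignores the context,
 * it is the callee flag in this comparison that keeps the callee and caller
 * variants of the same argument apart within one bucket. */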
149 * Get an ABI call object argument.
151 * @param call the abi call
152 * @param is_res true for call results, false for call arguments
153 * @param pos position of the argument
154 * @param callee context type - whether we are callee or caller
156 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
158 be_abi_call_arg_t arg;
161 memset(&arg, 0, sizeof(arg));
166 hash = is_res * 128 + pos;
168 return set_find(call->params, &arg, sizeof(arg), hash);
172 * Set an ABI call object argument.
174 static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
176 unsigned hash = arg->is_res * 128 + arg->pos;
177 if (context & ABI_CONTEXT_CALLEE) {
179 set_insert(call->params, arg, sizeof(*arg), hash);
181 if (context & ABI_CONTEXT_CALLER) {
183 set_insert(call->params, arg, sizeof(*arg), hash);
187 /* Set the flags for a call. */
188 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
194 /* Sets the number of bytes the stack frame is shrunk by the callee on return */
195 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
201 /* Set register class for call address */
202 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
204 call->cls_addr = cls;
208 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
209 ir_mode *load_mode, unsigned alignment,
210 unsigned space_before, unsigned space_after,
211 be_abi_context_t context)
213 be_abi_call_arg_t arg;
214 memset(&arg, 0, sizeof(arg));
215 assert(alignment > 0 && "Alignment must be greater than 0");
217 arg.load_mode = load_mode;
218 arg.alignment = alignment;
219 arg.space_before = space_before;
220 arg.space_after = space_after;
224 remember_call_arg(&arg, call, context);
227 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
229 be_abi_call_arg_t arg;
230 memset(&arg, 0, sizeof(arg));
237 remember_call_arg(&arg, call, context);
240 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
242 be_abi_call_arg_t arg;
243 memset(&arg, 0, sizeof(arg));
250 remember_call_arg(&arg, call, context);
253 /* Get the flags of an ABI call object. */
254 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
260 * Constructor for a new ABI call object.
262 * @param cls_addr register class of the call address
264 * @return the new ABI call object
266 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
268 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
271 call->params = new_set(cmp_call_arg, 16);
273 call->cls_addr = cls_addr;
275 call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
281 * Destructor for an ABI call object.
283 static void be_abi_call_free(be_abi_call_t *call)
285 del_set(call->params);
291  |  ___| __ __ _ _ __ ___   ___  | | | | __ _ _ __   __| | (_)_ __   __ _
292  | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
293  |  _|| |  | (_| | | | | | |  __/ |  _  | (_| | | | | (_| | | | | | | (_| |
294  |_|  |_|   \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
297 Handling of the stack frame. It is composed of three types:
298 1) The type of the arguments which are pushed on the stack.
299 2) The "between type" which consists of stuff the call of the
300 function pushes on the stack (like the return address and
301 the old base pointer for ia32).
302 3) The Firm frame type which consists of all local variables
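/*
 * Sketch for a typical downwards-growing stack (e.g. ia32), higher addresses
 * at the top, cf. the order[] array set up in stack_frame_init():
 *
 *   | argument type  |  order[2]: parameters passed on the stack
 *   | between type   |  order[1]: return address, old base pointer
 *   | frame type     |  order[0]: local variables and spill slots
 *   +----------------+  <- stack pointer
 */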
306 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
309 ir_type *t = get_entity_owner(ent);
310 int ofs = get_entity_offset(ent);
314 /* Find the type the entity is contained in. */
315 for (index = 0; index < N_FRAME_TYPES; ++index) {
316 if (frame->order[index] == t)
318 /* Add the size of all the types below the one of the entity to the entity's offset */
319 ofs += get_type_size_bytes(frame->order[index]);
322 /* correct the offset by the initial position of the frame pointer */
323 ofs -= frame->initial_offset;
325 /* correct the offset with the current bias. */
332 * Retrieve the entity with given offset from a frame type.
334 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
338 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
339 ir_entity *ent = get_compound_member(t, i);
340 if (get_entity_offset(ent) == offset)
347 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
349 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
350 ir_entity *ent = search_ent_with_offset(base, 0);
353 frame->initial_offset
354 = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
356 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
359 return frame->initial_offset;
363 * Initializes the frame layout from parts
365 * @param frame the stack layout that will be initialized
366 * @param args the stack argument layout type
367 * @param between the between layout type
368 * @param locals the method frame type
369 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
370 * @param param_map an array mapping method argument positions to the stack argument type
372 * @return the initialized stack layout
374 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
375 ir_type *between, ir_type *locals, int stack_dir,
376 ir_entity *param_map[])
378 frame->arg_type = args;
379 frame->between_type = between;
380 frame->frame_type = locals;
381 frame->initial_offset = 0;
382 frame->initial_bias = 0;
383 frame->stack_dir = stack_dir;
384 frame->order[1] = between;
385 frame->param_map = param_map;
388 frame->order[0] = args;
389 frame->order[2] = locals;
392 /* typical decreasing stack: locals have the
393 * lowest addresses, arguments the highest */
394 frame->order[0] = locals;
395 frame->order[2] = args;
407 Adjustment of the calls inside a graph.
412 * Transform a call node into a be_Call node.
414 * @param env The ABI environment for the current irg.
415 * @param irn The call node.
416 * @param curr_sp The stack pointer node to use.
417 * @return The stack pointer after the call.
419 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
421 ir_graph *irg = env->birg->irg;
422 const arch_env_t *arch_env = env->birg->main_env->arch_env;
423 ir_type *call_tp = get_Call_type(irn);
424 ir_node *call_ptr = get_Call_ptr(irn);
425 int n_params = get_method_n_params(call_tp);
426 ir_node *curr_mem = get_Call_mem(irn);
427 ir_node *bl = get_nodes_block(irn);
429 int stack_dir = arch_env->stack_dir;
430 const arch_register_t *sp = arch_env->sp;
431 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
432 ir_mode *mach_mode = sp->reg_class->mode;
433 struct obstack *obst = be_get_birg_obst(irg);
434 int no_alloc = call->flags.bits.frame_is_setup_on_call;
435 int n_res = get_method_n_ress(call_tp);
436 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
438 ir_node *res_proj = NULL;
439 int n_reg_params = 0;
440 int n_stack_params = 0;
443 pset_new_t destroyed_regs, states;
444 pset_new_iterator_t iter;
448 int n_reg_results = 0;
449 const arch_register_t *reg;
450 const ir_edge_t *edge;
452 int *stack_param_idx;
453 int i, n, destroy_all_regs;
456 pset_new_init(&destroyed_regs);
457 pset_new_init(&states);
459 /* Let the isa fill out the abi description for that call node. */
460 arch_env_get_call_abi(arch_env, call_tp, call);
462 /* Insert code to put the stack arguments on the stack. */
463 assert(get_Call_n_params(irn) == n_params);
464 assert(obstack_object_size(obst) == 0);
465 stack_param_idx = ALLOCAN(int, n_params);
466 for (i = 0; i < n_params; ++i) {
467 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
470 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
472 stack_size += round_up2(arg->space_before, arg->alignment);
473 stack_size += round_up2(arg_size, arg->alignment);
474 stack_size += round_up2(arg->space_after, arg->alignment);
476 stack_param_idx[n_stack_params++] = i;
480 /* Collect all arguments which are passed in registers. */
481 reg_param_idxs = ALLOCAN(int, n_params);
482 for (i = 0; i < n_params; ++i) {
483 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
484 if (arg && arg->in_reg) {
485 reg_param_idxs[n_reg_params++] = i;
490 * If the stack grows downwards, we do not want to store sequentially,
491 * and nobody else allocated the call frame,
492 * we allocate as much space on the stack as all parameters need, by
493 * moving the stack pointer along the stack's direction.
495 * Note: we also have to do this for stack_size == 0, because we may have
496 * to adjust stack alignment for the call.
498 if (stack_dir < 0 && !do_seq && !no_alloc) {
499 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
502 dbgi = get_irn_dbg_info(irn);
503 /* If there are some parameters which shall be passed on the stack. */
504 if (n_stack_params > 0) {
506 ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
510 * Reverse the list of stack parameters if call arguments are passed from left to right.
511 * We must reverse them again if they are pushed (not stored) and the stack
512 * direction is downwards.
514 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
515 for (i = 0; i < n_stack_params >> 1; ++i) {
516 int other = n_stack_params - i - 1;
517 int tmp = stack_param_idx[i];
518 stack_param_idx[i] = stack_param_idx[other];
519 stack_param_idx[other] = tmp;
523 curr_mem = get_Call_mem(irn);
525 in[n_in++] = curr_mem;
528 for (i = 0; i < n_stack_params; ++i) {
529 int p = stack_param_idx[i];
530 be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
531 ir_node *param = get_Call_param(irn, p);
532 ir_node *addr = curr_sp;
534 ir_type *param_type = get_method_param_type(call_tp, p);
535 int param_size = get_type_size_bytes(param_type) + arg->space_after;
538 * If we wanted to build the arguments sequentially,
539 * the stack pointer for the next argument must be incremented,
540 * and the memory value propagated.
544 addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
545 param_size + arg->space_before, 0);
546 add_irn_dep(curr_sp, curr_mem);
548 curr_ofs += arg->space_before;
549 curr_ofs = round_up2(curr_ofs, arg->alignment);
551 /* Make the expression to compute the argument's offset. */
553 ir_mode *constmode = mach_mode;
554 if (mode_is_reference(mach_mode)) {
557 addr = new_r_Const_long(irg, constmode, curr_ofs);
558 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
562 /* Insert a store for primitive arguments. */
563 if (is_atomic_type(param_type)) {
565 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
566 store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
567 mem = new_r_Proj(store, mode_M, pn_Store_M);
569 /* Make a mem copy for compound arguments. */
572 assert(mode_is_reference(get_irn_mode(param)));
573 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
574 mem = new_r_Proj(copy, mode_M, pn_CopyB_M_regular);
577 curr_ofs += param_size;
585 /* We only need the Sync if we didn't build the stores sequentially. */
587 if (n_stack_params >= 1) {
588 curr_mem = new_r_Sync(bl, n_in, in);
590 curr_mem = get_Call_mem(irn);
595 /* check for the return_twice property */
596 destroy_all_regs = 0;
597 if (is_SymConst_addr_ent(call_ptr)) {
598 ir_entity *ent = get_SymConst_entity(call_ptr);
600 if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
601 destroy_all_regs = 1;
603 ir_type *call_tp = get_Call_type(irn);
605 if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
606 destroy_all_regs = 1;
609 /* Put caller-save registers into the destroyed set and state registers into the states set */
610 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
612 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
613 for (j = 0; j < cls->n_regs; ++j) {
614 const arch_register_t *reg = arch_register_for_index(cls, j);
616 if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
617 if (! arch_register_type_is(reg, ignore))
618 pset_new_insert(&destroyed_regs, (void *) reg);
620 if (arch_register_type_is(reg, state)) {
621 pset_new_insert(&destroyed_regs, (void*) reg);
622 pset_new_insert(&states, (void*) reg);
627 if (destroy_all_regs) {
628 /* even if destroyed all is specified, neither SP nor FP are destroyed (else bad things will happen) */
629 pset_new_remove(&destroyed_regs, arch_env->sp);
630 pset_new_remove(&destroyed_regs, arch_env->bp);
633 /* collect all result Projs of the call */
634 res_projs = ALLOCANZ(ir_node*, n_res);
636 foreach_out_edge(irn, edge) {
637 const ir_edge_t *res_edge;
638 ir_node *irn = get_edge_src_irn(edge);
640 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
643 foreach_out_edge(irn, res_edge) {
645 ir_node *res = get_edge_src_irn(res_edge);
647 assert(is_Proj(res));
649 proj = get_Proj_proj(res);
650 assert(proj < n_res);
651 assert(res_projs[proj] == NULL);
652 res_projs[proj] = res;
658 /* TODO: this is not correct for cases where return values are passed
659 * on the stack, but no known ABI does this currently...
661 n_reg_results = n_res;
663 assert(obstack_object_size(obst) == 0);
665 in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
667 /* make the back end call node and set its register requirements. */
668 for (i = 0; i < n_reg_params; ++i) {
669 in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
672 /* add state registers ins */
673 foreach_pset_new(&states, reg, iter) {
674 const arch_register_class_t *cls = arch_register_get_class(reg);
675 #if 0 /* debug output, disabled */
676 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
677 ir_fprintf(stderr, "Adding %+F\n", regnode);
678 #endif
679 ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
680 in[n_ins++] = regnode;
682 assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
684 /* ins collected, build the call */
685 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
687 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
688 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
689 n_ins, in, get_Call_type(irn));
690 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
693 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
694 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
695 n_ins, in, get_Call_type(irn));
697 be_Call_set_pop(low_call, call->pop);
699 /* put the call into the list of all calls for later processing */
700 ARR_APP1(ir_node *, env->calls, low_call);
702 /* create new stack pointer */
703 curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
704 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
705 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
706 arch_set_irn_register(curr_sp, sp);
708 /* now handle results */
709 for (i = 0; i < n_res; ++i) {
711 ir_node *proj = res_projs[i];
712 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
714 /* return values on the stack are not supported yet */
718 shift the proj number to the right, since we will drop the
719 unspeakable Proj_T from the Call. Therefore, all real argument
720 Proj numbers must be increased by pn_be_Call_first_res
722 pn = i + pn_be_Call_first_res;
725 ir_type *res_type = get_method_res_type(call_tp, i);
726 ir_mode *mode = get_type_mode(res_type);
727 proj = new_r_Proj(low_call, mode, pn);
730 set_Proj_pred(proj, low_call);
731 set_Proj_proj(proj, pn);
735 pset_new_remove(&destroyed_regs, arg->reg);
740 Set the register class of the call address to
741 the backend provided class (default: stack pointer class)
743 be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
745 DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
747 /* Set the register classes and constraints of the Call parameters. */
748 for (i = 0; i < n_reg_params; ++i) {
749 int index = reg_param_idxs[i];
750 be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
751 assert(arg->reg != NULL);
753 be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
757 /* Set the register constraints of the results. */
758 for (i = 0; i < n_res; ++i) {
759 ir_node *proj = res_projs[i];
760 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
761 int pn = get_Proj_proj(proj);
764 be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
765 arch_set_irn_register(proj, arg->reg);
767 exchange(irn, low_call);
769 /* kill the ProjT node */
770 if (res_proj != NULL) {
774 /* Make additional projs for the caller save registers
775 and the Keep node which keeps them alive. */
777 const arch_register_t *reg;
781 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
782 pset_new_iterator_t iter;
785 n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
786 in = ALLOCAN(ir_node *, n_ins);
788 /* also keep the stack pointer */
789 set_irn_link(curr_sp, (void*) sp);
792 foreach_pset_new(&destroyed_regs, reg, iter) {
793 ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
795 /* Memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
796 be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
797 arch_set_irn_register(proj, reg);
799 set_irn_link(proj, (void*) reg);
804 for (i = 0; i < n_reg_results; ++i) {
805 ir_node *proj = res_projs[i];
806 const arch_register_t *reg = arch_get_irn_register(proj);
807 set_irn_link(proj, (void*) reg);
812 /* create the Keep for the caller save registers */
813 keep = be_new_Keep(bl, n, in);
814 for (i = 0; i < n; ++i) {
815 const arch_register_t *reg = get_irn_link(in[i]);
816 be_node_set_reg_class_in(keep, i, reg->reg_class);
820 /* Clean up the stack. */
821 assert(stack_size >= call->pop);
822 stack_size -= call->pop;
824 if (stack_size > 0) {
825 ir_node *mem_proj = NULL;
827 foreach_out_edge(low_call, edge) {
828 ir_node *irn = get_edge_src_irn(edge);
829 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
836 mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
837 keep_alive(mem_proj);
840 /* Clean up the stack frame or revert alignment fixes if we allocated it */
842 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
845 be_abi_call_free(call);
847 pset_new_destroy(&states);
848 pset_new_destroy(&destroyed_regs);
854 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
856 * @param alignment the minimum stack alignment
857 * @param size the node containing the non-aligned size
858 * @param block the block where new nodes are allocated
859 * @param dbg debug info for new nodes
861 * @return a node representing the aligned size
863 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
864 ir_node *block, dbg_info *dbg)
866 if (stack_alignment > 1) {
872 assert(is_po2(stack_alignment));
874 mode = get_irn_mode(size);
875 tv = new_tarval_from_long(stack_alignment-1, mode);
876 irg = get_Block_irg(block);
877 mask = new_r_Const(irg, tv);
878 size = new_rd_Add(dbg, block, size, mask, mode);
880 tv = new_tarval_from_long(-(long)stack_alignment, mode);
881 mask = new_r_Const(irg, tv);
882 size = new_rd_And(dbg, block, size, mask, mode);
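/* Worked example: for stack_alignment == 16 and size == 25, the Add/And pair
 * built above computes (25 + 15) & -16 == 32, i.e. the usual power-of-two
 * round-up (size + align - 1) & -align expressed as Firm nodes. */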
888 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
890 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
899 const ir_edge_t *edge;
904 unsigned stack_alignment;
906 assert(get_Alloc_where(alloc) == stack_alloc);
908 block = get_nodes_block(alloc);
909 irg = get_Block_irg(block);
912 type = get_Alloc_type(alloc);
914 foreach_out_edge(alloc, edge) {
915 ir_node *irn = get_edge_src_irn(edge);
917 assert(is_Proj(irn));
918 switch (get_Proj_proj(irn)) {
930 /* Beware: currently Alloc nodes without a result can occur;
931 only escape analysis removes them, and that phase runs only for object
932 oriented source. We kill the Alloc here. */
933 if (alloc_res == NULL && alloc_mem) {
934 exchange(alloc_mem, get_Alloc_mem(alloc));
938 dbg = get_irn_dbg_info(alloc);
939 count = get_Alloc_count(alloc);
941 /* we might need to multiply the count by the element size */
942 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
943 ir_mode *mode = get_irn_mode(count);
944 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
946 ir_node *cnst = new_rd_Const(dbg, irg, tv);
947 size = new_rd_Mul(dbg, block, count, cnst, mode);
952 /* The stack pointer will be modified in an unknown manner.
953 We therefore cannot omit the frame pointer. */
954 env->call->flags.bits.try_omit_fp = 0;
956 stack_alignment = 1 << env->arch_env->stack_alignment;
957 size = adjust_alloc_size(stack_alignment, size, block, dbg);
958 new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
959 set_irn_dbg_info(new_alloc, dbg);
961 if (alloc_mem != NULL) {
965 addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);
967 /* We need to sync the output mem of the AddSP with the input mem
968 edge into the alloc node. */
969 ins[0] = get_Alloc_mem(alloc);
971 sync = new_r_Sync(block, 2, ins);
973 exchange(alloc_mem, sync);
976 exchange(alloc, new_alloc);
978 /* fix projnum of alloca res */
979 set_Proj_proj(alloc_res, pn_be_AddSP_res);
981 curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
988 * The Free is transformed into a back end free node and connected to the stack nodes.
990 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
994 ir_node *subsp, *mem, *res, *size, *sync;
998 unsigned stack_alignment;
1001 assert(get_Free_where(free) == stack_alloc);
1003 block = get_nodes_block(free);
1004 irg = get_irn_irg(block);
1005 type = get_Free_type(free);
1006 sp_mode = env->arch_env->sp->reg_class->mode;
1007 dbg = get_irn_dbg_info(free);
1009 /* we might need to multiply the size by the element size */
1010 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
1011 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
1012 ir_node *cnst = new_rd_Const(dbg, irg, tv);
1013 ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
1017 size = get_Free_size(free);
1020 stack_alignment = 1 << env->arch_env->stack_alignment;
1021 size = adjust_alloc_size(stack_alignment, size, block, dbg);
1023 /* The stack pointer will be modified in an unknown manner.
1024 We therefore cannot omit the frame pointer. */
1025 env->call->flags.bits.try_omit_fp = 0;
1026 subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
1027 set_irn_dbg_info(subsp, dbg);
1029 mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
1030 res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);
1032 /* we need to sync the memory */
1033 in[0] = get_Free_mem(free);
1035 sync = new_r_Sync(block, 2, in);
1037 /* and make the SubSP dependent on the former memory */
1038 add_irn_dep(subsp, get_Free_mem(free));
1041 exchange(free, sync);
1048 * Check if a node is somehow data dependent on another one.
1049 * Both nodes must be in the same basic block.
1050 * @param n1 The first node.
1051 * @param n2 The second node.
1052 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
1054 static int dependent_on(ir_node *n1, ir_node *n2)
1056 assert(get_nodes_block(n1) == get_nodes_block(n2));
1058 return heights_reachable_in_block(ir_heights, n1, n2);
1061 static int cmp_call_dependency(const void *c1, const void *c2)
1063 ir_node *n1 = *(ir_node **) c1;
1064 ir_node *n2 = *(ir_node **) c2;
1067 Classical qsort() comparison function behavior:
1068 0 if both elements are equal,
1069 1 if the second is "smaller" than the first,
1070 -1 if the first is "smaller" than the second
1072 if (dependent_on(n1, n2))
1075 if (dependent_on(n2, n1))
1078 /* The nodes have no depth order, but we need a total order because qsort() is not stable. */
1080 return get_irn_idx(n1) - get_irn_idx(n2);
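/* Example: two calls with no dependency between them are ordered by their
 * node index, so the order is total and the qsort() result deterministic. */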
1084 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
1085 * Clears the irg_is_leaf flag if a Call is detected.
1087 static void link_ops_in_block_walker(ir_node *irn, void *data)
1089 be_abi_irg_t *env = data;
1090 ir_opcode code = get_irn_opcode(irn);
1092 if (code == iro_Call ||
1093 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1094 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1095 ir_node *bl = get_nodes_block(irn);
1096 void *save = get_irn_link(bl);
1098 if (code == iro_Call)
1099 env->call->flags.bits.irg_is_leaf = 0;
1101 set_irn_link(irn, save);
1102 set_irn_link(bl, irn);
1105 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
1106 ir_node *param = get_Builtin_param(irn, 0);
1107 tarval *tv = get_Const_tarval(param);
1108 unsigned long value = get_tarval_long(tv);
1109 /* use ebp, so the climbframe algo works... */
1111 env->call->flags.bits.try_omit_fp = 0;
1118 * Process all Call/Alloc/Free nodes inside a basic block.
1119 * Note that the link field of the block must contain a linked list of all
1120 * Call nodes inside the Block. We first order this list according to data dependency
1121 * and then connect the calls together.
1123 static void process_ops_in_block(ir_node *bl, void *data)
1125 be_abi_irg_t *env = data;
1126 ir_node *curr_sp = env->init_sp;
1133 for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {
1137 nodes = ALLOCAN(ir_node*, n_nodes);
1138 for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {
1142 /* If there were call nodes in the block. */
1147 /* order the call nodes according to data dependency */
1148 qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
1150 for (i = n_nodes - 1; i >= 0; --i) {
1151 ir_node *irn = nodes[i];
1153 DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1154 switch (get_irn_opcode(irn)) {
1157 /* The stack pointer will be modified due to a call. */
1158 env->call->flags.bits.try_omit_fp = 0;
1160 curr_sp = adjust_call(env, irn, curr_sp);
1163 if (get_Alloc_where(irn) == stack_alloc)
1164 curr_sp = adjust_alloc(env, irn, curr_sp);
1167 if (get_Free_where(irn) == stack_alloc)
1168 curr_sp = adjust_free(env, irn, curr_sp);
1171 panic("invalid call");
1175 /* Keep the last stack state in the block by tying it to a Keep node;
1176 * the Proj from calls is already kept */
1177 if (curr_sp != env->init_sp &&
1178 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1180 keep = be_new_Keep(bl, 1, nodes);
1181 pmap_insert(env->keep_map, bl, keep);
1185 set_irn_link(bl, curr_sp);
1189 * Adjust all call nodes in the graph to the ABI conventions.
1191 static void process_calls(be_abi_irg_t *env)
1193 ir_graph *irg = env->birg->irg;
1195 env->call->flags.bits.irg_is_leaf = 1;
1196 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
1198 ir_heights = heights_new(env->birg->irg);
1199 irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
1200 heights_free(ir_heights);
1204 * Computes the stack argument layout type.
1205 * Changes a possibly allocated value param type by moving
1206 * entities to the stack layout type.
1208 * @param env the ABI environment
1209 * @param call the current call ABI
1210 * @param method_type the method type
1211 * @param val_param_tp the value parameter type, will be destroyed
1212 * @param param_map an array mapping method arguments to the stack layout type
1214 * @return the stack argument layout type
1216 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
1217 ir_type *method_type, ir_type *val_param_tp,
1218 ir_entity ***param_map)
1220 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1221 int inc = env->birg->main_env->arch_env->stack_dir * dir;
1222 int n = get_method_n_params(method_type);
1223 int curr = inc > 0 ? 0 : n - 1;
1224 struct obstack *obst = be_get_birg_obst(env->irg);
1230 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1233 *param_map = map = OALLOCN(obst, ir_entity*, n);
1234 res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
1235 for (i = 0; i < n; ++i, curr += inc) {
1236 ir_type *param_type = get_method_param_type(method_type, curr);
1237 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);
1240 if (arg->on_stack) {
1241 if (val_param_tp != NULL) {
1242 /* the entity was already created, create a copy in the param type */
1243 ir_entity *val_ent = get_method_value_param_ent(method_type, i);
1244 arg->stack_ent = copy_entity_own(val_ent, res);
1245 set_entity_link(val_ent, arg->stack_ent);
1246 set_entity_link(arg->stack_ent, NULL);
1248 /* create a new entity */
1249 snprintf(buf, sizeof(buf), "param_%d", i);
1250 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1252 ofs += arg->space_before;
1253 ofs = round_up2(ofs, arg->alignment);
1254 set_entity_offset(arg->stack_ent, ofs);
1255 ofs += arg->space_after;
1256 ofs += get_type_size_bytes(param_type);
1257 map[i] = arg->stack_ent;
1260 set_type_size_bytes(res, ofs);
1261 set_type_state(res, layout_fixed);
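/*
 * Illustrative example: for int f(int a, int b, int c) where a and b are
 * passed in registers and only c is on the stack (4 byte size and alignment,
 * no extra space), the resulting struct holds exactly one "param_%d" entity
 * for c at offset 0, and the type gets size 4 and a fixed layout.
 */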
1266 const arch_register_t *reg;
1270 static int cmp_regs(const void *a, const void *b)
1272 const reg_node_map_t *p = a;
1273 const reg_node_map_t *q = b;
1275 if (p->reg->reg_class == q->reg->reg_class)
1276 return p->reg->index - q->reg->index;
1278 return p->reg->reg_class - q->reg->reg_class;
1281 static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
1284 int n = pmap_count(reg_map);
1287 foreach_pmap(reg_map, ent) {
1288 res[i].reg = ent->key;
1289 res[i].irn = ent->value;
1293 qsort(res, n, sizeof(res[0]), cmp_regs);
1297 * Creates a barrier: a be_Barrier node through which all registers in the given map (and the memory) are routed, so that none of these values can be moved across it.
1299 static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
1302 int n_regs = pmap_count(regs);
1308 in = ALLOCAN(ir_node*, n_regs+1);
1309 rm = ALLOCAN(reg_node_map_t, n_regs);
1310 reg_map_to_arr(rm, regs);
1311 for (n = 0; n < n_regs; ++n) {
1319 irn = be_new_Barrier(bl, n, in);
1321 for (n = 0; n < n_regs; ++n) {
1322 ir_node *pred = rm[n].irn;
1323 const arch_register_t *reg = rm[n].reg;
1324 arch_register_type_t add_type = 0;
1326 const backend_info_t *info;
1328 /* stupid workaround for now... as not all nodes report register
1330 info = be_get_info(skip_Proj(pred));
1331 if (info != NULL && info->out_infos != NULL) {
1332 const arch_register_req_t *ireq = arch_get_register_req_out(pred);
1333 if (ireq->type & arch_register_req_type_ignore)
1334 add_type |= arch_register_req_type_ignore;
1335 if (ireq->type & arch_register_req_type_produces_sp)
1336 add_type |= arch_register_req_type_produces_sp;
1339 proj = new_r_Proj(irn, get_irn_mode(pred), n);
1340 be_node_set_reg_class_in(irn, n, reg->reg_class);
1342 be_set_constr_single_reg_in(irn, n, reg, 0);
1343 be_set_constr_single_reg_out(irn, n, reg, add_type);
1344 arch_set_irn_register(proj, reg);
1346 pmap_insert(regs, (void *) reg, proj);
1350 *mem = new_r_Proj(irn, mode_M, n);
1357 * Creates a be_Return for a Return node.
1359 * @param env the abi environment
1360 * @param irn the Return node or NULL if there was none
1361 * @param bl the block where the be_Return should be placed
1362 * @param mem the current memory
1363 * @param n_res number of return results
1365 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
1366 ir_node *mem, int n_res)
1368 be_abi_call_t *call = env->call;
1369 const arch_env_t *arch_env = env->birg->main_env->arch_env;
1371 pmap *reg_map = pmap_create();
1372 ir_node *keep = pmap_get(env->keep_map, bl);
1379 const arch_register_t **regs;
1383 Get the valid stack node in this block.
1384 If we had a call in that block, there is a Keep constructed by process_calls()
1385 which points to the last stack modification in that block; we'll use
1386 it then. Otherwise we use the stack from the start block and let
1387 the SSA construction fix the usage.
1389 stack = be_abi_reg_map_get(env->regs, arch_env->sp);
1391 stack = get_irn_n(keep, 0);
1393 remove_End_keepalive(get_irg_end(env->birg->irg), keep);
1396 /* Insert results for Return into the register map. */
1397 for (i = 0; i < n_res; ++i) {
1398 ir_node *res = get_Return_res(irn, i);
1399 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1400 assert(arg->in_reg && "return value must be passed in register");
1401 pmap_insert(reg_map, (void *) arg->reg, res);
1404 /* Add uses of the callee save registers. */
1405 foreach_pmap(env->regs, ent) {
1406 const arch_register_t *reg = ent->key;
1407 if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1408 pmap_insert(reg_map, ent->key, ent->value);
1411 be_abi_reg_map_set(reg_map, arch_env->sp, stack);
1413 /* Make the Epilogue node and call the arch's epilogue maker. */
1414 create_barrier(bl, &mem, reg_map, 1);
1415 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1418 Maximum size of the in array for Return nodes is
1419 return args + callee save/ignore registers + memory + stack pointer
1421 in_max = pmap_count(reg_map) + n_res + 2;
1423 in = ALLOCAN(ir_node*, in_max);
1424 regs = ALLOCAN(arch_register_t const*, in_max);
1427 in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
1429 regs[1] = arch_env->sp;
1432 /* clear the SP entry, since it has already been added above. */
1433 pmap_insert(reg_map, (void *) arch_env->sp, NULL);
1434 for (i = 0; i < n_res; ++i) {
1435 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1437 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1438 regs[n++] = arg->reg;
1440 /* Clear the map entry to mark the register as processed. */
1441 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1444 /* grow the rest of the stuff. */
1445 foreach_pmap(reg_map, ent) {
1448 regs[n++] = ent->key;
1452 /* The in array for the new back end return is now ready. */
1454 dbgi = get_irn_dbg_info(irn);
1458 /* we have to pop the shadow parameter in case of struct returns */
1460 ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
1462 /* Set the register classes of the return's parameters accordingly. */
1463 for (i = 0; i < n; ++i) {
1464 if (regs[i] == NULL)
1467 be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
1470 /* Free the space of the Epilog's in array and the register <-> proj map. */
1471 pmap_destroy(reg_map);
1476 typedef struct ent_pos_pair ent_pos_pair;
1477 struct ent_pos_pair {
1478 ir_entity *ent; /**< a value param entity */
1479 int pos; /**< its parameter number */
1480 ent_pos_pair *next; /**< for linking */
1483 typedef struct lower_frame_sels_env_t {
1484 ent_pos_pair *value_param_list; /**< the list of all value param entities */
1485 ir_node *frame; /**< the current frame */
1486 const arch_register_class_t *sp_class; /**< register class of the stack pointer */
1487 const arch_register_class_t *link_class; /**< register class of the link pointer */
1488 ir_type *value_tp; /**< the value type if any */
1489 ir_type *frame_tp; /**< the frame type */
1490 int static_link_pos; /**< argument number of the hidden static link */
1491 } lower_frame_sels_env_t;
1494 * Return an entity from the backend for a value param entity.
1496 * @param ent a value param type entity
1497 * @param ctx context
1499 static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
1501 ir_entity *argument_ent = get_entity_link(ent);
1503 if (argument_ent == NULL) {
1504 /* we have NO argument entity yet: this is bad, as we will
1505 * need one for the backing store.
1508 ir_type *frame_tp = ctx->frame_tp;
1509 unsigned offset = get_type_size_bytes(frame_tp);
1510 ir_type *tp = get_entity_type(ent);
1511 unsigned align = get_type_alignment_bytes(tp);
1513 offset += align - 1;
1514 offset &= ~(align - 1);
1516 argument_ent = copy_entity_own(ent, frame_tp);
1518 /* must be automatic to set a fixed layout */
1519 set_entity_offset(argument_ent, offset);
1520 offset += get_type_size_bytes(tp);
1522 set_type_size_bytes(frame_tp, offset);
1523 set_entity_link(ent, argument_ent);
1525 return argument_ent;
1528 * Walker: Replaces Sels of frame type and
1529 * value param type entities by FrameAddress.
1530 * Links all used entities.
1532 static void lower_frame_sels_walker(ir_node *irn, void *data)
1534 lower_frame_sels_env_t *ctx = data;
1537 ir_node *ptr = get_Sel_ptr(irn);
1539 if (ptr == ctx->frame) {
1540 ir_entity *ent = get_Sel_entity(irn);
1541 ir_node *bl = get_nodes_block(irn);
1544 int is_value_param = 0;
1546 if (get_entity_owner(ent) == ctx->value_tp) {
1549 /* replace by its copy from the argument type */
1550 pos = get_struct_member_index(ctx->value_tp, ent);
1551 ent = get_argument_entity(ent, ctx);
1554 nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
1557 /* check if it's a param Sel and if we have not seen this entity before */
1558 if (is_value_param && get_entity_link(ent) == NULL) {
1564 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1566 set_entity_link(ent, ctx->value_param_list);
1573 * Check if a value parameter is transmitted as a register.
1574 * This might happen if the address of a parameter is taken which is
1575 * transmitted in registers.
1577 * Note that on some architectures this case must be handled specially
1578 * because the place of the backing store is determined by their ABI.
1580 * In the default case we move the entity to the frame type and create
1581 * a backing store into the first block.
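/*
 * Source-level example of the situation handled here:
 *
 *   int f(int x) { int *p = &x; ... }
 *
 * x may arrive in a register, but &x requires a memory location, so x gets
 * an entity in the frame type and the incoming register value is stored
 * there at the start of the first block.
 */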
1583 static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
1585 be_abi_call_t *call = env->call;
1586 ir_graph *irg = env->birg->irg;
1587 ent_pos_pair *entry, *new_list;
1589 int i, n = ARR_LEN(value_param_list);
1592 for (i = 0; i < n; ++i) {
1593 int pos = value_param_list[i].pos;
1594 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);
1597 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
1598 value_param_list[i].next = new_list;
1599 new_list = &value_param_list[i];
1602 if (new_list != NULL) {
1603 /* ok, change the graph */
1604 ir_node *start_bl = get_irg_start_block(irg);
1605 ir_node *first_bl = get_first_block_succ(start_bl);
1606 ir_node *frame, *imem, *nmem, *store, *mem, *args;
1607 optimization_state_t state;
1610 assert(first_bl && first_bl != start_bl);
1611 /* we had already removed critical edges, so the following
1612 assertion should always be true. */
1613 assert(get_Block_n_cfgpreds(first_bl) == 1);
1615 /* now create backing stores */
1616 frame = get_irg_frame(irg);
1617 imem = get_irg_initial_mem(irg);
1619 save_optimization_state(&state);
1621 nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
1622 restore_optimization_state(&state);
1624 /* reroute all edges to the new memory source */
1625 edges_reroute(imem, nmem, irg);
1629 args = get_irg_args(irg);
1630 for (entry = new_list; entry != NULL; entry = entry->next) {
1632 ir_type *tp = get_entity_type(entry->ent);
1633 ir_mode *mode = get_type_mode(tp);
1636 /* address for the backing store */
1637 addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
1640 mem = new_r_Proj(store, mode_M, pn_Store_M);
1642 /* the backing store itself */
1643 store = new_r_Store(first_bl, mem, addr,
1644 new_r_Proj(args, mode, i), 0);
1646 /* the new memory Proj gets the last Proj from store */
1647 set_Proj_pred(nmem, store);
1648 set_Proj_proj(nmem, pn_Store_M);
1650 /* move all entities to the frame type */
1651 frame_tp = get_irg_frame_type(irg);
1652 offset = get_type_size_bytes(frame_tp);
1654 /* we will add new entities: set the layout to undefined */
1655 assert(get_type_state(frame_tp) == layout_fixed);
1656 set_type_state(frame_tp, layout_undefined);
1657 for (entry = new_list; entry != NULL; entry = entry->next) {
1658 ir_entity *ent = entry->ent;
1660 /* If the entity is still on the argument type, move it to the frame type.
1661 This happens if the value_param type was built due to compound
1663 if (get_entity_owner(ent) != frame_tp) {
1664 ir_type *tp = get_entity_type(ent);
1665 unsigned align = get_type_alignment_bytes(tp);
1667 offset += align - 1;
1668 offset &= ~(align - 1);
1669 set_entity_owner(ent, frame_tp);
1670 add_class_member(frame_tp, ent);
1671 /* must be automatic to set a fixed layout */
1672 set_entity_offset(ent, offset);
1673 offset += get_type_size_bytes(tp);
1676 set_type_size_bytes(frame_tp, offset);
1677 /* fix the layout again */
1678 set_type_state(frame_tp, layout_fixed);
1683 * The start block has no jump; instead it has an initial exec Proj.
1684 * The backend wants to handle all blocks the same way, so we replace
1685 * the out cfg edge with a real jump.
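/* Sketch: the successor's block input, formerly the initial exec Proj of the
 * Start node, is replaced by a Jmp created in the start block; that Jmp then
 * becomes the new initial exec of the graph. */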
1687 static void fix_start_block(ir_graph *irg)
1689 ir_node *initial_X = get_irg_initial_exec(irg);
1690 ir_node *start_block = get_irg_start_block(irg);
1691 const ir_edge_t *edge;
1693 assert(is_Proj(initial_X));
1695 foreach_out_edge(initial_X, edge) {
1696 ir_node *block = get_edge_src_irn(edge);
1698 if (is_Anchor(block))
1700 if (block != start_block) {
1701 ir_node *jmp = new_r_Jmp(start_block);
1702 set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
1703 set_irg_initial_exec(irg, jmp);
1707 panic("Initial exec has no follow block in %+F", irg);
1711 * Update the entity of Sels that access the outer value parameters.
1713 static void update_outer_frame_sels(ir_node *irn, void *env)
1715 lower_frame_sels_env_t *ctx = env;
1722 ptr = get_Sel_ptr(irn);
1723 if (! is_arg_Proj(ptr))
1725 if (get_Proj_proj(ptr) != ctx->static_link_pos)
1727 ent = get_Sel_entity(irn);
1729 if (get_entity_owner(ent) == ctx->value_tp) {
1730 /* replace by its copy from the argument type */
1731 pos = get_struct_member_index(ctx->value_tp, ent);
1732 ent = get_argument_entity(ent, ctx);
1733 set_Sel_entity(irn, ent);
1735 /* check if we have not seen this entity before */
1736 if (get_entity_link(ent) == NULL) {
1742 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1744 set_entity_link(ent, ctx->value_param_list);
1750 * Fix access to outer local variables.
1752 static void fix_outer_variable_access(be_abi_irg_t *env,
1753 lower_frame_sels_env_t *ctx)
1759 for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
1760 ir_entity *ent = get_class_member(ctx->frame_tp, i);
1762 if (! is_method_entity(ent))
1765 irg = get_entity_irg(ent);
1770 * FIXME: find the number of the static link parameter;
1771 * for now we assume 0 here
1773 ctx->static_link_pos = 0;
1775 irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
1780 * Modify the irg itself and the frame type.
1782 static void modify_irg(be_abi_irg_t *env)
1784 be_abi_call_t *call = env->call;
1785 const arch_env_t *arch_env= env->birg->main_env->arch_env;
1786 const arch_register_t *sp = arch_env->sp;
1787 ir_graph *irg = env->birg->irg;
1790 ir_node *new_mem_proj;
1792 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1793 struct obstack *obst = be_get_birg_obst(irg);
1798 unsigned frame_size;
1801 const arch_register_t *fp_reg;
1802 ir_node *frame_pointer;
1806 const ir_edge_t *edge;
1807 ir_type *arg_type, *bet_type, *tp;
1808 lower_frame_sels_env_t ctx;
1809 ir_entity **param_map;
1811 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1813 /* Must fetch memory here, otherwise the start Barrier gets the wrong
1814 * memory, which leads to loops in the DAG. */
1815 old_mem = get_irg_initial_mem(irg);
1817 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1819 /* set the links of all frame entities to NULL; we use them
1820 to detect if an entity is already linked in the value_param_list */
1821 tp = get_method_value_param_type(method_type);
1824 /* clear the links of the clone type and let the
1825 original entities point to their clones */
1826 for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
1827 ir_entity *mem = get_struct_member(tp, i);
1828 set_entity_link(mem, NULL);
1832 arg_type = compute_arg_type(env, call, method_type, tp, ¶m_map);
1834 /* Convert the Sel nodes in the irg to frame addr nodes: */
1835 ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
1836 ctx.frame = get_irg_frame(irg);
1837 ctx.sp_class = env->arch_env->sp->reg_class;
1838 ctx.link_class = env->arch_env->link_class;
1839 ctx.frame_tp = get_irg_frame_type(irg);
1841 /* layout the stackframe now */
1842 if (get_type_state(ctx.frame_tp) == layout_undefined) {
1843 default_layout_compound_type(ctx.frame_tp);
1846 /* we will possibly add new entities to the frame: set the layout to undefined */
1847 assert(get_type_state(ctx.frame_tp) == layout_fixed);
1848 set_type_state(ctx.frame_tp, layout_undefined);
1850 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1852 /* fix the frame type layout again */
1853 set_type_state(ctx.frame_tp, layout_fixed);
1854 /* align the stack frame to 4 bytes */
1855 frame_size = get_type_size_bytes(ctx.frame_tp);
1856 if (frame_size % 4 != 0) {
1857 set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
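/* e.g. a 13 byte frame is padded to 13 + 4 - (13 % 4) = 16 bytes */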
1860 env->regs = pmap_create();
1862 n_params = get_method_n_params(method_type);
1863 args = OALLOCNZ(obst, ir_node*, n_params);
1866 * For inner functions we must now fix access to outer frame entities.
1868 fix_outer_variable_access(env, &ctx);
1870 /* Check if a value parameter is transmitted as a register.
1871 * This might happen if the address of a parameter is taken which is
1872 * transmitted in registers.
1874 * Note that on some architectures this case must be handled specially
1875 * because the place of the backing store is determined by their ABI.
1877 * In the default case we move the entity to the frame type and create
1878 * a backing store into the first block.
1880 fix_address_of_parameter_access(env, ctx.value_param_list);
1882 DEL_ARR_F(ctx.value_param_list);
1883 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1885 /* Fill the argument vector */
1886 arg_tuple = get_irg_args(irg);
1887 foreach_out_edge(arg_tuple, edge) {
1888 ir_node *irn = get_edge_src_irn(edge);
1889 if (! is_Anchor(irn)) {
1890 int nr = get_Proj_proj(irn);
1892 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1896 bet_type = call->cb->get_between_type(env->cb);
1897 stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
1899 /* Count the register params and add them to the number of Projs for the RegParams node */
1900 for (i = 0; i < n_params; ++i) {
1901 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
1902 if (arg->in_reg && args[i]) {
1903 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1904 assert(i == get_Proj_proj(args[i]));
1906 /* For now, associate the register with the old Proj from Start representing that argument. */
1907 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1908 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1912 /* Collect all callee-save registers */
1913 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
1914 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
1915 for (j = 0; j < cls->n_regs; ++j) {
1916 const arch_register_t *reg = &cls->regs[j];
1917 if (arch_register_type_is(reg, callee_save) ||
1918 arch_register_type_is(reg, state)) {
1919 pmap_insert(env->regs, (void *) reg, NULL);
1924 /* handle start block here (place a jump in the block) */
1925 fix_start_block(irg);
1927 pmap_insert(env->regs, (void *) sp, NULL);
1928 pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1929 start_bl = get_irg_start_block(irg);
1930 env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1933 * Make Proj nodes for the callee-save registers and
1934 * memorize them, since Return nodes get those as inputs.
1936 * Note that if a register corresponds to an argument, the regs map contains
1937 * the old Proj from Start for that argument.
1940 rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
1941 reg_map_to_arr(rm, env->regs);
1942 for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1943 arch_register_t *reg = (void *) rm[i].reg;
1944 ir_mode *mode = reg->reg_class->mode;
1946 arch_register_req_type_t add_type = 0;
1950 add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
1953 proj = new_r_Proj(env->start, mode, nr + 1);
1954 pmap_insert(env->regs, (void *) reg, proj);
1955 be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
1956 arch_set_irn_register(proj, reg);
1958 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1961 /* create a new initial memory proj */
1962 assert(is_Proj(old_mem));
1963 arch_set_out_register_req(env->start, 0, arch_no_register_req);
1964 new_mem_proj = new_r_Proj(env->start, mode_M, 0);
1966 set_irg_initial_mem(irg, mem);
1968 /* Generate the Prologue */
1969 fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
1971 /* do the stack allocation BEFORE the barrier, or spill code
1972 might be added before it */
1973 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1974 env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
1975 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1977 create_barrier(start_bl, &mem, env->regs, 0);
1979 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1980 arch_set_irn_register(env->init_sp, sp);
1982 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1983 set_irg_frame(irg, frame_pointer);
1984 pset_insert_ptr(env->ignore_regs, fp_reg);
1986 /* rewire old mem users to new mem */
1987 exchange(old_mem, mem);
1989 /* keep the mem (for functions with an endless loop = no return) */
1992 set_irg_initial_mem(irg, mem);
1994 /* Now, introduce stack param nodes for all parameters passed on the stack */
1995 for (i = 0; i < n_params; ++i) {
1996 ir_node *arg_proj = args[i];
1997 ir_node *repl = NULL;
1999 if (arg_proj != NULL) {
2000 be_abi_call_arg_t *arg;
2001 ir_type *param_type;
2002 int nr = get_Proj_proj(arg_proj);
2005 nr = MIN(nr, n_params);
2006 arg = get_call_arg(call, 0, nr, 1);
2007 param_type = get_method_param_type(method_type, nr);
2010 repl = pmap_get(env->regs, (void *) arg->reg);
2011 } else if (arg->on_stack) {
2012 ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
2014 /* For atomic parameters which are actually used, we create a Load node. */
2015 if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
2016 ir_mode *mode = get_type_mode(param_type);
2017 ir_mode *load_mode = arg->load_mode;
2019 ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
2020 repl = new_r_Proj(load, load_mode, pn_Load_res);
2022 if (mode != load_mode) {
2023 repl = new_r_Conv(start_bl, repl, mode);
2026 /* The stack parameter is not primitive (it is a struct or array);
2027 * we thus create a node representing the parameter's address
2033 assert(repl != NULL);
2035 /* Beware: the mode of the register parameters is always the mode of the register class,
2036 which may be wrong. Add Convs then. */
2037 mode = get_irn_mode(args[i]);
2038 if (mode != get_irn_mode(repl)) {
2039 repl = new_r_Conv(get_nodes_block(repl), repl, mode);
2041 exchange(args[i], repl);
2045 /* the arg proj is not needed anymore and should only be used by the anchor */
2046 assert(get_irn_n_edges(arg_tuple) == 1);
2047 kill_node(arg_tuple);
2048 set_irg_args(irg, new_r_Bad(irg));
2050 /* All Return nodes hang on the end block, so look for them there. */
2051 end = get_irg_end_block(irg);
2052 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
2053 ir_node *irn = get_Block_cfgpred(end, i);
2055 if (is_Return(irn)) {
2056 ir_node *blk = get_nodes_block(irn);
2057 ir_node *mem = get_Return_mem(irn);
2058 ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
2063 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then;
2064 the code is dead and will never be executed. */
2067 /** Fix the state inputs of calls that still hang on unknowns */
2068 static void fix_call_state_inputs(be_abi_irg_t *env)
2070 const arch_env_t *arch_env = env->arch_env;
2072 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
2074 /* Collect the state registers */
2075 n = arch_env_get_n_reg_class(arch_env);
2076 for (i = 0; i < n; ++i) {
2078 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
2079 for (j = 0; j < cls->n_regs; ++j) {
2080 const arch_register_t *reg = arch_register_for_index(cls, j);
2081 if (arch_register_type_is(reg, state)) {
2082 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
2087 n = ARR_LEN(env->calls);
2088 n_states = ARR_LEN(stateregs);
2089 for (i = 0; i < n; ++i) {
2091 ir_node *call = env->calls[i];
2093 arity = get_irn_arity(call);
2095 /* the state reg inputs are the last n inputs of the calls */
2096 for (s = 0; s < n_states; ++s) {
2097 int inp = arity - n_states + s;
2098 const arch_register_t *reg = stateregs[s];
2099 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
2101 set_irn_n(call, inp, regnode);
2105 DEL_ARR_F(stateregs);

/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
    ir_type   *type   = get_entity_type(method);
    ident     *old_id = get_entity_ld_ident(method);
    ident     *id     = id_mangle3("", old_id, "$stub");
    ir_type   *parent = be->pic_trampolines_type;
    ir_entity *ent    = new_entity(parent, old_id, type);
    set_entity_ld_ident(ent, id);
    set_entity_visibility(ent, ir_visibility_private);

    return ent;
}
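
/* Naming sketch: a method with linker name "foo" gets a private stub entity
 * with linker name "foo$stub", placed in the trampoline segment. */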

/**
 * Returns the trampoline entity for the given method.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
    ir_entity *result = pmap_get(env->ent_trampoline_map, method);
    if (result == NULL) {
        result = create_trampoline(env, method);
        pmap_insert(env->ent_trampoline_map, method, result);
    }

    return result;
}

/**
 * Create a pic symbol entity for the given entity.
 */
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
    ident     *old_id = get_entity_ld_ident(entity);
    ident     *id     = id_mangle3("", old_id, "$non_lazy_ptr");
    ir_type   *e_type = get_entity_type(entity);
    ir_type   *type   = new_type_pointer(e_type);
    ir_type   *parent = be->pic_symbols_type;
    ir_entity *ent    = new_entity(parent, old_id, type);
    set_entity_ld_ident(ent, id);
    set_entity_visibility(ent, ir_visibility_private);

    return ent;
}
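
/* Naming sketch: for an entity "bar" this creates a pointer-typed entity
 * with linker name "bar$non_lazy_ptr" in the pic symbols segment; it is
 * expected to hold bar's address at runtime. */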

/**
 * Returns the pic symbol entity for the given entity.
 */
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
    ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
    if (result == NULL) {
        result = create_pic_symbol(env, entity);
        pmap_insert(env->ent_pic_symbol_map, entity, result);
    }

    return result;
}

/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
    return get_entity_visibility(entity) != ir_visibility_external
        && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}
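
/* Examples: a file-local (static) entity is defined in the current module
 * and can be addressed relative to the pic base; an external entity, or one
 * with merge linkage (e.g. a weak definition), may be resolved in another
 * module and needs the indirections built below. */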

/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
    ir_graph      *irg;
    ir_node       *pic_base;
    ir_node       *add;
    ir_node       *block;
    ir_mode       *mode;
    ir_node       *load;
    ir_node       *load_res;
    ir_entity     *entity;
    dbg_info      *dbgi;
    int            arity, i;
    be_abi_irg_t  *env = data;
    be_main_env_t *be  = env->birg->main_env;

    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        ir_node   *pred = get_irn_n(node, i);
        ir_entity *pic_symbol;
        ir_node   *pic_symconst;

        if (!is_SymConst(pred))
            continue;

        entity = get_SymConst_entity(pred);
        block  = get_nodes_block(pred);
        irg    = get_irn_irg(pred);

        /* calls can jump to relative addresses, so we can directly jump to
           the (relatively) known call address or the trampoline */
        if (i == 1 && is_Call(node)) {
            ir_entity *trampoline;
            ir_node   *trampoline_const;

            if (can_address_relative(entity))
                continue;

            dbgi             = get_irn_dbg_info(pred);
            trampoline       = get_trampoline(be, entity);
            trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
                                                        trampoline, NULL);
            set_irn_n(node, i, trampoline_const);
            continue;
        }

        /* everything else is accessed relative to EIP */
        mode     = get_irn_mode(pred);
        pic_base = arch_code_generator_get_pic_base(env->birg->cg);

        /* all ok now for locally constructed stuff */
        if (can_address_relative(entity)) {
            ir_node *add = new_r_Add(block, pic_base, pred, mode);

            /* make sure the walker doesn't visit this add again */
            mark_irn_visited(add);
            set_irn_n(node, i, add);
            continue;
        }

        /* get entry from pic symbol segment */
        dbgi         = get_irn_dbg_info(pred);
        pic_symbol   = get_pic_symbol(be, entity);
        pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
                                                pic_symbol, NULL);
        add = new_r_Add(block, pic_base, pic_symconst, mode);
        mark_irn_visited(add);

        /* we need an extra indirection for global data outside our current
           module. The loads are always safe and can therefore float
           and need no memory input */
        load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
        load_res = new_r_Proj(load, mode, pn_Load_res);

        set_irn_n(node, i, load_res);
    }
}
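
/* Resulting access patterns (sketch):
 *   call target, addressable relatively:  Call(SymConst(entity), ...)
 *   call target, otherwise:               Call(SymConst(entity$stub), ...)
 *   data, addressable relatively:         Add(pic_base, SymConst(entity))
 *   data, otherwise:
 *       Proj_res(Load(Add(pic_base, SymConst(entity$non_lazy_ptr)))) */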

be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
    be_abi_irg_t   *env       = XMALLOC(be_abi_irg_t);
    ir_node        *old_frame = get_irg_frame(birg->irg);
    ir_graph       *irg       = birg->irg;
    struct obstack *obst      = be_get_birg_obst(irg);

    pmap_entry *ent;
    ir_node    *dummy;
    unsigned   *limited_bitset;
    arch_register_req_t *sp_req;

    be_omit_fp      = birg->main_env->options->omit_fp;
    be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;

    env->birg        = birg;
    env->arch_env    = birg->main_env->arch_env;
    env->method_type = get_entity_type(get_irg_entity(irg));
    env->call        = be_abi_call_new(env->arch_env->sp->reg_class);
    arch_env_get_call_abi(env->arch_env, env->method_type, env->call);

    env->ignore_regs  = pset_new_ptr_default();
    env->keep_map     = pmap_create();
    env->dce_survivor = new_survive_dce();

    sp_req = OALLOCZ(obst, arch_register_req_t);
    env->sp_req = sp_req;

    sp_req->type = arch_register_req_type_limited
                 | arch_register_req_type_produces_sp;
    sp_req->cls  = arch_register_get_class(env->arch_env->sp);

    limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
    rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
    sp_req->limited = limited_bitset;
    if (env->arch_env->sp->type & arch_register_type_ignore) {
        sp_req->type |= arch_register_req_type_ignore;
    }

    env->init_sp = dummy = new_r_Dummy(irg, env->arch_env->sp->reg_class->mode);

    env->calls = NEW_ARR_F(ir_node*, 0);

    if (birg->main_env->options->pic) {
        irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
    }

    /* Lower all call nodes in the IRG. */
    process_calls(env);

    /*
       Beware: init backend abi call object after processing calls,
       otherwise some information might not be available yet.
     */
    env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);

    /* Process the IRG */
    modify_irg(env);

    /* fix call inputs for state registers */
    fix_call_state_inputs(env);

    /* We don't need the keep map anymore. */
    pmap_destroy(env->keep_map);
    env->keep_map = NULL;

    /* calls array is not needed anymore */
    DEL_ARR_F(env->calls);
    env->calls = NULL;

    /* reroute the stack origin of the calls to the true stack origin. */
    exchange(dummy, env->init_sp);
    exchange(old_frame, get_irg_frame(irg));

    /* Make some important node pointers survive the dead node elimination. */
    survive_dce_register_irn(env->dce_survivor, &env->init_sp);
    foreach_pmap(env->regs, ent) {
        survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
    }

    env->call->cb->done(env->cb);
    env->cb = NULL;

    return env;
}
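
/* Typical driver sequence (a sketch; the precise order is determined by the
 * backend driver, not by this file):
 *
 *     be_abi_irg_t *abi = be_abi_introduce(birg);
 *     ...scheduling, register allocation...
 *     be_abi_fix_stack_nodes(abi);   // rewire all stack pointer producers
 *     be_abi_fix_stack_bias(abi);    // assign the final frame offsets
 *     be_abi_free(abi);
 */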

void be_abi_free(be_abi_irg_t *env)
{
    be_abi_call_free(env->call);
    free_survive_dce(env->dce_survivor);
    del_pset(env->ignore_regs);
    pmap_destroy(env->regs);
    free(env);
}

void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
    arch_register_t *reg;

    for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
        if (reg->reg_class == cls)
            bitset_set(bs, reg->index);
}

void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
    unsigned         i;
    arch_register_t *reg;

    for (i = 0; i < cls->n_regs; ++i) {
        if (arch_register_type_is(&cls->regs[i], ignore))
            continue;

        rbitset_set(raw_bitset, i);
    }

    for (reg = pset_first(abi->ignore_regs); reg != NULL;
         reg = pset_next(abi->ignore_regs)) {
        if (reg->reg_class != cls)
            continue;

        rbitset_clear(raw_bitset, reg->index);
    }
}
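
/* The two functions above are complementary views of the same information:
 * be_abi_put_ignore_regs sets a bit for every ignored register of cls, while
 * be_abi_set_non_ignore_regs yields a raw bitset of the usable ones. E.g.
 * for a hypothetical class {r0, r1, r2, r3} with r3 ignored, the former
 * produces {r3} and the latter {r0, r1, r2}. */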

/* Returns the stack layout from an abi environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
{
    return &abi->frame;
}

/*
   _____ _        ____  _             _
  |  ___(_)_  __ / ___|| |_ __ _  ___| | __
  | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
  |  _| | |>  <   ___) | || (_| | (__|   <
  |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\

*/

typedef ir_node **node_array;

typedef struct fix_stack_walker_env_t {
    node_array sp_nodes;
} fix_stack_walker_env_t;

/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
    ir_node                   *insn = node;
    fix_stack_walker_env_t    *env  = data;
    const arch_register_req_t *req;

    if (is_Proj(node)) {
        insn = get_Proj_pred(node);
    }

    if (arch_irn_get_n_outs(insn) == 0)
        return;

    req = arch_get_register_req_out(node);
    if (! (req->type & arch_register_req_type_produces_sp))
        return;

    ARR_APP1(ir_node*, env->sp_nodes, node);
}

void be_abi_fix_stack_nodes(be_abi_irg_t *env)
{
    be_ssa_construction_env_t senv;
    int i, len;
    ir_node **phis;
    be_irg_t *birg = env->birg;
    be_lv_t *lv = be_get_birg_liveness(birg);
    fix_stack_walker_env_t walker_env;

    walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

    irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);

    /* nothing to be done if we didn't find any node; in fact we mustn't
     * continue, as for endless loops incsp might have had no users and is bad
     * now */
    len = ARR_LEN(walker_env.sp_nodes);
    if (len == 0) {
        DEL_ARR_F(walker_env.sp_nodes);
        return;
    }

    be_ssa_construction_init(&senv, birg);
    be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
                                   ARR_LEN(walker_env.sp_nodes));
    be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
                                        ARR_LEN(walker_env.sp_nodes));

    if (lv != NULL) {
        len = ARR_LEN(walker_env.sp_nodes);
        for (i = 0; i < len; ++i) {
            be_liveness_update(lv, walker_env.sp_nodes[i]);
        }
        be_ssa_construction_update_liveness_phis(&senv, lv);
    }

    phis = be_ssa_construction_get_new_phis(&senv);

    /* set register requirements for stack phis */
    len = ARR_LEN(phis);
    for (i = 0; i < len; ++i) {
        ir_node *phi = phis[i];
        be_set_phi_reg_req(phi, env->sp_req);
        arch_set_irn_register(phi, env->arch_env->sp);
    }
    be_ssa_construction_destroy(&senv);

    DEL_ARR_F(walker_env.sp_nodes);
}

/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param env        the abi environment
 * @param bl         the block to process
 * @param real_bias  the bias value
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
    int      omit_fp     = env->call->flags.bits.try_omit_fp;
    int      wanted_bias = real_bias;
    ir_node *irn;

    sched_foreach(bl, irn) {
        int ofs;

        /*
           Check if the node relates to an entity on the stack frame.
           If so, set the true offset (including the bias) for that
           node.
         */
        ir_entity *ent = arch_get_frame_entity(irn);
        if (ent != NULL) {
            int bias   = omit_fp ? real_bias : 0;
            int offset = get_stack_entity_offset(&env->frame, ent, bias);
            arch_set_frame_offset(irn, offset);
            DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
                 ent, offset, bias));
        }

        /*
         * If the node modifies the stack pointer by a constant offset,
         * record that in the bias.
         */
        ofs = arch_get_sp_bias(irn);

        if (be_is_IncSP(irn)) {
            /* fill in real stack frame size */
            if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
                ir_type *frame_type = get_irg_frame_type(env->birg->irg);
                ofs = (int) get_type_size_bytes(frame_type);
                be_set_IncSP_offset(irn, ofs);
            } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
                ir_type *frame_type = get_irg_frame_type(env->birg->irg);
                ofs = - (int) get_type_size_bytes(frame_type);
                be_set_IncSP_offset(irn, ofs);
            }

            if (be_get_IncSP_align(irn)) {
                /* patch IncSP to produce an aligned stack pointer */
                ir_type *between_type = env->frame.between_type;
                int      between_size = get_type_size_bytes(between_type);
                int      alignment    = 1 << env->arch_env->stack_alignment;
                int      delta        = (real_bias + ofs + between_size) & (alignment - 1);

                if (delta > 0) {
                    be_set_IncSP_offset(irn, ofs + alignment - delta);
                    real_bias += alignment - delta;
                }
            } else {
                /* adjust so real_bias corresponds with wanted_bias */
                int delta = wanted_bias - real_bias;

                assert(delta <= 0);
                if (delta != 0) {
                    be_set_IncSP_offset(irn, ofs + delta);
                    real_bias += delta;
                }
            }
        }

        real_bias   += ofs;
        wanted_bias += ofs;
    }

    assert(real_bias == wanted_bias);
    return real_bias;
}
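
/* Worked example (illustrative numbers): let real_bias = 4 when an aligning
 * IncSP with ofs = 8 is reached, with between_size = 0 and a 16-byte stack
 * alignment. Then delta = (4 + 8 + 0) & 15 = 12, the IncSP offset is patched
 * to 8 + 16 - 12 = 12, and after adding ofs the bias becomes 4 + 12 = 16,
 * i.e. the stack pointer ends up on a 16-byte boundary. */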

/**
 * A helper struct for the bias walker.
 */
struct bias_walk {
    be_abi_irg_t *env;              /**< The ABI irg environment. */
    int          start_block_bias;  /**< The bias at the end of the start block. */
    int          between_size;      /**< The size of the between type. */
    ir_node      *start_block;      /**< The start block of the current graph. */
};

/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block.
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
    struct bias_walk *bw = data;
    if (bl != bw->start_block) {
        process_stack_bias(bw->env, bl, bw->start_block_bias);
    }
}

/**
 * Walker: finally lower all Sels of outer frame or parameter
 * entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx)
{
    be_abi_irg_t *env = ctx;
    ir_node      *ptr;
    ir_entity    *ent;
    ir_type      *owner;

    if (! is_Sel(sel))
        return;

    ent   = get_Sel_entity(sel);
    owner = get_entity_owner(ent);
    ptr   = get_Sel_ptr(sel);

    if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
        /* found access to outer frame or arguments */
        int offset = get_stack_entity_offset(&env->frame, ent, 0);

        if (offset != 0) {
            ir_node  *bl        = get_nodes_block(sel);
            dbg_info *dbgi      = get_irn_dbg_info(sel);
            ir_mode  *mode      = get_irn_mode(sel);
            ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
            ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);

            ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
        }
        exchange(sel, ptr);
    }
}
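
/* Sketch: an inner function's access to a variable of the enclosing frame,
 *     Sel(outer_frame_ptr, var_entity)
 * becomes plain address arithmetic,
 *     Add(outer_frame_ptr, Const(offset)),
 * or just outer_frame_ptr itself when the entity's offset is 0. */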

void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
    ir_graph *irg = env->birg->irg;
    ir_type  *frame_tp;
    int       i;
    struct bias_walk bw;

    stack_frame_compute_initial_offset(&env->frame);
    // stack_layout_dump(stdout, frame);

    /* Determine the stack bias at the end of the start block. */
    bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
    bw.between_size     = get_type_size_bytes(env->frame.between_type);

    /* fix the bias in all other blocks */
    bw.env = env;
    bw.start_block = get_irg_start_block(irg);
    irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

    /* now fix inner functions: these still have Sel nodes to the outer
       frame and parameter entities */
    frame_tp = get_irg_frame_type(irg);
    for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
        ir_entity *ent = get_class_member(frame_tp, i);
        ir_graph  *irg = get_entity_irg(ent);

        if (irg != NULL) {
            irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
        }
    }
}

ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
    assert(arch_register_type_is(reg, callee_save));
    assert(pmap_contains(abi->regs, (void *) reg));
    return pmap_get(abi->regs, (void *) reg);
}

ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
    assert(arch_register_type_is(reg, ignore));
    assert(pmap_contains(abi->regs, (void *) reg));
    return pmap_get(abi->regs, (void *) reg);
}

/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi)
{
    return abi->call->flags.bits.try_omit_fp;
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
void be_init_abi(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}