2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
32 #include "irgraph_t.h"
35 #include "iredges_t.h"
38 #include "irprintf_t.h"
45 #include "raw_bitset.h"
56 #include "bessaconstr.h"
59 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
61 typedef struct _be_abi_call_arg_t {
62 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
63 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
64 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
65 unsigned callee : 1; /**< 1: someone called us. 0: We call another function */
68 const arch_register_t *reg;
71 unsigned alignment; /**< stack alignment */
72 unsigned space_before; /**< allocate space before */
73 unsigned space_after; /**< allocate space after */
76 struct _be_abi_call_t {
77 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
78 int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
79 const be_abi_callbacks_t *cb;
80 ir_type *between_type;
82 const arch_register_class_t *cls_addr; /**< register class of the call address */
86 * The ABI information for the current graph.
88 struct _be_abi_irg_t {
90 const arch_env_t *arch_env;
91 survive_dce_t *dce_survivor;
93 be_abi_call_t *call; /**< The ABI call information. */
94 ir_type *method_type; /**< The type of the method of the IRG. */
96 ir_node *init_sp; /**< The node representing the stack pointer
97 at the start of the function. */
99 ir_node *start; /**< The be_Start params node. */
100 pmap *regs; /**< A map of all callee-save and ignore regs to
101 their Projs to the RegParams node. */
103 int start_block_bias; /**< The stack bias at the end of the start block. */
105 void *cb; /**< ABI Callback self pointer. */
107 pmap *keep_map; /**< mapping blocks to keep nodes. */
108 pset *ignore_regs; /**< Additional registers which shall be ignored. */
110 ir_node **calls; /**< flexible array containing all be_Call nodes */
112 arch_register_req_t *sp_req;
114 be_stack_layout_t frame; /**< The stack frame model. */
117 static heights_t *ir_heights;
119 /** Flag: if set, try to omit the frame pointer in all routines. */
120 static int be_omit_fp = 1;
122 /** Flag: if set, try to omit the frame pointer in leaf routines only. */
123 static int be_omit_leaf_fp = 1;
126 _ ____ ___ ____ _ _ _ _
127 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
128 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
129 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
130 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
132 These callbacks are used by the backend to set the parameters
133 for a specific call type.
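
    As an illustrative sketch (not taken from any particular backend; the
    register names, flags and callback struct are placeholders), a target's
    call-ABI hook might use the setters below roughly like this:

        be_abi_call_set_flags(call, my_flags, &my_callbacks);
        be_abi_call_param_reg(call, 0, some_reg,
                              ABI_CONTEXT_CALLEE | ABI_CONTEXT_CALLER);
        be_abi_call_param_stack(call, 1, mode_Iu, 4, 0, 0,
                                ABI_CONTEXT_CALLEE | ABI_CONTEXT_CALLER);
        be_abi_call_res_reg(call, 0, some_result_reg,
                            ABI_CONTEXT_CALLEE | ABI_CONTEXT_CALLER);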
137 * Set compare function: compares two ABI call object arguments.
139 static int cmp_call_arg(const void *a, const void *b, size_t n)
141 const be_abi_call_arg_t *p = a, *q = b;
143 return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
147 * Get an ABI call object argument.
149 * @param call the abi call
150 * @param is_res true for call results, false for call arguments
151 * @param pos position of the argument
152 * @param callee context type - if we are callee or caller
154 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
156 be_abi_call_arg_t arg;
159 memset(&arg, 0, sizeof(arg));
164 hash = is_res * 128 + pos;
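/* note: remember_call_arg() computes the same hash, so lookups and inserts agree */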
166 return set_find(call->params, &arg, sizeof(arg), hash);
170 * Set an ABI call object argument.
172 static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
174 unsigned hash = arg->is_res * 128 + arg->pos;
175 if (context & ABI_CONTEXT_CALLEE) {
177 set_insert(call->params, arg, sizeof(*arg), hash);
179 if (context & ABI_CONTEXT_CALLER) {
181 set_insert(call->params, arg, sizeof(*arg), hash);
185 /* Set the flags for a call. */
186 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
192 /* Sets the number of bytes the stack frame is shrunk by the callee on return */
193 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
199 /* Set register class for call address */
200 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
202 call->cls_addr = cls;
206 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
207 ir_mode *load_mode, unsigned alignment,
208 unsigned space_before, unsigned space_after,
209 be_abi_context_t context)
211 be_abi_call_arg_t arg;
212 memset(&arg, 0, sizeof(arg));
213 assert(alignment > 0 && "Alignment must be greater than 0");
215 arg.load_mode = load_mode;
216 arg.alignment = alignment;
217 arg.space_before = space_before;
218 arg.space_after = space_after;
222 remember_call_arg(&arg, call, context);
225 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
227 be_abi_call_arg_t arg;
228 memset(&arg, 0, sizeof(arg));
235 remember_call_arg(&arg, call, context);
238 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
240 be_abi_call_arg_t arg;
241 memset(&arg, 0, sizeof(arg));
248 remember_call_arg(&arg, call, context);
251 /* Get the flags of an ABI call object. */
252 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
258 * Constructor for a new ABI call object.
260 * @param cls_addr register class of the call address
262 * @return the new ABI call object
264 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
266 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
269 call->params = new_set(cmp_call_arg, 16);
271 call->cls_addr = cls_addr;
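/* start optimistically: frame pointer omission is disabled again later when
 * a stack Alloc/Free or a call is encountered (try_omit_fp is then cleared,
 * see adjust_alloc() and friends) */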
273 call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
279 * Destructor for an ABI call object.
281 static void be_abi_call_free(be_abi_call_t *call)
283 del_set(call->params);
289 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
290 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
291 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
292 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
295 Handling of the stack frame. It is composed of three types:
296 1) The type of the arguments which are pushed on the stack.
297 2) The "between type" which consists of stuff the call of the
298 function pushes on the stack (like the return address and
299 the old base pointer for ia32).
300 3) The Firm frame type which consists of all local variables
304 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
307 ir_type *t = get_entity_owner(ent);
308 int ofs = get_entity_offset(ent);
312 /* Find the type the entity is contained in. */
313 for (index = 0; index < N_FRAME_TYPES; ++index) {
314 if (frame->order[index] == t)
316 /* Add the size of all the types below the one of the entity to the entity's offset */
317 ofs += get_type_size_bytes(frame->order[index]);
320 /* correct the offset by the initial position of the frame pointer */
321 ofs -= frame->initial_offset;
323 /* correct the offset with the current bias. */
330 * Retrieve the entity with given offset from a frame type.
332 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
336 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
337 ir_entity *ent = get_compound_member(t, i);
338 if (get_entity_offset(ent) == offset)
345 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
347 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
348 ir_entity *ent = search_ent_with_offset(base, 0);
351 frame->initial_offset
352 = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
354 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
357 return frame->initial_offset;
361 * Initializes the frame layout from parts
363 * @param frame the stack layout that will be initialized
364 * @param args the stack argument layout type
365 * @param between the between layout type
366 * @param locals the method frame type
367 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
368 * @param param_map an array mapping method argument positions to the stack argument type
370 * @return the initialized stack layout
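 *
 * Illustrative note (summarizing the assignments below): for a typical
 * decreasing stack (stack_dir < 0) the resulting order is
 * order[] = { locals, between, args }, i.e. locals get the lowest and
 * arguments the highest addresses; for an increasing stack the order is
 * reversed to { args, between, locals }.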
372 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
373 ir_type *between, ir_type *locals, int stack_dir,
374 ir_entity *param_map[])
376 frame->arg_type = args;
377 frame->between_type = between;
378 frame->frame_type = locals;
379 frame->initial_offset = 0;
380 frame->initial_bias = 0;
381 frame->stack_dir = stack_dir;
382 frame->order[1] = between;
383 frame->param_map = param_map;
386 frame->order[0] = args;
387 frame->order[2] = locals;
390 /* typical decreasing stack: locals have the
391 * lowest addresses, arguments the highest */
392 frame->order[0] = locals;
393 frame->order[2] = args;
405 Adjustment of the calls inside a graph.
410 * Transform a call node into a be_Call node.
412 * @param env The ABI environment for the current irg.
413 * @param irn The call node.
414 * @param curr_sp The stack pointer node to use.
415 * @return The stack pointer after the call.
417 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
419 ir_graph *irg = env->irg;
420 const arch_env_t *arch_env = env->arch_env;
421 ir_type *call_tp = get_Call_type(irn);
422 ir_node *call_ptr = get_Call_ptr(irn);
423 int n_params = get_method_n_params(call_tp);
424 ir_node *curr_mem = get_Call_mem(irn);
425 ir_node *bl = get_nodes_block(irn);
427 int stack_dir = arch_env->stack_dir;
428 const arch_register_t *sp = arch_env->sp;
429 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
430 ir_mode *mach_mode = sp->reg_class->mode;
431 struct obstack *obst = be_get_be_obst(irg);
432 int no_alloc = call->flags.bits.frame_is_setup_on_call;
433 int n_res = get_method_n_ress(call_tp);
434 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
436 ir_node *res_proj = NULL;
437 int n_reg_params = 0;
438 int n_stack_params = 0;
441 pset_new_t destroyed_regs, states;
442 pset_new_iterator_t iter;
446 int n_reg_results = 0;
447 const arch_register_t *reg;
448 const ir_edge_t *edge;
450 int *stack_param_idx;
451 int i, n, destroy_all_regs;
454 pset_new_init(&destroyed_regs);
455 pset_new_init(&states);
457 /* Let the isa fill out the abi description for that call node. */
458 arch_env_get_call_abi(arch_env, call_tp, call);
460 /* Insert code to put the stack arguments on the stack. */
461 assert(get_Call_n_params(irn) == n_params);
462 assert(obstack_object_size(obst) == 0);
463 stack_param_idx = ALLOCAN(int, n_params);
464 for (i = 0; i < n_params; ++i) {
465 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
468 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
470 stack_size += round_up2(arg->space_before, arg->alignment);
471 stack_size += round_up2(arg_size, arg->alignment);
472 stack_size += round_up2(arg->space_after, arg->alignment);
474 stack_param_idx[n_stack_params++] = i;
478 /* Collect all arguments which are passed in registers. */
479 reg_param_idxs = ALLOCAN(int, n_params);
480 for (i = 0; i < n_params; ++i) {
481 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
482 if (arg && arg->in_reg) {
483 reg_param_idxs[n_reg_params++] = i;
488 * If the stack is decreasing, we do not want to store sequentially,
489 * and nobody else allocated the call frame,
490 * we allocate as much space on the stack as all parameters need, by
491 * moving the stack pointer along the stack's direction.
493 * Note: we also have to do this for stack_size == 0, because we may have
494 * to adjust stack alignment for the call.
496 if (stack_dir < 0 && !do_seq && !no_alloc) {
497 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
500 dbgi = get_irn_dbg_info(irn);
501 /* If there are some parameters which shall be passed on the stack. */
502 if (n_stack_params > 0) {
504 ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
508 * Reverse list of stack parameters if call arguments are from left to right.
509 * We must reverse them again if they are pushed (not stored) and the stack
510 * direction is downwards.
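 * (Note: the XOR in the condition below performs the reversal exactly once
 * when precisely one of these two reasons applies.)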
512 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
513 for (i = 0; i < n_stack_params >> 1; ++i) {
514 int other = n_stack_params - i - 1;
515 int tmp = stack_param_idx[i];
516 stack_param_idx[i] = stack_param_idx[other];
517 stack_param_idx[other] = tmp;
521 curr_mem = get_Call_mem(irn);
523 in[n_in++] = curr_mem;
526 for (i = 0; i < n_stack_params; ++i) {
527 int p = stack_param_idx[i];
528 be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
529 ir_node *param = get_Call_param(irn, p);
530 ir_node *addr = curr_sp;
532 ir_type *param_type = get_method_param_type(call_tp, p);
533 int param_size = get_type_size_bytes(param_type) + arg->space_after;
536 * If we wanted to build the arguments sequentially,
537 * the stack pointer for the next argument must be incremented,
538 * and the memory value propagated.
542 addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
543 param_size + arg->space_before, 0);
544 add_irn_dep(curr_sp, curr_mem);
546 curr_ofs += arg->space_before;
547 curr_ofs = round_up2(curr_ofs, arg->alignment);
549 /* Make the expression to compute the argument's offset. */
551 ir_mode *constmode = mach_mode;
552 if (mode_is_reference(mach_mode)) {
555 addr = new_r_Const_long(irg, constmode, curr_ofs);
556 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
560 /* Insert a store for primitive arguments. */
561 if (is_atomic_type(param_type)) {
563 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
564 store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
565 mem = new_r_Proj(store, mode_M, pn_Store_M);
567 /* Make a mem copy for compound arguments. */
570 assert(mode_is_reference(get_irn_mode(param)));
571 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
572 mem = new_r_Proj(copy, mode_M, pn_CopyB_M_regular);
575 curr_ofs += param_size;
583 /* We need the Sync only if we didn't build the stores sequentially. */
585 if (n_stack_params >= 1) {
586 curr_mem = new_r_Sync(bl, n_in, in);
588 curr_mem = get_Call_mem(irn);
593 /* check for the return_twice property */
594 destroy_all_regs = 0;
595 if (is_SymConst_addr_ent(call_ptr)) {
596 ir_entity *ent = get_SymConst_entity(call_ptr);
598 if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
599 destroy_all_regs = 1;
601 ir_type *call_tp = get_Call_type(irn);
603 if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
604 destroy_all_regs = 1;
607 /* Put caller-save registers into the destroyed set and state registers into the states set */
608 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
610 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
611 for (j = 0; j < cls->n_regs; ++j) {
612 const arch_register_t *reg = arch_register_for_index(cls, j);
614 if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
615 if (! arch_register_type_is(reg, ignore))
616 pset_new_insert(&destroyed_regs, (void *) reg);
618 if (arch_register_type_is(reg, state)) {
619 pset_new_insert(&destroyed_regs, (void*) reg);
620 pset_new_insert(&states, (void*) reg);
625 if (destroy_all_regs) {
626 /* even if destroy-all is specified, neither SP nor FP is destroyed (else bad things will happen) */
627 pset_new_remove(&destroyed_regs, arch_env->sp);
628 pset_new_remove(&destroyed_regs, arch_env->bp);
631 /* search the largest result proj number */
632 res_projs = ALLOCANZ(ir_node*, n_res);
634 foreach_out_edge(irn, edge) {
635 const ir_edge_t *res_edge;
636 ir_node *irn = get_edge_src_irn(edge);
638 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
641 foreach_out_edge(irn, res_edge) {
643 ir_node *res = get_edge_src_irn(res_edge);
645 assert(is_Proj(res));
647 proj = get_Proj_proj(res);
648 assert(proj < n_res);
649 assert(res_projs[proj] == NULL);
650 res_projs[proj] = res;
656 /** TODO: this is not correct for cases where return values are passed
657 * on the stack, but no known ABI does this currently...
659 n_reg_results = n_res;
661 assert(obstack_object_size(obst) == 0);
663 in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
665 /* make the back end call node and set its register requirements. */
666 for (i = 0; i < n_reg_params; ++i) {
667 in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
670 /* add state registers ins */
671 foreach_pset_new(&states, reg, iter) {
672 const arch_register_class_t *cls = arch_register_get_class(reg);
674 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
675 ir_fprintf(stderr, "Adding %+F\n", regnode);
677 ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
678 in[n_ins++] = regnode;
680 assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
682 /* ins collected, build the call */
683 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
685 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
686 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
687 n_ins, in, get_Call_type(irn));
688 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
691 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
692 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
693 n_ins, in, get_Call_type(irn));
695 be_Call_set_pop(low_call, call->pop);
697 /* put the call into the list of all calls for later processing */
698 ARR_APP1(ir_node *, env->calls, low_call);
700 /* create new stack pointer */
701 curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
702 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
703 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
704 arch_set_irn_register(curr_sp, sp);
706 /* now handle results */
707 for (i = 0; i < n_res; ++i) {
709 ir_node *proj = res_projs[i];
710 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
712 /* return values passed on the stack are not supported yet */
716 shift the proj number to the right, since we will drop the
717 unspeakable Proj_T from the Call. Therefore, all real argument
718 Proj numbers must be increased by pn_be_Call_first_res
720 pn = i + pn_be_Call_first_res;
723 ir_type *res_type = get_method_res_type(call_tp, i);
724 ir_mode *mode = get_type_mode(res_type);
725 proj = new_r_Proj(low_call, mode, pn);
728 set_Proj_pred(proj, low_call);
729 set_Proj_proj(proj, pn);
733 pset_new_remove(&destroyed_regs, arg->reg);
738 Set the register class of the call address to
739 the backend provided class (default: stack pointer class)
741 be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
743 DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
745 /* Set the register classes and constraints of the Call parameters. */
746 for (i = 0; i < n_reg_params; ++i) {
747 int index = reg_param_idxs[i];
748 be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
749 assert(arg->reg != NULL);
751 be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
755 /* Set the register constraints of the results. */
756 for (i = 0; i < n_res; ++i) {
757 ir_node *proj = res_projs[i];
758 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
759 int pn = get_Proj_proj(proj);
762 be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
763 arch_set_irn_register(proj, arg->reg);
765 exchange(irn, low_call);
767 /* kill the ProjT node */
768 if (res_proj != NULL) {
772 /* Make additional projs for the caller save registers
773 and the Keep node which keeps them alive. */
775 const arch_register_t *reg;
779 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
780 pset_new_iterator_t iter;
783 n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
784 in = ALLOCAN(ir_node *, n_ins);
786 /* also keep the stack pointer */
787 set_irn_link(curr_sp, (void*) sp);
790 foreach_pset_new(&destroyed_regs, reg, iter) {
791 ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
793 /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
794 be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
795 arch_set_irn_register(proj, reg);
797 set_irn_link(proj, (void*) reg);
802 for (i = 0; i < n_reg_results; ++i) {
803 ir_node *proj = res_projs[i];
804 const arch_register_t *reg = arch_get_irn_register(proj);
805 set_irn_link(proj, (void*) reg);
810 /* create the Keep for the caller save registers */
811 keep = be_new_Keep(bl, n, in);
812 for (i = 0; i < n; ++i) {
813 const arch_register_t *reg = get_irn_link(in[i]);
814 be_node_set_reg_class_in(keep, i, reg->reg_class);
818 /* Clean up the stack. */
819 assert(stack_size >= call->pop);
820 stack_size -= call->pop;
822 if (stack_size > 0) {
823 ir_node *mem_proj = NULL;
825 foreach_out_edge(low_call, edge) {
826 ir_node *irn = get_edge_src_irn(edge);
827 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
834 mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
835 keep_alive(mem_proj);
838 /* Clean up the stack frame or revert alignment fixes if we allocated it */
840 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
843 be_abi_call_free(call);
845 pset_new_destroy(&states);
846 pset_new_destroy(&destroyed_regs);
852 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
854 * @param alignment the minimum stack alignment
855 * @param size the node containing the non-aligned size
856 * @param block the block where new nodes are allocated on
857 * @param dbg debug info for new nodes
859 * @return a node representing the aligned size
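 *
 * Illustrative example (assumed values): with stack_alignment = 16 a size of
 * 20 becomes (20 + 15) & ~15 = 32; the And mask below is built from
 * -(long)stack_alignment, which equals ~(stack_alignment - 1) for powers of two.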
861 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
862 ir_node *block, dbg_info *dbg)
864 if (stack_alignment > 1) {
870 assert(is_po2(stack_alignment));
872 mode = get_irn_mode(size);
873 tv = new_tarval_from_long(stack_alignment-1, mode);
874 irg = get_Block_irg(block);
875 mask = new_r_Const(irg, tv);
876 size = new_rd_Add(dbg, block, size, mask, mode);
878 tv = new_tarval_from_long(-(long)stack_alignment, mode);
879 mask = new_r_Const(irg, tv);
880 size = new_rd_And(dbg, block, size, mask, mode);
886 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
888 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
897 const ir_edge_t *edge;
902 unsigned stack_alignment;
904 assert(get_Alloc_where(alloc) == stack_alloc);
906 block = get_nodes_block(alloc);
907 irg = get_Block_irg(block);
910 type = get_Alloc_type(alloc);
912 foreach_out_edge(alloc, edge) {
913 ir_node *irn = get_edge_src_irn(edge);
915 assert(is_Proj(irn));
916 switch (get_Proj_proj(irn)) {
928 /* Beware: currently Alloc nodes without a result can occur; only escape
929 analysis removes them, and that phase runs only for object-oriented
930 source. We kill the Alloc here. */
931 if (alloc_res == NULL && alloc_mem) {
932 exchange(alloc_mem, get_Alloc_mem(alloc));
936 dbg = get_irn_dbg_info(alloc);
937 count = get_Alloc_count(alloc);
939 /* we might need to multiply the count by the element size */
940 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
941 ir_mode *mode = get_irn_mode(count);
942 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
944 ir_node *cnst = new_rd_Const(dbg, irg, tv);
945 size = new_rd_Mul(dbg, block, count, cnst, mode);
950 /* The stack pointer will be modified in an unknown manner.
951 We cannot omit the frame pointer then. */
952 env->call->flags.bits.try_omit_fp = 0;
954 stack_alignment = 1 << env->arch_env->stack_alignment;
955 size = adjust_alloc_size(stack_alignment, size, block, dbg);
956 new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
957 set_irn_dbg_info(new_alloc, dbg);
959 if (alloc_mem != NULL) {
963 addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);
965 /* We need to sync the output mem of the AddSP with the input mem
966 edge into the alloc node. */
967 ins[0] = get_Alloc_mem(alloc);
969 sync = new_r_Sync(block, 2, ins);
971 exchange(alloc_mem, sync);
974 exchange(alloc, new_alloc);
976 /* fix projnum of alloca res */
977 set_Proj_proj(alloc_res, pn_be_AddSP_res);
979 curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
986 * The Free is transformed into a back end free node and connected to the stack nodes.
988 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
992 ir_node *subsp, *mem, *res, *size, *sync;
996 unsigned stack_alignment;
999 assert(get_Free_where(free) == stack_alloc);
1001 block = get_nodes_block(free);
1002 irg = get_irn_irg(block);
1003 type = get_Free_type(free);
1004 sp_mode = env->arch_env->sp->reg_class->mode;
1005 dbg = get_irn_dbg_info(free);
1007 /* we might need to multiply the size by the element size */
1008 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
1009 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
1010 ir_node *cnst = new_rd_Const(dbg, irg, tv);
1011 ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
1015 size = get_Free_size(free);
1018 stack_alignment = 1 << env->arch_env->stack_alignment;
1019 size = adjust_alloc_size(stack_alignment, size, block, dbg);
1021 /* The stack pointer will be modified in an unknown manner.
1022 We cannot omit the frame pointer then. */
1023 env->call->flags.bits.try_omit_fp = 0;
1024 subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
1025 set_irn_dbg_info(subsp, dbg);
1027 mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
1028 res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);
1030 /* we need to sync the memory */
1031 in[0] = get_Free_mem(free);
1033 sync = new_r_Sync(block, 2, in);
1035 /* and make the SubSP dependent on the former memory */
1036 add_irn_dep(subsp, get_Free_mem(free));
1039 exchange(free, sync);
1046 * Check if a node is somehow data dependent on another one.
1047 * Both nodes must be in the same basic block.
1048 * @param n1 The first node.
1049 * @param n2 The second node.
1050 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
1052 static int dependent_on(ir_node *n1, ir_node *n2)
1054 assert(get_nodes_block(n1) == get_nodes_block(n2));
1056 return heights_reachable_in_block(ir_heights, n1, n2);
1059 static int cmp_call_dependency(const void *c1, const void *c2)
1061 ir_node *n1 = *(ir_node **) c1;
1062 ir_node *n2 = *(ir_node **) c2;
1065 Classical qsort() comparison function behavior:
1066 0 if both elements are equal
1067 1 if second is "smaller" than first
1068 -1 if first is "smaller" than second
1070 if (dependent_on(n1, n2))
1073 if (dependent_on(n2, n1))
1076 /* The nodes have no depth order, but we need a total order because qsort()
1078 return get_irn_idx(n1) - get_irn_idx(n2);
1082 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
1083 * Clears the irg_is_leaf flag if a Call is detected.
1085 static void link_ops_in_block_walker(ir_node *irn, void *data)
1087 be_abi_irg_t *env = data;
1088 ir_opcode code = get_irn_opcode(irn);
1090 if (code == iro_Call ||
1091 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1092 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1093 ir_node *bl = get_nodes_block(irn);
1094 void *save = get_irn_link(bl);
1096 if (code == iro_Call)
1097 env->call->flags.bits.irg_is_leaf = 0;
1099 set_irn_link(irn, save);
1100 set_irn_link(bl, irn);
1103 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
1104 ir_node *param = get_Builtin_param(irn, 0);
1105 tarval *tv = get_Const_tarval(param);
1106 unsigned long value = get_tarval_long(tv);
1107 /* use ebp, so the climbframe algo works... */
1109 env->call->flags.bits.try_omit_fp = 0;
1116 * Process all Call/Alloc/Free nodes inside a basic block.
1117 * Note that the link field of the block must contain a linked list of all
1118 * Call nodes inside the Block. We first order this list according to data dependency
1119 * and then connect the calls together.
1121 static void process_ops_in_block(ir_node *bl, void *data)
1123 be_abi_irg_t *env = data;
1124 ir_node *curr_sp = env->init_sp;
1131 for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {
1135 nodes = ALLOCAN(ir_node*, n_nodes);
1136 for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {
1140 /* If there were call nodes in the block. */
1145 /* order the call nodes according to data dependency */
1146 qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
1148 for (i = n_nodes - 1; i >= 0; --i) {
1149 ir_node *irn = nodes[i];
1151 DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1152 switch (get_irn_opcode(irn)) {
1155 /* The stack pointer will be modified due to a call. */
1156 env->call->flags.bits.try_omit_fp = 0;
1158 curr_sp = adjust_call(env, irn, curr_sp);
1161 if (get_Alloc_where(irn) == stack_alloc)
1162 curr_sp = adjust_alloc(env, irn, curr_sp);
1165 if (get_Free_where(irn) == stack_alloc)
1166 curr_sp = adjust_free(env, irn, curr_sp);
1169 panic("invalid call");
1173 /* Keep the last stack state in the block by tying it to a Keep node;
1174 * the Proj from calls is already kept */
1175 if (curr_sp != env->init_sp &&
1176 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1178 keep = be_new_Keep(bl, 1, nodes);
1179 pmap_insert(env->keep_map, bl, keep);
1183 set_irn_link(bl, curr_sp);
1187 * Adjust all call nodes in the graph to the ABI conventions.
1189 static void process_calls(be_abi_irg_t *env)
1191 ir_graph *irg = env->irg;
1193 env->call->flags.bits.irg_is_leaf = 1;
1194 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
1196 ir_heights = heights_new(env->irg);
1197 irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
1198 heights_free(ir_heights);
1202 * Computes the stack argument layout type.
1203 * Changes a possibly allocated value param type by moving
1204 * entities to the stack layout type.
1206 * @param env the ABI environment
1207 * @param call the current call ABI
1208 * @param method_type the method type
1209 * @param val_param_tp the value parameter type, will be destroyed
1210 * @param param_map an array mapping method arguments to the stack layout type
1212 * @return the stack argument layout type
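 *
 * Illustrative example (assumed values): two 4-byte parameters passed on the
 * stack, each with alignment 4 and no space_before/space_after, end up as
 * entities at offsets 0 and 4 in an arg_type of size 8.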
1214 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
1215 ir_type *method_type, ir_type *val_param_tp,
1216 ir_entity ***param_map)
1218 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1219 int inc = env->arch_env->stack_dir * dir;
1220 int n = get_method_n_params(method_type);
1221 int curr = inc > 0 ? 0 : n - 1;
1222 struct obstack *obst = be_get_be_obst(env->irg);
1228 ident *id = get_entity_ident(get_irg_entity(env->irg));
1231 *param_map = map = OALLOCN(obst, ir_entity*, n);
1232 res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
1233 for (i = 0; i < n; ++i, curr += inc) {
1234 ir_type *param_type = get_method_param_type(method_type, curr);
1235 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);
1238 if (arg->on_stack) {
1239 if (val_param_tp != NULL) {
1240 /* the entity was already created, create a copy in the param type */
1241 ir_entity *val_ent = get_method_value_param_ent(method_type, i);
1242 arg->stack_ent = copy_entity_own(val_ent, res);
1243 set_entity_link(val_ent, arg->stack_ent);
1244 set_entity_link(arg->stack_ent, NULL);
1246 /* create a new entity */
1247 snprintf(buf, sizeof(buf), "param_%d", i);
1248 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1250 ofs += arg->space_before;
1251 ofs = round_up2(ofs, arg->alignment);
1252 set_entity_offset(arg->stack_ent, ofs);
1253 ofs += arg->space_after;
1254 ofs += get_type_size_bytes(param_type);
1255 map[i] = arg->stack_ent;
1258 set_type_size_bytes(res, ofs);
1259 set_type_state(res, layout_fixed);
1264 const arch_register_t *reg;
1268 static int cmp_regs(const void *a, const void *b)
1270 const reg_node_map_t *p = a;
1271 const reg_node_map_t *q = b;
1273 if (p->reg->reg_class == q->reg->reg_class)
1274 return p->reg->index - q->reg->index;
1276 return p->reg->reg_class - q->reg->reg_class;
1279 static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
1282 int n = pmap_count(reg_map);
1285 foreach_pmap(reg_map, ent) {
1286 res[i].reg = ent->key;
1287 res[i].irn = ent->value;
1291 qsort(res, n, sizeof(res[0]), cmp_regs);
1295 * Creates a be_Barrier over all registers in the given map (plus memory) and replaces the map entries by the Barrier's output Projs.
1297 static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
1300 int n_regs = pmap_count(regs);
1306 in = ALLOCAN(ir_node*, n_regs+1);
1307 rm = ALLOCAN(reg_node_map_t, n_regs);
1308 reg_map_to_arr(rm, regs);
1309 for (n = 0; n < n_regs; ++n) {
1317 irn = be_new_Barrier(bl, n, in);
1319 for (n = 0; n < n_regs; ++n) {
1320 ir_node *pred = rm[n].irn;
1321 const arch_register_t *reg = rm[n].reg;
1322 arch_register_type_t add_type = 0;
1324 const backend_info_t *info;
1326 /* stupid workaround for now... as not all nodes report register
1328 info = be_get_info(skip_Proj(pred));
1329 if (info != NULL && info->out_infos != NULL) {
1330 const arch_register_req_t *ireq = arch_get_register_req_out(pred);
1331 if (ireq->type & arch_register_req_type_ignore)
1332 add_type |= arch_register_req_type_ignore;
1333 if (ireq->type & arch_register_req_type_produces_sp)
1334 add_type |= arch_register_req_type_produces_sp;
1337 proj = new_r_Proj(irn, get_irn_mode(pred), n);
1338 be_node_set_reg_class_in(irn, n, reg->reg_class);
1340 be_set_constr_single_reg_in(irn, n, reg, 0);
1341 be_set_constr_single_reg_out(irn, n, reg, add_type);
1342 arch_set_irn_register(proj, reg);
1344 pmap_insert(regs, (void *) reg, proj);
1348 *mem = new_r_Proj(irn, mode_M, n);
1355 * Creates a be_Return for a Return node.
1357 * @param env the abi environment
1358 * @param irn the Return node or NULL if there was none
1359 * @param bl the block where the be_Return should be placed
1360 * @param mem the current memory
1361 * @param n_res number of return results
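 *
 * The in array of the created be_Return is assembled (roughly) as: memory,
 * stack pointer, the result registers, and finally the remaining callee-save
 * and ignore registers from the register map (see the construction below).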
1363 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
1364 ir_node *mem, int n_res)
1366 be_abi_call_t *call = env->call;
1367 const arch_env_t *arch_env = env->arch_env;
1369 pmap *reg_map = pmap_create();
1370 ir_node *keep = pmap_get(env->keep_map, bl);
1377 const arch_register_t **regs;
1381 Get the valid stack node in this block.
1382 If we had a call in that block there is a Keep constructed by process_calls()
1383 which points to the last stack modification in that block. We'll use
1384 it then. Otherwise we use the stack from the start block and let
1385 the SSA construction fix the usage.
1387 stack = be_abi_reg_map_get(env->regs, arch_env->sp);
1389 stack = get_irn_n(keep, 0);
1391 remove_End_keepalive(get_irg_end(env->irg), keep);
1394 /* Insert results for Return into the register map. */
1395 for (i = 0; i < n_res; ++i) {
1396 ir_node *res = get_Return_res(irn, i);
1397 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1398 assert(arg->in_reg && "return value must be passed in register");
1399 pmap_insert(reg_map, (void *) arg->reg, res);
1402 /* Add uses of the callee save registers. */
1403 foreach_pmap(env->regs, ent) {
1404 const arch_register_t *reg = ent->key;
1405 if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1406 pmap_insert(reg_map, ent->key, ent->value);
1409 be_abi_reg_map_set(reg_map, arch_env->sp, stack);
1411 /* Make the Epilogue node and call the arch's epilogue maker. */
1412 create_barrier(bl, &mem, reg_map, 1);
1413 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1416 Maximum size of the in array for Return nodes is
1417 return args + callee save/ignore registers + memory + stack pointer
1419 in_max = pmap_count(reg_map) + n_res + 2;
1421 in = ALLOCAN(ir_node*, in_max);
1422 regs = ALLOCAN(arch_register_t const*, in_max);
1425 in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
1427 regs[1] = arch_env->sp;
1430 /* clear SP entry, since it has already been added to the in array. */
1431 pmap_insert(reg_map, (void *) arch_env->sp, NULL);
1432 for (i = 0; i < n_res; ++i) {
1433 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1435 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1436 regs[n++] = arg->reg;
1438 /* Clear the map entry to mark the register as processed. */
1439 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1442 /* grow the rest of the stuff. */
1443 foreach_pmap(reg_map, ent) {
1446 regs[n++] = ent->key;
1450 /* The in array for the new back end return is now ready. */
1452 dbgi = get_irn_dbg_info(irn);
1456 /* we have to pop the shadow parameter in case of struct returns */
1458 ret = be_new_Return(dbgi, env->irg, bl, n_res, pop, n, in);
1460 /* Set the register classes of the return's parameter accordingly. */
1461 for (i = 0; i < n; ++i) {
1462 if (regs[i] == NULL)
1465 be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
1468 /* Free the space of the Epilog's in array and the register <-> proj map. */
1469 pmap_destroy(reg_map);
1474 typedef struct ent_pos_pair ent_pos_pair;
1475 struct ent_pos_pair {
1476 ir_entity *ent; /**< a value param entity */
1477 int pos; /**< its parameter number */
1478 ent_pos_pair *next; /**< for linking */
1481 typedef struct lower_frame_sels_env_t {
1482 ent_pos_pair *value_param_list; /**< the list of all value param entities */
1483 ir_node *frame; /**< the current frame */
1484 const arch_register_class_t *sp_class; /**< register class of the stack pointer */
1485 const arch_register_class_t *link_class; /**< register class of the link pointer */
1486 ir_type *value_tp; /**< the value type if any */
1487 ir_type *frame_tp; /**< the frame type */
1488 int static_link_pos; /**< argument number of the hidden static link */
1489 } lower_frame_sels_env_t;
1492 * Return an entity from the backend for a value param entity.
1494 * @param ent a value param type entity
1495 * @param ctx context
1497 static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
1499 ir_entity *argument_ent = get_entity_link(ent);
1501 if (argument_ent == NULL) {
1502 /* we have NO argument entity yet: this is bad, as we will
1503 * need one for the backing store.
1506 ir_type *frame_tp = ctx->frame_tp;
1507 unsigned offset = get_type_size_bytes(frame_tp);
1508 ir_type *tp = get_entity_type(ent);
1509 unsigned align = get_type_alignment_bytes(tp);
1511 offset += align - 1;
1512 offset &= ~(align - 1);
1514 argument_ent = copy_entity_own(ent, frame_tp);
1516 /* must be automatic to set a fixed layout */
1517 set_entity_offset(argument_ent, offset);
1518 offset += get_type_size_bytes(tp);
1520 set_type_size_bytes(frame_tp, offset);
1521 set_entity_link(ent, argument_ent);
1523 return argument_ent;
1526 * Walker: Replaces Sels of frame type and
1527 * value param type entities by FrameAddress.
1528 * Links all used entities.
1530 static void lower_frame_sels_walker(ir_node *irn, void *data)
1532 lower_frame_sels_env_t *ctx = data;
1535 ir_node *ptr = get_Sel_ptr(irn);
1537 if (ptr == ctx->frame) {
1538 ir_entity *ent = get_Sel_entity(irn);
1539 ir_node *bl = get_nodes_block(irn);
1542 int is_value_param = 0;
1544 if (get_entity_owner(ent) == ctx->value_tp) {
1547 /* replace by its copy from the argument type */
1548 pos = get_struct_member_index(ctx->value_tp, ent);
1549 ent = get_argument_entity(ent, ctx);
1552 nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
1555 /* check if it's a param Sel and if we have not seen this entity before */
1556 if (is_value_param && get_entity_link(ent) == NULL) {
1562 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1564 set_entity_link(ent, ctx->value_param_list);
1571 * Check if a value parameter is transmitted as a register.
1572 * This might happen if the address of a parameter is taken which is
1573 * transmitted in registers.
1575 * Note that on some architectures this case must be handled specially
1576 * because the place of the backing store is determined by their ABI.
1578 * In the default case we move the entity to the frame type and create
1579 * a backing store into the first block.
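 *
 * A sketch of the default transformation (pseudo-notation, names illustrative):
 * for a register parameter i whose address is taken, an entity ent is placed
 * in the frame type and the first block receives
 *     addr  = FrameAddr(frame, ent)
 *     store = Store(addr, Proj(Args, i))
 * so later accesses to the parameter's address read from this backing store.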
1581 static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
1583 be_abi_call_t *call = env->call;
1584 ir_graph *irg = env->irg;
1585 ent_pos_pair *entry, *new_list;
1587 int i, n = ARR_LEN(value_param_list);
1590 for (i = 0; i < n; ++i) {
1591 int pos = value_param_list[i].pos;
1592 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);
1595 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
1596 value_param_list[i].next = new_list;
1597 new_list = &value_param_list[i];
1600 if (new_list != NULL) {
1601 /* ok, change the graph */
1602 ir_node *start_bl = get_irg_start_block(irg);
1603 ir_node *first_bl = get_first_block_succ(start_bl);
1604 ir_node *frame, *imem, *nmem, *store, *mem, *args;
1605 optimization_state_t state;
1608 assert(first_bl && first_bl != start_bl);
1609 /* we had already removed critical edges, so the following
1610 assertion should always be true. */
1611 assert(get_Block_n_cfgpreds(first_bl) == 1);
1613 /* now create backing stores */
1614 frame = get_irg_frame(irg);
1615 imem = get_irg_initial_mem(irg);
1617 save_optimization_state(&state);
1619 nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
1620 restore_optimization_state(&state);
1622 /* reroute all edges to the new memory source */
1623 edges_reroute(imem, nmem, irg);
1627 args = get_irg_args(irg);
1628 for (entry = new_list; entry != NULL; entry = entry->next) {
1630 ir_type *tp = get_entity_type(entry->ent);
1631 ir_mode *mode = get_type_mode(tp);
1634 /* address for the backing store */
1635 addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
1638 mem = new_r_Proj(store, mode_M, pn_Store_M);
1640 /* the backing store itself */
1641 store = new_r_Store(first_bl, mem, addr,
1642 new_r_Proj(args, mode, i), 0);
1644 /* the new memory Proj gets the last Proj from store */
1645 set_Proj_pred(nmem, store);
1646 set_Proj_proj(nmem, pn_Store_M);
1648 /* move all entities to the frame type */
1649 frame_tp = get_irg_frame_type(irg);
1650 offset = get_type_size_bytes(frame_tp);
1652 /* we will add new entities: set the layout to undefined */
1653 assert(get_type_state(frame_tp) == layout_fixed);
1654 set_type_state(frame_tp, layout_undefined);
1655 for (entry = new_list; entry != NULL; entry = entry->next) {
1656 ir_entity *ent = entry->ent;
1658 /* If the entity is still on the argument type, move it to the
1660 * This happens if the value_param type was built due to compound
1662 if (get_entity_owner(ent) != frame_tp) {
1663 ir_type *tp = get_entity_type(ent);
1664 unsigned align = get_type_alignment_bytes(tp);
1666 offset += align - 1;
1667 offset &= ~(align - 1);
1668 set_entity_owner(ent, frame_tp);
1669 /* must be automatic to set a fixed layout */
1670 set_entity_offset(ent, offset);
1671 offset += get_type_size_bytes(tp);
1674 set_type_size_bytes(frame_tp, offset);
1675 /* fix the layout again */
1676 set_type_state(frame_tp, layout_fixed);
1681 * The start block has no jump, instead it has an initial exec Proj.
1682 * The backend wants to handle all blocks the same way, so we replace
1683 * the out cfg edge with a real jump.
1685 static void fix_start_block(ir_graph *irg)
1687 ir_node *initial_X = get_irg_initial_exec(irg);
1688 ir_node *start_block = get_irg_start_block(irg);
1689 const ir_edge_t *edge;
1691 assert(is_Proj(initial_X));
1693 foreach_out_edge(initial_X, edge) {
1694 ir_node *block = get_edge_src_irn(edge);
1696 if (is_Anchor(block))
1698 if (block != start_block) {
1699 ir_node *jmp = new_r_Jmp(start_block);
1700 set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
1701 set_irg_initial_exec(irg, jmp);
1705 panic("Initial exec has no follow block in %+F", irg);
1709 * Update the entities of Sels that access the outer value parameters.
1711 static void update_outer_frame_sels(ir_node *irn, void *env)
1713 lower_frame_sels_env_t *ctx = env;
1720 ptr = get_Sel_ptr(irn);
1721 if (! is_arg_Proj(ptr))
1723 if (get_Proj_proj(ptr) != ctx->static_link_pos)
1725 ent = get_Sel_entity(irn);
1727 if (get_entity_owner(ent) == ctx->value_tp) {
1728 /* replace by its copy from the argument type */
1729 pos = get_struct_member_index(ctx->value_tp, ent);
1730 ent = get_argument_entity(ent, ctx);
1731 set_Sel_entity(irn, ent);
1733 /* check, if we have not seen this entity before */
1734 if (get_entity_link(ent) == NULL) {
1740 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1742 set_entity_link(ent, ctx->value_param_list);
1748 * Fix access to outer local variables.
1750 static void fix_outer_variable_access(be_abi_irg_t *env,
1751 lower_frame_sels_env_t *ctx)
1757 for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
1758 ir_entity *ent = get_class_member(ctx->frame_tp, i);
1760 if (! is_method_entity(ent))
1763 irg = get_entity_irg(ent);
1768 * FIXME: find the number of the static link parameter;
1769 * for now we assume 0 here
1771 ctx->static_link_pos = 0;
1773 irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
1778 * Modify the irg itself and the frame type.
1780 static void modify_irg(be_abi_irg_t *env)
1782 be_abi_call_t *call = env->call;
1783 const arch_env_t *arch_env= env->arch_env;
1784 const arch_register_t *sp = arch_env->sp;
1785 ir_graph *irg = env->irg;
1788 ir_node *new_mem_proj;
1790 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1791 struct obstack *obst = be_get_be_obst(irg);
1796 unsigned frame_size;
1799 const arch_register_t *fp_reg;
1800 ir_node *frame_pointer;
1804 const ir_edge_t *edge;
1805 ir_type *arg_type, *bet_type, *tp;
1806 lower_frame_sels_env_t ctx;
1807 ir_entity **param_map;
1809 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1811 /* Must fetch memory here, otherwise the start Barrier gets the wrong
1812 * memory, which leads to loops in the DAG. */
1813 old_mem = get_irg_initial_mem(irg);
1815 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1817 /* set the links of all frame entities to NULL; we use this
1818 to detect if an entity is already linked in the value_param_list */
1819 tp = get_method_value_param_type(method_type);
1822 /* clear the links of the clone type, let the
1823 original entities point to their clones */
1824 for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
1825 ir_entity *mem = get_struct_member(tp, i);
1826 set_entity_link(mem, NULL);
1830 arg_type = compute_arg_type(env, call, method_type, tp, ¶m_map);
1832 /* Convert the Sel nodes in the irg to frame addr nodes: */
1833 ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
1834 ctx.frame = get_irg_frame(irg);
1835 ctx.sp_class = env->arch_env->sp->reg_class;
1836 ctx.link_class = env->arch_env->link_class;
1837 ctx.frame_tp = get_irg_frame_type(irg);
1839 /* layout the stackframe now */
1840 if (get_type_state(ctx.frame_tp) == layout_undefined) {
1841 default_layout_compound_type(ctx.frame_tp);
1844 /* we will possibly add new entities to the frame: set the layout to undefined */
1845 assert(get_type_state(ctx.frame_tp) == layout_fixed);
1846 set_type_state(ctx.frame_tp, layout_undefined);
1848 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1850 /* fix the frame type layout again */
1851 set_type_state(ctx.frame_tp, layout_fixed);
1852 /* align the stack frame to 4 bytes */
1853 frame_size = get_type_size_bytes(ctx.frame_tp);
1854 if (frame_size % 4 != 0) {
1855 set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
1858 env->regs = pmap_create();
1860 n_params = get_method_n_params(method_type);
1861 args = OALLOCNZ(obst, ir_node*, n_params);
1864 * for inner functions we must now fix accesses to outer frame entities.
1866 fix_outer_variable_access(env, &ctx);
1868 /* Check if a value parameter is transmitted as a register.
1869 * This might happen if the address of a parameter is taken which is
1870 * transmitted in registers.
1872 * Note that on some architectures this case must be handled specially
1873 * because the place of the backing store is determined by their ABI.
1875 * In the default case we move the entity to the frame type and create
1876 * a backing store into the first block.
1878 fix_address_of_parameter_access(env, ctx.value_param_list);
1880 DEL_ARR_F(ctx.value_param_list);
1881 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1883 /* Fill the argument vector */
1884 arg_tuple = get_irg_args(irg);
1885 foreach_out_edge(arg_tuple, edge) {
1886 ir_node *irn = get_edge_src_irn(edge);
1887 if (! is_Anchor(irn)) {
1888 int nr = get_Proj_proj(irn);
1890 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1894 bet_type = call->cb->get_between_type(env->cb);
1895 stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
1897 /* Count the register params and add them to the number of Projs for the RegParams node */
1898 for (i = 0; i < n_params; ++i) {
1899 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
1900 if (arg->in_reg && args[i]) {
1901 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1902 assert(i == get_Proj_proj(args[i]));
1904 /* For now, associate the register with the old Proj from Start representing that argument. */
1905 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1906 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1910 /* Collect all callee-save registers */
1911 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
1912 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
1913 for (j = 0; j < cls->n_regs; ++j) {
1914 const arch_register_t *reg = &cls->regs[j];
1915 if (arch_register_type_is(reg, callee_save) ||
1916 arch_register_type_is(reg, state)) {
1917 pmap_insert(env->regs, (void *) reg, NULL);
1922 /* handle start block here (place a jump in the block) */
1923 fix_start_block(irg);
1925 pmap_insert(env->regs, (void *) sp, NULL);
1926 pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1927 start_bl = get_irg_start_block(irg);
1928 env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1931 * make proj nodes for the callee save registers.
1932 * memorize them, since Return nodes get those as inputs.
1934 * Note that if a register corresponds to an argument, the regs map contains
1935 * the old Proj from start for that argument.
1938 rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
1939 reg_map_to_arr(rm, env->regs);
1940 for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1941 arch_register_t *reg = (void *) rm[i].reg;
1942 ir_mode *mode = reg->reg_class->mode;
1944 arch_register_req_type_t add_type = 0;
1948 add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
1951 proj = new_r_Proj(env->start, mode, nr + 1);
1952 pmap_insert(env->regs, (void *) reg, proj);
1953 be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
1954 arch_set_irn_register(proj, reg);
1956 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1959 /* create a new initial memory proj */
1960 assert(is_Proj(old_mem));
1961 arch_set_out_register_req(env->start, 0, arch_no_register_req);
1962 new_mem_proj = new_r_Proj(env->start, mode_M, 0);
1964 set_irg_initial_mem(irg, mem);
1966 /* Generate the Prologue */
1967 fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
1969 /* do the stack allocation BEFORE the barrier, or spill code
1970 might be added before it */
1971 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1972 env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
1973 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1975 create_barrier(start_bl, &mem, env->regs, 0);
1977 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1978 arch_set_irn_register(env->init_sp, sp);
1980 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1981 set_irg_frame(irg, frame_pointer);
1982 pset_insert_ptr(env->ignore_regs, fp_reg);
1984 /* rewire old mem users to new mem */
1985 exchange(old_mem, mem);
1987 /* keep the mem (for functions with an endless loop = no return) */
1990 set_irg_initial_mem(irg, mem);
1992 /* Now, introduce stack param nodes for all parameters passed on the stack */
1993 for (i = 0; i < n_params; ++i) {
1994 ir_node *arg_proj = args[i];
1995 ir_node *repl = NULL;
1997 if (arg_proj != NULL) {
1998 be_abi_call_arg_t *arg;
1999 ir_type *param_type;
2000 int nr = get_Proj_proj(arg_proj);
2003 nr = MIN(nr, n_params);
2004 arg = get_call_arg(call, 0, nr, 1);
2005 param_type = get_method_param_type(method_type, nr);
2008 repl = pmap_get(env->regs, (void *) arg->reg);
2009 } else if (arg->on_stack) {
2010 ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
2012 /* For atomic parameters which are actually used, we create a Load node. */
2013 if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
2014 ir_mode *mode = get_type_mode(param_type);
2015 ir_mode *load_mode = arg->load_mode;
2017 ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
2018 repl = new_r_Proj(load, load_mode, pn_Load_res);
2020 if (mode != load_mode) {
2021 repl = new_r_Conv(start_bl, repl, mode);
2024 /* The stack parameter is not primitive (it is a struct or array),
2025 * so we create a node representing the parameter's address
2031 assert(repl != NULL);
2033 /* Beware: the mode of the register parameters is always the mode of the register class
2034 which may be wrong. Add Conv's then. */
2035 mode = get_irn_mode(args[i]);
2036 if (mode != get_irn_mode(repl)) {
2037 repl = new_r_Conv(get_nodes_block(repl), repl, mode);
2039 exchange(args[i], repl);
2043 /* the arg proj is not needed anymore and should only be used by the anchor */
2044 assert(get_irn_n_edges(arg_tuple) == 1);
2045 kill_node(arg_tuple);
2046 set_irg_args(irg, new_r_Bad(irg));
2048 /* All Return nodes hang on the End node, so look for them there. */
2049 end = get_irg_end_block(irg);
2050 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
2051 ir_node *irn = get_Block_cfgpred(end, i);
2053 if (is_Return(irn)) {
2054 ir_node *blk = get_nodes_block(irn);
2055 ir_node *mem = get_Return_mem(irn);
2056 ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
2061 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
2062 the code is dead and will never be executed. */
2065 /** Fix the state inputs of calls that still hang on unknowns */
2066 static void fix_call_state_inputs(be_abi_irg_t *env)
2068 const arch_env_t *arch_env = env->arch_env;
2070 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
2072 /* Collect state registers */
2073 n = arch_env_get_n_reg_class(arch_env);
2074 for (i = 0; i < n; ++i) {
2076 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
2077 for (j = 0; j < cls->n_regs; ++j) {
2078 const arch_register_t *reg = arch_register_for_index(cls, j);
2079 if (arch_register_type_is(reg, state)) {
2080 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
2085 n = ARR_LEN(env->calls);
2086 n_states = ARR_LEN(stateregs);
2087 for (i = 0; i < n; ++i) {
2089 ir_node *call = env->calls[i];
2091 arity = get_irn_arity(call);
2093 /* the state reg inputs are the last n inputs of the calls */
2094 for (s = 0; s < n_states; ++s) {
2095 int inp = arity - n_states + s;
2096 const arch_register_t *reg = stateregs[s];
2097 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
2099 set_irn_n(call, inp, regnode);
2103 DEL_ARR_F(stateregs);
2107 * Create a trampoline entity for the given method.
2109 static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
2111 ir_type *type = get_entity_type(method);
2112 ident *old_id = get_entity_ld_ident(method);
2113 ident *id = id_mangle3("", old_id, "$stub");
2114 ir_type *parent = be->pic_trampolines_type;
2115 ir_entity *ent = new_entity(parent, old_id, type);
2116 set_entity_ld_ident(ent, id);
2117 set_entity_visibility(ent, ir_visibility_private);
/** Returns the trampoline entity for the given method. */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
	ir_entity *result = pmap_get(env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
	}

	return result;
}
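
/** Creates an indirection-pointer entity (PIC symbol) for the given entity. */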
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *old_id = get_entity_ld_ident(entity);
	ident     *id     = id_mangle3("", old_id, "$non_lazy_ptr");
	ir_type   *e_type = get_entity_type(entity);
	ir_type   *type   = new_type_pointer(e_type);
	ir_type   *parent = be->pic_symbols_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}
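
/** Returns the PIC symbol entity for the given entity, creating it on demand. */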
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
	ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
	}

	return result;
}
/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
	return get_entity_visibility(entity) != ir_visibility_external
	    && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}
/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
	ir_graph      *irg;
	ir_node       *pic_base;
	ir_node       *add;
	ir_node       *block;
	ir_mode       *mode;
	ir_node       *load;
	ir_node       *load_res;
	ir_entity     *entity;
	dbg_info      *dbgi;
	int            arity, i;
	be_abi_irg_t  *env = data;
	be_main_env_t *be  = be_birg_from_irg(env->irg)->main_env;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node   *pred = get_irn_n(node, i);
		ir_entity *pic_symbol;
		ir_node   *pic_symconst;

		if (!is_SymConst(pred))
			continue;

		entity = get_SymConst_entity(pred);
		block  = get_nodes_block(pred);
		irg    = get_irn_irg(pred);

		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node   *trampoline_const;

			if (can_address_relative(entity))
				continue;

			dbgi             = get_irn_dbg_info(pred);
			trampoline       = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			                                            trampoline, NULL);
			set_irn_n(node, i, trampoline_const);
			continue;
		}

		/* everything else is accessed relative to EIP */
		mode     = get_irn_mode(pred);
		pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(env->irg));

		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			ir_node *add = new_r_Add(block, pic_base, pred, mode);

			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
			continue;
		}

		/* get entry from pic symbol segment */
		dbgi         = get_irn_dbg_info(pred);
		pic_symbol   = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		                                        pic_symbol, NULL);
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);

		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
		load_res = new_r_Proj(load, mode, pn_Load_res);

		set_irn_n(node, i, load_res);
	}
}
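
/**
 * Builds the ABI view of the given graph: queries the backend's call ABI,
 * sets up the stack pointer requirement, fixes PIC SymConsts and the state
 * register inputs of calls.
 */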
be_abi_irg_t *be_abi_introduce(ir_graph *irg)
{
	be_abi_irg_t        *env       = XMALLOC(be_abi_irg_t);
	ir_node             *old_frame = get_irg_frame(irg);
	struct obstack      *obst      = be_get_be_obst(irg);
	be_options_t        *options   = be_get_irg_options(irg);
	const arch_env_t    *arch_env  = be_get_irg_arch_env(irg);
	ir_node             *dummy;
	pmap_entry          *ent;
	unsigned            *limited_bitset;
	arch_register_req_t *sp_req;

	be_omit_fp      = options->omit_fp;
	be_omit_leaf_fp = options->omit_leaf_fp;

	env->irg         = irg;
	env->arch_env    = arch_env;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call        = be_abi_call_new(arch_env->sp->reg_class);
	arch_env_get_call_abi(arch_env, env->method_type, env->call);

	env->ignore_regs  = pset_new_ptr_default();
	env->keep_map     = pmap_create();
	env->dce_survivor = new_survive_dce();

	sp_req      = OALLOCZ(obst, arch_register_req_t);
	env->sp_req = sp_req;

	sp_req->type = arch_register_req_type_limited
	             | arch_register_req_type_produces_sp;
	sp_req->cls  = arch_register_get_class(arch_env->sp);

	limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
	rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
	sp_req->limited = limited_bitset;
	if (arch_env->sp->type & arch_register_type_ignore) {
		sp_req->type |= arch_register_req_type_ignore;
	}

	env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);

	env->calls = NEW_ARR_F(ir_node*, 0);

	irg_walk_graph(irg, fix_pic_symconsts, NULL, env);

	/* Lower all call nodes in the IRG. */

	/*
	   Beware: init backend abi call object after processing calls,
	   otherwise some information might not be available yet.
	*/
	env->cb = env->call->cb->init(env->call, arch_env, irg);

	/* Process the IRG */

	/* fix call inputs for state registers */
	fix_call_state_inputs(env);

	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	env->keep_map = NULL;

	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);

	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));

	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	foreach_pmap(env->regs, ent) {
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	}

	env->call->cb->done(env->cb);

	return env;
}
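
/** Frees the resources allocated by be_abi_introduce(). */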
void be_abi_free(be_abi_irg_t *env)
{
	be_abi_call_free(env->call);
	free_survive_dce(env->dce_survivor);
	del_pset(env->ignore_regs);
	pmap_destroy(env->regs);
	free(env);
}
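
/**
 * Marks all registers of the given class that the ABI wants to be ignored
 * in the bitset bs.
 */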
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
	arch_register_t *reg;

	for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
		if (reg->reg_class == cls)
			bitset_set(bs, reg->index);
}
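
/**
 * Sets the bits of all allocatable (non-ignore) registers of the given
 * register class in the raw bitset.
 */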
void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
	unsigned         i;
	arch_register_t *reg;

	for (i = 0; i < cls->n_regs; ++i) {
		if (arch_register_type_is(&cls->regs[i], ignore))
			continue;
		rbitset_set(raw_bitset, i);
	}

	for (reg = pset_first(abi->ignore_regs); reg != NULL;
	     reg = pset_next(abi->ignore_regs)) {
		if (reg->reg_class != cls)
			continue;
		rbitset_clear(raw_bitset, reg->index);
	}
}
/* Returns the stack layout from an ABI environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
{
	return &abi->frame;
}
/*
  _____ _       ____  _             _
 |  ___(_)_  __/ ___|| |_ __ _  ___| | __
 | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
 |  _| | |>  <   ___) | || (_| | (__| <
 |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\
*/
typedef ir_node **node_array;

typedef struct fix_stack_walker_env_t {
	node_array sp_nodes;
} fix_stack_walker_env_t;
/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
	ir_node                   *insn = node;
	fix_stack_walker_env_t    *env  = data;
	const arch_register_req_t *req;

	if (is_Proj(node)) {
		insn = get_Proj_pred(node);
	}

	if (arch_irn_get_n_outs(insn) == 0)
		return;
	if (get_irn_mode(node) == mode_T)
		return;

	req = arch_get_register_req_out(node);
	if (! (req->type & arch_register_req_type_produces_sp))
		return;

	ARR_APP1(ir_node*, env->sp_nodes, node);
}
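
/**
 * Collects all nodes that produce the stack pointer, rewires them into SSA
 * form and pins the resulting Phis to the stack pointer register.
 */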
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
{
	be_ssa_construction_env_t senv;
	int i, len;
	ir_node **phis;
	ir_graph *irg = env->irg;
	be_lv_t  *lv  = be_get_irg_liveness(irg);
	fix_stack_walker_env_t walker_env;

	walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

	irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);

	/* nothing to be done if we didn't find any node, in fact we mustn't
	 * continue, as for endless loops incsp might have had no users and is bad
	 * now.
	 */
	len = ARR_LEN(walker_env.sp_nodes);
	if (len == 0) {
		DEL_ARR_F(walker_env.sp_nodes);
		return;
	}

	be_ssa_construction_init(&senv, irg);
	be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
	                               ARR_LEN(walker_env.sp_nodes));
	be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
	                                    ARR_LEN(walker_env.sp_nodes));

	if (lv != NULL) {
		len = ARR_LEN(walker_env.sp_nodes);
		for (i = 0; i < len; ++i) {
			be_liveness_update(lv, walker_env.sp_nodes[i]);
		}
		be_ssa_construction_update_liveness_phis(&senv, lv);
	}

	phis = be_ssa_construction_get_new_phis(&senv);

	/* set register requirements for stack phis */
	len = ARR_LEN(phis);
	for (i = 0; i < len; ++i) {
		ir_node *phi = phis[i];
		be_set_phi_reg_req(phi, env->sp_req);
		arch_set_irn_register(phi, env->arch_env->sp);
	}
	be_ssa_construction_destroy(&senv);

	DEL_ARR_F(walker_env.sp_nodes);
}
/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param env        the abi environment
 * @param bl         the block to process
 * @param real_bias  the bias value
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
	int      omit_fp     = env->call->flags.bits.try_omit_fp;
	int      wanted_bias = real_bias;
	ir_node *irn;

	sched_foreach(bl, irn) {
		int ofs;

		/*
		   Check if the node relates to an entity on the stack frame.
		   If so, set the true offset (including the bias) for that
		   node.
		 */
		ir_entity *ent = arch_get_frame_entity(irn);
		if (ent != NULL) {
			int bias   = omit_fp ? real_bias : 0;
			int offset = get_stack_entity_offset(&env->frame, ent, bias);
			arch_set_frame_offset(irn, offset);
			DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
			     ent, offset, bias));
		}

		/*
		 * If the node modifies the stack pointer by a constant offset,
		 * record that in the bias.
		 */
		ofs = arch_get_sp_bias(irn);

		if (be_is_IncSP(irn)) {
			/* fill in real stack frame size */
			if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
				ir_type *frame_type = get_irg_frame_type(env->irg);
				ofs = (int) get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
				ir_type *frame_type = get_irg_frame_type(env->irg);
				ofs = - (int)get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else if (be_get_IncSP_align(irn)) {
				/* patch IncSP to produce an aligned stack pointer */
				ir_type *between_type = env->frame.between_type;
				int      between_size = get_type_size_bytes(between_type);
				int      alignment    = 1 << env->arch_env->stack_alignment;
				int      delta        = (real_bias + ofs + between_size) & (alignment - 1);

				if (delta > 0) {
					be_set_IncSP_offset(irn, ofs + alignment - delta);
					real_bias += alignment - delta;
				}
			} else {
				/* adjust so real_bias corresponds with wanted_bias */
				int delta = wanted_bias - real_bias;
				if (delta != 0) {
					be_set_IncSP_offset(irn, ofs + delta);
					real_bias += delta;
				}
			}
		}

		real_bias   += ofs;
		wanted_bias += ofs;
	}

	assert(real_bias == wanted_bias);
	return real_bias;
}
/** A helper struct for the bias walker. */
struct bias_walk {
	be_abi_irg_t *env;              /**< The ABI irg environment. */
	int           start_block_bias; /**< The bias at the end of the start block. */
	int           between_size;     /**< Size of the between type. */
	ir_node      *start_block;      /**< The start block of the current graph. */
};
/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
	struct bias_walk *bw = data;
	if (bl != bw->start_block) {
		process_stack_bias(bw->env, bl, bw->start_block_bias);
	}
}
/**
 * Walker: finally lower all Sels of outer frame or parameter entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx)
{
	be_abi_irg_t *env = ctx;
	ir_node      *ptr;
	ir_entity    *ent;
	ir_type      *owner;
	if (! is_Sel(sel))
		return;
	ent   = get_Sel_entity(sel);
	owner = get_entity_owner(ent);
	ptr   = get_Sel_ptr(sel);

	if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
		/* found access to outer frame or arguments */
		int offset = get_stack_entity_offset(&env->frame, ent, 0);

		if (offset != 0) {
			ir_node  *bl        = get_nodes_block(sel);
			dbg_info *dbgi      = get_irn_dbg_info(sel);
			ir_mode  *mode      = get_irn_mode(sel);
			ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);
			ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
		}
		exchange(sel, ptr);
	}
}
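
/**
 * Computes the initial frame offsets and fixes the stack bias (frame entity
 * offsets and IncSP offsets) in all blocks of the graph.
 */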
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
	ir_graph *irg = env->irg;
	ir_type  *frame_tp;
	int       i;
	struct bias_walk bw;

	stack_frame_compute_initial_offset(&env->frame);
	// stack_layout_dump(stdout, frame);

	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
	bw.between_size     = get_type_size_bytes(env->frame.between_type);

	/* fix the bias in all other blocks */
	bw.env = env;
	bw.start_block = get_irg_start_block(irg);
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

	/* now fix inner functions: these still have Sel nodes to the outer
	   frame and to parameter entities */
	frame_tp = get_irg_frame_type(irg);
	for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
		ir_entity *ent = get_class_member(frame_tp, i);
		ir_graph  *irg = get_entity_irg(ent);

		if (irg != NULL) {
			irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
		}
	}
}
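
/** Returns the node associated with the given callee-save register. */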
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, callee_save));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}
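
/** Returns the node associated with the given ignore register. */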
ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, ignore));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}
/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi)
{
	return abi->call->flags.bits.try_omit_fp;
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
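/** Module constructor: registers the firm.be.abi debug module. */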
void be_init_abi(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}