2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
32 #include "irgraph_t.h"
35 #include "iredges_t.h"
38 #include "irprintf_t.h"
45 #include "raw_bitset.h"
56 #include "bessaconstr.h"
59 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
61 typedef struct _be_abi_call_arg_t {
62 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
63 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
64 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
65 unsigned callee : 1; /**< 1: someone called us. 0: We call another function */
68 const arch_register_t *reg;
71 unsigned alignment; /**< stack alignment */
72 unsigned space_before; /**< allocate space before */
73 unsigned space_after; /**< allocate space after */
76 struct _be_abi_call_t {
77 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
78 int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
79 const be_abi_callbacks_t *cb;
80 ir_type *between_type;
82 const arch_register_class_t *cls_addr; /**< register class of the call address */
86 * The ABI information for the current graph.
88 struct _be_abi_irg_t {
89 survive_dce_t *dce_survivor;
91 be_abi_call_t *call; /**< The ABI call information. */
93 ir_node *init_sp; /**< The node representing the stack pointer
94 at the start of the function. */
96 ir_node *start; /**< The be_Start params node. */
97 pmap *regs; /**< A map of all callee-save and ignore regs to
98 their Projs to the RegParams node. */
100 int start_block_bias; /**< The stack bias at the end of the start block. */
102 void *cb; /**< ABI Callback self pointer. */
104 pmap *keep_map; /**< mapping blocks to keep nodes. */
105 pset *ignore_regs; /**< Additional registers which shall be ignored. */
107 ir_node **calls; /**< flexible array containing all be_Call nodes */
109 arch_register_req_t *sp_req;
112 static heights_t *ir_heights;
114 /** Flag: if set, try to omit the frame pointer in all routines. */
115 static int be_omit_fp = 1;
117 /** Flag: if set, try to omit the frame pointer in leaf routines only. */
118 static int be_omit_leaf_fp = 1;
121 _ ____ ___ ____ _ _ _ _
122 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
123 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
124 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
125 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
127 These callbacks are used by the backend to set the parameters
128 for a specific call type.
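A backend typically fills in one be_abi_call_t from its arch_env_get_call_abi()
hook. Illustrative sketch (not taken from this file; flags, my_callbacks and
my_reg0 are backend-specific placeholders):

    be_abi_call_set_flags(call, flags, &my_callbacks);
    be_abi_call_param_reg(call, 0, my_reg0, ABI_CONTEXT_CALLEE | ABI_CONTEXT_CALLER);
    be_abi_call_param_stack(call, 1, mode_Iu, 4, 0, 0, ABI_CONTEXT_CALLEE | ABI_CONTEXT_CALLER);
    be_abi_call_res_reg(call, 0, my_reg0, ABI_CONTEXT_CALLEE | ABI_CONTEXT_CALLER);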
132 * Set compare function: compares two ABI call object arguments.
134 static int cmp_call_arg(const void *a, const void *b, size_t n)
136 const be_abi_call_arg_t *p = a, *q = b;
138 return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
142 * Get an ABI call object argument.
144 * @param call the abi call
145 * @param is_res true for call results, false for call arguments
146 * @param pos position of the argument
147 * @param callee context type - if we are callee or caller
149 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
151 be_abi_call_arg_t arg;
154 memset(&arg, 0, sizeof(arg));
159 hash = is_res * 128 + pos;
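	/* the hash only folds is_res and pos; the callee flag is part of the
	 * key via cmp_call_arg(), so the caller and callee variants of the same
	 * argument share a bucket but remain distinct set entries */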
161 return set_find(call->params, &arg, sizeof(arg), hash);
165 * Set an ABI call object argument.
167 static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
169 unsigned hash = arg->is_res * 128 + arg->pos;
170 if (context & ABI_CONTEXT_CALLEE) {
172 set_insert(call->params, arg, sizeof(*arg), hash);
174 if (context & ABI_CONTEXT_CALLER) {
176 set_insert(call->params, arg, sizeof(*arg), hash);
180 /* Set the flags for a call. */
181 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
187 /* Sets the number of bytes the stack frame is shrunk by the callee on return */
188 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
194 /* Set register class for call address */
195 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
197 call->cls_addr = cls;
201 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
202 ir_mode *load_mode, unsigned alignment,
203 unsigned space_before, unsigned space_after,
204 be_abi_context_t context)
206 be_abi_call_arg_t arg;
207 memset(&arg, 0, sizeof(arg));
208 assert(alignment > 0 && "Alignment must be greater than 0");
210 arg.load_mode = load_mode;
211 arg.alignment = alignment;
212 arg.space_before = space_before;
213 arg.space_after = space_after;
217 remember_call_arg(&arg, call, context);
220 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
222 be_abi_call_arg_t arg;
223 memset(&arg, 0, sizeof(arg));
230 remember_call_arg(&arg, call, context);
233 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
235 be_abi_call_arg_t arg;
236 memset(&arg, 0, sizeof(arg));
243 remember_call_arg(&arg, call, context);
246 /* Get the flags of an ABI call object. */
247 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
253 * Constructor for a new ABI call object.
255 * @param cls_addr register class of the call address
257 * @return the new ABI call object
259 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
261 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
264 call->params = new_set(cmp_call_arg, 16);
266 call->cls_addr = cls_addr;
268 call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
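	/* optimistic default: the flag is cleared again later whenever the graph
	 * turns out to need a frame pointer (dynamic stack Allocs/Frees, calls,
	 * return-address builtins), see the try_omit_fp = 0 assignments below */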
274 * Destructor for an ABI call object.
276 static void be_abi_call_free(be_abi_call_t *call)
278 del_set(call->params);
284 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
285 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
286 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
287 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
290 Handling of the stack frame. It is composed of three types:
291 1) The type of the arguments which are pushed on the stack.
292 2) The "between type" which consists of stuff the call of the
293 function pushes on the stack (like the return address and
294 the old base pointer for ia32).
295 3) The Firm frame type which consists of all local variables
299 int be_get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
302 ir_type *t = get_entity_owner(ent);
303 int ofs = get_entity_offset(ent);
307 /* Find the type the entity is contained in. */
308 for (index = 0; index < N_FRAME_TYPES; ++index) {
309 if (frame->order[index] == t)
311 /* Add the sizes of all frame regions below the entity's region to the entity's offset */
312 ofs += get_type_size_bytes(frame->order[index]);
315 /* correct the offset by the initial position of the frame pointer */
316 ofs -= frame->initial_offset;
318 /* correct the offset with the current bias. */
325 * Retrieve the entity with given offset from a frame type.
327 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
331 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
332 ir_entity *ent = get_compound_member(t, i);
333 if (get_entity_offset(ent) == offset)
340 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
342 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
343 ir_entity *ent = search_ent_with_offset(base, 0);
346 frame->initial_offset
347 = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
349 frame->initial_offset = be_get_stack_entity_offset(frame, ent, 0);
352 return frame->initial_offset;
356 * Initializes the frame layout from parts
358 * @param frame the stack layout that will be initialized
359 * @param args the stack argument layout type
360 * @param between the between layout type
361 * @param locals the method frame type
362 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
363 * @param param_map an array mapping method argument positions to the stack argument type
365 * @return the initialized stack layout
367 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
368 ir_type *between, ir_type *locals, int stack_dir,
369 ir_entity *param_map[])
371 frame->arg_type = args;
372 frame->between_type = between;
373 frame->frame_type = locals;
374 frame->initial_offset = 0;
375 frame->initial_bias = 0;
376 frame->stack_dir = stack_dir;
377 frame->order[1] = between;
378 frame->param_map = param_map;
381 frame->order[0] = args;
382 frame->order[2] = locals;
384 /* typical decreasing stack: locals have the
385 * lowest addresses, arguments the highest */
386 frame->order[0] = locals;
387 frame->order[2] = args;
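		/* resulting region order for the typical decreasing stack
		 * (order[0] at the lowest addresses):
		 *   order[0] = locals   (the Firm frame type)
		 *   order[1] = between  (return address, old base pointer, ...)
		 *   order[2] = args     (stack argument type)
		 */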
399 Adjustment of the calls inside a graph.
404 * Transform a call node into a be_Call node.
406 * @param env The ABI environment for the current irg.
407 * @param irn The call node.
408 * @param curr_sp The stack pointer node to use.
409 * @return The stack pointer after the call.
411 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
413 ir_graph *irg = get_irn_irg(irn);
414 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
415 ir_type *call_tp = get_Call_type(irn);
416 ir_node *call_ptr = get_Call_ptr(irn);
417 int n_params = get_method_n_params(call_tp);
418 ir_node *curr_mem = get_Call_mem(irn);
419 ir_node *bl = get_nodes_block(irn);
421 int stack_dir = arch_env->stack_dir;
422 const arch_register_t *sp = arch_env->sp;
423 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
424 ir_mode *mach_mode = sp->reg_class->mode;
425 int no_alloc = call->flags.bits.frame_is_setup_on_call;
426 int n_res = get_method_n_ress(call_tp);
427 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
429 ir_node *res_proj = NULL;
430 int n_reg_params = 0;
431 int n_stack_params = 0;
434 const arch_register_t **states = NEW_ARR_F(const arch_register_t*, 0);
435 const arch_register_t **destroyed_regs = NEW_ARR_F(const arch_register_t*, 0);
439 int n_reg_results = 0;
440 const ir_edge_t *edge;
442 int *stack_param_idx;
443 int i, n, destroy_all_regs;
446 /* Let the isa fill out the abi description for that call node. */
447 arch_env_get_call_abi(arch_env, call_tp, call);
449 /* Insert code to put the stack arguments on the stack. */
450 assert(get_Call_n_params(irn) == n_params);
451 stack_param_idx = ALLOCAN(int, n_params);
452 for (i = 0; i < n_params; ++i) {
453 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
456 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
458 stack_size += round_up2(arg->space_before, arg->alignment);
459 stack_size += round_up2(arg_size, arg->alignment);
460 stack_size += round_up2(arg->space_after, arg->alignment);
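			/* e.g. a 6 byte argument with 4 byte alignment and no extra
			 * space contributes round_up2(6, 4) == 8 bytes to stack_size */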
462 stack_param_idx[n_stack_params++] = i;
466 /* Collect all arguments which are passed in registers. */
467 reg_param_idxs = ALLOCAN(int, n_params);
468 for (i = 0; i < n_params; ++i) {
469 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
470 if (arg && arg->in_reg) {
471 reg_param_idxs[n_reg_params++] = i;
476 * If the stack is decreasing, we are not storing the arguments
477 * sequentially, and nobody else has allocated the call frame,
478 * then we allocate as much space on the stack as all parameters need,
479 * by moving the stack pointer along the stack's direction.
481 * Note: we also have to do this for stack_size == 0, because we may have
482 * to adjust stack alignment for the call.
484 if (stack_dir < 0 && !do_seq && !no_alloc) {
485 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
488 dbgi = get_irn_dbg_info(irn);
489 /* If there are some parameters which shall be passed on the stack. */
490 if (n_stack_params > 0) {
492 ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
496 * Reverse the list of stack parameters if call arguments are passed from left to right.
497 * We must reverse them again if they are pushed (not stored) and the stack
498 * direction is downwards.
500 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
501 for (i = 0; i < n_stack_params >> 1; ++i) {
502 int other = n_stack_params - i - 1;
503 int tmp = stack_param_idx[i];
504 stack_param_idx[i] = stack_param_idx[other];
505 stack_param_idx[other] = tmp;
509 curr_mem = get_Call_mem(irn);
511 in[n_in++] = curr_mem;
514 for (i = 0; i < n_stack_params; ++i) {
515 int p = stack_param_idx[i];
516 be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
517 ir_node *param = get_Call_param(irn, p);
518 ir_node *addr = curr_sp;
520 ir_type *param_type = get_method_param_type(call_tp, p);
521 int param_size = get_type_size_bytes(param_type) + arg->space_after;
524 * If we build the arguments sequentially,
525 * the stack pointer for the next argument must be incremented,
526 * and the memory value propagated.
530 addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
531 param_size + arg->space_before, 0);
532 add_irn_dep(curr_sp, curr_mem);
534 curr_ofs += arg->space_before;
535 curr_ofs = round_up2(curr_ofs, arg->alignment);
537 /* Make the expression to compute the argument's offset. */
539 ir_mode *constmode = mach_mode;
540 if (mode_is_reference(mach_mode)) {
543 addr = new_r_Const_long(irg, constmode, curr_ofs);
544 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
548 /* Insert a store for primitive arguments. */
549 if (is_atomic_type(param_type)) {
551 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
552 store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
553 mem = new_r_Proj(store, mode_M, pn_Store_M);
555 /* Make a mem copy for compound arguments. */
558 assert(mode_is_reference(get_irn_mode(param)));
559 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
560 mem = new_r_Proj(copy, mode_M, pn_CopyB_M);
563 curr_ofs += param_size;
571 /* We only need the Sync if we didn't build the stores sequentially. */
573 if (n_stack_params >= 1) {
574 curr_mem = new_r_Sync(bl, n_in, in);
576 curr_mem = get_Call_mem(irn);
581 /* check for the return_twice property */
582 destroy_all_regs = 0;
583 if (is_SymConst_addr_ent(call_ptr)) {
584 ir_entity *ent = get_SymConst_entity(call_ptr);
586 if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
587 destroy_all_regs = 1;
589 ir_type *call_tp = get_Call_type(irn);
591 if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
592 destroy_all_regs = 1;
595 /* Put caller-save registers into the destroyed set and state registers into the states
597 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
599 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
600 for (j = 0; j < cls->n_regs; ++j) {
601 const arch_register_t *reg = arch_register_for_index(cls, j);
603 /* even if destroyed all is specified, neither SP nor FP are
604 * destroyed (else bad things will happen) */
605 if (reg == arch_env->sp || reg == arch_env->bp)
608 if (arch_register_type_is(reg, state)) {
609 ARR_APP1(const arch_register_t*, destroyed_regs, reg);
610 ARR_APP1(const arch_register_t*, states, reg);
611 /* we're already in the destroyed set so no need for further
615 if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
616 if (! arch_register_type_is(reg, ignore)) {
617 ARR_APP1(const arch_register_t*, destroyed_regs, reg);
623 /* search the largest result proj number */
624 res_projs = ALLOCANZ(ir_node*, n_res);
626 foreach_out_edge(irn, edge) {
627 const ir_edge_t *res_edge;
628 ir_node *irn = get_edge_src_irn(edge);
630 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
633 foreach_out_edge(irn, res_edge) {
635 ir_node *res = get_edge_src_irn(res_edge);
637 assert(is_Proj(res));
639 proj = get_Proj_proj(res);
640 assert(proj < n_res);
641 assert(res_projs[proj] == NULL);
642 res_projs[proj] = res;
648 /** TODO: this is not correct for cases where return values are passed
649 * on the stack, but no known ABI does this currently...
651 n_reg_results = n_res;
654 in = ALLOCAN(ir_node*, n_reg_params + ARR_LEN(states));
656 /* make the back end call node and set its register requirements. */
657 for (i = 0; i < n_reg_params; ++i) {
658 in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
661 /* add state registers ins */
662 for (i = 0; i < ARR_LEN(states); ++i) {
663 const arch_register_t *reg = states[i];
664 const arch_register_class_t *cls = arch_register_get_class(reg);
666 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
667 ir_fprintf(stderr, "Adding %+F\n", regnode);
669 ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
670 in[n_ins++] = regnode;
672 assert(n_ins == (int) (n_reg_params + ARR_LEN(states)));
674 /* ins collected, build the call */
675 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
677 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
678 n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
679 n_ins, in, get_Call_type(irn));
680 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
683 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
684 n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
685 n_ins, in, get_Call_type(irn));
687 be_Call_set_pop(low_call, call->pop);
689 /* put the call into the list of all calls for later processing */
690 ARR_APP1(ir_node *, env->calls, low_call);
692 /* create new stack pointer */
693 curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
694 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
695 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
696 arch_set_irn_register(curr_sp, sp);
698 /* now handle results */
699 for (i = 0; i < n_res; ++i) {
701 ir_node *proj = res_projs[i];
702 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
704 /* return values on the stack are not supported yet */
708 shift the proj number to the right, since we will drop the
709 unspeakable Proj_T from the Call. Therefore, all real argument
710 Proj numbers must be increased by pn_be_Call_first_res
712 pn = i + pn_be_Call_first_res;
715 ir_type *res_type = get_method_res_type(call_tp, i);
716 ir_mode *mode = get_type_mode(res_type);
717 proj = new_r_Proj(low_call, mode, pn);
720 set_Proj_pred(proj, low_call);
721 set_Proj_proj(proj, pn);
725 /* remove register from destroyed regs */
727 int n = ARR_LEN(destroyed_regs);
728 for (j = 0; j < n; ++j) {
729 if (destroyed_regs[j] == arg->reg) {
730 destroyed_regs[j] = destroyed_regs[n-1];
731 ARR_SHRINKLEN(destroyed_regs,n-1);
739 Set the register class of the call address to
740 the backend provided class (default: stack pointer class)
742 be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
744 DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
746 /* Set the register classes and constraints of the Call parameters. */
747 for (i = 0; i < n_reg_params; ++i) {
748 int index = reg_param_idxs[i];
749 be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
750 assert(arg->reg != NULL);
752 be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
756 /* Set the register constraints of the results. */
757 for (i = 0; i < n_res; ++i) {
758 ir_node *proj = res_projs[i];
759 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
760 int pn = get_Proj_proj(proj);
763 be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
764 arch_set_irn_register(proj, arg->reg);
766 exchange(irn, low_call);
768 /* kill the ProjT node */
769 if (res_proj != NULL) {
773 /* Make additional projs for the caller save registers
774 and the Keep node which keeps them alive. */
779 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
782 n_ins = ARR_LEN(destroyed_regs) + n_reg_results + 1;
783 in = ALLOCAN(ir_node *, n_ins);
785 /* also keep the stack pointer */
786 set_irn_link(curr_sp, (void*) sp);
789 for (i = 0; i < ARR_LEN(destroyed_regs); ++i) {
790 const arch_register_t *reg = destroyed_regs[i];
791 ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
793 /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
794 be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
795 arch_set_irn_register(proj, reg);
797 set_irn_link(proj, (void*) reg);
802 for (i = 0; i < n_reg_results; ++i) {
803 ir_node *proj = res_projs[i];
804 const arch_register_t *reg = arch_get_irn_register(proj);
805 set_irn_link(proj, (void*) reg);
810 /* create the Keep for the caller save registers */
811 keep = be_new_Keep(bl, n, in);
812 for (i = 0; i < n; ++i) {
813 const arch_register_t *reg = get_irn_link(in[i]);
814 be_node_set_reg_class_in(keep, i, reg->reg_class);
818 /* Clean up the stack. */
819 assert(stack_size >= call->pop);
820 stack_size -= call->pop;
822 if (stack_size > 0) {
823 ir_node *mem_proj = NULL;
825 foreach_out_edge(low_call, edge) {
826 ir_node *irn = get_edge_src_irn(edge);
827 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
834 mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
835 keep_alive(mem_proj);
838 /* Clean up the stack frame or revert alignment fixes if we allocated it */
840 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
843 be_abi_call_free(call);
846 DEL_ARR_F(destroyed_regs);
852 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
854 * @param alignment the minimum stack alignment
855 * @param size the node containing the non-aligned size
856 * @param block the block in which the new nodes are allocated
857 * @param dbg debug info for new nodes
859 * @return a node representing the aligned size
861 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
862 ir_node *block, dbg_info *dbg)
864 if (stack_alignment > 1) {
870 assert(is_po2(stack_alignment));
872 mode = get_irn_mode(size);
873 tv = new_tarval_from_long(stack_alignment-1, mode);
874 irg = get_Block_irg(block);
875 mask = new_r_Const(irg, tv);
876 size = new_rd_Add(dbg, block, size, mask, mode);
878 tv = new_tarval_from_long(-(long)stack_alignment, mode);
879 mask = new_r_Const(irg, tv);
880 size = new_rd_And(dbg, block, size, mask, mode);
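		/* the usual round-up-to-a-power-of-two pattern: with
		 * stack_alignment == 16 a size of 13 becomes (13 + 15) & ~15 == 16,
		 * while already aligned sizes stay unchanged */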
886 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
888 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
890 ir_node *block = get_nodes_block(alloc);
891 ir_graph *irg = get_Block_irg(block);
892 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
893 ir_node *alloc_mem = NULL;
894 ir_node *alloc_res = NULL;
895 ir_type *type = get_Alloc_type(alloc);
898 const ir_edge_t *edge;
903 unsigned stack_alignment;
905 /* all non-stack Alloc nodes should already be lowered before the backend */
906 assert(get_Alloc_where(alloc) == stack_alloc);
908 foreach_out_edge(alloc, edge) {
909 ir_node *irn = get_edge_src_irn(edge);
911 assert(is_Proj(irn));
912 switch (get_Proj_proj(irn)) {
924 /* Beware: Alloc nodes without a result can currently occur; only escape
925 analysis removes them, and that phase runs only for object-oriented
926 source. We kill such an Alloc here. */
927 if (alloc_res == NULL && alloc_mem) {
928 exchange(alloc_mem, get_Alloc_mem(alloc));
932 dbg = get_irn_dbg_info(alloc);
933 count = get_Alloc_count(alloc);
935 /* we might need to multiply the count with the element size */
936 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
937 ir_mode *mode = get_irn_mode(count);
938 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
940 ir_node *cnst = new_rd_Const(dbg, irg, tv);
941 size = new_rd_Mul(dbg, block, count, cnst, mode);
946 /* The stack pointer will be modified in an unknown manner.
947 We cannot omit the frame pointer. */
948 env->call->flags.bits.try_omit_fp = 0;
950 stack_alignment = 1 << arch_env->stack_alignment;
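	/* the backend reports its stack alignment as a power-of-two exponent,
	 * hence the shift to get the alignment in bytes */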
951 size = adjust_alloc_size(stack_alignment, size, block, dbg);
952 new_alloc = be_new_AddSP(arch_env->sp, block, curr_sp, size);
953 set_irn_dbg_info(new_alloc, dbg);
955 if (alloc_mem != NULL) {
959 addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);
961 /* We need to sync the output mem of the AddSP with the input mem
962 edge into the alloc node. */
963 ins[0] = get_Alloc_mem(alloc);
965 sync = new_r_Sync(block, 2, ins);
967 exchange(alloc_mem, sync);
970 exchange(alloc, new_alloc);
972 /* fix projnum of alloca res */
973 set_Proj_proj(alloc_res, pn_be_AddSP_res);
975 curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
982 * The Free is transformed into a back end free node and connected to the stack nodes.
984 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
986 ir_node *block = get_nodes_block(free);
987 ir_graph *irg = get_irn_irg(free);
988 ir_type *type = get_Free_type(free);
989 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
990 ir_mode *sp_mode = arch_env->sp->reg_class->mode;
991 dbg_info *dbg = get_irn_dbg_info(free);
992 ir_node *subsp, *mem, *res, *size, *sync;
994 unsigned stack_alignment;
996 /* all non-stack-alloc Free nodes should already be lowered before the
998 assert(get_Free_where(free) == stack_alloc);
1000 /* we might need to multiply the size with the element size */
1001 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
1002 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
1003 ir_node *cnst = new_rd_Const(dbg, irg, tv);
1004 ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
1008 size = get_Free_size(free);
1011 stack_alignment = 1 << arch_env->stack_alignment;
1012 size = adjust_alloc_size(stack_alignment, size, block, dbg);
1014 /* The stack pointer will be modified in an unknown manner.
1015 We cannot omit the frame pointer. */
1016 env->call->flags.bits.try_omit_fp = 0;
1017 subsp = be_new_SubSP(arch_env->sp, block, curr_sp, size);
1018 set_irn_dbg_info(subsp, dbg);
1020 mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
1021 res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);
1023 /* we need to sync the memory */
1024 in[0] = get_Free_mem(free);
1026 sync = new_r_Sync(block, 2, in);
1029 /* and make the SubSP dependent on the former memory */
1029 add_irn_dep(subsp, get_Free_mem(free));
1032 exchange(free, sync);
1039 * Check if a node is somehow data dependent on another one.
1040 * Both nodes must be in the same basic block.
1041 * @param n1 The first node.
1042 * @param n2 The second node.
1043 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
1045 static int dependent_on(ir_node *n1, ir_node *n2)
1047 assert(get_nodes_block(n1) == get_nodes_block(n2));
1049 return heights_reachable_in_block(ir_heights, n1, n2);
1052 static int cmp_call_dependency(const void *c1, const void *c2)
1054 ir_node *n1 = *(ir_node **) c1;
1055 ir_node *n2 = *(ir_node **) c2;
1058 Classical qsort() comparison function behavior:
1059 0 if both elements are equal
1060 1 if second is "smaller" than first
1061 -1 if first is "smaller" than second
1063 if (dependent_on(n1, n2))
1066 if (dependent_on(n2, n1))
1069 /* The nodes have no depth order, but we need a total order because qsort()
1071 return get_irn_idx(n1) - get_irn_idx(n2);
1075 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
1076 * Clears the irg_is_leaf flag if a Call is detected.
1078 static void link_ops_in_block_walker(ir_node *irn, void *data)
1080 be_abi_irg_t *env = data;
1081 ir_opcode code = get_irn_opcode(irn);
1083 if (code == iro_Call ||
1084 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1085 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1086 ir_node *bl = get_nodes_block(irn);
1087 void *save = get_irn_link(bl);
1089 if (code == iro_Call)
1090 env->call->flags.bits.irg_is_leaf = 0;
1092 set_irn_link(irn, save);
1093 set_irn_link(bl, irn);
1096 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
1097 ir_node *param = get_Builtin_param(irn, 0);
1098 tarval *tv = get_Const_tarval(param);
1099 unsigned long value = get_tarval_long(tv);
1100 /* use ebp, so the climbframe algo works... */
1102 env->call->flags.bits.try_omit_fp = 0;
1109 * Process all Call/Alloc/Free nodes inside a basic block.
1110 * Note that the link field of the block must contain a linked list of all
1111 * Call nodes inside the Block. We first order this list according to data dependency
1112 * and then connect the calls together.
1114 static void process_ops_in_block(ir_node *bl, void *data)
1116 be_abi_irg_t *env = data;
1117 ir_node *curr_sp = env->init_sp;
1124 for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {
1128 nodes = ALLOCAN(ir_node*, n_nodes);
1129 for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {
1133 /* If there were call nodes in the block. */
1138 /* order the call nodes according to data dependency */
1139 qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
1141 for (i = n_nodes - 1; i >= 0; --i) {
1142 ir_node *irn = nodes[i];
1144 DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1145 switch (get_irn_opcode(irn)) {
1148 /* The stack pointer will be modified due to a call. */
1149 env->call->flags.bits.try_omit_fp = 0;
1151 curr_sp = adjust_call(env, irn, curr_sp);
1154 if (get_Alloc_where(irn) == stack_alloc)
1155 curr_sp = adjust_alloc(env, irn, curr_sp);
1158 if (get_Free_where(irn) == stack_alloc)
1159 curr_sp = adjust_free(env, irn, curr_sp);
1162 panic("invalid call");
1166 /* Keep the last stack state in the block by tying it to a Keep node;
1167 * the Proj from calls is already kept */
1168 if (curr_sp != env->init_sp &&
1169 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1171 keep = be_new_Keep(bl, 1, nodes);
1172 pmap_insert(env->keep_map, bl, keep);
1176 set_irn_link(bl, curr_sp);
1180 * Adjust all call nodes in the graph to the ABI conventions.
1182 static void process_calls(ir_graph *irg)
1184 be_abi_irg_t *abi = be_get_irg_abi(irg);
1186 abi->call->flags.bits.irg_is_leaf = 1;
1187 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi);
1189 ir_heights = heights_new(irg);
1190 irg_block_walk_graph(irg, NULL, process_ops_in_block, abi);
1191 heights_free(ir_heights);
1195 * Computes the stack argument layout type.
1196 * Changes a possibly allocated value param type by moving
1197 * entities to the stack layout type.
1199 * @param env the ABI environment
1200 * @param call the current call ABI
1201 * @param method_type the method type
1202 * @param val_param_tp the value parameter type, will be destroyed
1203 * @param param_map an array mapping method arguments to the stack layout type
1205 * @return the stack argument layout type
1207 static ir_type *compute_arg_type(be_abi_irg_t *env, ir_graph *irg,
1208 be_abi_call_t *call,
1209 ir_type *method_type, ir_type *val_param_tp,
1210 ir_entity ***param_map)
1212 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1213 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1214 int inc = arch_env->stack_dir * dir;
1215 int n = get_method_n_params(method_type);
1216 int curr = inc > 0 ? 0 : n - 1;
1217 struct obstack *obst = be_get_be_obst(irg);
1223 ident *id = get_entity_ident(get_irg_entity(irg));
1226 *param_map = map = OALLOCN(obst, ir_entity*, n);
1227 res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
1228 for (i = 0; i < n; ++i, curr += inc) {
1229 ir_type *param_type = get_method_param_type(method_type, curr);
1230 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);
1233 if (arg->on_stack) {
1234 if (val_param_tp != NULL) {
1235 /* the entity was already created, create a copy in the param type */
1236 ir_entity *val_ent = get_method_value_param_ent(method_type, i);
1237 arg->stack_ent = copy_entity_own(val_ent, res);
1238 set_entity_link(val_ent, arg->stack_ent);
1239 set_entity_link(arg->stack_ent, NULL);
1241 /* create a new entity */
1242 snprintf(buf, sizeof(buf), "param_%d", i);
1243 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1245 ofs += arg->space_before;
1246 ofs = round_up2(ofs, arg->alignment);
1247 set_entity_offset(arg->stack_ent, ofs);
1248 ofs += arg->space_after;
1249 ofs += get_type_size_bytes(param_type);
1250 map[i] = arg->stack_ent;
1253 set_type_size_bytes(res, ofs);
1254 set_type_state(res, layout_fixed);
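	/* e.g. two 4 byte stack parameters with 4 byte alignment and no
	 * space_before/space_after end up at offsets 0 and 4, giving an
	 * arg_type of size 8 */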
1259 const arch_register_t *reg;
1263 static int cmp_regs(const void *a, const void *b)
1265 const reg_node_map_t *p = a;
1266 const reg_node_map_t *q = b;
1268 if (p->reg->reg_class == q->reg->reg_class)
1269 return p->reg->index - q->reg->index;
1271 return p->reg->reg_class - q->reg->reg_class;
1274 static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
1277 int n = pmap_count(reg_map);
1280 foreach_pmap(reg_map, ent) {
1281 res[i].reg = ent->key;
1282 res[i].irn = ent->value;
1286 qsort(res, n, sizeof(res[0]), cmp_regs);
1290 * Creates a barrier.
1292 static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
1295 int n_regs = pmap_count(regs);
1301 in = ALLOCAN(ir_node*, n_regs+1);
1302 rm = ALLOCAN(reg_node_map_t, n_regs);
1303 reg_map_to_arr(rm, regs);
1304 for (n = 0; n < n_regs; ++n) {
1312 irn = be_new_Barrier(bl, n, in);
1314 for (n = 0; n < n_regs; ++n) {
1315 ir_node *pred = rm[n].irn;
1316 const arch_register_t *reg = rm[n].reg;
1317 arch_register_type_t add_type = 0;
1319 const backend_info_t *info;
1321 /* stupid workaround for now... as not all nodes report register
1323 info = be_get_info(skip_Proj(pred));
1324 if (info != NULL && info->out_infos != NULL) {
1325 const arch_register_req_t *ireq = arch_get_register_req_out(pred);
1326 if (ireq->type & arch_register_req_type_ignore)
1327 add_type |= arch_register_req_type_ignore;
1328 if (ireq->type & arch_register_req_type_produces_sp)
1329 add_type |= arch_register_req_type_produces_sp;
1332 proj = new_r_Proj(irn, get_irn_mode(pred), n);
1333 be_node_set_reg_class_in(irn, n, reg->reg_class);
1335 be_set_constr_single_reg_in(irn, n, reg, 0);
1336 be_set_constr_single_reg_out(irn, n, reg, add_type);
1337 arch_set_irn_register(proj, reg);
1339 pmap_insert(regs, (void *) reg, proj);
1343 *mem = new_r_Proj(irn, mode_M, n);
1350 * Creates a be_Return for a Return node.
1352 * @param env the abi environment
1353 * @param irn the Return node or NULL if there was none
1354 * @param bl the block where the be_Return should be placed
1355 * @param mem the current memory
1356 * @param n_res number of return results
1358 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
1359 ir_node *mem, int n_res)
1361 be_abi_call_t *call = env->call;
1362 ir_graph *irg = get_Block_irg(bl);
1363 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1365 pmap *reg_map = pmap_create();
1366 ir_node *keep = pmap_get(env->keep_map, bl);
1373 const arch_register_t **regs;
1377 Get the valid stack node in this block.
1378 If we had a call in that block, there is a Keep constructed by process_calls()
1379 which points to the last stack modification in that block; we'll use
1380 it then. Otherwise we use the stack pointer from the start block and let
1381 the SSA construction fix the usage.
1383 stack = be_abi_reg_map_get(env->regs, arch_env->sp);
1385 stack = get_irn_n(keep, 0);
1387 remove_End_keepalive(get_irg_end(irg), keep);
1390 /* Insert results for Return into the register map. */
1391 for (i = 0; i < n_res; ++i) {
1392 ir_node *res = get_Return_res(irn, i);
1393 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1394 assert(arg->in_reg && "return value must be passed in register");
1395 pmap_insert(reg_map, (void *) arg->reg, res);
1398 /* Add uses of the callee save registers. */
1399 foreach_pmap(env->regs, ent) {
1400 const arch_register_t *reg = ent->key;
1401 if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1402 pmap_insert(reg_map, ent->key, ent->value);
1405 be_abi_reg_map_set(reg_map, arch_env->sp, stack);
1407 /* Make the Epilogue node and call the arch's epilogue maker. */
1408 create_barrier(bl, &mem, reg_map, 1);
1409 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1412 Maximum size of the in array for Return nodes is
1413 return args + callee save/ignore registers + memory + stack pointer
1415 in_max = pmap_count(reg_map) + n_res + 2;
1417 in = ALLOCAN(ir_node*, in_max);
1418 regs = ALLOCAN(arch_register_t const*, in_max);
1421 in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
1423 regs[1] = arch_env->sp;
1426 /* clear the SP entry, since it has already been added to the in array. */
1427 pmap_insert(reg_map, (void *) arch_env->sp, NULL);
1428 for (i = 0; i < n_res; ++i) {
1429 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1431 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1432 regs[n++] = arg->reg;
1434 /* Clear the map entry to mark the register as processed. */
1435 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1438 /* grow the rest of the stuff. */
1439 foreach_pmap(reg_map, ent) {
1442 regs[n++] = ent->key;
1446 /* The in array for the new back end return is now ready. */
1448 dbgi = get_irn_dbg_info(irn);
1452 /* we have to pop the shadow parameter in case of struct returns */
1454 ret = be_new_Return(dbgi, irg, bl, n_res, pop, n, in);
1456 /* Set the register classes of the return's parameter accordingly. */
1457 for (i = 0; i < n; ++i) {
1458 if (regs[i] == NULL)
1461 be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
1464 /* Free the space of the Epilog's in array and the register <-> proj map. */
1465 pmap_destroy(reg_map);
1470 typedef struct ent_pos_pair ent_pos_pair;
1471 struct ent_pos_pair {
1472 ir_entity *ent; /**< a value param entity */
1473 int pos; /**< its parameter number */
1474 ent_pos_pair *next; /**< for linking */
1477 typedef struct lower_frame_sels_env_t {
1478 ent_pos_pair *value_param_list; /**< the list of all value param entities */
1479 ir_node *frame; /**< the current frame */
1480 const arch_register_class_t *sp_class; /**< register class of the stack pointer */
1481 const arch_register_class_t *link_class; /**< register class of the link pointer */
1482 ir_type *value_tp; /**< the value type if any */
1483 ir_type *frame_tp; /**< the frame type */
1484 int static_link_pos; /**< argument number of the hidden static link */
1485 } lower_frame_sels_env_t;
1488 * Return an entity from the backend for a value param entity.
1490 * @param ent a value param type entity
1491 * @param ctx context
1493 static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
1495 ir_entity *argument_ent = get_entity_link(ent);
1497 if (argument_ent == NULL) {
1498 /* we have NO argument entity yet: this is bad, as we will
1499 * need one for the backing store.
1502 ir_type *frame_tp = ctx->frame_tp;
1503 unsigned offset = get_type_size_bytes(frame_tp);
1504 ir_type *tp = get_entity_type(ent);
1505 unsigned align = get_type_alignment_bytes(tp);
1507 offset += align - 1;
1508 offset &= ~(align - 1);
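		/* round the current end of the frame up to the entity's alignment,
		 * e.g. an offset of 13 with align == 8 becomes 16 */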
1510 argument_ent = copy_entity_own(ent, frame_tp);
1512 /* must be automatic to set a fixed layout */
1513 set_entity_offset(argument_ent, offset);
1514 offset += get_type_size_bytes(tp);
1516 set_type_size_bytes(frame_tp, offset);
1517 set_entity_link(ent, argument_ent);
1519 return argument_ent;
1522 * Walker: Replaces Sels of frame type and
1523 * value param type entities by FrameAddress.
1524 * Links all used entities.
1526 static void lower_frame_sels_walker(ir_node *irn, void *data)
1528 lower_frame_sels_env_t *ctx = data;
1531 ir_node *ptr = get_Sel_ptr(irn);
1533 if (ptr == ctx->frame) {
1534 ir_entity *ent = get_Sel_entity(irn);
1535 ir_node *bl = get_nodes_block(irn);
1538 int is_value_param = 0;
1540 if (get_entity_owner(ent) == ctx->value_tp) {
1543 /* replace by its copy from the argument type */
1544 pos = get_struct_member_index(ctx->value_tp, ent);
1545 ent = get_argument_entity(ent, ctx);
1548 nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
1551 /* check if it's a param Sel and if we have not seen this entity before */
1552 if (is_value_param && get_entity_link(ent) == NULL) {
1558 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1560 set_entity_link(ent, ctx->value_param_list);
1567 * Check if a value parameter is transmitted in a register.
1568 * This might happen if the address is taken of a parameter which is
1569 * transmitted in registers.
1571 * Note that on some architectures this case must be handled specially
1572 * because the place of the backing store is determined by their ABI.
1574 * In the default case we move the entity to the frame type and create
1575 * a backing store into the first block.
1577 static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_graph *irg,
1578 ent_pos_pair *value_param_list)
1580 be_abi_call_t *call = env->call;
1581 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1582 ent_pos_pair *entry, *new_list;
1584 int i, n = ARR_LEN(value_param_list);
1587 for (i = 0; i < n; ++i) {
1588 int pos = value_param_list[i].pos;
1589 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);
1592 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
1593 value_param_list[i].next = new_list;
1594 new_list = &value_param_list[i];
1597 if (new_list != NULL) {
1598 /* ok, change the graph */
1599 ir_node *start_bl = get_irg_start_block(irg);
1600 ir_node *first_bl = get_first_block_succ(start_bl);
1601 ir_node *frame, *imem, *nmem, *store, *mem, *args;
1602 optimization_state_t state;
1605 assert(first_bl && first_bl != start_bl);
1606 /* we have already removed critical edges, so the following
1607 assertion should always hold. */
1608 assert(get_Block_n_cfgpreds(first_bl) == 1);
1610 /* now create backing stores */
1611 frame = get_irg_frame(irg);
1612 imem = get_irg_initial_mem(irg);
1614 save_optimization_state(&state);
1616 nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
1617 restore_optimization_state(&state);
1619 /* reroute all edges to the new memory source */
1620 edges_reroute(imem, nmem, irg);
1624 args = get_irg_args(irg);
1625 for (entry = new_list; entry != NULL; entry = entry->next) {
1627 ir_type *tp = get_entity_type(entry->ent);
1628 ir_mode *mode = get_type_mode(tp);
1631 /* address for the backing store */
1632 addr = be_new_FrameAddr(arch_env->sp->reg_class, first_bl, frame, entry->ent);
1635 mem = new_r_Proj(store, mode_M, pn_Store_M);
1637 /* the backing store itself */
1638 store = new_r_Store(first_bl, mem, addr,
1639 new_r_Proj(args, mode, i), 0);
1641 /* the new memory Proj gets the last Proj from store */
1642 set_Proj_pred(nmem, store);
1643 set_Proj_proj(nmem, pn_Store_M);
1645 /* move all entities to the frame type */
1646 frame_tp = get_irg_frame_type(irg);
1647 offset = get_type_size_bytes(frame_tp);
1649 /* we will add new entities: set the layout to undefined */
1650 assert(get_type_state(frame_tp) == layout_fixed);
1651 set_type_state(frame_tp, layout_undefined);
1652 for (entry = new_list; entry != NULL; entry = entry->next) {
1653 ir_entity *ent = entry->ent;
1655 /* If the entity is still on the argument type, move it to the
1657 * This happens if the value_param type was built due to compound
1659 if (get_entity_owner(ent) != frame_tp) {
1660 ir_type *tp = get_entity_type(ent);
1661 unsigned align = get_type_alignment_bytes(tp);
1663 offset += align - 1;
1664 offset &= ~(align - 1);
1665 set_entity_owner(ent, frame_tp);
1666 /* must be automatic to set a fixed layout */
1667 set_entity_offset(ent, offset);
1668 offset += get_type_size_bytes(tp);
1671 set_type_size_bytes(frame_tp, offset);
1672 /* fix the layout again */
1673 set_type_state(frame_tp, layout_fixed);
1678 * The start block has no jump, instead it has an initial exec Proj.
1679 * The backend wants to handle all blocks the same way, so we replace
1680 * the out cfg edge with a real jump.
1682 static void fix_start_block(ir_graph *irg)
1684 ir_node *initial_X = get_irg_initial_exec(irg);
1685 ir_node *start_block = get_irg_start_block(irg);
1686 const ir_edge_t *edge;
1688 assert(is_Proj(initial_X));
1690 foreach_out_edge(initial_X, edge) {
1691 ir_node *block = get_edge_src_irn(edge);
1693 if (is_Anchor(block))
1695 if (block != start_block) {
1696 ir_node *jmp = new_r_Jmp(start_block);
1697 set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
1698 set_irg_initial_exec(irg, jmp);
1702 panic("Initial exec has no follow block in %+F", irg);
1706 * Update the entities of Sels that access the outer value parameters.
1708 static void update_outer_frame_sels(ir_node *irn, void *env)
1710 lower_frame_sels_env_t *ctx = env;
1717 ptr = get_Sel_ptr(irn);
1718 if (! is_arg_Proj(ptr))
1720 if (get_Proj_proj(ptr) != ctx->static_link_pos)
1722 ent = get_Sel_entity(irn);
1724 if (get_entity_owner(ent) == ctx->value_tp) {
1725 /* replace by its copy from the argument type */
1726 pos = get_struct_member_index(ctx->value_tp, ent);
1727 ent = get_argument_entity(ent, ctx);
1728 set_Sel_entity(irn, ent);
1730 /* check, if we have not seen this entity before */
1731 if (get_entity_link(ent) == NULL) {
1737 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1739 set_entity_link(ent, ctx->value_param_list);
1745 * Fix access to outer local variables.
1747 static void fix_outer_variable_access(be_abi_irg_t *env,
1748 lower_frame_sels_env_t *ctx)
1754 for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
1755 ir_entity *ent = get_class_member(ctx->frame_tp, i);
1757 if (! is_method_entity(ent))
1760 irg = get_entity_irg(ent);
1765 * FIXME: find the number of the static link parameter
1766 * for now we assume 0 here
1768 ctx->static_link_pos = 0;
1770 irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
1775 * Modify the irg itself and the frame type.
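 *
 * Roughly: lower frame/value-param Sels, build the stack argument type and
 * the stack layout, create the be_Start with Projs for all callee-save and
 * ignore registers, emit the backend prologue and a barrier, rewire the
 * argument Projs to register Projs or stack loads, and finally turn all
 * Return nodes into be_Returns.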
1777 static void modify_irg(ir_graph *irg)
1779 be_abi_irg_t *env = be_get_irg_abi(irg);
1780 be_abi_call_t *call = env->call;
1781 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1782 const arch_register_t *sp = arch_env->sp;
1783 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1784 struct obstack *obst = be_get_be_obst(irg);
1785 be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
1788 ir_node *new_mem_proj;
1794 unsigned frame_size;
1797 const arch_register_t *fp_reg;
1798 ir_node *frame_pointer;
1802 const ir_edge_t *edge;
1803 ir_type *arg_type, *bet_type, *tp;
1804 lower_frame_sels_env_t ctx;
1805 ir_entity **param_map;
1807 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1809 /* Must fetch memory here, otherwise the start Barrier gets the wrong
1810 * memory, which leads to loops in the DAG. */
1811 old_mem = get_irg_initial_mem(irg);
1813 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1815 /* set the links of all frame entities to NULL, we use them
1816 to detect whether an entity is already linked into the value_param_list */
1817 tp = get_method_value_param_type(method_type);
1820 /* clear the links of the clone type, let the
1821 original entities point to their clones */
1822 for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
1823 ir_entity *mem = get_struct_member(tp, i);
1824 set_entity_link(mem, NULL);
1828 arg_type = compute_arg_type(env, irg, call, method_type, tp, &param_map);
1830 /* Convert the Sel nodes in the irg to frame addr nodes: */
1831 ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
1832 ctx.frame = get_irg_frame(irg);
1833 ctx.sp_class = arch_env->sp->reg_class;
1834 ctx.link_class = arch_env->link_class;
1835 ctx.frame_tp = get_irg_frame_type(irg);
1837 /* layout the stackframe now */
1838 if (get_type_state(ctx.frame_tp) == layout_undefined) {
1839 default_layout_compound_type(ctx.frame_tp);
1842 /* we will possibly add new entities to the frame: set the layout to undefined */
1843 assert(get_type_state(ctx.frame_tp) == layout_fixed);
1844 set_type_state(ctx.frame_tp, layout_undefined);
1846 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1848 /* fix the frame type layout again */
1849 set_type_state(ctx.frame_tp, layout_fixed);
1850 /* align the stack frame to 4 bytes */
1851 frame_size = get_type_size_bytes(ctx.frame_tp);
1852 if (frame_size % 4 != 0) {
1853 set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
1856 env->regs = pmap_create();
1858 n_params = get_method_n_params(method_type);
1859 args = OALLOCNZ(obst, ir_node*, n_params);
1862 * for inner functions we must now fix accesses to outer frame entities.
1864 fix_outer_variable_access(env, &ctx);
1866 /* Check if a value parameter is transmitted in a register.
1867 * This might happen if the address is taken of a parameter which is
1868 * transmitted in registers.
1870 * Note that on some architectures this case must be handled specially
1871 * because the place of the backing store is determined by their ABI.
1873 * In the default case we move the entity to the frame type and create
1874 * a backing store into the first block.
1876 fix_address_of_parameter_access(env, irg, ctx.value_param_list);
1878 DEL_ARR_F(ctx.value_param_list);
1879 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1881 /* Fill the argument vector */
1882 arg_tuple = get_irg_args(irg);
1883 foreach_out_edge(arg_tuple, edge) {
1884 ir_node *irn = get_edge_src_irn(edge);
1885 if (! is_Anchor(irn)) {
1886 int nr = get_Proj_proj(irn);
1888 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1892 bet_type = call->cb->get_between_type(env->cb);
1893 stack_frame_init(stack_layout, arg_type, bet_type,
1894 get_irg_frame_type(irg), arch_env->stack_dir, param_map);
1895 stack_layout->sp_relative = call->flags.bits.try_omit_fp;
1897 /* Count the register params and add them to the number of Projs for the RegParams node */
1898 for (i = 0; i < n_params; ++i) {
1899 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
1900 if (arg->in_reg && args[i]) {
1901 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1902 assert(i == get_Proj_proj(args[i]));
1904 /* For now, associate the register with the old Proj from Start representing that argument. */
1905 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1906 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1910 /* Collect all callee-save registers */
1911 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
1912 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
1913 for (j = 0; j < cls->n_regs; ++j) {
1914 const arch_register_t *reg = &cls->regs[j];
1915 if (arch_register_type_is(reg, callee_save) ||
1916 arch_register_type_is(reg, state)) {
1917 pmap_insert(env->regs, (void *) reg, NULL);
1922 /* handle start block here (place a jump in the block) */
1923 fix_start_block(irg);
1925 pmap_insert(env->regs, (void *) sp, NULL);
1926 pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1927 start_bl = get_irg_start_block(irg);
1928 env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1931 * make proj nodes for the callee save registers.
1932 * memorize them, since Return nodes get those as inputs.
1934 * Note that if a register corresponds to an argument, the regs map contains
1935 * the old Proj from Start for that argument.
1938 rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
1939 reg_map_to_arr(rm, env->regs);
1940 for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1941 arch_register_t *reg = (void *) rm[i].reg;
1942 ir_mode *mode = reg->reg_class->mode;
1944 arch_register_req_type_t add_type = 0;
1948 add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
1951 proj = new_r_Proj(env->start, mode, nr + 1);
1952 pmap_insert(env->regs, (void *) reg, proj);
1953 be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
1954 arch_set_irn_register(proj, reg);
1956 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1959 /* create a new initial memory proj */
1960 assert(is_Proj(old_mem));
1961 arch_set_out_register_req(env->start, 0, arch_no_register_req);
1962 new_mem_proj = new_r_Proj(env->start, mode_M, 0);
1964 set_irg_initial_mem(irg, mem);
1966 /* Generate the Prologue */
1967 fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &stack_layout->initial_bias);
1969 /* do the stack allocation BEFORE the barrier, or spill code
1970 might be added before it */
1971 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1972 env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
1973 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1975 create_barrier(start_bl, &mem, env->regs, 0);
1977 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1978 arch_set_irn_register(env->init_sp, sp);
1980 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1981 set_irg_frame(irg, frame_pointer);
1982 pset_insert_ptr(env->ignore_regs, fp_reg);
1984 /* rewire old mem users to new mem */
1985 exchange(old_mem, mem);
1987 /* keep the mem (for functions with an endless loop = no return) */
1990 set_irg_initial_mem(irg, mem);
1992 /* Now, introduce stack param nodes for all parameters passed on the stack */
1993 for (i = 0; i < n_params; ++i) {
1994 ir_node *arg_proj = args[i];
1995 ir_node *repl = NULL;
1997 if (arg_proj != NULL) {
1998 be_abi_call_arg_t *arg;
1999 ir_type *param_type;
2000 int nr = get_Proj_proj(arg_proj);
2003 nr = MIN(nr, n_params);
2004 arg = get_call_arg(call, 0, nr, 1);
2005 param_type = get_method_param_type(method_type, nr);
2008 repl = pmap_get(env->regs, (void *) arg->reg);
2009 } else if (arg->on_stack) {
2010 ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
2012 /* For atomic parameters which are actually used, we create a Load node. */
2013 if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
2014 ir_mode *mode = get_type_mode(param_type);
2015 ir_mode *load_mode = arg->load_mode;
2017 ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
2018 repl = new_r_Proj(load, load_mode, pn_Load_res);
2020 if (mode != load_mode) {
2021 repl = new_r_Conv(start_bl, repl, mode);
2024 /* The stack parameter is not primitive (it is a struct or array),
2025 * so we create a node representing the parameter's address
2031 assert(repl != NULL);
2033 /* Beware: the mode of the register parameters is always the mode of the register class,
2034 which may be wrong. We add Convs in that case. */
2035 mode = get_irn_mode(args[i]);
2036 if (mode != get_irn_mode(repl)) {
2037 repl = new_r_Conv(get_nodes_block(repl), repl, mode);
2039 exchange(args[i], repl);
2043 /* the arg Proj is no longer needed and should only be used by the Anchor */
2044 assert(get_irn_n_edges(arg_tuple) == 1);
2045 kill_node(arg_tuple);
2046 set_irg_args(irg, new_r_Bad(irg));
2048 /* All Return nodes hang on the End node, so look for them there. */
2049 end = get_irg_end_block(irg);
2050 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
2051 ir_node *irn = get_Block_cfgpred(end, i);
2053 if (is_Return(irn)) {
2054 ir_node *blk = get_nodes_block(irn);
2055 ir_node *mem = get_Return_mem(irn);
2056 ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
2061 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
2062 the code is dead and will never be executed. */
2065 /** Fix the state inputs of calls that still hang on unknowns */
2066 static void fix_call_state_inputs(ir_graph *irg)
2068 be_abi_irg_t *env = be_get_irg_abi(irg);
2069 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
2071 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
2073 /* Collect all state registers */
2074 n = arch_env_get_n_reg_class(arch_env);
2075 for (i = 0; i < n; ++i) {
2077 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
2078 for (j = 0; j < cls->n_regs; ++j) {
2079 const arch_register_t *reg = arch_register_for_index(cls, j);
2080 if (arch_register_type_is(reg, state)) {
2081 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
2086 n = ARR_LEN(env->calls);
2087 n_states = ARR_LEN(stateregs);
2088 for (i = 0; i < n; ++i) {
2090 ir_node *call = env->calls[i];
2092 arity = get_irn_arity(call);
2094 /* the state reg inputs are the last n_states inputs of the calls */
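		/* these inputs were created as Unknown placeholders in adjust_call()
		 * and are replaced here by the current value of each state register
		 * taken from env->regs */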
        for (s = 0; s < n_states; ++s) {
            int inp = arity - n_states + s;
            const arch_register_t *reg     = stateregs[s];
            ir_node               *regnode = be_abi_reg_map_get(env->regs, reg);

            set_irn_n(call, inp, regnode);
        }
    }

    DEL_ARR_F(stateregs);
}

/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
    ir_type   *type   = get_entity_type(method);
    ident     *old_id = get_entity_ld_ident(method);
    ident     *id     = id_mangle3("", old_id, "$stub");
    ir_type   *parent = be->pic_trampolines_type;
    ir_entity *ent    = new_entity(parent, old_id, type);
    set_entity_ld_ident(ent, id);
    set_entity_visibility(ent, ir_visibility_private);

    return ent;
}

/**
 * Returns the trampoline entity for the given method.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
    ir_entity *result = pmap_get(env->ent_trampoline_map, method);
    if (result == NULL) {
        result = create_trampoline(env, method);
        pmap_insert(env->ent_trampoline_map, method, result);
    }

    return result;
}

/**
 * Create a PIC symbol entity (a "$non_lazy_ptr" indirection cell) for the
 * given entity.
 */
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
    ident     *old_id = get_entity_ld_ident(entity);
    ident     *id     = id_mangle3("", old_id, "$non_lazy_ptr");
    ir_type   *e_type = get_entity_type(entity);
    ir_type   *type   = new_type_pointer(e_type);
    ir_type   *parent = be->pic_symbols_type;
    ir_entity *ent    = new_entity(parent, old_id, type);
    set_entity_ld_ident(ent, id);
    set_entity_visibility(ent, ir_visibility_private);

    return ent;
}

/**
 * Returns the PIC symbol entity for the given entity.
 */
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
    ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
    if (result == NULL) {
        result = create_pic_symbol(env, entity);
        pmap_insert(env->ent_pic_symbol_map, entity, result);
    }

    return result;
}

/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
    return get_entity_visibility(entity) != ir_visibility_external
        && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}

/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
    ir_node  *pic_base;
    ir_node  *add;
    ir_node  *block;
    ir_mode  *mode;
    ir_node  *load;
    ir_node  *load_res;
    int       arity, i;

    ir_graph      *irg = get_irn_irg(node);
    be_main_env_t *be  = be_get_irg_main_env(irg);
    (void) data;

    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        dbg_info  *dbgi;
        ir_node   *pred = get_irn_n(node, i);
        ir_entity *entity;
        ir_entity *pic_symbol;
        ir_node   *pic_symconst;

        if (!is_SymConst(pred))
            continue;

        entity = get_SymConst_entity(pred);
        block  = get_nodes_block(pred);

        /* calls can jump to relative addresses, so we can directly jump to
           the (relatively) known call address or the trampoline */
        if (i == 1 && is_Call(node)) {
            ir_entity *trampoline;
            ir_node   *trampoline_const;

            if (can_address_relative(entity))
                continue;

            dbgi             = get_irn_dbg_info(pred);
            trampoline       = get_trampoline(be, entity);
            trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
                                                        trampoline, NULL);
            set_irn_n(node, i, trampoline_const);
            continue;
        }

        /* everything else is accessed relative to EIP */
        mode     = get_irn_mode(pred);
        pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(irg));

        /* all ok now for locally constructed stuff */
        if (can_address_relative(entity)) {
            ir_node *add = new_r_Add(block, pic_base, pred, mode);

            /* make sure the walker doesn't visit this add again */
            mark_irn_visited(add);
            set_irn_n(node, i, add);
            continue;
        }

        /* get entry from pic symbol segment */
        dbgi         = get_irn_dbg_info(pred);
        pic_symbol   = get_pic_symbol(be, entity);
        pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
                                                pic_symbol, NULL);
        add = new_r_Add(block, pic_base, pic_symconst, mode);
        mark_irn_visited(add);

        /* we need an extra indirection for global data outside our current
           module. The loads are always safe and can therefore float
           and need no memory input */
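        /* The rewrite thus turns
         *     SymConst(entity)
         * into
         *     Load(pic_base + SymConst(entity$non_lazy_ptr))
         * i.e. the real address is fetched from the PIC symbol segment. */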
        load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
        load_res = new_r_Proj(load, mode, pn_Load_res);

        set_irn_n(node, i, load_res);
    }
}
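
/**
 * Introduce the ABI for a graph: set up the stack pointer requirement,
 * process all Call nodes and rewrite the graph according to the calling
 * convention.
 */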
be_abi_irg_t *be_abi_introduce(ir_graph *irg)
{
    be_abi_irg_t     *env         = XMALLOCZ(be_abi_irg_t);
    ir_node          *old_frame   = get_irg_frame(irg);
    struct obstack   *obst        = be_get_be_obst(irg);
    be_options_t     *options     = be_get_irg_options(irg);
    const arch_env_t *arch_env    = be_get_irg_arch_env(irg);
    ir_entity        *entity      = get_irg_entity(irg);
    ir_type          *method_type = get_entity_type(entity);

    pmap_entry          *ent;
    ir_node             *dummy;
    unsigned            *limited_bitset;
    arch_register_req_t *sp_req;

    be_omit_fp      = options->omit_fp;
    be_omit_leaf_fp = options->omit_leaf_fp;

    env->ignore_regs  = pset_new_ptr_default();
    env->keep_map     = pmap_create();
    env->dce_survivor = new_survive_dce();

    sp_req = OALLOCZ(obst, arch_register_req_t);
    env->sp_req = sp_req;

    sp_req->type = arch_register_req_type_limited
                 | arch_register_req_type_produces_sp;
    sp_req->cls  = arch_register_get_class(arch_env->sp);

    limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
    rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
    sp_req->limited = limited_bitset;
    if (arch_env->sp->type & arch_register_type_ignore) {
        sp_req->type |= arch_register_req_type_ignore;
    }

    /* break here if the backend provides a custom API.
     * Note: we shouldn't have to set up any be_abi_irg_t* stuff at all,
     * but we need more cleanup to make this work.
     */
    be_set_irg_abi(irg, env);
    if (arch_env->custom_abi)
        return env;

    env->call = be_abi_call_new(arch_env->sp->reg_class);
    arch_env_get_call_abi(arch_env, method_type, env->call);

    env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
    env->calls   = NEW_ARR_F(ir_node*, 0);

    if (options->pic) {
        irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
    }

    /* Lower all call nodes in the IRG. */
    process_calls(irg);

    /*
     * Beware: init the backend ABI call object only after processing calls,
     * otherwise some information might not yet be available.
     */
    env->cb = env->call->cb->init(env->call, irg);

    /* Process the IRG */
    modify_irg(irg);

    /* fix call inputs for state registers */
    fix_call_state_inputs(irg);

    /* We don't need the keep map anymore. */
    pmap_destroy(env->keep_map);
    env->keep_map = NULL;

    /* the calls array is not needed anymore */
    DEL_ARR_F(env->calls);
    env->calls = NULL;

    /* reroute the stack origin of the calls to the true stack origin. */
    exchange(dummy, env->init_sp);
    exchange(old_frame, get_irg_frame(irg));

    /* Make some important node pointers survive the dead node elimination. */
    survive_dce_register_irn(env->dce_survivor, &env->init_sp);
    foreach_pmap(env->regs, ent) {
        survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
    }

    env->call->cb->done(env->cb);

    return env;
}

void be_abi_free(ir_graph *irg)
{
    be_abi_irg_t *env = be_get_irg_abi(irg);

    if (env->call != NULL)
        be_abi_call_free(env->call);
    free_survive_dce(env->dce_survivor);
    if (env->ignore_regs != NULL)
        del_pset(env->ignore_regs);
    if (env->regs != NULL)
        pmap_destroy(env->regs);
    free(env);

    be_set_irg_abi(irg, NULL);
}
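
/**
 * For every additionally ignored register of the given class, set the
 * corresponding bit in bs.
 */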
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
    arch_register_t *reg;

    for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
        if (reg->reg_class == cls)
            bitset_set(bs, reg->index);
}
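
/**
 * Sets a bit in raw_bitset for every register of the given class that may
 * actually be used, i.e. every register that is neither statically marked
 * as ignore nor contained in the ABI's additional ignore set.
 */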
void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
    unsigned         i;
    arch_register_t *reg;

    for (i = 0; i < cls->n_regs; ++i) {
        if (arch_register_type_is(&cls->regs[i], ignore))
            continue;

        rbitset_set(raw_bitset, i);
    }

    for (reg = pset_first(abi->ignore_regs); reg != NULL;
         reg = pset_next(abi->ignore_regs)) {
        if (reg->reg_class != cls)
            continue;

        rbitset_clear(raw_bitset, reg->index);
    }
}

/*
   _____ _        ____  _             _
  |  ___(_)_  __ / ___|| |_ __ _  ___| | __
  | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
  |  _| | |>  <   ___) | || (_| | (__|   <
  |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\

*/

typedef ir_node **node_array;

typedef struct fix_stack_walker_env_t {
    node_array sp_nodes;
} fix_stack_walker_env_t;

/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
    ir_node                   *insn = node;
    fix_stack_walker_env_t    *env  = data;
    const arch_register_req_t *req;

    if (is_Proj(node)) {
        insn = get_Proj_pred(node);
    }

    if (arch_irn_get_n_outs(insn) == 0)
        return;
    if (get_irn_mode(node) == mode_T)
        return;

    req = arch_get_register_req_out(node);
    if (! (req->type & arch_register_req_type_produces_sp))
        return;

    ARR_APP1(ir_node*, env->sp_nodes, node);
}
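
/**
 * Collect all nodes that produce the stack pointer, let the SSA construction
 * insert the necessary Phis for them and pin the stack pointer register
 * requirement on those Phis.
 */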
void be_abi_fix_stack_nodes(ir_graph *irg)
{
    be_abi_irg_t     *abi      = be_get_irg_abi(irg);
    be_lv_t          *lv       = be_get_irg_liveness(irg);
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    be_ssa_construction_env_t senv;
    int       i, len;
    ir_node **phis;
    fix_stack_walker_env_t walker_env;

    walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

    irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);

    /* nothing to be done if we didn't find any node; in fact we mustn't
     * continue, as for endless loops the IncSP might have had no users and
     * is Bad now.
     */
    len = ARR_LEN(walker_env.sp_nodes);
    if (len == 0) {
        DEL_ARR_F(walker_env.sp_nodes);
        return;
    }

    be_ssa_construction_init(&senv, irg);
    be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
                                   ARR_LEN(walker_env.sp_nodes));
    be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
                                        ARR_LEN(walker_env.sp_nodes));

    if (lv != NULL) {
        len = ARR_LEN(walker_env.sp_nodes);
        for (i = 0; i < len; ++i) {
            be_liveness_update(lv, walker_env.sp_nodes[i]);
        }
        be_ssa_construction_update_liveness_phis(&senv, lv);
    }

    phis = be_ssa_construction_get_new_phis(&senv);

    /* set register requirements for stack phis */
    len = ARR_LEN(phis);
    for (i = 0; i < len; ++i) {
        ir_node *phi = phis[i];
        be_set_phi_reg_req(phi, abi->sp_req);
        arch_set_irn_register(phi, arch_env->sp);
    }
    be_ssa_construction_destroy(&senv);

    DEL_ARR_F(walker_env.sp_nodes);
}

/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param bl         the block to process
 * @param real_bias  the bias value
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(ir_node *bl, int real_bias)
{
    int                wanted_bias = real_bias;
    ir_graph          *irg         = get_Block_irg(bl);
    be_stack_layout_t *layout      = be_get_irg_stack_layout(irg);
    bool               sp_relative = layout->sp_relative;
    const arch_env_t  *arch_env    = be_get_irg_arch_env(irg);
    ir_node           *irn;

    sched_foreach(bl, irn) {
        int ofs;

        /*
         * Check whether the node relates to an entity on the stack frame.
         * If so, set the true offset (including the bias) for that node.
         */
        ir_entity *ent = arch_get_frame_entity(irn);
        if (ent != NULL) {
            int bias   = sp_relative ? real_bias : 0;
            int offset = be_get_stack_entity_offset(layout, ent, bias);
            arch_set_frame_offset(irn, offset);
            DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
                 ent, offset, bias));
        }

        /*
         * If the node modifies the stack pointer by a constant offset,
         * record that in the bias.
         */
        ofs = arch_get_sp_bias(irn);

        if (be_is_IncSP(irn)) {
            /* fill in the real stack frame size */
            if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
                ir_type *frame_type = get_irg_frame_type(irg);
                ofs = (int) get_type_size_bytes(frame_type);
                be_set_IncSP_offset(irn, ofs);
            } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
                ir_type *frame_type = get_irg_frame_type(irg);
                ofs = - (int) get_type_size_bytes(frame_type);
                be_set_IncSP_offset(irn, ofs);
            }

            if (be_get_IncSP_align(irn)) {
                /* patch IncSP to produce an aligned stack pointer */
                ir_type *between_type = layout->between_type;
                int      between_size = get_type_size_bytes(between_type);
                int      alignment    = 1 << arch_env->stack_alignment;
                int      delta        = (real_bias + ofs + between_size) & (alignment - 1);
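                /* e.g. with real_bias + ofs + between_size == 20 and an
                 * alignment of 16, delta is 4, so the IncSP is grown by
                 * another 12 bytes to reach the next 16-byte boundary */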
                assert(ofs >= 0);
                if (delta > 0) {
                    be_set_IncSP_offset(irn, ofs + alignment - delta);
                    real_bias += alignment - delta;
                }
            } else {
                /* adjust so that real_bias corresponds with wanted_bias */
                int delta = wanted_bias - real_bias;
                assert(delta <= 0);
                if (delta != 0) {
                    be_set_IncSP_offset(irn, ofs + delta);
                    real_bias += delta;
                }
            }
        }

        real_bias   += ofs;
        wanted_bias += ofs;
    }

    assert(real_bias == wanted_bias);
    return real_bias;
}

/**
 * A helper struct for the bias walker.
 */
struct bias_walk {
    int      start_block_bias; /**< The bias at the end of the start block. */
    int      between_size;     /**< The size of the between type. */
    ir_node *start_block;      /**< The start block of the current graph. */
};

/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
    struct bias_walk *bw = data;
    if (bl != bw->start_block) {
        process_stack_bias(bl, bw->start_block_bias);
    }
}

/**
 * Walker: finally lower all Sels of outer frame or parameter
 * entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx)
{
    ir_node           *ptr;
    ir_entity         *ent;
    ir_type           *owner;
    be_stack_layout_t *layout;
    ir_graph          *irg;
    (void) ctx;

    if (! is_Sel(sel))
        return;

    ent    = get_Sel_entity(sel);
    owner  = get_entity_owner(ent);
    ptr    = get_Sel_ptr(sel);
    irg    = get_irn_irg(sel);
    layout = be_get_irg_stack_layout(irg);

    if (owner == layout->frame_type || owner == layout->arg_type) {
        /* found access to outer frame or arguments */
        int offset = be_get_stack_entity_offset(layout, ent, 0);

        if (offset != 0) {
            ir_node  *bl        = get_nodes_block(sel);
            dbg_info *dbgi      = get_irn_dbg_info(sel);
            ir_mode  *mode      = get_irn_mode(sel);
            ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
            ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);

            ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
        }
        exchange(sel, ptr);
    }
}
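
/**
 * Fix the stack bias for the whole graph: compute the initial frame offsets,
 * propagate the bias through all blocks and finally lower the Sels of inner
 * functions that still reference outer frame or parameter entities.
 */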
void be_abi_fix_stack_bias(ir_graph *irg)
{
    be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
    ir_type           *frame_tp;
    int                i;
    struct bias_walk   bw;

    stack_frame_compute_initial_offset(stack_layout);
    /* stack_layout_dump(stdout, stack_layout); */

    /* Determine the stack bias at the end of the start block. */
    bw.start_block_bias = process_stack_bias(get_irg_start_block(irg),
                                             stack_layout->initial_bias);
    bw.between_size     = get_type_size_bytes(stack_layout->between_type);

    /* fix the bias in all other blocks */
    bw.start_block = get_irg_start_block(irg);
    irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

    /* now fix inner functions: these still have Sel nodes referring to the
       outer frame and parameter entities */
    frame_tp = get_irg_frame_type(irg);
    for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
        ir_entity *ent = get_class_member(frame_tp, i);
        ir_graph  *irg = get_entity_irg(ent);

        if (irg != NULL) {
            irg_walk_graph(irg, NULL, lower_outer_frame_sels, NULL);
        }
    }
}

ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
    assert(arch_register_type_is(reg, callee_save));
    assert(pmap_contains(abi->regs, (void *) reg));
    return pmap_get(abi->regs, (void *) reg);
}

ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
    assert(arch_register_type_is(reg, ignore));
    assert(pmap_contains(abi->regs, (void *) reg));
    return pmap_get(abi->regs, (void *) reg);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
void be_init_abi(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}