2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
32 #include "irgraph_t.h"
35 #include "iredges_t.h"
38 #include "irprintf_t.h"
45 #include "raw_bitset.h"
56 #include "bessaconstr.h"
/* Debug module handle for this file; only compiled in when DEBUG_ONLY expands. */
59 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/** Descriptor of a single call argument or result; instances live in the
 * params set of a be_abi_call_t (see get_call_arg/remember_call_arg). */
61 typedef struct _be_abi_call_arg_t {
62 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
63 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
64 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
65 unsigned callee : 1; /**< 1: someone called us. 0: We call another function */
68 const arch_register_t *reg; /**< register carrying this argument — presumably only valid when in_reg is set; confirm against non-visible members */
71 unsigned alignment; /**< stack alignment */
72 unsigned space_before; /**< allocate space before */
73 unsigned space_after; /**< allocate space after */
/** Per-call ABI description filled in by the ISA (see arch_env_get_call_abi). */
76 struct _be_abi_call_t {
77 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
78 int pop; /**< number of bytes the stack frame is shrinked by the callee on return. */
79 const be_abi_callbacks_t *cb; /**< backend-provided ABI callbacks — set via be_abi_call_set_flags, presumably */
80 ir_type *between_type; /**< type of the "between" area pushed by the call itself (return address etc.) — NOTE(review): confirm, assignment not visible here */
82 const arch_register_class_t *cls_addr; /**< register class of the call address */
86 * The ABI information for the current graph.
/** The ABI information for the current graph (one instance per lowered irg). */
88 struct _be_abi_irg_t {
90 const arch_env_t *arch_env; /**< the target architecture environment */
91 survive_dce_t *dce_survivor; /**< keeps selected nodes alive across dead-code elimination — presumably; registration not visible here */
93 be_abi_call_t *call; /**< The ABI call information. */
94 ir_type *method_type; /**< The type of the method of the IRG. */
96 ir_node *init_sp; /**< The node representing the stack pointer
97 at the start of the function. */
99 ir_node *start; /**< The be_Start params node. */
100 pmap *regs; /**< A map of all callee-save and ignore regs to
101 their Projs to the RegParams node. */
103 int start_block_bias; /**< The stack bias at the end of the start block. */
105 void *cb; /**< ABI Callback self pointer. */
107 pmap *keep_map; /**< mapping blocks to keep nodes. */
108 pset *ignore_regs; /**< Additional registers which shall be ignored. */
110 ir_node **calls; /**< flexible array containing all be_Call nodes */
112 arch_register_req_t *sp_req; /**< register requirement describing the stack pointer — TODO confirm, initialization not visible */
/* Heights information used to order calls by data dependency inside a block
 * (set up in process_calls, used by dependent_on). */
115 static heights_t *ir_heights;
117 /** Flag: if set, try to omit the frame pointer in all routines. */
118 static int be_omit_fp = 1;
120 /** Flag: if set, try to omit the frame pointer in leaf routines only. */
121 static int be_omit_leaf_fp = 1;
124 _ ____ ___ ____ _ _ _ _
125 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
126 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
127 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
128 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
130 These callbacks are used by the backend to set the parameters
131 for a specific call type.
135 * Set compare function: compares two ABI call object arguments.
137 static int cmp_call_arg(const void *a, const void *b, size_t n)
139 const be_abi_call_arg_t *p = a, *q = b;
/* set-style comparator: 0 iff the two entries describe the same argument,
 * i.e. same is_res/pos/callee triple; all other fields are payload. */
141 return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
145 * Get an ABI call object argument.
147 * @param call the abi call
148 * @param is_res true for call results, false for call arguments
149 * @param pos position of the argument
150 * @param callee context type - if we are callee or caller
/**
 * Get an ABI call object argument from call->params.
 * Returns NULL if no matching entry was inserted (set_find semantics).
 * Note the hash intentionally ignores the callee bit, so callee and caller
 * variants of the same (is_res, pos) share a bucket; cmp_call_arg
 * distinguishes them.
 */
152 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
154 be_abi_call_arg_t arg;
157 memset(&arg, 0, sizeof(arg));
/* (key fields of arg are filled in on lines not visible in this excerpt) */
162 hash = is_res * 128 + pos;
164 return set_find(call->params, &arg, sizeof(arg), hash);
168 * Set an ABI call object argument.
/**
 * Store an ABI call object argument in call->params, once per requested
 * context (callee and/or caller).  The hash must match the one used in
 * get_call_arg; the callee bit is presumably toggled between the two
 * set_insert calls on lines not visible here — TODO confirm.
 */
170 static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
172 unsigned hash = arg->is_res * 128 + arg->pos;
173 if (context & ABI_CONTEXT_CALLEE) {
175 set_insert(call->params, arg, sizeof(*arg), hash);
177 if (context & ABI_CONTEXT_CALLER) {
179 set_insert(call->params, arg, sizeof(*arg), hash);
183 /* Set the flags for a call. */
184 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/* (body not visible in this excerpt; presumably stores flags and cb) */
190 /* Sets the number of bytes the stackframe is shrinked by the callee on return */
191 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
/* (body not visible in this excerpt; presumably stores pop) */
197 /* Set register class for call address */
198 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
200 call->cls_addr = cls;
/**
 * Record that parameter arg_pos of this call is passed on the stack.
 * load_mode, alignment and the before/after padding describe how the slot
 * is laid out; the entry is remembered for the given context(s).
 */
204 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
205 ir_mode *load_mode, unsigned alignment,
206 unsigned space_before, unsigned space_after,
207 be_abi_context_t context)
209 be_abi_call_arg_t arg;
210 memset(&arg, 0, sizeof(arg));
211 assert(alignment > 0 && "Alignment must be greater than 0");
/* (on_stack/pos fields are set on lines not visible in this excerpt) */
213 arg.load_mode = load_mode;
214 arg.alignment = alignment;
215 arg.space_before = space_before;
216 arg.space_after = space_after;
220 remember_call_arg(&arg, call, context);
/** Record that parameter arg_pos of this call is passed in register reg. */
223 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
225 be_abi_call_arg_t arg;
226 memset(&arg, 0, sizeof(arg));
/* (in_reg/reg/pos fields are set on lines not visible in this excerpt) */
233 remember_call_arg(&arg, call, context);
/** Record that result arg_pos of this call is returned in register reg. */
236 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
238 be_abi_call_arg_t arg;
239 memset(&arg, 0, sizeof(arg));
/* (is_res/reg/pos fields are set on lines not visible in this excerpt) */
246 remember_call_arg(&arg, call, context);
249 /* Get the flags of a ABI call object. */
250 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/* (body not visible in this excerpt; presumably returns call->flags) */
256 * Constructor for a new ABI call object.
258 * @param cls_addr register class of the call address
260 * @return the new ABI call object
/**
 * Constructor for a new ABI call object.
 *
 * @param cls_addr register class of the call address
 * @return the new, zero-initialized ABI call object (owner must release it
 *         with be_abi_call_free)
 */
262 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
264 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
267 call->params = new_set(cmp_call_arg, 16);
269 call->cls_addr = cls_addr;
/* default: allow fp omission if either global policy flag is set */
271 call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
277 * Destructor for an ABI call object.
/** Destructor for an ABI call object; releases the params set
 * (the call object itself is presumably freed on a line not visible here). */
279 static void be_abi_call_free(be_abi_call_t *call)
281 del_set(call->params);
287 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
288 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
289 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
290 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
293 Handling of the stack frame. It is composed of three types:
294 1) The type of the arguments which are pushed on the stack.
295 2) The "between type" which consists of stuff the call of the
296 function pushes on the stack (like the return address and
297 the old base pointer for ia32).
298 3) The Firm frame type which consists of all local variables
/**
 * Compute the offset of a stack-frame entity relative to the frame pointer:
 * entity offset within its own frame sub-type, plus the sizes of the
 * sub-types below it, corrected by the initial frame-pointer offset and
 * the current bias (bias handling on lines not visible here).
 */
302 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
305 ir_type *t = get_entity_owner(ent);
306 int ofs = get_entity_offset(ent);
310 /* Find the type the entity is contained in. */
311 for (index = 0; index < N_FRAME_TYPES; ++index) {
312 if (frame->order[index] == t)
314 /* Add the size of all the types below the one of the entity to the entity's offset */
315 ofs += get_type_size_bytes(frame->order[index]);
318 /* correct the offset by the initial position of the frame pointer */
319 ofs -= frame->initial_offset;
321 /* correct the offset with the current bias. */
328 * Retrieve the entity with given offset from a frame type.
/**
 * Retrieve the first member entity of compound type t located at the given
 * byte offset (linear scan; NULL-on-miss is presumably handled on lines
 * not visible here).
 */
330 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
334 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
335 ir_entity *ent = get_compound_member(t, i);
336 if (get_entity_offset(ent) == offset)
/**
 * Compute and cache frame->initial_offset: the frame pointer's position
 * relative to offset 0 of the base type.  The base type depends on the
 * stack direction; when no entity sits at offset 0 the offset falls back
 * to a type size (branch structure partially on lines not visible here).
 */
343 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
345 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
346 ir_entity *ent = search_ent_with_offset(base, 0);
349 frame->initial_offset
350 = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
352 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
355 return frame->initial_offset;
359 * Initializes the frame layout from parts
361 * @param frame the stack layout that will be initialized
362 * @param args the stack argument layout type
363 * @param between the between layout type
364 * @param locals the method frame type
365 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
366 * @param param_map an array mapping method argument positions to the stack argument type
368 * @return the initialized stack layout
/**
 * Initializes the frame layout from parts.
 *
 * @param frame the stack layout that will be initialized
 * @param args the stack argument layout type
 * @param between the between layout type
 * @param locals the method frame type
 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
 * @param param_map an array mapping method argument positions to the stack argument type
 * @return the initialized stack layout
 *
 * order[] is filled so that index 0 holds the lowest-address type; the
 * between type always sits in the middle.
 */
370 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
371 ir_type *between, ir_type *locals, int stack_dir,
372 ir_entity *param_map[])
374 frame->arg_type = args;
375 frame->between_type = between;
376 frame->frame_type = locals;
377 frame->initial_offset = 0;
378 frame->initial_bias = 0;
379 frame->stack_dir = stack_dir;
380 frame->order[1] = between;
381 frame->param_map = param_map;
/* increasing stack: arguments at the lowest addresses */
384 frame->order[0] = args;
385 frame->order[2] = locals;
388 /* typical decreasing stack: locals have the
389 * lowest addresses, arguments the highest */
390 frame->order[0] = locals;
391 frame->order[2] = args;
403 Adjustment of the calls inside a graph.
408 * Transform a call node into a be_Call node.
410 * @param env The ABI environment for the current irg.
411 * @param irn The call node.
412 * @param curr_sp The stack pointer node to use.
413 * @return The stack pointer after the call.
/**
 * Transform a Call node into a be_Call node: lower stack arguments to
 * Stores/CopyBs, collect register arguments, model caller-save clobbers
 * as extra result Projs kept alive by a Keep, and thread the stack
 * pointer through IncSP nodes.
 *
 * @param env The ABI environment for the current irg.
 * @param irn The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
415 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
417 ir_graph *irg = env->irg;
418 const arch_env_t *arch_env = env->arch_env;
419 ir_type *call_tp = get_Call_type(irn);
420 ir_node *call_ptr = get_Call_ptr(irn);
421 int n_params = get_method_n_params(call_tp);
422 ir_node *curr_mem = get_Call_mem(irn);
423 ir_node *bl = get_nodes_block(irn);
425 int stack_dir = arch_env->stack_dir;
426 const arch_register_t *sp = arch_env->sp;
427 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
428 ir_mode *mach_mode = sp->reg_class->mode;
429 struct obstack *obst = be_get_be_obst(irg);
430 int no_alloc = call->flags.bits.frame_is_setup_on_call;
431 int n_res = get_method_n_ress(call_tp);
432 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
434 ir_node *res_proj = NULL;
435 int n_reg_params = 0;
436 int n_stack_params = 0;
439 pset_new_t destroyed_regs, states;
440 pset_new_iterator_t iter;
444 int n_reg_results = 0;
445 const arch_register_t *reg;
446 const ir_edge_t *edge;
448 int *stack_param_idx;
449 int i, n, destroy_all_regs;
452 pset_new_init(&destroyed_regs);
453 pset_new_init(&states);
455 /* Let the isa fill out the abi description for that call node. */
456 arch_env_get_call_abi(arch_env, call_tp, call);
458 /* Insert code to put the stack arguments on the stack. */
459 assert(get_Call_n_params(irn) == n_params);
460 assert(obstack_object_size(obst) == 0);
461 stack_param_idx = ALLOCAN(int, n_params);
462 for (i = 0; i < n_params; ++i) {
463 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
/* accumulate the aligned slot size of every stack-passed parameter */
466 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
468 stack_size += round_up2(arg->space_before, arg->alignment);
469 stack_size += round_up2(arg_size, arg->alignment);
470 stack_size += round_up2(arg->space_after, arg->alignment);
472 stack_param_idx[n_stack_params++] = i;
476 /* Collect all arguments which are passed in registers. */
477 reg_param_idxs = ALLOCAN(int, n_params);
478 for (i = 0; i < n_params; ++i) {
479 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
480 if (arg && arg->in_reg) {
481 reg_param_idxs[n_reg_params++] = i;
486 * If the stack is decreasing and we do not want to store sequentially,
487 * or someone else allocated the call frame
488 * we allocate as much space on the stack all parameters need, by
489 * moving the stack pointer along the stack's direction.
491 * Note: we also have to do this for stack_size == 0, because we may have
492 * to adjust stack alignment for the call.
494 if (stack_dir < 0 && !do_seq && !no_alloc) {
495 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
498 dbgi = get_irn_dbg_info(irn);
499 /* If there are some parameters which shall be passed on the stack. */
500 if (n_stack_params > 0) {
502 ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
506 * Reverse list of stack parameters if call arguments are from left to right.
507 * We must them reverse again if they are pushed (not stored) and the stack
508 * direction is downwards.
510 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
511 for (i = 0; i < n_stack_params >> 1; ++i) {
512 int other = n_stack_params - i - 1;
513 int tmp = stack_param_idx[i];
514 stack_param_idx[i] = stack_param_idx[other];
515 stack_param_idx[other] = tmp;
519 curr_mem = get_Call_mem(irn);
521 in[n_in++] = curr_mem;
/* emit one Store (atomic) or CopyB (compound) per stack parameter */
524 for (i = 0; i < n_stack_params; ++i) {
525 int p = stack_param_idx[i];
526 be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
527 ir_node *param = get_Call_param(irn, p);
528 ir_node *addr = curr_sp;
530 ir_type *param_type = get_method_param_type(call_tp, p);
531 int param_size = get_type_size_bytes(param_type) + arg->space_after;
534 * If we wanted to build the arguments sequentially,
535 * the stack pointer for the next must be incremented,
536 * and the memory value propagated.
540 addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
541 param_size + arg->space_before, 0);
542 add_irn_dep(curr_sp, curr_mem);
544 curr_ofs += arg->space_before;
545 curr_ofs = round_up2(curr_ofs, arg->alignment);
547 /* Make the expression to compute the argument's offset. */
549 ir_mode *constmode = mach_mode;
550 if (mode_is_reference(mach_mode)) {
553 addr = new_r_Const_long(irg, constmode, curr_ofs);
554 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
558 /* Insert a store for primitive arguments. */
559 if (is_atomic_type(param_type)) {
561 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
562 store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
563 mem = new_r_Proj(store, mode_M, pn_Store_M);
565 /* Make a mem copy for compound arguments. */
568 assert(mode_is_reference(get_irn_mode(param)));
569 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
570 mem = new_r_Proj(copy, mode_M, pn_CopyB_M_regular);
573 curr_ofs += param_size;
581 /* We need the sync only, if we didn't build the stores sequentially. */
583 if (n_stack_params >= 1) {
584 curr_mem = new_r_Sync(bl, n_in, in);
586 curr_mem = get_Call_mem(irn);
591 /* check for the return_twice property */
592 destroy_all_regs = 0;
593 if (is_SymConst_addr_ent(call_ptr)) {
594 ir_entity *ent = get_SymConst_entity(call_ptr);
596 if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
597 destroy_all_regs = 1;
599 ir_type *call_tp = get_Call_type(irn);
601 if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
602 destroy_all_regs = 1;
605 /* Put caller save into the destroyed set and state registers in the states set */
606 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
608 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
609 for (j = 0; j < cls->n_regs; ++j) {
610 const arch_register_t *reg = arch_register_for_index(cls, j);
612 if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
613 if (! arch_register_type_is(reg, ignore))
614 pset_new_insert(&destroyed_regs, (void *) reg);
/* state registers are both clobbered and passed through as ins */
616 if (arch_register_type_is(reg, state)) {
617 pset_new_insert(&destroyed_regs, (void*) reg);
618 pset_new_insert(&states, (void*) reg);
623 if (destroy_all_regs) {
624 /* even if destroyed all is specified, neither SP nor FP are destroyed (else bad things will happen) */
625 pset_new_remove(&destroyed_regs, arch_env->sp);
626 pset_new_remove(&destroyed_regs, arch_env->bp);
629 /* search the largest result proj number */
630 res_projs = ALLOCANZ(ir_node*, n_res);
632 foreach_out_edge(irn, edge) {
633 const ir_edge_t *res_edge;
634 ir_node *irn = get_edge_src_irn(edge);
636 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
639 foreach_out_edge(irn, res_edge) {
641 ir_node *res = get_edge_src_irn(res_edge);
643 assert(is_Proj(res));
645 proj = get_Proj_proj(res);
646 assert(proj < n_res);
647 assert(res_projs[proj] == NULL);
648 res_projs[proj] = res;
654 /** TODO: this is not correct for cases where return values are passed
655 * on the stack, but no known ABI does this currently...
657 n_reg_results = n_res;
659 assert(obstack_object_size(obst) == 0);
661 in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
663 /* make the back end call node and set its register requirements. */
664 for (i = 0; i < n_reg_params; ++i) {
665 in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
668 /* add state registers ins */
669 foreach_pset_new(&states, reg, iter) {
670 const arch_register_class_t *cls = arch_register_get_class(reg);
672 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
/* NOTE(review): unconditional debug print to stderr — looks like leftover
 * debugging output; consider routing through DBG(). */
673 ir_fprintf(stderr, "Adding %+F\n", regnode);
675 ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
676 in[n_ins++] = regnode;
678 assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
680 /* ins collected, build the call */
681 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
/* address folded into the node as an entity; ptr input degenerates to sp */
683 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
684 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
685 n_ins, in, get_Call_type(irn));
686 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
689 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
690 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
691 n_ins, in, get_Call_type(irn));
693 be_Call_set_pop(low_call, call->pop);
695 /* put the call into the list of all calls for later processing */
696 ARR_APP1(ir_node *, env->calls, low_call);
698 /* create new stack pointer */
699 curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
700 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
701 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
702 arch_set_irn_register(curr_sp, sp);
704 /* now handle results */
705 for (i = 0; i < n_res; ++i) {
707 ir_node *proj = res_projs[i];
708 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
710 /* returns values on stack not supported yet */
714 shift the proj number to the right, since we will drop the
715 unspeakable Proj_T from the Call. Therefore, all real argument
716 Proj numbers must be increased by pn_be_Call_first_res
718 pn = i + pn_be_Call_first_res;
721 ir_type *res_type = get_method_res_type(call_tp, i);
722 ir_mode *mode = get_type_mode(res_type);
723 proj = new_r_Proj(low_call, mode, pn);
726 set_Proj_pred(proj, low_call);
727 set_Proj_proj(proj, pn);
/* result registers are produced explicitly, so they are not clobbered */
731 pset_new_remove(&destroyed_regs, arg->reg);
736 Set the register class of the call address to
737 the backend provided class (default: stack pointer class)
739 be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
741 DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
743 /* Set the register classes and constraints of the Call parameters. */
744 for (i = 0; i < n_reg_params; ++i) {
745 int index = reg_param_idxs[i];
746 be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
747 assert(arg->reg != NULL);
749 be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
753 /* Set the register constraints of the results. */
754 for (i = 0; i < n_res; ++i) {
755 ir_node *proj = res_projs[i];
756 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
757 int pn = get_Proj_proj(proj);
760 be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
761 arch_set_irn_register(proj, arg->reg);
763 exchange(irn, low_call);
765 /* kill the ProjT node */
766 if (res_proj != NULL) {
770 /* Make additional projs for the caller save registers
771 and the Keep node which keeps them alive. */
773 const arch_register_t *reg;
777 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
778 pset_new_iterator_t iter;
781 n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
782 in = ALLOCAN(ir_node *, n_ins);
784 /* also keep the stack pointer */
785 set_irn_link(curr_sp, (void*) sp);
788 foreach_pset_new(&destroyed_regs, reg, iter) {
789 ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
791 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
792 be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
793 arch_set_irn_register(proj, reg);
795 set_irn_link(proj, (void*) reg);
800 for (i = 0; i < n_reg_results; ++i) {
801 ir_node *proj = res_projs[i];
802 const arch_register_t *reg = arch_get_irn_register(proj);
803 set_irn_link(proj, (void*) reg);
808 /* create the Keep for the caller save registers */
809 keep = be_new_Keep(bl, n, in);
810 for (i = 0; i < n; ++i) {
811 const arch_register_t *reg = get_irn_link(in[i]);
812 be_node_set_reg_class_in(keep, i, reg->reg_class);
816 /* Clean up the stack. */
817 assert(stack_size >= call->pop);
818 stack_size -= call->pop;
820 if (stack_size > 0) {
821 ir_node *mem_proj = NULL;
823 foreach_out_edge(low_call, edge) {
824 ir_node *irn = get_edge_src_irn(edge);
825 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
832 mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
833 keep_alive(mem_proj);
836 /* Clean up the stack frame or revert alignment fixes if we allocated it */
838 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
841 be_abi_call_free(call);
843 pset_new_destroy(&states);
844 pset_new_destroy(&destroyed_regs);
850 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
852 * @param alignment the minimum stack alignment
853 * @param size the node containing the non-aligned size
854 * @param block the block where new nodes are allocated on
855 * @param dbg debug info for new nodes
857 * @return a node representing the aligned size
/**
 * Round a dynamic alloc/free size node up to the minimum stack alignment:
 * size = (size + align-1) & ~(align-1).  Only emits nodes when the
 * alignment is > 1; requires a power-of-two alignment.
 *
 * @param stack_alignment the minimum stack alignment (power of two)
 * @param size the node containing the non-aligned size
 * @param block the block where new nodes are allocated on
 * @param dbg debug info for new nodes
 * @return a node representing the aligned size
 */
859 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
860 ir_node *block, dbg_info *dbg)
862 if (stack_alignment > 1) {
868 assert(is_po2(stack_alignment));
870 mode = get_irn_mode(size);
871 tv = new_tarval_from_long(stack_alignment-1, mode);
872 irg = get_Block_irg(block);
873 mask = new_r_Const(irg, tv);
874 size = new_rd_Add(dbg, block, size, mask, mode);
/* mask off the low bits: -(long)align is the all-ones-above mask */
876 tv = new_tarval_from_long(-(long)stack_alignment, mode);
877 mask = new_r_Const(irg, tv);
878 size = new_rd_And(dbg, block, size, mask, mode);
884 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
/**
 * Transform a stack Alloc into a backend AddSP connected to the stack
 * pointer chain.  Returns the new current stack pointer.  Forces frame
 * pointer usage because the SP is modified by an unknown amount.
 */
886 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
895 const ir_edge_t *edge;
900 unsigned stack_alignment;
902 assert(get_Alloc_where(alloc) == stack_alloc);
904 block = get_nodes_block(alloc);
905 irg = get_Block_irg(block);
908 type = get_Alloc_type(alloc);
/* find the result and memory Projs of the Alloc (cases on missing lines) */
910 foreach_out_edge(alloc, edge) {
911 ir_node *irn = get_edge_src_irn(edge);
913 assert(is_Proj(irn));
914 switch (get_Proj_proj(irn)) {
926 /* Beware: currently Alloc nodes without a result might happen,
927 only escape analysis kills them and this phase runs only for object
928 oriented source. We kill the Alloc here. */
929 if (alloc_res == NULL && alloc_mem) {
930 exchange(alloc_mem, get_Alloc_mem(alloc));
934 dbg = get_irn_dbg_info(alloc);
935 count = get_Alloc_count(alloc);
937 /* we might need to multiply the count with the element size */
938 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
939 ir_mode *mode = get_irn_mode(count);
940 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
942 ir_node *cnst = new_rd_Const(dbg, irg, tv);
943 size = new_rd_Mul(dbg, block, count, cnst, mode);
948 /* The stack pointer will be modified in an unknown manner.
949 We cannot omit it. */
950 env->call->flags.bits.try_omit_fp = 0;
952 stack_alignment = 1 << env->arch_env->stack_alignment;
953 size = adjust_alloc_size(stack_alignment, size, block, dbg);
954 new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
955 set_irn_dbg_info(new_alloc, dbg);
957 if (alloc_mem != NULL) {
961 addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);
963 /* We need to sync the output mem of the AddSP with the input mem
964 edge into the alloc node. */
965 ins[0] = get_Alloc_mem(alloc);
967 sync = new_r_Sync(block, 2, ins);
969 exchange(alloc_mem, sync);
972 exchange(alloc, new_alloc);
974 /* fix projnum of alloca res */
975 set_Proj_proj(alloc_res, pn_be_AddSP_res);
977 curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
984 * The Free is transformed into a back end free node and connected to the stack nodes.
/**
 * Transform a stack Free into a backend SubSP connected to the stack
 * pointer chain.  Returns the new current stack pointer (returned on a
 * line not visible here).  Forces frame pointer usage.
 */
986 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
990 ir_node *subsp, *mem, *res, *size, *sync;
994 unsigned stack_alignment;
997 assert(get_Free_where(free) == stack_alloc);
999 block = get_nodes_block(free);
1000 irg = get_irn_irg(block);
1001 type = get_Free_type(free);
1002 sp_mode = env->arch_env->sp->reg_class->mode;
1003 dbg = get_irn_dbg_info(free);
1005 /* we might need to multiply the size with the element size */
1006 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
1007 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
1008 ir_node *cnst = new_rd_Const(dbg, irg, tv);
1009 ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
1013 size = get_Free_size(free);
1016 stack_alignment = 1 << env->arch_env->stack_alignment;
1017 size = adjust_alloc_size(stack_alignment, size, block, dbg);
1019 /* The stack pointer will be modified in an unknown manner.
1020 We cannot omit it. */
1021 env->call->flags.bits.try_omit_fp = 0;
1022 subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
1023 set_irn_dbg_info(subsp, dbg);
1025 mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
1026 res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);
1028 /* we need to sync the memory */
1029 in[0] = get_Free_mem(free);
1031 sync = new_r_Sync(block, 2, in);
1033 /* and make the AddSP dependent on the former memory */
1034 add_irn_dep(subsp, get_Free_mem(free));
1037 exchange(free, sync);
1044 * Check if a node is somehow data dependent on another one.
1045 * both nodes must be in the same basic block.
1046 * @param n1 The first node.
1047 * @param n2 The second node.
1048 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
1050 static int dependent_on(ir_node *n1, ir_node *n2)
1052 assert(get_nodes_block(n1) == get_nodes_block(n2));
1054 return heights_reachable_in_block(ir_heights, n1, n2);
/** qsort comparator ordering call-like nodes by data dependency; nodes
 * without a dependency relation fall back to index order for a total
 * order (qsort requires one). */
1057 static int cmp_call_dependency(const void *c1, const void *c2)
1059 ir_node *n1 = *(ir_node **) c1;
1060 ir_node *n2 = *(ir_node **) c2;
1063 Classical qsort() comparison function behavior:
1064 0 if both elements are equal
1065 1 if second is "smaller" that first
1066 -1 if first is "smaller" that second
1068 if (dependent_on(n1, n2))
1071 if (dependent_on(n2, n1))
1074 /* The nodes have no depth order, but we need a total order because qsort()
/* NOTE(review): idx subtraction can overflow for very large indices;
 * harmless in practice but a <,> comparison would be strictly safe. */
1076 return get_irn_idx(n1) - get_irn_idx(n2);
1080 * Walker: links all Call/Alloc/Free nodes to the Block they are contained.
1081 * Clears the irg_is_leaf flag if a Call is detected.
/**
 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in
 * (via the irn link fields, forming a per-block list).
 * Clears the irg_is_leaf flag if a Call is detected, and disables frame
 * pointer omission when __builtin_return_address is used with depth > 0.
 */
1083 static void link_ops_in_block_walker(ir_node *irn, void *data)
1085 be_abi_irg_t *env = data;
1086 ir_opcode code = get_irn_opcode(irn);
1088 if (code == iro_Call ||
1089 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1090 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1091 ir_node *bl = get_nodes_block(irn);
1092 void *save = get_irn_link(bl);
1094 if (code == iro_Call)
1095 env->call->flags.bits.irg_is_leaf = 0;
/* prepend irn to the block's linked list of stack-relevant ops */
1097 set_irn_link(irn, save);
1098 set_irn_link(bl, irn);
1101 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
1102 ir_node *param = get_Builtin_param(irn, 0);
1103 tarval *tv = get_Const_tarval(param);
1104 unsigned long value = get_tarval_long(tv);
1105 /* use ebp, so the climbframe algo works... */
1107 env->call->flags.bits.try_omit_fp = 0;
1114 * Process all Call/Alloc/Free nodes inside a basic block.
1115 * Note that the link field of the block must contain a linked list of all
1116 * Call nodes inside the Block. We first order this list according to data dependency
1117 * and that connect the calls together.
/**
 * Process all Call/Alloc/Free nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the Block. We first order this list according to data
 * dependency and then connect the calls together via the stack pointer.
 */
1119 static void process_ops_in_block(ir_node *bl, void *data)
1121 be_abi_irg_t *env = data;
1122 ir_node *curr_sp = env->init_sp;
/* first pass: count linked nodes (count update on missing line) */
1129 for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {
1133 nodes = ALLOCAN(ir_node*, n_nodes);
1134 for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {
1138 /* If there were call nodes in the block. */
1143 /* order the call nodes according to data dependency */
1144 qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
/* iterate bottom-up: last in dependency order adjusts the SP first */
1146 for (i = n_nodes - 1; i >= 0; --i) {
1147 ir_node *irn = nodes[i];
1149 DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1150 switch (get_irn_opcode(irn)) {
1153 /* The stack pointer will be modified due to a call. */
1154 env->call->flags.bits.try_omit_fp = 0;
1156 curr_sp = adjust_call(env, irn, curr_sp);
1159 if (get_Alloc_where(irn) == stack_alloc)
1160 curr_sp = adjust_alloc(env, irn, curr_sp);
1163 if (get_Free_where(irn) == stack_alloc)
1164 curr_sp = adjust_free(env, irn, curr_sp);
1167 panic("invalid call");
1171 /* Keep the last stack state in the block by tying it to Keep node,
1172 * the proj from calls is already kept */
1173 if (curr_sp != env->init_sp &&
1174 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1176 keep = be_new_Keep(bl, 1, nodes);
1177 pmap_insert(env->keep_map, bl, keep);
1181 set_irn_link(bl, curr_sp);
1185 * Adjust all call nodes in the graph to the ABI conventions.
/**
 * Adjust all call nodes in the graph to the ABI conventions:
 * link stack-relevant ops per block, then lower them block-wise using
 * temporary heights information for dependency ordering.
 */
1187 static void process_calls(be_abi_irg_t *env)
1189 ir_graph *irg = env->irg;
1191 env->call->flags.bits.irg_is_leaf = 1;
1192 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
1194 ir_heights = heights_new(env->irg);
1195 irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
1196 heights_free(ir_heights);
1200 * Computes the stack argument layout type.
1201 * Changes a possibly allocated value param type by moving
1202 * entities to the stack layout type.
1204 * @param env the ABI environment
1205 * @param call the current call ABI
1206 * @param method_type the method type
1207 * @param val_param_tp the value parameter type, will be destroyed
1208 * @param param_map an array mapping method arguments to the stack layout type
1210 * @return the stack argument layout type
/**
 * Computes the stack argument layout type: builds a struct type holding one
 * entity per stack-passed parameter, laid out according to direction,
 * alignment and padding.  Also fills param_map with the created entities.
 *
 * @param env the ABI environment
 * @param call the current call ABI
 * @param method_type the method type
 * @param val_param_tp the value parameter type, will be destroyed
 * @param param_map an array mapping method arguments to the stack layout type
 * @return the stack argument layout type
 */
1212 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
1213 ir_type *method_type, ir_type *val_param_tp,
1214 ir_entity ***param_map)
1216 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1217 int inc = env->arch_env->stack_dir * dir;
1218 int n = get_method_n_params(method_type);
/* iterate parameters in stack-layout order, not declaration order */
1219 int curr = inc > 0 ? 0 : n - 1;
1220 struct obstack *obst = be_get_be_obst(env->irg);
1226 ident *id = get_entity_ident(get_irg_entity(env->irg));
1229 *param_map = map = OALLOCN(obst, ir_entity*, n);
1230 res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
1231 for (i = 0; i < n; ++i, curr += inc) {
1232 ir_type *param_type = get_method_param_type(method_type, curr);
1233 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);
1236 if (arg->on_stack) {
1237 if (val_param_tp != NULL) {
1238 /* the entity was already created, create a copy in the param type */
1239 ir_entity *val_ent = get_method_value_param_ent(method_type, i);
1240 arg->stack_ent = copy_entity_own(val_ent, res);
1241 set_entity_link(val_ent, arg->stack_ent);
1242 set_entity_link(arg->stack_ent, NULL);
1244 /* create a new entity */
1245 snprintf(buf, sizeof(buf), "param_%d", i);
1246 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1248 ofs += arg->space_before;
1249 ofs = round_up2(ofs, arg->alignment);
1250 set_entity_offset(arg->stack_ent, ofs);
/* NOTE(review): space_after is added before the entity size — verify this
 * ordering is intended; it matches padding-between-entity-and-next layout */
1251 ofs += arg->space_after;
1252 ofs += get_type_size_bytes(param_type);
1253 map[i] = arg->stack_ent;
1256 set_type_size_bytes(res, ofs);
1257 set_type_state(res, layout_fixed);
1262 const arch_register_t *reg;
1266 static int cmp_regs(const void *a, const void *b)
1268 const reg_node_map_t *p = a;
1269 const reg_node_map_t *q = b;
1271 if (p->reg->reg_class == q->reg->reg_class)
1272 return p->reg->index - q->reg->index;
1274 return p->reg->reg_class - q->reg->reg_class;
/* Flatten a (register -> node) pmap into the caller-provided array `res`
 * (which must hold at least pmap_count(reg_map) entries) and sort it with
 * cmp_regs so iteration order is deterministic. */
1277 static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
1280 int n = pmap_count(reg_map);
1283 foreach_pmap(reg_map, ent) {
1284 res[i].reg = ent->key;
1285 res[i].irn = ent->value;
1289 qsort(res, n, sizeof(res[0]), cmp_regs);
1293 * Creates a barrier.
/* Builds a be_Barrier over all register values currently in `regs`, rewires
 * the map to the barrier's Projs, and threads the memory through it.
 * NOTE(review): `*mem` is presumably passed as one of the barrier inputs in
 * lines elided from this view — confirm before relying on the exact input
 * layout. */
1295 static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
1298 int n_regs = pmap_count(regs);
1304 in = ALLOCAN(ir_node*, n_regs+1);
1305 rm = ALLOCAN(reg_node_map_t, n_regs);
1306 reg_map_to_arr(rm, regs);
1307 for (n = 0; n < n_regs; ++n) {
1315 irn = be_new_Barrier(bl, n, in);
1317 for (n = 0; n < n_regs; ++n) {
1318 ir_node *pred = rm[n].irn;
1319 const arch_register_t *reg = rm[n].reg;
1320 arch_register_type_t add_type = 0;
1322 const backend_info_t *info;
1324 /* stupid workaround for now... as not all nodes report register
1326 info = be_get_info(skip_Proj(pred));
1327 if (info != NULL && info->out_infos != NULL) {
1328 const arch_register_req_t *ireq = arch_get_register_req_out(pred);
/* Propagate ignore/produces_sp requirements from the predecessor to the
 * barrier's corresponding output. */
1329 if (ireq->type & arch_register_req_type_ignore)
1330 add_type |= arch_register_req_type_ignore;
1331 if (ireq->type & arch_register_req_type_produces_sp)
1332 add_type |= arch_register_req_type_produces_sp;
1335 proj = new_r_Proj(irn, get_irn_mode(pred), n);
1336 be_node_set_reg_class_in(irn, n, reg->reg_class);
1338 be_set_constr_single_reg_in(irn, n, reg, 0);
1339 be_set_constr_single_reg_out(irn, n, reg, add_type);
1340 arch_set_irn_register(proj, reg);
/* From now on, consumers of this register go through the barrier. */
1342 pmap_insert(regs, (void *) reg, proj);
/* Memory leaves the barrier through the last Proj. */
1346 *mem = new_r_Proj(irn, mode_M, n);
1353 * Creates a be_Return for a Return node.
1355 * @param @env the abi environment
1356 * @param irn the Return node or NULL if there was none
1357 * @param bl the block where the be_Retun should be placed
1358 * @param mem the current memory
1359 * @param n_res number of return results
1361 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
1362 ir_node *mem, int n_res)
1364 be_abi_call_t *call = env->call;
1365 const arch_env_t *arch_env = env->arch_env;
1367 pmap *reg_map = pmap_create();
1368 ir_node *keep = pmap_get(env->keep_map, bl);
1375 const arch_register_t **regs;
1379 get the valid stack node in this block.
1380 If we had a call in that block there is a Keep constructed by process_calls()
1381 which points to the last stack modification in that block. we'll use
1382 it then. Else we use the stack from the start block and let
1383 the ssa construction fix the usage.
1385 stack = be_abi_reg_map_get(env->regs, arch_env->sp);
1387 stack = get_irn_n(keep, 0);
/* The Keep has done its job as a stack marker; drop its keep-alive edge. */
1389 remove_End_keepalive(get_irg_end(env->irg), keep);
1392 /* Insert results for Return into the register map. */
1393 for (i = 0; i < n_res; ++i) {
1394 ir_node *res = get_Return_res(irn, i);
1395 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1396 assert(arg->in_reg && "return value must be passed in register");
1397 pmap_insert(reg_map, (void *) arg->reg, res);
1400 /* Add uses of the callee save registers. */
1401 foreach_pmap(env->regs, ent) {
1402 const arch_register_t *reg = ent->key;
1403 if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1404 pmap_insert(reg_map, ent->key, ent->value);
1407 be_abi_reg_map_set(reg_map, arch_env->sp, stack);
1409 /* Make the Epilogue node and call the arch's epilogue maker. */
1410 create_barrier(bl, &mem, reg_map, 1);
1411 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1414 Maximum size of the in array for Return nodes is
1415 return args + callee save/ignore registers + memory + stack pointer
1417 in_max = pmap_count(reg_map) + n_res + 2;
1419 in = ALLOCAN(ir_node*, in_max);
1420 regs = ALLOCAN(arch_register_t const*, in_max);
/* NOTE(review): in[0]/regs[0] (the memory input) are assigned in lines
 * elided from this view; slot 1 carries the stack pointer. */
1423 in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
1425 regs[1] = arch_env->sp;
1428 /* clear SP entry, since it has already been grown. */
1429 pmap_insert(reg_map, (void *) arch_env->sp, NULL);
1430 for (i = 0; i < n_res; ++i) {
1431 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1433 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1434 regs[n++] = arg->reg;
1436 /* Clear the map entry to mark the register as processed. */
1437 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1440 /* grow the rest of the stuff. */
1441 foreach_pmap(reg_map, ent) {
1444 regs[n++] = ent->key;
1448 /* The in array for the new back end return is now ready. */
1450 dbgi = get_irn_dbg_info(irn);
1454 /* we have to pop the shadow parameter in in case of struct returns */
1456 ret = be_new_Return(dbgi, env->irg, bl, n_res, pop, n, in);
1458 /* Set the register classes of the return's parameter accordingly. */
1459 for (i = 0; i < n; ++i) {
1460 if (regs[i] == NULL)
1463 be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
1466 /* Free the space of the Epilog's in array and the register <-> proj map. */
1467 pmap_destroy(reg_map);
/** Pair of a value-parameter entity and its argument position; chainable
 *  into a singly linked list via `next`. */
1472 typedef struct ent_pos_pair ent_pos_pair;
1473 struct ent_pos_pair {
1474 ir_entity *ent; /**< a value param entity */
1475 int pos; /**< its parameter number */
1476 ent_pos_pair *next; /**< for linking */
/** Walker environment for lower_frame_sels_walker() and
 *  update_outer_frame_sels(). */
1479 typedef struct lower_frame_sels_env_t {
1480 ent_pos_pair *value_param_list; /**< the list of all value param entities */
1481 ir_node *frame; /**< the current frame */
1482 const arch_register_class_t *sp_class; /**< register class of the stack pointer */
1483 const arch_register_class_t *link_class; /**< register class of the link pointer */
1484 ir_type *value_tp; /**< the value type if any */
1485 ir_type *frame_tp; /**< the frame type */
1486 int static_link_pos; /**< argument number of the hidden static link */
1487 } lower_frame_sels_env_t;
1490 * Return an entity from the backend for an value param entity.
1492 * @param ent an value param type entity
1493 * @param ctx context
1495 static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
1497 ir_entity *argument_ent = get_entity_link(ent);
1499 if (argument_ent == NULL) {
1500 /* we have NO argument entity yet: This is bad, as we will
1501 * need one for backing store.
1504 ir_type *frame_tp = ctx->frame_tp;
1505 unsigned offset = get_type_size_bytes(frame_tp);
1506 ir_type *tp = get_entity_type(ent);
1507 unsigned align = get_type_alignment_bytes(tp);
/* Round the frame's current size up to the entity's alignment; assumes
 * `align` is a power of two (the &~(align-1) trick requires it). */
1509 offset += align - 1;
1510 offset &= ~(align - 1);
/* Lazily create the backend copy on the frame type and grow the frame. */
1512 argument_ent = copy_entity_own(ent, frame_tp);
1514 /* must be automatic to set a fixed layout */
1515 set_entity_offset(argument_ent, offset);
1516 offset += get_type_size_bytes(tp);
1518 set_type_size_bytes(frame_tp, offset);
/* Cache the mapping so later lookups return the same entity. */
1519 set_entity_link(ent, argument_ent);
1521 return argument_ent;
1524 * Walker: Replaces Sels of frame type and
1525 * value param type entities by FrameAddress.
1526 * Links all used entities.
1528 static void lower_frame_sels_walker(ir_node *irn, void *data)
1530 lower_frame_sels_env_t *ctx = data;
1533 ir_node *ptr = get_Sel_ptr(irn);
/* Only Sels based directly on the frame pointer are rewritten. */
1535 if (ptr == ctx->frame) {
1536 ir_entity *ent = get_Sel_entity(irn);
1537 ir_node *bl = get_nodes_block(irn);
1540 int is_value_param = 0;
1542 if (get_entity_owner(ent) == ctx->value_tp) {
1545 /* replace by its copy from the argument type */
1546 pos = get_struct_member_index(ctx->value_tp, ent);
1547 ent = get_argument_entity(ent, ctx);
/* The Sel becomes a backend FrameAddr on the (possibly substituted)
 * entity; presumably `nw` replaces `irn` in lines elided here. */
1550 nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
1553 /* check, if it's a param Sel and if have not seen this entity before */
1554 if (is_value_param && get_entity_link(ent) == NULL) {
1560 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
/* Mark the entity as recorded (link doubles as a "seen" flag here). */
1562 set_entity_link(ent, ctx->value_param_list);
1569 * Check if a value parameter is transmitted as a register.
1570 * This might happen if the address of an parameter is taken which is
1571 * transmitted in registers.
1573 * Note that on some architectures this case must be handled specially
1574 * because the place of the backing store is determined by their ABI.
1576 * In the default case we move the entity to the frame type and create
1577 * a backing store into the first block.
1579 static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
1581 be_abi_call_t *call = env->call;
1582 ir_graph *irg = env->irg;
1583 ent_pos_pair *entry, *new_list;
1585 int i, n = ARR_LEN(value_param_list);
/* Filter: collect (into new_list) only those value params that are passed
 * in a register and therefore need a backing store. */
1588 for (i = 0; i < n; ++i) {
1589 int pos = value_param_list[i].pos;
1590 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);
1593 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
1594 value_param_list[i].next = new_list;
1595 new_list = &value_param_list[i];
1598 if (new_list != NULL) {
1599 /* ok, change the graph */
1600 ir_node *start_bl = get_irg_start_block(irg);
1601 ir_node *first_bl = get_first_block_succ(start_bl);
1602 ir_node *frame, *imem, *nmem, *store, *mem, *args;
1603 optimization_state_t state;
1606 assert(first_bl && first_bl != start_bl);
1607 /* we had already removed critical edges, so the following
1608 assertion should be always true. */
1609 assert(get_Block_n_cfgpreds(first_bl) == 1);
1611 /* now create backing stores */
1612 frame = get_irg_frame(irg);
1613 imem = get_irg_initial_mem(irg);
/* Build the new memory Proj with optimizations off so it is not folded
 * away before edges are rerouted. */
1615 save_optimization_state(&state);
1617 nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
1618 restore_optimization_state(&state);
1620 /* reroute all edges to the new memory source */
1621 edges_reroute(imem, nmem, irg);
1625 args = get_irg_args(irg);
/* Emit one Store per register-passed parameter into the first block.
 * NOTE(review): lines elided in this view between 1633 and 1636
 * presumably create the Store that `mem` projects from — confirm the
 * store/mem ordering against the full source. */
1626 for (entry = new_list; entry != NULL; entry = entry->next) {
1628 ir_type *tp = get_entity_type(entry->ent);
1629 ir_mode *mode = get_type_mode(tp);
1632 /* address for the backing store */
1633 addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
1636 mem = new_r_Proj(store, mode_M, pn_Store_M);
1638 /* the backing store itself */
1639 store = new_r_Store(first_bl, mem, addr,
1640 new_r_Proj(args, mode, i), 0);
1642 /* the new memory Proj gets the last Proj from store */
1643 set_Proj_pred(nmem, store);
1644 set_Proj_proj(nmem, pn_Store_M);
1646 /* move all entities to the frame type */
1647 frame_tp = get_irg_frame_type(irg);
1648 offset = get_type_size_bytes(frame_tp);
1650 /* we will add new entities: set the layout to undefined */
1651 assert(get_type_state(frame_tp) == layout_fixed);
1652 set_type_state(frame_tp, layout_undefined);
1653 for (entry = new_list; entry != NULL; entry = entry->next) {
1654 ir_entity *ent = entry->ent;
1656 /* If the entity is still on the argument type, move it to the
1658 * This happens if the value_param type was build due to compound
1660 if (get_entity_owner(ent) != frame_tp) {
1661 ir_type *tp = get_entity_type(ent);
1662 unsigned align = get_type_alignment_bytes(tp);
/* Align the frame offset; assumes power-of-two alignment. */
1664 offset += align - 1;
1665 offset &= ~(align - 1);
1666 set_entity_owner(ent, frame_tp);
1667 /* must be automatic to set a fixed layout */
1668 set_entity_offset(ent, offset);
1669 offset += get_type_size_bytes(tp);
1672 set_type_size_bytes(frame_tp, offset);
1673 /* fix the layout again */
1674 set_type_state(frame_tp, layout_fixed);
1679 * The start block has no jump, instead it has an initial exec Proj.
1680 * The backend wants to handle all blocks the same way, so we replace
1681 * the out cfg edge with a real jump.
1683 static void fix_start_block(ir_graph *irg)
1685 ir_node *initial_X = get_irg_initial_exec(irg);
1686 ir_node *start_block = get_irg_start_block(irg);
1687 const ir_edge_t *edge;
1689 assert(is_Proj(initial_X));
/* Find the (single) non-Anchor successor block of the initial exec Proj
 * and replace its control-flow pred with a real Jmp from the start block. */
1691 foreach_out_edge(initial_X, edge) {
1692 ir_node *block = get_edge_src_irn(edge);
1694 if (is_Anchor(block))
1696 if (block != start_block) {
1697 ir_node *jmp = new_r_Jmp(start_block);
1698 set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
/* Record the Jmp as the new initial exec node of the graph. */
1699 set_irg_initial_exec(irg, jmp);
/* Reaching this point means no follow block exists — a malformed graph. */
1703 panic("Initial exec has no follow block in %+F", irg);
1707 * Update the entity of Sels to the outer value parameters.
1709 static void update_outer_frame_sels(ir_node *irn, void *env)
1711 lower_frame_sels_env_t *ctx = env;
1718 ptr = get_Sel_ptr(irn);
/* Only Sels hanging off the static-link argument Proj are of interest. */
1719 if (! is_arg_Proj(ptr))
1721 if (get_Proj_proj(ptr) != ctx->static_link_pos)
1723 ent = get_Sel_entity(irn);
1725 if (get_entity_owner(ent) == ctx->value_tp) {
1726 /* replace by its copy from the argument type */
1727 pos = get_struct_member_index(ctx->value_tp, ent);
1728 ent = get_argument_entity(ent, ctx);
1729 set_Sel_entity(irn, ent);
1731 /* check, if we have not seen this entity before */
1732 if (get_entity_link(ent) == NULL) {
1738 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
/* Mark the entity as recorded (link doubles as a "seen" flag). */
1740 set_entity_link(ent, ctx->value_param_list);
1746 * Fix access to outer local variables.
1748 static void fix_outer_variable_access(be_abi_irg_t *env,
1749 lower_frame_sels_env_t *ctx)
/* Walk every method entity nested in this frame type (i.e. every inner
 * function) and patch its Sels on the outer frame. */
1755 for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
1756 ir_entity *ent = get_class_member(ctx->frame_tp, i);
1758 if (! is_method_entity(ent))
1761 irg = get_entity_irg(ent);
1766 * FIXME: find the number of the static link parameter
1767 * for now we assume 0 here
1769 ctx->static_link_pos = 0;
1771 irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
1776 * Modify the irg itself and the frame type.
/* Main ABI transformation of one graph: lowers frame Sels, lays out the
 * stack frame, materializes register params/callee-saves as Start Projs,
 * builds the prologue barrier and rewrites all argument Projs. */
1778 static void modify_irg(be_abi_irg_t *env)
1780 be_abi_call_t *call = env->call;
1781 const arch_env_t *arch_env= env->arch_env;
1782 const arch_register_t *sp = arch_env->sp;
1783 ir_graph *irg = env->irg;
1786 ir_node *new_mem_proj;
1788 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1789 struct obstack *obst = be_get_be_obst(irg);
1790 be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
1795 unsigned frame_size;
1798 const arch_register_t *fp_reg;
1799 ir_node *frame_pointer;
1803 const ir_edge_t *edge;
1804 ir_type *arg_type, *bet_type, *tp;
1805 lower_frame_sels_env_t ctx;
1806 ir_entity **param_map;
1808 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1810 /* Must fetch memory here, otherwise the start Barrier gets the wrong
1811 * memory, which leads to loops in the DAG. */
1812 old_mem = get_irg_initial_mem(irg);
1814 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1816 /* set the links of all frame entities to NULL, we use it
1817 to detect if an entity is already linked in the value_param_list */
1818 tp = get_method_value_param_type(method_type);
1821 /* clear the links of the clone type, let the
1822 original entities point to its clones */
1823 for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
1824 ir_entity *mem = get_struct_member(tp, i);
1825 set_entity_link(mem, NULL);
/* NOTE(review): "¶m_map" below is an encoding-mangled "&param_map"
 * ('&para' collapsed into the pilcrow) — restore before compiling. */
1829 arg_type = compute_arg_type(env, call, method_type, tp, ¶m_map);
1831 /* Convert the Sel nodes in the irg to frame addr nodes: */
1832 ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
1833 ctx.frame = get_irg_frame(irg);
1834 ctx.sp_class = env->arch_env->sp->reg_class;
1835 ctx.link_class = env->arch_env->link_class;
1836 ctx.frame_tp = get_irg_frame_type(irg);
1838 /* layout the stackframe now */
1839 if (get_type_state(ctx.frame_tp) == layout_undefined) {
1840 default_layout_compound_type(ctx.frame_tp);
1843 /* we will possible add new entities to the frame: set the layout to undefined */
1844 assert(get_type_state(ctx.frame_tp) == layout_fixed);
1845 set_type_state(ctx.frame_tp, layout_undefined);
1847 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1849 /* fix the frame type layout again */
1850 set_type_state(ctx.frame_tp, layout_fixed);
1851 /* align stackframe to 4 byte */
1852 frame_size = get_type_size_bytes(ctx.frame_tp);
1853 if (frame_size % 4 != 0) {
1854 set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
1857 env->regs = pmap_create();
1859 n_params = get_method_n_params(method_type);
1860 args = OALLOCNZ(obst, ir_node*, n_params);
1863 * for inner function we must now fix access to outer frame entities.
1865 fix_outer_variable_access(env, &ctx);
1867 /* Check if a value parameter is transmitted as a register.
1868 * This might happen if the address of an parameter is taken which is
1869 * transmitted in registers.
1871 * Note that on some architectures this case must be handled specially
1872 * because the place of the backing store is determined by their ABI.
1874 * In the default case we move the entity to the frame type and create
1875 * a backing store into the first block.
1877 fix_address_of_parameter_access(env, ctx.value_param_list);
1879 DEL_ARR_F(ctx.value_param_list);
1880 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1882 /* Fill the argument vector */
1883 arg_tuple = get_irg_args(irg);
1884 foreach_out_edge(arg_tuple, edge) {
1885 ir_node *irn = get_edge_src_irn(edge);
1886 if (! is_Anchor(irn)) {
1887 int nr = get_Proj_proj(irn);
1889 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1893 bet_type = call->cb->get_between_type(env->cb);
1894 stack_frame_init(stack_layout, arg_type, bet_type,
1895 get_irg_frame_type(irg), arch_env->stack_dir, param_map);
1897 /* Count the register params and add them to the number of Projs for the RegParams node */
1898 for (i = 0; i < n_params; ++i) {
1899 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
1900 if (arg->in_reg && args[i]) {
1901 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1902 assert(i == get_Proj_proj(args[i]));
1904 /* For now, associate the register with the old Proj from Start representing that argument. */
1905 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1906 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1910 /* Collect all callee-save registers */
1911 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
1912 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
1913 for (j = 0; j < cls->n_regs; ++j) {
1914 const arch_register_t *reg = &cls->regs[j];
1915 if (arch_register_type_is(reg, callee_save) ||
1916 arch_register_type_is(reg, state)) {
1917 pmap_insert(env->regs, (void *) reg, NULL);
1922 /* handle start block here (place a jump in the block) */
1923 fix_start_block(irg);
1925 pmap_insert(env->regs, (void *) sp, NULL);
1926 pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1927 start_bl = get_irg_start_block(irg);
/* +1: output slot 0 of the Start node is reserved for memory. */
1928 env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1931 * make proj nodes for the callee save registers.
1932 * memorize them, since Return nodes get those as inputs.
1934 * Note, that if a register corresponds to an argument, the regs map contains
1935 * the old Proj from start for that argument.
1938 rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
1939 reg_map_to_arr(rm, env->regs);
1940 for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1941 arch_register_t *reg = (void *) rm[i].reg;
1942 ir_mode *mode = reg->reg_class->mode;
1944 arch_register_req_type_t add_type = 0;
1948 add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
1951 proj = new_r_Proj(env->start, mode, nr + 1);
1952 pmap_insert(env->regs, (void *) reg, proj);
1953 be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
1954 arch_set_irn_register(proj, reg);
1956 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1959 /* create a new initial memory proj */
1960 assert(is_Proj(old_mem));
1961 arch_set_out_register_req(env->start, 0, arch_no_register_req);
1962 new_mem_proj = new_r_Proj(env->start, mode_M, 0);
1964 set_irg_initial_mem(irg, mem);
1966 /* Generate the Prologue */
1967 fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &stack_layout->initial_bias);
1969 /* do the stack allocation BEFORE the barrier, or spill code
1970 might be added before it */
1971 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1972 env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
1973 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1975 create_barrier(start_bl, &mem, env->regs, 0);
1977 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1978 arch_set_irn_register(env->init_sp, sp);
1980 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1981 set_irg_frame(irg, frame_pointer);
1982 pset_insert_ptr(env->ignore_regs, fp_reg);
1984 /* rewire old mem users to new mem */
1985 exchange(old_mem, mem);
1987 /* keep the mem (for functions with an endless loop = no return) */
1990 set_irg_initial_mem(irg, mem);
1992 /* Now, introduce stack param nodes for all parameters passed on the stack */
1993 for (i = 0; i < n_params; ++i) {
1994 ir_node *arg_proj = args[i];
1995 ir_node *repl = NULL;
1997 if (arg_proj != NULL) {
1998 be_abi_call_arg_t *arg;
1999 ir_type *param_type;
2000 int nr = get_Proj_proj(arg_proj);
/* Clamp the Proj number defensively before indexing call args. */
2003 nr = MIN(nr, n_params);
2004 arg = get_call_arg(call, 0, nr, 1);
2005 param_type = get_method_param_type(method_type, nr);
2008 repl = pmap_get(env->regs, (void *) arg->reg);
2009 } else if (arg->on_stack) {
2010 ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
2012 /* For atomic parameters which are actually used, we create a Load node. */
2013 if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
2014 ir_mode *mode = get_type_mode(param_type);
2015 ir_mode *load_mode = arg->load_mode;
2017 ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
2018 repl = new_r_Proj(load, load_mode, pn_Load_res);
2020 if (mode != load_mode) {
2021 repl = new_r_Conv(start_bl, repl, mode);
2024 /* The stack parameter is not primitive (it is a struct or array),
2025 * we thus will create a node representing the parameter's address
2031 assert(repl != NULL);
2033 /* Beware: the mode of the register parameters is always the mode of the register class
2034 which may be wrong. Add Conv's then. */
2035 mode = get_irn_mode(args[i]);
2036 if (mode != get_irn_mode(repl)) {
2037 repl = new_r_Conv(get_nodes_block(repl), repl, mode);
2039 exchange(args[i], repl);
2043 /* the arg proj is not needed anymore now and should be only used by the anchor */
2044 assert(get_irn_n_edges(arg_tuple) == 1);
2045 kill_node(arg_tuple);
2046 set_irg_args(irg, new_r_Bad(irg));
2048 /* All Return nodes hang on the End node, so look for them there. */
2049 end = get_irg_end_block(irg);
2050 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
2051 ir_node *irn = get_Block_cfgpred(end, i);
2053 if (is_Return(irn)) {
2054 ir_node *blk = get_nodes_block(irn);
2055 ir_node *mem = get_Return_mem(irn);
2056 ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
2061 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
2062 the code is dead and will never be executed. */
2065 /** Fix the state inputs of calls that still hang on unknowns */
2066 static void fix_call_state_inputs(be_abi_irg_t *env)
2068 const arch_env_t *arch_env = env->arch_env;
2070 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
2072 /* Collect caller save registers */
2073 n = arch_env_get_n_reg_class(arch_env);
2074 for (i = 0; i < n; ++i) {
2076 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
2077 for (j = 0; j < cls->n_regs; ++j) {
2078 const arch_register_t *reg = arch_register_for_index(cls, j);
2079 if (arch_register_type_is(reg, state)) {
2080 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
/* For every recorded call, overwrite its trailing state-register inputs
 * with the Projs memorized in env->regs during modify_irg(). */
2085 n = ARR_LEN(env->calls);
2086 n_states = ARR_LEN(stateregs);
2087 for (i = 0; i < n; ++i) {
2089 ir_node *call = env->calls[i];
2091 arity = get_irn_arity(call);
2093 /* the state reg inputs are the last n inputs of the calls */
2094 for (s = 0; s < n_states; ++s) {
2095 int inp = arity - n_states + s;
2096 const arch_register_t *reg = stateregs[s];
2097 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
2099 set_irn_n(call, inp, regnode);
2103 DEL_ARR_F(stateregs);
2107 * Create a trampoline entity for the given method.
2109 static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
2111 ir_type *type = get_entity_type(method);
2112 ident *old_id = get_entity_ld_ident(method);
/* Linker name of the stub: "<original ld name>$stub". */
2113 ident *id = id_mangle3("", old_id, "$stub");
2114 ir_type *parent = be->pic_trampolines_type;
2115 ir_entity *ent = new_entity(parent, old_id, type);
2116 set_entity_ld_ident(ent, id);
2117 set_entity_visibility(ent, ir_visibility_private);
2123 * Returns the trampoline entity for the given method.
/* Memoized lookup: creates the trampoline on first request and caches it
 * in ent_trampoline_map. */
2125 static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
2127 ir_entity *result = pmap_get(env->ent_trampoline_map, method);
2128 if (result == NULL) {
2129 result = create_trampoline(env, method);
2130 pmap_insert(env->ent_trampoline_map, method, result);
/* Create a PIC indirection entity ("<name>$non_lazy_ptr") of pointer type
 * in the backend's pic_symbols segment for the given entity. */
2136 static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
2138 ident *old_id = get_entity_ld_ident(entity);
2139 ident *id = id_mangle3("", old_id, "$non_lazy_ptr");
2140 ir_type *e_type = get_entity_type(entity);
2141 ir_type *type = new_type_pointer(e_type);
2142 ir_type *parent = be->pic_symbols_type;
2143 ir_entity *ent = new_entity(parent, old_id, type);
2144 set_entity_ld_ident(ent, id);
2145 set_entity_visibility(ent, ir_visibility_private);
/* Memoized lookup of the PIC indirection entity; created on first use and
 * cached in ent_pic_symbol_map. */
2150 static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
2152 ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
2153 if (result == NULL) {
2154 result = create_pic_symbol(env, entity);
2155 pmap_insert(env->ent_pic_symbol_map, entity, result);
2164 * Returns non-zero if a given entity can be accessed using a relative address.
/* Relative addressing works only for entities defined in this module and
 * not subject to linker merging. */
2166 static int can_address_relative(ir_entity *entity)
2168 return get_entity_visibility(entity) != ir_visibility_external
2169 && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
2172 /** patches SymConsts to work in position independent code */
2173 static void fix_pic_symconsts(ir_node *node, void *data)
2182 be_abi_irg_t *env = data;
2184 be_main_env_t *be = be_get_irg_main_env(env->irg);
/* Inspect every input of the node; SymConst operands get rewritten. */
2186 arity = get_irn_arity(node);
2187 for (i = 0; i < arity; ++i) {
2189 ir_node *pred = get_irn_n(node, i);
2191 ir_entity *pic_symbol;
2192 ir_node *pic_symconst;
2194 if (!is_SymConst(pred))
2197 entity = get_SymConst_entity(pred);
2198 block = get_nodes_block(pred);
2199 irg = get_irn_irg(pred);
2201 /* calls can jump to relative addresses, so we can directly jump to
2202 the (relatively) known call address or the trampoline */
2203 if (i == 1 && is_Call(node)) {
2204 ir_entity *trampoline;
2205 ir_node *trampoline_const;
2207 if (can_address_relative(entity))
2210 dbgi = get_irn_dbg_info(pred);
2211 trampoline = get_trampoline(be, entity);
2212 trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
2214 set_irn_n(node, i, trampoline_const);
2218 /* everything else is accessed relative to EIP */
2219 mode = get_irn_mode(pred);
2220 pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(env->irg));
2222 /* all ok now for locally constructed stuff */
2223 if (can_address_relative(entity)) {
2224 ir_node *add = new_r_Add(block, pic_base, pred, mode);
2226 /* make sure the walker doesn't visit this add again */
2227 mark_irn_visited(add);
2228 set_irn_n(node, i, add);
2232 /* get entry from pic symbol segment */
2233 dbgi = get_irn_dbg_info(pred);
2234 pic_symbol = get_pic_symbol(be, entity);
2235 pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
2237 add = new_r_Add(block, pic_base, pic_symconst, mode);
2238 mark_irn_visited(add);
2240 /* we need an extra indirection for global data outside our current
2241 module. The loads are always safe and can therefore float
2242 and need no memory input */
2243 load = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
2244 load_res = new_r_Proj(load, mode, pn_Load_res);
2246 set_irn_n(node, i, load_res);
/* Entry point of the ABI phase for one graph: allocates the be_abi_irg_t,
 * sets up the stack-pointer register requirement, optionally rewrites
 * SymConsts for PIC, lowers all calls, runs modify_irg() and registers the
 * surviving nodes with the dead-node-elimination survivor set. */
2250 be_abi_irg_t *be_abi_introduce(ir_graph *irg)
2252 be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
2253 ir_node *old_frame = get_irg_frame(irg);
2254 struct obstack *obst = be_get_be_obst(irg);
2255 be_options_t *options = be_get_irg_options(irg);
2256 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
2260 unsigned *limited_bitset;
2261 arch_register_req_t *sp_req;
2263 be_omit_fp = options->omit_fp;
2264 be_omit_leaf_fp = options->omit_leaf_fp;
2268 env->arch_env = arch_env;
2269 env->method_type = get_entity_type(get_irg_entity(irg));
2270 env->call = be_abi_call_new(arch_env->sp->reg_class);
2271 arch_env_get_call_abi(arch_env, env->method_type, env->call);
2273 env->ignore_regs = pset_new_ptr_default();
2274 env->keep_map = pmap_create();
2275 env->dce_survivor = new_survive_dce();
/* Build the register requirement for stack-pointer nodes: limited to
 * exactly the SP register and flagged as producing the stack pointer. */
2278 sp_req = OALLOCZ(obst, arch_register_req_t);
2279 env->sp_req = sp_req;
2281 sp_req->type = arch_register_req_type_limited
2282 | arch_register_req_type_produces_sp;
2283 sp_req->cls = arch_register_get_class(arch_env->sp);
2285 limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
2286 rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
2287 sp_req->limited = limited_bitset;
2288 if (arch_env->sp->type & arch_register_type_ignore) {
2289 sp_req->type |= arch_register_req_type_ignore;
/* Placeholder for the initial stack pointer, replaced below by exchange(). */
2292 env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
2294 env->calls = NEW_ARR_F(ir_node*, 0);
2297 irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
2300 /* Lower all call nodes in the IRG. */
2304 Beware: init backend abi call object after processing calls,
2305 otherwise some information might be not yet available.
2307 env->cb = env->call->cb->init(env->call, arch_env, irg);
2309 /* Process the IRG */
2312 /* fix call inputs for state registers */
2313 fix_call_state_inputs(env);
2315 /* We don't need the keep map anymore. */
2316 pmap_destroy(env->keep_map);
2317 env->keep_map = NULL;
2319 /* calls array is not needed anymore */
2320 DEL_ARR_F(env->calls);
2323 /* reroute the stack origin of the calls to the true stack origin. */
2324 exchange(dummy, env->init_sp);
2325 exchange(old_frame, get_irg_frame(irg));
2327 /* Make some important node pointers survive the dead node elimination. */
2328 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
2329 foreach_pmap(env->regs, ent) {
2330 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
2333 env->call->cb->done(env->cb);
/* Release all resources owned by the ABI environment.
 * NOTE(review): env itself is presumably free()d in a line elided from this
 * view — confirm against the full source. */
2338 void be_abi_free(be_abi_irg_t *env)
2340 be_abi_call_free(env->call);
2341 free_survive_dce(env->dce_survivor);
2342 del_pset(env->ignore_regs);
2343 pmap_destroy(env->regs);
/* Set, in bitset `bs`, the indices of all ignore registers belonging to
 * register class `cls`. */
2347 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
2349 arch_register_t *reg;
2351 for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
2352 if (reg->reg_class == cls)
2353 bitset_set(bs, reg->index);
/* Fill `raw_bitset` with all allocatable (non-ignore) registers of `cls`:
 * first set every register whose static type is not ignore, then clear the
 * per-graph ignore registers collected in abi->ignore_regs. */
2356 void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
2359 arch_register_t *reg;
2361 for (i = 0; i < cls->n_regs; ++i) {
2362 if (arch_register_type_is(&cls->regs[i], ignore))
2365 rbitset_set(raw_bitset, i);
2368 for (reg = pset_first(abi->ignore_regs); reg != NULL;
2369 reg = pset_next(abi->ignore_regs)) {
2370 if (reg->reg_class != cls)
2373 rbitset_clear(raw_bitset, reg->index);
2379 | ___(_)_ __ / ___|| |_ __ _ ___| | __
2380 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
2381 | _| | |> < ___) | || (_| | (__| <
2382 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/** Flexible array of nodes (libFirm ARR_F). */
2386 typedef ir_node **node_array;
/** Walker environment: collects all nodes that produce the stack pointer. */
2388 typedef struct fix_stack_walker_env_t {
2389 node_array sp_nodes; /**< nodes whose output requirement has produces_sp */
2390 } fix_stack_walker_env_t;
2393 * Walker. Collect all stack modifying nodes.
2395 static void collect_stack_nodes_walker(ir_node *node, void *data)
2397 ir_node *insn = node;
2398 fix_stack_walker_env_t *env = data;
2399 const arch_register_req_t *req;
/* For Projs, the register info lives on the producing instruction. */
2401 if (is_Proj(node)) {
2402 insn = get_Proj_pred(node);
/* Nodes without outputs, and mode_T tuples themselves, carry no register
 * requirement to inspect. */
2405 if (arch_irn_get_n_outs(insn) == 0)
2407 if (get_irn_mode(node) == mode_T)
2410 req = arch_get_register_req_out(node);
2411 if (! (req->type & arch_register_req_type_produces_sp))
2414 ARR_APP1(ir_node*, env->sp_nodes, node);
2417 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
2419 be_ssa_construction_env_t senv;
2422 ir_graph *irg = env->irg;
2423 be_lv_t *lv = be_get_irg_liveness(irg);
2424 fix_stack_walker_env_t walker_env;
2426 walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
2428 irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);
2430 /* nothing to be done if we didn't find any node, in fact we mustn't
2431 * continue, as for endless loops incsp might have had no users and is bad
2434 len = ARR_LEN(walker_env.sp_nodes);
2436 DEL_ARR_F(walker_env.sp_nodes);
2440 be_ssa_construction_init(&senv, irg);
2441 be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
2442 ARR_LEN(walker_env.sp_nodes));
2443 be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
2444 ARR_LEN(walker_env.sp_nodes));
2447 len = ARR_LEN(walker_env.sp_nodes);
2448 for (i = 0; i < len; ++i) {
2449 be_liveness_update(lv, walker_env.sp_nodes[i]);
2451 be_ssa_construction_update_liveness_phis(&senv, lv);
2454 phis = be_ssa_construction_get_new_phis(&senv);
2456 /* set register requirements for stack phis */
2457 len = ARR_LEN(phis);
2458 for (i = 0; i < len; ++i) {
2459 ir_node *phi = phis[i];
2460 be_set_phi_reg_req(phi, env->sp_req);
2461 arch_set_irn_register(phi, env->arch_env->sp);
2463 be_ssa_construction_destroy(&senv);
2465 DEL_ARR_F(walker_env.sp_nodes);
2469 * Fix all stack accessing operations in the block bl.
2471 * @param env the abi environment
2472 * @param bl the block to process
2473 * @param real_bias the bias value
2475 * @return the bias at the end of this block
2477 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
2479 int omit_fp = env->call->flags.bits.try_omit_fp;
2481 int wanted_bias = real_bias;
2482 be_stack_layout_t *layout = be_get_irg_stack_layout(env->irg);
2484 sched_foreach(bl, irn) {
2488 Check, if the node relates to an entity on the stack frame.
2489 If so, set the true offset (including the bias) for that
2492 ir_entity *ent = arch_get_frame_entity(irn);
2494 int bias = omit_fp ? real_bias : 0;
2495 int offset = get_stack_entity_offset(layout, ent, bias);
2496 arch_set_frame_offset(irn, offset);
2497 DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
2498 ent, offset, bias));
2502 * If the node modifies the stack pointer by a constant offset,
2503 * record that in the bias.
2505 ofs = arch_get_sp_bias(irn);
2507 if (be_is_IncSP(irn)) {
2508 /* fill in real stack frame size */
2509 if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
2510 ir_type *frame_type = get_irg_frame_type(env->irg);
2511 ofs = (int) get_type_size_bytes(frame_type);
2512 be_set_IncSP_offset(irn, ofs);
2513 } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
2514 ir_type *frame_type = get_irg_frame_type(env->irg);
2515 ofs = - (int)get_type_size_bytes(frame_type);
2516 be_set_IncSP_offset(irn, ofs);
2518 if (be_get_IncSP_align(irn)) {
2519 /* patch IncSP to produce an aligned stack pointer */
2520 ir_type *between_type = layout->between_type;
2521 int between_size = get_type_size_bytes(between_type);
2522 int alignment = 1 << env->arch_env->stack_alignment;
2523 int delta = (real_bias + ofs + between_size) & (alignment - 1);
2526 be_set_IncSP_offset(irn, ofs + alignment - delta);
2527 real_bias += alignment - delta;
2530 /* adjust so real_bias corresponds with wanted_bias */
2531 int delta = wanted_bias - real_bias;
2534 be_set_IncSP_offset(irn, ofs + delta);
2545 assert(real_bias == wanted_bias);
2550 * A helper struct for the bias walker.
2553 be_abi_irg_t *env; /**< The ABI irg environment. */
2554 int start_block_bias; /**< The bias at the end of the start block. */
2556 ir_node *start_block; /**< The start block of the current graph. */
2560 * Block-Walker: fix all stack offsets for all blocks
2561 * except the start block
2563 static void stack_bias_walker(ir_node *bl, void *data)
2565 struct bias_walk *bw = data;
2566 if (bl != bw->start_block) {
2567 process_stack_bias(bw->env, bl, bw->start_block_bias);
2572 * Walker: finally lower all Sels of outer frame or parameter
2575 static void lower_outer_frame_sels(ir_node *sel, void *ctx)
2577 be_abi_irg_t *env = ctx;
2581 be_stack_layout_t *layout;
2586 ent = get_Sel_entity(sel);
2587 owner = get_entity_owner(ent);
2588 ptr = get_Sel_ptr(sel);
2590 layout = be_get_irg_stack_layout(env->irg);
2591 if (owner == layout->frame_type || owner == layout->arg_type) {
2592 /* found access to outer frame or arguments */
2593 int offset = get_stack_entity_offset(layout, ent, 0);
2596 ir_node *bl = get_nodes_block(sel);
2597 dbg_info *dbgi = get_irn_dbg_info(sel);
2598 ir_mode *mode = get_irn_mode(sel);
2599 ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
2600 ir_node *cnst = new_r_Const_long(current_ir_graph, mode_UInt, offset);
2602 ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
2608 void be_abi_fix_stack_bias(be_abi_irg_t *env)
2610 ir_graph *irg = env->irg;
2611 be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
2614 struct bias_walk bw;
2616 stack_frame_compute_initial_offset(stack_layout);
2617 // stack_layout_dump(stdout, stack_layout);
2619 /* Determine the stack bias at the end of the start block. */
2620 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg),
2621 stack_layout->initial_bias);
2622 bw.between_size = get_type_size_bytes(stack_layout->between_type);
2624 /* fix the bias is all other blocks */
2626 bw.start_block = get_irg_start_block(irg);
2627 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
2629 /* fix now inner functions: these still have Sel node to outer
2630 frame and parameter entities */
2631 frame_tp = get_irg_frame_type(irg);
2632 for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
2633 ir_entity *ent = get_class_member(frame_tp, i);
2634 ir_graph *irg = get_entity_irg(ent);
2637 irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
2642 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2644 assert(arch_register_type_is(reg, callee_save));
2645 assert(pmap_contains(abi->regs, (void *) reg));
2646 return pmap_get(abi->regs, (void *) reg);
2649 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2651 assert(arch_register_type_is(reg, ignore));
2652 assert(pmap_contains(abi->regs, (void *) reg));
2653 return pmap_get(abi->regs, (void *) reg);
2657 * Returns non-zero if the ABI has omitted the frame pointer in
2658 * the current graph.
2660 int be_abi_omit_fp(const be_abi_irg_t *abi)
2662 return abi->call->flags.bits.try_omit_fp;
2665 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
2666 void be_init_abi(void)
2668 FIRM_DBG_REGISTER(dbg, "firm.be.abi");