2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
33 #include "irgraph_t.h"
36 #include "iredges_t.h"
39 #include "irprintf_t.h"
45 #include "raw_bitset.h"
56 #include "bessaconstr.h"
/* NOTE(review): this struct is truncated in this view — the closing brace and
 * several fields referenced elsewhere in the file (e.g. `pos`, `load_mode`,
 * `stack_ent`) are not visible.  Do not edit without the full file. */
58 typedef struct _be_abi_call_arg_t {
59 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
60 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
61 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
/* Presumably the concrete register used when `in_reg` is set — verify. */
64 const arch_register_t *reg;
67 unsigned alignment; /**< stack alignment */
68 unsigned space_before; /**< allocate space before */
69 unsigned space_after; /**< allocate space after */
/* NOTE(review): truncated in this view — the `params` set (used by
 * get_call_arg()/create_call_arg()) and the closing brace are not visible. */
72 struct _be_abi_call_t {
73 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
74 int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
/* Backend-provided ABI callbacks — presumably optional; verify. */
75 const be_abi_callbacks_t *cb;
/* Layout type between argument area and local frame (e.g. return address). */
76 ir_type *between_type;
78 const arch_register_class_t *cls_addr; /**< register class of the call address */
82 * The ABI information for the current birg.
/* NOTE(review): truncated in this view — at least the obstack member `obst`
 * (used throughout adjust_call()) and the closing brace are not visible. */
84 struct _be_abi_irg_t {
86 be_irg_t *birg; /**< The back end IRG. */
/* Architecture environment — presumably cached from birg->main_env; verify. */
87 const arch_env_t *arch_env;
88 survive_dce_t *dce_survivor;
90 be_abi_call_t *call; /**< The ABI call information. */
91 ir_type *method_type; /**< The type of the method of the IRG. */
93 ir_node *init_sp; /**< The node representing the stack pointer
94 at the start of the function. */
96 ir_node *start; /**< The be_Start params node. */
97 pmap *regs; /**< A map of all callee-save and ignore regs to
98 their Projs to the RegParams node. */
100 int start_block_bias; /**< The stack bias at the end of the start block. */
102 void *cb; /**< ABI Callback self pointer. */
104 pmap *keep_map; /**< mapping blocks to keep nodes. */
105 pset *ignore_regs; /**< Additional registers which shall be ignored. */
107 ir_node **calls; /**< flexible array containing all be_Call nodes */
/* Register requirement for the stack pointer — TODO confirm usage. */
109 arch_register_req_t *sp_req;
111 be_stack_layout_t frame; /**< The stack frame model. */
113 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/** Node-height information: created in process_calls() and consumed by
 *  dependent_on() while ordering calls inside a block. */
116 static heights_t *ir_heights;
118 /** Flag: if set, try to omit the frame pointer in all routines. */
119 static int be_omit_fp = 1;
121 /** Flag: if set, try to omit the frame pointer in leaf routines only. */
122 static int be_omit_leaf_fp = 1;
125 _ ____ ___ ____ _ _ _ _
126 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
127 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
128 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
129 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
131 These callbacks are used by the backend to set the parameters
132 for a specific call type.
136 * Set compare function: compares two ABI call object arguments.
138 static int cmp_call_arg(const void *a, const void *b, size_t n)
140 const be_abi_call_arg_t *p = a, *q = b;
142 return !(p->is_res == q->is_res && p->pos == q->pos);
146 * Get an ABI call object argument.
148 * @param call the abi call
149 * @param is_res true for call results, false for call arguments
150 * @param pos position of the argument
152 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
154 be_abi_call_arg_t arg;
157 memset(&arg, 0, sizeof(arg));
161 hash = is_res * 128 + pos;
163 return set_find(call->params, &arg, sizeof(arg), hash);
167 * Set an ABI call object argument.
169 * @param call the abi call
170 * @param is_res true for call results, false for call arguments
171 * @param pos position of the argument
173 static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
175 be_abi_call_arg_t arg;
178 memset(&arg, 0, sizeof(arg));
182 hash = is_res * 128 + pos;
184 return set_insert(call->params, &arg, sizeof(arg), hash);
187 /* Set the flags for a call. */
188 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
194 /* Sets the number of bytes the stackframe is shrinked by the callee on return */
195 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
201 /* Set register class for call address */
202 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
204 call->cls_addr = cls;
208 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
210 be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
212 arg->load_mode = load_mode;
213 arg->alignment = alignment;
214 arg->space_before = space_before;
215 arg->space_after = space_after;
216 assert(alignment > 0 && "Alignment must be greater than 0");
219 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
221 be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
226 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
228 be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
233 /* Get the flags of a ABI call object. */
234 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
240 * Constructor for a new ABI call object.
242 * @param cls_addr register class of the call address
244 * @return the new ABI call object
246 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
248 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
251 call->params = new_set(cmp_call_arg, 16);
253 call->cls_addr = cls_addr;
255 call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
261 * Destructor for an ABI call object.
263 static void be_abi_call_free(be_abi_call_t *call)
265 del_set(call->params);
271 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
272 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
273 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
274 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
277 Handling of the stack frame. It is composed of three types:
278 1) The type of the arguments which are pushed on the stack.
279 2) The "between type" which consists of stuff the call of the
280 function pushes on the stack (like the return address and
281 the old base pointer for ia32).
282 3) The Firm frame type which consists of all local variables
286 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
289 ir_type *t = get_entity_owner(ent);
290 int ofs = get_entity_offset(ent);
294 /* Find the type the entity is contained in. */
295 for (index = 0; index < N_FRAME_TYPES; ++index) {
296 if (frame->order[index] == t)
298 /* Add the size of all the types below the one of the entity to the entity's offset */
299 ofs += get_type_size_bytes(frame->order[index]);
302 /* correct the offset by the initial position of the frame pointer */
303 ofs -= frame->initial_offset;
305 /* correct the offset with the current bias. */
312 * Retrieve the entity with given offset from a frame type.
314 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
318 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
319 ir_entity *ent = get_compound_member(t, i);
320 if (get_entity_offset(ent) == offset)
327 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
329 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
330 ir_entity *ent = search_ent_with_offset(base, 0);
333 frame->initial_offset
334 = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
336 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
339 return frame->initial_offset;
343 * Initializes the frame layout from parts
345 * @param frame the stack layout that will be initialized
346 * @param args the stack argument layout type
347 * @param between the between layout type
348 * @param locals the method frame type
349 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
350 * @param param_map an array mapping method argument positions to the stack argument type
352 * @return the initialized stack layout
354 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
355 ir_type *between, ir_type *locals, int stack_dir,
356 ir_entity *param_map[])
358 frame->arg_type = args;
359 frame->between_type = between;
360 frame->frame_type = locals;
361 frame->initial_offset = 0;
362 frame->initial_bias = 0;
363 frame->stack_dir = stack_dir;
364 frame->order[1] = between;
365 frame->param_map = param_map;
368 frame->order[0] = args;
369 frame->order[2] = locals;
372 /* typical decreasing stack: locals have the
373 * lowest addresses, arguments the highest */
374 frame->order[0] = locals;
375 frame->order[2] = args;
381 /** Dumps the stack layout to file. */
382 static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
386 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
387 for (j = 0; j < N_FRAME_TYPES; ++j) {
388 ir_type *t = frame->order[j];
390 ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
391 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
392 ir_entity *ent = get_compound_member(t, i);
393 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
400 * Returns non-zero if the call argument at given position
401 * is transfered on the stack.
403 static inline int is_on_stack(be_abi_call_t *call, int pos)
405 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
406 return arg && !arg->in_reg;
416 Adjustment of the calls inside a graph.
421 * Transform a call node into a be_Call node.
423 * @param env The ABI environment for the current irg.
424 * @param irn The call node.
425 * @param curr_sp The stack pointer node to use.
426 * @return The stack pointer after the call.
/* NOTE(review): this function is only partially visible in this view — many
 * lines (braces, declarations, some statements) are elided, so the code is
 * kept byte-identical; the comments added below are review annotations. */
428 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
430 ir_graph *irg = env->birg->irg;
431 const arch_env_t *arch_env = env->birg->main_env->arch_env;
432 ir_type *call_tp = get_Call_type(irn);
433 ir_node *call_ptr = get_Call_ptr(irn);
434 int n_params = get_method_n_params(call_tp);
435 ir_node *curr_mem = get_Call_mem(irn);
436 ir_node *bl = get_nodes_block(irn);
438 int stack_dir = arch_env->stack_dir;
439 const arch_register_t *sp = arch_env->sp;
440 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
441 ir_mode *mach_mode = sp->reg_class->mode;
442 struct obstack *obst = &env->obst;
443 int no_alloc = call->flags.bits.frame_is_setup_on_call;
444 int n_res = get_method_n_ress(call_tp);
445 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
447 ir_node *res_proj = NULL;
448 int n_reg_params = 0;
449 int n_stack_params = 0;
452 pset_new_t destroyed_regs, states;
453 pset_new_iterator_t iter;
457 int n_reg_results = 0;
458 const arch_register_t *reg;
459 const ir_edge_t *edge;
461 int *stack_param_idx;
462 int i, n, destroy_all_regs;
465 pset_new_init(&destroyed_regs);
466 pset_new_init(&states);
468 /* Let the isa fill out the abi description for that call node. */
469 arch_env_get_call_abi(arch_env, call_tp, call);
471 /* Insert code to put the stack arguments on the stack. */
472 assert(get_Call_n_params(irn) == n_params);
473 for (i = 0; i < n_params; ++i) {
474 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
477 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
479 stack_size += round_up2(arg->space_before, arg->alignment);
480 stack_size += round_up2(arg_size, arg->alignment);
481 stack_size += round_up2(arg->space_after, arg->alignment);
482 obstack_int_grow(obst, i);
486 stack_param_idx = obstack_finish(obst);
488 /* Collect all arguments which are passed in registers. */
489 for (i = 0; i < n_params; ++i) {
490 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
491 if (arg && arg->in_reg) {
492 obstack_int_grow(obst, i);
496 reg_param_idxs = obstack_finish(obst);
499 * If the stack is decreasing and we do not want to store sequentially,
500 * or someone else allocated the call frame
501 * we allocate as much space on the stack all parameters need, by
502 * moving the stack pointer along the stack's direction.
504 * Note: we also have to do this for stack_size == 0, because we may have
505 * to adjust stack alignment for the call.
507 if (stack_dir < 0 && !do_seq && !no_alloc) {
508 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
511 dbgi = get_irn_dbg_info(irn);
512 /* If there are some parameters which shall be passed on the stack. */
513 if (n_stack_params > 0) {
517 * Reverse list of stack parameters if call arguments are from left to right.
518 * We must reverse them again if they are pushed (not stored) and the stack
519 * direction is downwards.
521 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
522 for (i = 0; i < n_stack_params >> 1; ++i) {
523 int other = n_stack_params - i - 1;
524 int tmp = stack_param_idx[i];
525 stack_param_idx[i] = stack_param_idx[other];
526 stack_param_idx[other] = tmp;
530 curr_mem = get_Call_mem(irn);
532 obstack_ptr_grow(obst, curr_mem);
535 for (i = 0; i < n_stack_params; ++i) {
536 int p = stack_param_idx[i];
537 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
538 ir_node *param = get_Call_param(irn, p);
539 ir_node *addr = curr_sp;
541 ir_type *param_type = get_method_param_type(call_tp, p);
542 int param_size = get_type_size_bytes(param_type) + arg->space_after;
545 * If we wanted to build the arguments sequentially,
546 * the stack pointer for the next must be incremented,
547 * and the memory value propagated.
551 addr = curr_sp = be_new_IncSP(sp, bl, curr_sp, param_size + arg->space_before, 0);
552 add_irn_dep(curr_sp, curr_mem);
555 curr_ofs += arg->space_before;
556 curr_ofs = round_up2(curr_ofs, arg->alignment);
558 /* Make the expression to compute the argument's offset. */
560 ir_mode *constmode = mach_mode;
561 if (mode_is_reference(mach_mode)) {
564 addr = new_r_Const_long(irg, constmode, curr_ofs);
565 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
569 /* Insert a store for primitive arguments. */
570 if (is_atomic_type(param_type)) {
572 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
573 store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
574 mem = new_r_Proj(bl, store, mode_M, pn_Store_M);
577 /* Make a mem copy for compound arguments. */
581 assert(mode_is_reference(get_irn_mode(param)));
582 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
583 mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);
586 curr_ofs += param_size;
591 obstack_ptr_grow(obst, mem);
594 in = (ir_node **) obstack_finish(obst);
596 /* We need the sync only, if we didn't build the stores sequentially. */
598 if (n_stack_params >= 1) {
599 curr_mem = new_r_Sync(bl, n_stack_params + 1, in);
601 curr_mem = get_Call_mem(irn);
604 obstack_free(obst, in);
607 /* check for the return_twice property */
608 destroy_all_regs = 0;
609 if (is_SymConst_addr_ent(call_ptr)) {
610 ir_entity *ent = get_SymConst_entity(call_ptr);
612 if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
613 destroy_all_regs = 1;
/* NOTE(review): this inner `call_tp` shadows the outer declaration above. */
615 ir_type *call_tp = get_Call_type(irn);
617 if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
618 destroy_all_regs = 1;
621 /* Put caller save into the destroyed set and state registers in the states set */
622 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
624 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
625 for (j = 0; j < cls->n_regs; ++j) {
626 const arch_register_t *reg = arch_register_for_index(cls, j);
628 if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
629 if (! arch_register_type_is(reg, ignore))
630 pset_new_insert(&destroyed_regs, (void *) reg);
632 if (arch_register_type_is(reg, state)) {
633 pset_new_insert(&destroyed_regs, (void*) reg);
634 pset_new_insert(&states, (void*) reg);
639 if (destroy_all_regs) {
640 /* even if destroyed all is specified, neither SP nor FP are destroyed (else bad things will happen) */
641 pset_new_remove(&destroyed_regs, arch_env->sp);
642 pset_new_remove(&destroyed_regs, arch_env->bp);
645 /* search the largest result proj number */
646 res_projs = ALLOCANZ(ir_node*, n_res);
648 foreach_out_edge(irn, edge) {
649 const ir_edge_t *res_edge;
/* NOTE(review): this inner `irn` shadows the function parameter `irn`. */
650 ir_node *irn = get_edge_src_irn(edge);
652 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
655 foreach_out_edge(irn, res_edge) {
657 ir_node *res = get_edge_src_irn(res_edge);
659 assert(is_Proj(res));
661 proj = get_Proj_proj(res);
662 assert(proj < n_res);
663 assert(res_projs[proj] == NULL);
664 res_projs[proj] = res;
670 /** TODO: this is not correct for cases where return values are passed
671 * on the stack, but no known ABI does this currently...
673 n_reg_results = n_res;
675 /* make the back end call node and set its register requirements. */
676 for (i = 0; i < n_reg_params; ++i) {
677 obstack_ptr_grow(obst, get_Call_param(irn, reg_param_idxs[i]));
680 /* add state registers ins */
681 foreach_pset_new(&states, reg, iter) {
682 const arch_register_class_t *cls = arch_register_get_class(reg);
684 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
685 ir_fprintf(stderr, "Adding %+F\n", regnode);
687 ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
688 obstack_ptr_grow(obst, regnode);
690 n_ins = n_reg_params + pset_new_size(&states);
692 in = obstack_finish(obst);
694 /* ins collected, build the call */
695 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
697 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
698 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
699 n_ins, in, get_Call_type(irn));
700 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
703 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
704 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
705 n_ins, in, get_Call_type(irn));
707 be_Call_set_pop(low_call, call->pop);
709 /* put the call into the list of all calls for later processing */
710 ARR_APP1(ir_node *, env->calls, low_call);
712 /* create new stack pointer */
713 curr_sp = new_r_Proj(bl, low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
714 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
715 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
716 arch_set_irn_register(curr_sp, sp);
718 /* now handle results */
719 for (i = 0; i < n_res; ++i) {
721 ir_node *proj = res_projs[i];
722 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
724 /* returns values on stack not supported yet */
728 shift the proj number to the right, since we will drop the
729 unspeakable Proj_T from the Call. Therefore, all real argument
730 Proj numbers must be increased by pn_be_Call_first_res
732 pn = i + pn_be_Call_first_res;
735 ir_type *res_type = get_method_res_type(call_tp, i);
736 ir_mode *mode = get_type_mode(res_type);
737 proj = new_r_Proj(bl, low_call, mode, pn);
740 set_Proj_pred(proj, low_call);
741 set_Proj_proj(proj, pn);
745 pset_new_remove(&destroyed_regs, arg->reg);
750 Set the register class of the call address to
751 the backend provided class (default: stack pointer class)
753 be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
755 DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
757 /* Set the register classes and constraints of the Call parameters. */
758 for (i = 0; i < n_reg_params; ++i) {
759 int index = reg_param_idxs[i];
760 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
761 assert(arg->reg != NULL);
763 be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
767 /* Set the register constraints of the results. */
768 for (i = 0; i < n_res; ++i) {
769 ir_node *proj = res_projs[i];
770 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
771 int pn = get_Proj_proj(proj);
774 be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
775 arch_set_irn_register(proj, arg->reg);
777 obstack_free(obst, in);
778 exchange(irn, low_call);
780 /* kill the ProjT node */
781 if (res_proj != NULL) {
785 /* Make additional projs for the caller save registers
786 and the Keep node which keeps them alive. */
788 const arch_register_t *reg;
792 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
793 pset_new_iterator_t iter;
795 /* also keep the stack pointer */
797 set_irn_link(curr_sp, (void*) sp);
798 obstack_ptr_grow(obst, curr_sp);
800 foreach_pset_new(&destroyed_regs, reg, iter) {
801 ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj);
803 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
804 be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
805 arch_set_irn_register(proj, reg);
807 set_irn_link(proj, (void*) reg);
808 obstack_ptr_grow(obst, proj);
813 for (i = 0; i < n_reg_results; ++i) {
814 ir_node *proj = res_projs[i];
815 const arch_register_t *reg = arch_get_irn_register(proj);
816 set_irn_link(proj, (void*) reg);
817 obstack_ptr_grow(obst, proj);
821 /* create the Keep for the caller save registers */
822 in = (ir_node **) obstack_finish(obst);
823 keep = be_new_Keep(bl, n, in);
824 for (i = 0; i < n; ++i) {
825 const arch_register_t *reg = get_irn_link(in[i]);
826 be_node_set_reg_class_in(keep, i, reg->reg_class);
828 obstack_free(obst, in);
831 /* Clean up the stack. */
832 assert(stack_size >= call->pop);
833 stack_size -= call->pop;
835 if (stack_size > 0) {
836 ir_node *mem_proj = NULL;
838 foreach_out_edge(low_call, edge) {
839 ir_node *irn = get_edge_src_irn(edge);
840 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
847 mem_proj = new_r_Proj(bl, low_call, mode_M, pn_be_Call_M_regular);
848 keep_alive(mem_proj);
851 /* Clean up the stack frame or revert alignment fixes if we allocated it */
853 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
856 be_abi_call_free(call);
857 obstack_free(obst, stack_param_idx);
859 pset_new_destroy(&states);
860 pset_new_destroy(&destroyed_regs);
866 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
868 * @param alignment the minimum stack alignment
869 * @param size the node containing the non-aligned size
870 * @param block the block where new nodes are allocated on
871 * @param dbg debug info for new nodes
873 * @return a node representing the aligned size
875 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
876 ir_node *block, dbg_info *dbg)
878 if (stack_alignment > 1) {
884 assert(is_po2(stack_alignment));
886 mode = get_irn_mode(size);
887 tv = new_tarval_from_long(stack_alignment-1, mode);
888 irg = get_Block_irg(block);
889 mask = new_r_Const(irg, tv);
890 size = new_rd_Add(dbg, block, size, mask, mode);
892 tv = new_tarval_from_long(-(long)stack_alignment, mode);
893 mask = new_r_Const(irg, tv);
894 size = new_rd_And(dbg, block, size, mask, mode);
900 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
/* NOTE(review): partially visible — several declarations, the Proj switch
 * arms and some braces are elided; kept byte-identical with review notes. */
902 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
911 const ir_edge_t *edge;
912 ir_node *new_alloc, *size, *addr, *ins[2];
913 unsigned stack_alignment;
915 assert(get_Alloc_where(alloc) == stack_alloc);
917 block = get_nodes_block(alloc);
918 irg = get_Block_irg(block);
921 type = get_Alloc_type(alloc);
/* Collect the result and memory Projs of the Alloc (switch arms elided). */
923 foreach_out_edge(alloc, edge) {
924 ir_node *irn = get_edge_src_irn(edge);
926 assert(is_Proj(irn));
927 switch (get_Proj_proj(irn)) {
939 /* Beware: currently Alloc nodes without a result might happen,
940 only escape analysis kills them and this phase runs only for object
941 oriented source. We kill the Alloc here. */
942 if (alloc_res == NULL && alloc_mem) {
943 exchange(alloc_mem, get_Alloc_mem(alloc))
947 dbg = get_irn_dbg_info(alloc);
948 size = get_Alloc_size(alloc);
950 /* we might need to multiply the size with the element size */
951 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
952 ir_mode *mode = get_irn_mode(size);
953 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
955 ir_node *cnst = new_rd_Const(dbg, irg, tv);
956 size = new_rd_Mul(dbg, block, size, cnst, mode);
959 /* The stack pointer will be modified in an unknown manner.
960 We cannot omit it. */
961 env->call->flags.bits.try_omit_fp = 0;
963 stack_alignment = 1 << env->arch_env->stack_alignment;
964 size = adjust_alloc_size(stack_alignment, size, block, dbg);
965 new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
966 set_irn_dbg_info(new_alloc, dbg);
968 if (alloc_mem != NULL) {
972 addsp_mem = new_r_Proj(block, new_alloc, mode_M, pn_be_AddSP_M);
974 /* We need to sync the output mem of the AddSP with the input mem
975 edge into the alloc node. */
976 ins[0] = get_Alloc_mem(alloc);
978 sync = new_r_Sync(block, 2, ins);
980 exchange(alloc_mem, sync);
983 exchange(alloc, new_alloc);
985 /* fix projnum of alloca res */
986 set_Proj_proj(alloc_res, pn_be_AddSP_res);
989 curr_sp = new_r_Proj(block, new_alloc, get_irn_mode(curr_sp),
997 * The Free is transformed into a back end free node and connected to the stack nodes.
/* NOTE(review): partially visible — some declarations, braces and the final
 * return are elided; kept byte-identical with review notes. */
999 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
1003 ir_node *subsp, *mem, *res, *size, *sync;
1007 unsigned stack_alignment;
1010 assert(get_Free_where(free) == stack_alloc);
1012 block = get_nodes_block(free);
1013 irg = get_irn_irg(block);
1014 type = get_Free_type(free);
1015 sp_mode = env->arch_env->sp->reg_class->mode;
1016 dbg = get_irn_dbg_info(free);
1018 /* we might need to multiply the size with the element size */
1019 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
1020 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
1021 ir_node *cnst = new_rd_Const(dbg, irg, tv);
1022 ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
1026 size = get_Free_size(free);
1029 stack_alignment = 1 << env->arch_env->stack_alignment;
1030 size = adjust_alloc_size(stack_alignment, size, block, dbg);
1032 /* The stack pointer will be modified in an unknown manner.
1033 We cannot omit it. */
1034 env->call->flags.bits.try_omit_fp = 0;
1035 subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
1036 set_irn_dbg_info(subsp, dbg);
1038 mem = new_r_Proj(block, subsp, mode_M, pn_be_SubSP_M);
1039 res = new_r_Proj(block, subsp, sp_mode, pn_be_SubSP_sp);
1041 /* we need to sync the memory */
1042 in[0] = get_Free_mem(free);
1044 sync = new_r_Sync(block, 2, in);
/* NOTE(review): comment says "AddSP" but the node is a SubSP — verify. */
1046 /* and make the AddSP dependent on the former memory */
1047 add_irn_dep(subsp, get_Free_mem(free));
1050 exchange(free, sync);
1056 /* the following function is replaced by the usage of the heights module */
/* NOTE(review): this block appears to be compiled out (likely inside an
 * elided #if 0); its early returns are also not visible in this view. */
1059 * Walker for dependent_on().
1060 * This function searches a node tgt recursively from a given node
1061 * but is restricted to the given block.
1062 * @return 1 if tgt was reachable from curr, 0 if not.
1064 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
1068 if (get_nodes_block(curr) != bl)
1074 /* Phi functions stop the recursion inside a basic block */
1075 if (! is_Phi(curr)) {
1076 for (i = 0, n = get_irn_arity(curr); i < n; ++i) {
1077 if (check_dependence(get_irn_n(curr, i), tgt, bl))
1087 * Check if a node is somehow data dependent on another one.
1088 * both nodes must be in the same basic block.
1089 * @param n1 The first node.
1090 * @param n2 The second node.
1091 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
1093 static int dependent_on(ir_node *n1, ir_node *n2)
1095 assert(get_nodes_block(n1) == get_nodes_block(n2));
1097 return heights_reachable_in_block(ir_heights, n1, n2);
1100 static int cmp_call_dependency(const void *c1, const void *c2)
1102 ir_node *n1 = *(ir_node **) c1;
1103 ir_node *n2 = *(ir_node **) c2;
1106 Classical qsort() comparison function behavior:
1107 0 if both elements are equal
1108 1 if second is "smaller" that first
1109 -1 if first is "smaller" that second
1111 if (dependent_on(n1, n2))
1114 if (dependent_on(n2, n1))
1117 /* The nodes have no depth order, but we need a total order because qsort()
1119 return get_irn_idx(n1) - get_irn_idx(n2);
1123 * Walker: links all Call/Alloc/Free nodes to the Block they are contained.
1124 * Clears the irg_is_leaf flag if a Call is detected.
1126 static void link_ops_in_block_walker(ir_node *irn, void *data)
1128 be_abi_irg_t *env = data;
1129 ir_opcode code = get_irn_opcode(irn);
1131 if (code == iro_Call ||
1132 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1133 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1134 ir_node *bl = get_nodes_block(irn);
1135 void *save = get_irn_link(bl);
1137 if (code == iro_Call)
1138 env->call->flags.bits.irg_is_leaf = 0;
1140 set_irn_link(irn, save);
1141 set_irn_link(bl, irn);
1144 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
1145 ir_node *param = get_Builtin_param(irn, 0);
1146 tarval *tv = get_Const_tarval(param);
1147 unsigned long value = get_tarval_long(tv);
1148 /* use ebp, so the climbframe algo works... */
1150 env->call->flags.bits.try_omit_fp = 0;
1157 * Process all Call/Alloc/Free nodes inside a basic block.
1158 * Note that the link field of the block must contain a linked list of all
1159 * Call nodes inside the Block. We first order this list according to data dependency
1160 * and that connect the calls together.
/* NOTE(review): partially visible — several locals, case labels and braces
 * are elided; kept byte-identical with review notes. */
1162 static void process_ops_in_block(ir_node *bl, void *data)
1164 be_abi_irg_t *env = data;
1165 ir_node *curr_sp = env->init_sp;
/* Count and collect the nodes linked to this block by the walker. */
1169 for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
1170 obstack_ptr_grow(&env->obst, irn);
1172 /* If there were call nodes in the block. */
1178 nodes = obstack_finish(&env->obst);
1180 /* order the call nodes according to data dependency */
1181 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);
1183 for (i = n - 1; i >= 0; --i) {
1184 ir_node *irn = nodes[i];
1186 DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1187 switch (get_irn_opcode(irn)) {
1190 /* The stack pointer will be modified due to a call. */
1191 env->call->flags.bits.try_omit_fp = 0;
1193 curr_sp = adjust_call(env, irn, curr_sp);
1196 if (get_Alloc_where(irn) == stack_alloc)
1197 curr_sp = adjust_alloc(env, irn, curr_sp);
1200 if (get_Free_where(irn) == stack_alloc)
1201 curr_sp = adjust_free(env, irn, curr_sp);
1204 panic("invalid call");
1209 obstack_free(&env->obst, nodes);
1211 /* Keep the last stack state in the block by tying it to Keep node,
1212 * the proj from calls is already kept */
1213 if (curr_sp != env->init_sp &&
1214 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
/* NOTE(review): `nodes` here appears to be a different local declared in
 * elided lines (the obstack array was freed above) — verify in full source. */
1216 keep = be_new_Keep(bl, 1, nodes);
1217 pmap_insert(env->keep_map, bl, keep);
1221 set_irn_link(bl, curr_sp);
1222 } /* process_ops_in_block */
1225 * Adjust all call nodes in the graph to the ABI conventions.
1227 static void process_calls(be_abi_irg_t *env)
1229 ir_graph *irg = env->birg->irg;
1231 env->call->flags.bits.irg_is_leaf = 1;
1232 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
1234 ir_heights = heights_new(env->birg->irg);
1235 irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
1236 heights_free(ir_heights);
/**
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param env          the ABI environment
 * @param call         the current call ABI
 * @param method_type  the method type
 * @param val_param_tp the value parameter type, will be destroyed
 * @param param_map    an array mapping method arguments to the stack layout type
 *
 * @return the stack argument layout type
 */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
                                 ir_type *method_type, ir_type *val_param_tp,
                                 ir_entity ***param_map)
	/* walk parameters forwards or backwards depending on ABI stack direction */
	int dir  = env->call->flags.bits.left_to_right ? 1 : -1;
	int inc  = env->birg->main_env->arch_env->stack_dir * dir;
	int n    = get_method_n_params(method_type);
	int curr = inc > 0 ? 0 : n - 1;
	ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
	*param_map = map = OALLOCN(&env->obst, ir_entity*, n);
	/* the argument layout type is a struct named "<function>.arg_type" */
	res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
	for (i = 0; i < n; ++i, curr += inc) {
		ir_type *param_type = get_method_param_type(method_type, curr);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
		/* only stack-passed arguments get an entity in the layout type */
		if (arg->on_stack) {
			if (val_param_tp != NULL) {
				/* the entity was already created, create a copy in the param type */
				ir_entity *val_ent = get_method_value_param_ent(method_type, i);
				arg->stack_ent = copy_entity_own(val_ent, res);
				set_entity_link(val_ent, arg->stack_ent);
				set_entity_link(arg->stack_ent, NULL);
				/* must be automatic to set a fixed layout */
				set_entity_allocation(arg->stack_ent, allocation_automatic);
			/* create a new entity */
			snprintf(buf, sizeof(buf), "param_%d", i);
			arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
			/* lay out: optional padding before, alignment, then the value itself */
			ofs += arg->space_before;
			ofs = round_up2(ofs, arg->alignment);
			set_entity_offset(arg->stack_ent, ofs);
			ofs += arg->space_after;
			ofs += get_type_size_bytes(param_type);
			map[i] = arg->stack_ent;
	set_type_size_bytes(res, ofs);
	set_type_state(res, layout_fixed);
1303 const arch_register_t *reg;
1307 static int cmp_regs(const void *a, const void *b)
1309 const reg_node_map_t *p = a;
1310 const reg_node_map_t *q = b;
1312 if (p->reg->reg_class == q->reg->reg_class)
1313 return p->reg->index - q->reg->index;
1315 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flatten a register -> node pmap into an obstack-allocated array,
 * sorted by register (class first, then index; see cmp_regs()) so the
 * result is deterministic regardless of pmap iteration order.
 */
static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
	int n = pmap_count(reg_map);
	reg_node_map_t *res = OALLOCN(obst, reg_node_map_t, n);
	foreach_pmap(reg_map, ent) {
		res[i].reg = ent->key;
		res[i].irn = ent->value;
	/* sort for a deterministic, class-grouped order */
	qsort(res, n, sizeof(res[0]), cmp_regs);
1336 * Creates a barrier.
/**
 * Creates a be_Barrier over all register values in @p regs plus the
 * current memory. The map values are replaced by the barrier's Projs
 * and *mem is replaced by the barrier's memory Proj.
 *
 * @param env    the ABI environment
 * @param bl     block to place the barrier in
 * @param mem    in/out: current memory value
 * @param regs   map register -> defining node; updated in place
 * @param in_req if non-zero, the inputs are register-constrained as well
 */
static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
	int n_regs = pmap_count(regs);
	rm = reg_map_to_arr(&env->obst, regs);
	/* collect all mapped nodes plus the memory as barrier inputs */
	for (n = 0; n < n_regs; ++n)
		obstack_ptr_grow(&env->obst, rm[n].irn);
	obstack_ptr_grow(&env->obst, *mem);
	in = (ir_node **) obstack_finish(&env->obst);
	irn = be_new_Barrier(bl, n, in);
	obstack_free(&env->obst, in);
	for (n = 0; n < n_regs; ++n) {
		ir_node *pred = rm[n].irn;
		const arch_register_t *reg = rm[n].reg;
		arch_register_type_t add_type = 0;
		const backend_info_t *info;
		/* stupid workaround for now... as not all nodes report register
		 * requirements via backend info yet -- only propagate flags when
		 * the info is available */
		info = be_get_info(skip_Proj(pred));
		if (info != NULL && info->out_infos != NULL) {
			const arch_register_req_t *ireq = arch_get_register_req_out(pred);
			/* carry ignore/produces_sp properties through the barrier */
			if (ireq->type & arch_register_req_type_ignore)
				add_type |= arch_register_req_type_ignore;
			if (ireq->type & arch_register_req_type_produces_sp)
				add_type |= arch_register_req_type_produces_sp;
		proj = new_r_Proj(bl, irn, get_irn_mode(pred), n);
		be_node_set_reg_class_in(irn, n, reg->reg_class);
		be_set_constr_single_reg_in(irn, n, reg, 0);
		be_set_constr_single_reg_out(irn, n, reg, add_type);
		arch_set_irn_register(proj, reg);
		/* the map now yields the barrier Proj instead of the old node */
		pmap_insert(regs, (void *) reg, proj);
	/* the memory Proj is the barrier's last output */
	*mem = new_r_Proj(bl, irn, mode_M, n);
	obstack_free(&env->obst, rm);
/**
 * Creates a be_Return for a Return node.
 *
 * @param env   the abi environment
 * @param irn   the Return node or NULL if there was none
 * @param bl    the block where the be_Return should be placed
 * @param mem   the current memory
 * @param n_res number of return results
 */
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
                                 ir_node *mem, int n_res)
	be_abi_call_t *call = env->call;
	const arch_env_t *arch_env = env->birg->main_env->arch_env;
	pmap *reg_map = pmap_create();
	ir_node *keep = pmap_get(env->keep_map, bl);
	const arch_register_t **regs;
	/*
	 * get the valid stack node in this block.
	 * If we had a call in that block there is a Keep constructed by
	 * process_calls() which points to the last stack modification in that
	 * block. We'll use it then. Else we use the stack from the start block
	 * and let the ssa construction fix the usage.
	 */
	stack = be_abi_reg_map_get(env->regs, arch_env->sp);
	stack = get_irn_n(keep, 0);
	/* the Keep was only needed to preserve the stack value until here */
	remove_End_keepalive(get_irg_end(env->birg->irg), keep);
	/* Insert results for Return into the register map. */
	for (i = 0; i < n_res; ++i) {
		ir_node *res = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
		assert(arg->in_reg && "return value must be passed in register");
		pmap_insert(reg_map, (void *) arg->reg, res);
	/* Add uses of the callee save registers. */
	foreach_pmap(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			pmap_insert(reg_map, ent->key, ent->value);
	be_abi_reg_map_set(reg_map, arch_env->sp, stack);
	/* Make the Epilogue node and call the arch's epilogue maker. */
	create_barrier(env, bl, &mem, reg_map, 1);
	call->cb->epilogue(env->cb, bl, &mem, reg_map);
	/*
	 * Maximum size of the in array for Return nodes is
	 * return args + callee save/ignore registers + memory + stack pointer
	 */
	in_max = pmap_count(reg_map) + n_res + 2;
	in = OALLOCN(&env->obst, ir_node*, in_max);
	regs = OALLOCN(&env->obst, arch_register_t const*, in_max);
	/* input 1 is the (possibly adjusted) stack pointer */
	in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
	regs[1] = arch_env->sp;
	/* clear SP entry, since it has already been grown. */
	pmap_insert(reg_map, (void *) arch_env->sp, NULL);
	for (i = 0; i < n_res; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
		in[n] = be_abi_reg_map_get(reg_map, arg->reg);
		regs[n++] = arg->reg;
		/* Clear the map entry to mark the register as processed. */
		be_abi_reg_map_set(reg_map, arg->reg, NULL);
	/* grow the rest of the stuff. */
	foreach_pmap(reg_map, ent) {
		regs[n++] = ent->key;
	/* The in array for the new back end return is now ready. */
	dbgi = get_irn_dbg_info(irn);
	/* we have to pop the shadow parameter in in case of struct returns */
	ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
	/* Set the register classes of the return's parameter accordingly. */
	for (i = 0; i < n; ++i) {
		if (regs[i] == NULL)
		be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
	/* Free the space of the Epilog's in array and the register <-> proj map. */
	obstack_free(&env->obst, in);
	pmap_destroy(reg_map);
typedef struct ent_pos_pair ent_pos_pair;
/** A (value-param entity, parameter position) pair, chainable into a list. */
struct ent_pos_pair {
	ir_entity *ent;    /**< a value param entity */
	int pos;           /**< its parameter number */
	ent_pos_pair *next;  /**< for linking */
/** Walker context used while lowering frame/value-param Sels. */
typedef struct lower_frame_sels_env_t {
	ent_pos_pair *value_param_list;  /**< the list of all value param entities */
	ir_node *frame;                      /**< the current frame */
	const arch_register_class_t *sp_class;    /**< register class of the stack pointer */
	const arch_register_class_t *link_class;  /**< register class of the link pointer */
	ir_type *value_tp;                   /**< the value type if any */
	ir_type *frame_tp;                   /**< the frame type */
	int static_link_pos;                 /**< argument number of the hidden static link */
} lower_frame_sels_env_t;
/**
 * Return an entity from the backend for a value param entity.
 * Creates (and caches via the entity link) a copy of the entity inside
 * the frame type on first use, growing the frame type accordingly.
 *
 * @param ent a value param type entity
 * @param ctx context
 */
static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
	ir_entity *argument_ent = get_entity_link(ent);
	if (argument_ent == NULL) {
		/* we have NO argument entity yet: This is bad, as we will
		 * need one for backing store.
		 */
		ir_type *frame_tp = ctx->frame_tp;
		unsigned offset = get_type_size_bytes(frame_tp);
		ir_type *tp = get_entity_type(ent);
		unsigned align = get_type_alignment_bytes(tp);
		/* round the offset up to the entity's alignment
		 * (align is assumed to be a power of two) */
		offset += align - 1;
		offset &= ~(align - 1);
		argument_ent = copy_entity_own(ent, frame_tp);
		/* must be automatic to set a fixed layout */
		set_entity_allocation(argument_ent, allocation_automatic);
		set_entity_offset(argument_ent, offset);
		offset += get_type_size_bytes(tp);
		/* grow the frame type to cover the new entity */
		set_type_size_bytes(frame_tp, offset);
		/* cache the copy on the original entity's link */
		set_entity_link(ent, argument_ent);
	return argument_ent;
/**
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddress.
 * Links all used entities.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
	lower_frame_sels_env_t *ctx = data;
	ir_node *ptr = get_Sel_ptr(irn);
	/* only Sels hanging off the current frame are rewritten */
	if (ptr == ctx->frame) {
		ir_entity *ent = get_Sel_entity(irn);
		ir_node *bl = get_nodes_block(irn);
		int is_value_param = 0;
		if (get_entity_owner(ent) == ctx->value_tp) {
			/* replace by its copy from the argument type */
			pos = get_struct_member_index(ctx->value_tp, ent);
			ent = get_argument_entity(ent, ctx);
		nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
		/* check, if it's a param Sel and if have not seen this entity before */
		if (is_value_param && get_entity_link(ent) == NULL) {
			/* record the (entity, position) pair for later backing-store creation */
			ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
			/* NOTE(review): the link is set to the list head here, apparently
			 * as a "seen" marker -- confirm against the full source */
			set_entity_link(ent, ctx->value_param_list);
/**
 * Check if a value parameter is transmitted as a register.
 * This might happen if the address of a parameter is taken which is
 * transmitted in registers.
 *
 * Note that on some architectures this case must be handled specially
 * because the place of the backing store is determined by their ABI.
 *
 * In the default case we move the entity to the frame type and create
 * a backing store into the first block.
 */
static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
	be_abi_call_t *call = env->call;
	ir_graph *irg = env->birg->irg;
	ent_pos_pair *entry, *new_list;
	int i, n = ARR_LEN(value_param_list);
	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
	/* filter the parameters that need a backing store into new_list */
	for (i = 0; i < n; ++i) {
		int pos = value_param_list[i].pos;
		be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
		DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
		value_param_list[i].next = new_list;
		new_list = &value_param_list[i];
	if (new_list != NULL) {
		/* ok, change the graph */
		ir_node *start_bl = get_irg_start_block(irg);
		ir_node *first_bl = NULL;
		ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
		const ir_edge_t *edge;
		optimization_state_t state;
		/* find the (single) successor of the start block */
		foreach_block_succ(start_bl, edge) {
			first_bl = get_edge_src_irn(edge);
		assert(first_bl && first_bl != start_bl);
		/* we had already removed critical edges, so the following
		   assertion should be always true. */
		assert(get_Block_n_cfgpreds(first_bl) == 1);
		/* now create backing stores */
		frame = get_irg_frame(irg);
		imem = get_irg_initial_mem(irg);
		/* optimization must be off so the new Proj is not CSE'd away */
		save_optimization_state(&state);
		nmem = new_r_Proj(start_bl, get_irg_start(irg), mode_M, pn_Start_M);
		restore_optimization_state(&state);
		/* reroute all edges to the new memory source */
		edges_reroute(imem, nmem, irg);
		args = get_irg_args(irg);
		args_bl = get_nodes_block(args);
		/* create one Store per register parameter whose address was taken */
		for (entry = new_list; entry != NULL; entry = entry->next) {
			ir_type *tp = get_entity_type(entry->ent);
			ir_mode *mode = get_type_mode(tp);
			/* address for the backing store */
			addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
			mem = new_r_Proj(first_bl, store, mode_M, pn_Store_M);
			/* the backing store itself */
			store = new_r_Store(first_bl, mem, addr,
			                    new_r_Proj(args_bl, args, mode, i), 0);
		/* the new memory Proj gets the last Proj from store */
		set_Proj_pred(nmem, store);
		set_Proj_proj(nmem, pn_Store_M);
		/* move all entities to the frame type */
		frame_tp = get_irg_frame_type(irg);
		offset = get_type_size_bytes(frame_tp);
		/* we will add new entities: set the layout to undefined */
		assert(get_type_state(frame_tp) == layout_fixed);
		set_type_state(frame_tp, layout_undefined);
		for (entry = new_list; entry != NULL; entry = entry->next) {
			ir_entity *ent = entry->ent;
			/* If the entity is still on the argument type, move it to the frame type.
			   This happens if the value_param type was build due to compound
			   parameter passing. */
			if (get_entity_owner(ent) != frame_tp) {
				ir_type *tp = get_entity_type(ent);
				unsigned align = get_type_alignment_bytes(tp);
				/* round up to the entity's alignment */
				offset += align - 1;
				offset &= ~(align - 1);
				set_entity_owner(ent, frame_tp);
				add_class_member(frame_tp, ent);
				/* must be automatic to set a fixed layout */
				set_entity_allocation(ent, allocation_automatic);
				set_entity_offset(ent, offset);
				offset += get_type_size_bytes(tp);
		set_type_size_bytes(frame_tp, offset);
		/* fix the layout again */
		set_type_state(frame_tp, layout_fixed);
/**
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
 */
static void fix_start_block(ir_graph *irg)
	ir_node *initial_X = get_irg_initial_exec(irg);
	ir_node *start_block = get_irg_start_block(irg);
	const ir_edge_t *edge;
	assert(is_Proj(initial_X));
	/* find the (non-Anchor) user of the initial exec Proj and replace it */
	foreach_out_edge(initial_X, edge) {
		ir_node *block = get_edge_src_irn(edge);
		if (is_Anchor(block))
		if (block != start_block) {
			ir_node *jmp = new_r_Jmp(start_block);
			set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
			set_irg_initial_exec(irg, jmp);
	panic("Initial exec has no follow block in %+F", irg);
/**
 * Walker: update the entity of Sels that access the outer frame's
 * value parameters (reached through the static link argument).
 */
static void update_outer_frame_sels(ir_node *irn, void *env) {
	lower_frame_sels_env_t *ctx = env;
	ptr = get_Sel_ptr(irn);
	/* only Sels based on the static-link argument Proj are of interest */
	if (! is_arg_Proj(ptr))
	if (get_Proj_proj(ptr) != ctx->static_link_pos)
	ent = get_Sel_entity(irn);
	if (get_entity_owner(ent) == ctx->value_tp) {
		/* replace by its copy from the argument type */
		pos = get_struct_member_index(ctx->value_tp, ent);
		ent = get_argument_entity(ent, ctx);
		set_Sel_entity(irn, ent);
		/* check, if we have not seen this entity before */
		if (get_entity_link(ent) == NULL) {
			/* record the pair for backing-store creation */
			ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
			/* mark the entity as seen (link used as marker) */
			set_entity_link(ent, ctx->value_param_list);
/**
 * Fix access to outer local variables: walk all inner methods of the
 * frame type and rewrite their Sels of outer value parameters.
 */
static void fix_outer_variable_access(be_abi_irg_t *env,
                                      lower_frame_sels_env_t *ctx)
	for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
		ir_entity *ent = get_class_member(ctx->frame_tp, i);
		/* only concrete inner methods are relevant */
		if (! is_method_entity(ent))
		if (get_entity_peculiarity(ent) == peculiarity_description)
		/*
		 * FIXME: find the number of the static link parameter
		 * for now we assume 0 here
		 */
		ctx->static_link_pos = 0;
		irg = get_entity_irg(ent);
		irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
/**
 * Modify the irg itself and the frame type: lower frame Sels, create
 * backing stores, build the be_Start/prologue/barrier chain, replace
 * argument Projs and turn Returns into be_Returns.
 */
static void modify_irg(be_abi_irg_t *env)
	be_abi_call_t *call = env->call;
	const arch_env_t *arch_env= env->birg->main_env->arch_env;
	const arch_register_t *sp = arch_env->sp;
	ir_graph *irg = env->birg->irg;
	ir_node *new_mem_proj;
	ir_type *method_type = get_entity_type(get_irg_entity(irg));
	unsigned frame_size;
	const arch_register_t *fp_reg;
	ir_node *frame_pointer;
	const ir_edge_t *edge;
	ir_type *arg_type, *bet_type, *tp;
	lower_frame_sels_env_t ctx;
	ir_entity **param_map;
	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
	/* Must fetch memory here, otherwise the start Barrier gets the wrong
	 * memory, which leads to loops in the DAG. */
	old_mem = get_irg_initial_mem(irg);
	irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
	/* set the links of all frame entities to NULL, we use it
	   to detect if an entity is already linked in the value_param_list */
	tp = get_method_value_param_type(method_type);
	/* clear the links of the clone type, let the
	   original entities point to its clones */
	for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
		ir_entity *mem = get_struct_member(tp, i);
		set_entity_link(mem, NULL);
	arg_type = compute_arg_type(env, call, method_type, tp, &param_map);
	/* Convert the Sel nodes in the irg to frame addr nodes: */
	ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
	ctx.frame = get_irg_frame(irg);
	ctx.sp_class = env->arch_env->sp->reg_class;
	ctx.link_class = env->arch_env->link_class;
	ctx.frame_tp = get_irg_frame_type(irg);
	/* we will possible add new entities to the frame: set the layout to undefined */
	assert(get_type_state(ctx.frame_tp) == layout_fixed);
	set_type_state(ctx.frame_tp, layout_undefined);
	irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
	/* fix the frame type layout again */
	set_type_state(ctx.frame_tp, layout_fixed);
	/* align stackframe to 4 byte */
	frame_size = get_type_size_bytes(ctx.frame_tp);
	if (frame_size % 4 != 0) {
		set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
	env->regs = pmap_create();
	n_params = get_method_n_params(method_type);
	args = OALLOCNZ(&env->obst, ir_node*, n_params);
	/*
	 * for inner function we must now fix access to outer frame entities.
	 */
	fix_outer_variable_access(env, &ctx);
	/* Check if a value parameter is transmitted as a register.
	 * This might happen if the address of an parameter is taken which is
	 * transmitted in registers.
	 *
	 * Note that on some architectures this case must be handled specially
	 * because the place of the backing store is determined by their ABI.
	 *
	 * In the default case we move the entity to the frame type and create
	 * a backing store into the first block.
	 */
	fix_address_of_parameter_access(env, ctx.value_param_list);
	DEL_ARR_F(ctx.value_param_list);
	irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
	/* Fill the argument vector */
	arg_tuple = get_irg_args(irg);
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		if (! is_Anchor(irn)) {
			int nr = get_Proj_proj(irn);
			DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
	bet_type = call->cb->get_between_type(env->cb);
	stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
	/* Count the register params and add them to the number of Projs for the RegParams node */
	for (i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if (arg->in_reg && args[i]) {
			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
			assert(i == get_Proj_proj(args[i]));
			/* For now, associate the register with the old Proj from Start representing that argument. */
			pmap_insert(env->regs, (void *) arg->reg, args[i]);
			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
	/* Collect all callee-save registers */
	for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
		const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			if (arch_register_type_is(reg, callee_save) ||
			    arch_register_type_is(reg, state)) {
				pmap_insert(env->regs, (void *) reg, NULL);
	/* handle start block here (place a jump in the block) */
	fix_start_block(irg);
	pmap_insert(env->regs, (void *) sp, NULL);
	pmap_insert(env->regs, (void *) arch_env->bp, NULL);
	start_bl = get_irg_start_block(irg);
	env->start = be_new_Start(start_bl, pmap_count(env->regs) + 1);
	/*
	 * make proj nodes for the callee save registers.
	 * memorize them, since Return nodes get those as inputs.
	 *
	 * Note, that if a register corresponds to an argument, the regs map contains
	 * the old Proj from start for that argument.
	 */
	rm = reg_map_to_arr(&env->obst, env->regs);
	for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
		arch_register_t *reg = (void *) rm[i].reg;
		ir_mode *mode = reg->reg_class->mode;
		arch_register_req_type_t add_type = 0;
		add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
		/* output 0 of be_Start is memory, register Projs start at 1 */
		proj = new_r_Proj(start_bl, env->start, mode, nr + 1);
		pmap_insert(env->regs, (void *) reg, proj);
		be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
		arch_set_irn_register(proj, reg);
		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
	obstack_free(&env->obst, rm);
	/* create a new initial memory proj */
	assert(is_Proj(old_mem));
	arch_set_out_register_req(env->start, 0, arch_no_register_req);
	new_mem_proj = new_r_Proj(start_bl, env->start, mode_M, 0);
	set_irg_initial_mem(irg, mem);
	/* Generate the Prologue */
	fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
	/* do the stack allocation BEFORE the barrier, or spill code
	   might be added before it */
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
	be_abi_reg_map_set(env->regs, sp, env->init_sp);
	create_barrier(env, start_bl, &mem, env->regs, 0);
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	arch_set_irn_register(env->init_sp, sp);
	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
	set_irg_frame(irg, frame_pointer);
	pset_insert_ptr(env->ignore_regs, fp_reg);
	/* rewire old mem users to new mem */
	exchange(old_mem, mem);
	set_irg_initial_mem(irg, mem);
	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for (i = 0; i < n_params; ++i) {
		ir_node *arg_proj = args[i];
		ir_node *repl = NULL;
		if (arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			ir_type *param_type;
			int nr = get_Proj_proj(arg_proj);
			/* clamp against out-of-range Proj numbers */
			nr = MIN(nr, n_params);
			arg = get_call_arg(call, 0, nr);
			param_type = get_method_param_type(method_type, nr);
			repl = pmap_get(env->regs, (void *) arg->reg);
		} else if (arg->on_stack) {
			ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
			/* For atomic parameters which are actually used, we create a Load node. */
			if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
				ir_mode *mode = get_type_mode(param_type);
				ir_mode *load_mode = arg->load_mode;
				ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
				repl = new_r_Proj(start_bl, load, load_mode, pn_Load_res);
				if (mode != load_mode) {
					repl = new_r_Conv(start_bl, repl, mode);
			/* The stack parameter is not primitive (it is a struct or array),
			 * we thus will create a node representing the parameter's address
			 * on the frame -- NOTE(review): the else branch is outside this view */
		assert(repl != NULL);
		/* Beware: the mode of the register parameters is always the mode of the register class
		   which may be wrong. Add Conv's then. */
		mode = get_irn_mode(args[i]);
		if (mode != get_irn_mode(repl)) {
			repl = new_r_Conv(get_nodes_block(repl), repl, mode);
		exchange(args[i], repl);
	/* the arg proj is not needed anymore now and should be only used by the anchor */
	assert(get_irn_n_edges(arg_tuple) == 1);
	kill_node(arg_tuple);
	set_irg_args(irg, new_r_Bad(irg));
	/* All Return nodes hang on the End node, so look for them there. */
	end = get_irg_end_block(irg);
	for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
		ir_node *irn = get_Block_cfgpred(end, i);
		if (is_Return(irn)) {
			ir_node *blk = get_nodes_block(irn);
			ir_node *mem = get_Return_mem(irn);
			ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
	/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
	   the code is dead and will never be executed. */
	obstack_free(&env->obst, args);
/**
 * Fix the state inputs of calls that still hang on unknowns:
 * collect all state registers and wire the trailing call inputs
 * to the corresponding register nodes.
 */
void fix_call_state_inputs(be_abi_irg_t *env)
	const arch_env_t *arch_env = env->arch_env;
	arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
	/* Collect caller save registers */
	n = arch_env_get_n_reg_class(arch_env);
	for (i = 0; i < n; ++i) {
		const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if (arch_register_type_is(reg, state)) {
				ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
	n = ARR_LEN(env->calls);
	n_states = ARR_LEN(stateregs);
	for (i = 0; i < n; ++i) {
		ir_node *call = env->calls[i];
		arity = get_irn_arity(call);
		/* the state reg inputs are the last n inputs of the calls */
		for (s = 0; s < n_states; ++s) {
			int inp = arity - n_states + s;
			const arch_register_t *reg = stateregs[s];
			ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
			set_irn_n(call, inp, regnode);
	DEL_ARR_F(stateregs);
/**
 * Create a trampoline entity for the given method: a local, uninitialized
 * stub entity named "L<ld_ident>$stub" in the PIC trampolines type.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
	ir_type *type = get_entity_type(method);
	ident *old_id = get_entity_ld_ident(method);
	ident *id = id_mangle3("L", old_id, "$stub");
	ir_type *parent = be->pic_trampolines_type;
	ir_entity *ent = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	/* stubs are local to the module and carry no initializer */
	set_entity_visibility(ent, visibility_local);
	set_entity_variability(ent, variability_uninitialized);
/**
 * Returns the trampoline entity for the given method,
 * creating and caching it on first request.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
	ir_entity *result = pmap_get(env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
/**
 * Create a PIC symbol entity ("L<ld_ident>$non_lazy_ptr") for the given
 * entity: a local, uninitialized pointer slot in the PIC symbols type.
 */
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
	ident *old_id = get_entity_ld_ident(entity);
	ident *id = id_mangle3("L", old_id, "$non_lazy_ptr");
	ir_type *e_type = get_entity_type(entity);
	/* the slot holds a pointer to the original entity's type */
	ir_type *type = new_type_pointer(id, e_type, mode_P_data);
	ir_type *parent = be->pic_symbols_type;
	ir_entity *ent = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, visibility_local);
	set_entity_variability(ent, variability_uninitialized);
/**
 * Returns the PIC symbol entity for the given entity,
 * creating and caching it on first request.
 */
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
	ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
2211 * Returns non-zero if a given entity can be accessed using a relative address.
2213 static int can_address_relative(ir_entity *entity)
2215 return get_entity_visibility(entity) != visibility_external_allocated;
/** Walker: patches SymConsts to work in position independent code. */
static void fix_pic_symconsts(ir_node *node, void *data)
	be_abi_irg_t *env = data;
	be_main_env_t *be = env->birg->main_env;
	arity = get_irn_arity(node);
	/* inspect every operand; only SymConsts need patching */
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(node, i);
		ir_entity *pic_symbol;
		ir_node *pic_symconst;
		if (!is_SymConst(pred))
		entity = get_SymConst_entity(pred);
		block = get_nodes_block(pred);
		irg = get_irn_irg(pred);
		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node *trampoline_const;
			/* relatively addressable callees need no trampoline */
			if (can_address_relative(entity))
			dbgi = get_irn_dbg_info(pred);
			trampoline = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			set_irn_n(node, i, trampoline_const);
		/* everything else is accessed relative to EIP */
		mode = get_irn_mode(pred);
		unknown = new_r_Unknown(irg, mode);
		pic_base = arch_code_generator_get_pic_base(env->birg->cg);
		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			ir_node *add = new_r_Add(block, pic_base, pred, mode);
			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
		/* get entry from pic symbol segment */
		dbgi = get_irn_dbg_info(pred);
		pic_symbol = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);
		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
		load_res = new_r_Proj(block, load, mode, pn_Load_res);
		set_irn_n(node, i, load_res);
/**
 * Introduce the ABI on the given backend irg: sets up the ABI environment,
 * patches PIC SymConsts, lowers all calls, modifies the graph and registers
 * important nodes with the dead-node-elimination survivor set.
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
	ir_node *old_frame = get_irg_frame(birg->irg);
	ir_graph *irg = birg->irg;
	optimization_state_t state;
	unsigned *limited_bitset;
	arch_register_req_t *sp_req;
	be_omit_fp = birg->main_env->options->omit_fp;
	be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
	obstack_init(&env->obst);
	env->arch_env = birg->main_env->arch_env;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call = be_abi_call_new(env->arch_env->sp->reg_class);
	arch_env_get_call_abi(env->arch_env, env->method_type, env->call);
	env->ignore_regs = pset_new_ptr_default();
	env->keep_map = pmap_create();
	env->dce_survivor = new_survive_dce();
	/* build the register requirement that pins the stack pointer register */
	sp_req = OALLOCZ(&env->obst, arch_register_req_t);
	env->sp_req = sp_req;
	sp_req->type = arch_register_req_type_limited
	             | arch_register_req_type_produces_sp;
	sp_req->cls = arch_register_get_class(env->arch_env->sp);
	limited_bitset = rbitset_obstack_alloc(&env->obst, sp_req->cls->n_regs);
	rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
	sp_req->limited = limited_bitset;
	if (env->arch_env->sp->type & arch_register_type_ignore) {
		sp_req->type |= arch_register_req_type_ignore;
	/* Beware: later we replace this node by the real one, ensure it is not CSE'd
	   to another Unknown or the stack pointer gets used */
	save_optimization_state(&state);
	env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
	restore_optimization_state(&state);
	FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
	env->calls = NEW_ARR_F(ir_node*, 0);
	if (birg->main_env->options->pic) {
		irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
	/* Lower all call nodes in the IRG. */
	/*
	 * Beware: init backend abi call object after processing calls,
	 * otherwise some information might be not yet available.
	 */
	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
	/* Process the IRG */
	/* fix call inputs for state registers */
	fix_call_state_inputs(env);
	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	env->keep_map = NULL;
	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);
	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));
	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	foreach_pmap(env->regs, ent) {
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	env->call->cb->done(env->cb);
/**
 * Release everything be_abi_introduce() allocated: the call description, the
 * DCE survivor list, the ignore-register set, the register map and the
 * obstack backing the requirement structures.
 * NOTE(review): the free(env) of the environment itself is presumably on an
 * elided line following this excerpt — confirm against the full source.
 *
 * @param env  the ABI environment to destroy
 */
2392 void be_abi_free(be_abi_irg_t *env)
2394 be_abi_call_free(env->call);
2395 free_survive_dce(env->dce_survivor);
2396 del_pset(env->ignore_regs);
2397 pmap_destroy(env->regs);
/* Frees all requirement structs and bitsets allocated on the obstack. */
2398 obstack_free(&env->obst, NULL);
/**
 * Set, in the bitset @p bs, the index of every ignore register of register
 * class @p cls that was collected in abi->ignore_regs.
 *
 * @param abi  the ABI environment
 * @param cls  the register class to filter for
 * @param bs   output bitset, indexed by register index within the class
 */
2402 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
2404 arch_register_t *reg;
/* pset iteration protocol: pset_first() resets, pset_next() advances. */
2406 for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
2407 if (reg->reg_class == cls)
2408 bitset_set(bs, reg->index);
/**
 * Fill @p raw_bitset with the allocatable (non-ignore) registers of class
 * @p cls: first set every register of the class that is not statically typed
 * as ignore, then clear the registers recorded in abi->ignore_regs.
 * NOTE(review): the 'continue' statements and the declaration of 'i' appear
 * to be on elided lines; the loop bodies as shown rely on them.
 *
 * @param abi         the ABI environment
 * @param cls         the register class
 * @param raw_bitset  output raw bitset (see raw_bitset.h), one bit per register
 */
2411 void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
2414 arch_register_t *reg;
/* Pass 1: mark all registers of the class except statically-ignored ones. */
2416 for (i = 0; i < cls->n_regs; ++i) {
2417 if (arch_register_type_is(&cls->regs[i], ignore))
2420 rbitset_set(raw_bitset, i);
/* Pass 2: additionally clear registers the ABI marked ignore for this irg. */
2423 for (reg = pset_first(abi->ignore_regs); reg != NULL;
2424 reg = pset_next(abi->ignore_regs)) {
2425 if (reg->reg_class != cls)
2428 rbitset_clear(raw_bitset, reg->index);
2432 /* Returns the stack layout from a abi environment. */
2433 const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
2441 | ___(_)_ __ / ___|| |_ __ _ ___| | __
2442 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
2443 | _| | |> < ___) | || (_| | (__| <
2444 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/* Flexible array of nodes (libFirm ARR_F array, see array.h). */
2448 typedef ir_node **node_array;
/** Walker environment: collects all stack-pointer-producing nodes. */
2450 typedef struct fix_stack_walker_env_t {
2451 node_array sp_nodes;
2452 } fix_stack_walker_env_t;
2455 * Walker. Collect all stack modifying nodes.
/**
 * irg-walker callback: append every node (or Proj of a node) whose output
 * register requirement has the produces_sp flag to env->sp_nodes.
 *
 * @param node  the visited node
 * @param data  a fix_stack_walker_env_t*
 */
2457 static void collect_stack_nodes_walker(ir_node *node, void *data)
2459 ir_node *insn = node;
2460 fix_stack_walker_env_t *env = data;
2461 const arch_register_req_t *req;
/* For Projs, the backend out-requirements live on the predecessor. */
2463 if (is_Proj(node)) {
2464 insn = get_Proj_pred(node);
/* Nodes without backend outputs cannot produce a stack pointer; the early
 * 'return' for this case is presumably on an elided line. */
2467 if (arch_irn_get_n_outs(insn) == 0)
2470 req = arch_get_register_req_out(node);
2471 if (! (req->type & arch_register_req_type_produces_sp))
/* Record the value node itself (the Proj, not its predecessor). */
2474 ARR_APP1(ir_node*, env->sp_nodes, node);
/**
 * Turn all stack-pointer-producing nodes into a proper SSA web: collect them,
 * run SSA (re)construction over their users, keep liveness up to date and
 * constrain the newly created Phis to the stack-pointer register.
 * NOTE(review): the declarations of 'i', 'len', 'phis' and the early return
 * when no sp nodes were found are on elided lines in this excerpt.
 *
 * @param env  the ABI environment
 */
2477 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
2479 be_ssa_construction_env_t senv;
2482 be_irg_t *birg = env->birg;
2483 be_lv_t *lv = be_get_birg_liveness(birg);
2484 fix_stack_walker_env_t walker_env;
2486 walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
/* Gather every node whose output is the stack pointer. */
2488 irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
2490 /* nothing to be done if we didn't find any node, in fact we mustn't
2491 * continue, as for endless loops incsp might have had no users and is bad
2494 len = ARR_LEN(walker_env.sp_nodes);
2496 DEL_ARR_F(walker_env.sp_nodes);
/* Reconstruct SSA form over all collected definitions and reroute users. */
2500 be_ssa_construction_init(&senv, birg);
2501 be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
2502 ARR_LEN(walker_env.sp_nodes));
2503 be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
2504 ARR_LEN(walker_env.sp_nodes));
/* Refresh liveness for the rerouted nodes (guarded by 'if (lv)' on an
 * elided line, presumably — liveness may not be computed yet). */
2507 len = ARR_LEN(walker_env.sp_nodes);
2508 for (i = 0; i < len; ++i) {
2509 be_liveness_update(lv, walker_env.sp_nodes[i]);
2511 be_ssa_construction_update_liveness_phis(&senv, lv);
2514 phis = be_ssa_construction_get_new_phis(&senv);
2516 /* set register requirements for stack phis */
2517 len = ARR_LEN(phis);
2518 for (i = 0; i < len; ++i) {
2519 ir_node *phi = phis[i];
/* Pin each new Phi to the stack-pointer register via the shared sp_req. */
2520 be_set_phi_reg_req(phi, env->sp_req);
2521 arch_set_irn_register(phi, env->arch_env->sp);
2523 be_ssa_construction_destroy(&senv);
2525 DEL_ARR_F(walker_env.sp_nodes);
2529 * Fix all stack accessing operations in the block bl.
2531 * @param env the abi environment
2532 * @param bl the block to process
2533 * @param real_bias the bias value
2535 * @return the bias at the end of this block
/* Walks the schedule of @p bl, assigning final frame offsets to nodes that
 * reference frame entities and tracking how IncSP nodes change the bias.
 * 'real_bias' follows the actual sp movement (including alignment padding),
 * 'wanted_bias' the movement without padding; non-IncSP sp changes get an
 * extra delta so both agree again.
 * NOTE(review): the declarations of 'ofs' and the loop's closing braces plus
 * the final 'return real_bias;' are on elided lines in this excerpt. */
2537 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
2539 int omit_fp = env->call->flags.bits.try_omit_fp;
2541 int wanted_bias = real_bias;
2543 sched_foreach(bl, irn) {
2547 Check, if the node relates to an entity on the stack frame.
2548 If so, set the true offset (including the bias) for that
2551 ir_entity *ent = arch_get_frame_entity(irn);
/* With a frame pointer, entity offsets are fp-relative and need no bias. */
2553 int bias = omit_fp ? real_bias : 0;
2554 int offset = get_stack_entity_offset(&env->frame, ent, bias);
2555 arch_set_frame_offset(irn, offset);
2556 DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
2557 ent, offset, bias));
2561 * If the node modifies the stack pointer by a constant offset,
2562 * record that in the bias.
2564 ofs = arch_get_sp_bias(irn);
2566 if (be_is_IncSP(irn)) {
2567 /* fill in real stack frame size */
2568 if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
2569 ir_type *frame_type = get_irg_frame_type(env->birg->irg);
2570 ofs = (int) get_type_size_bytes(frame_type);
2571 be_set_IncSP_offset(irn, ofs);
2572 } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
2573 ir_type *frame_type = get_irg_frame_type(env->birg->irg);
/* Shrinking: negative offset of the full frame size. */
2574 ofs = - (int)get_type_size_bytes(frame_type);
2575 be_set_IncSP_offset(irn, ofs);
2577 if (be_get_IncSP_align(irn)) {
2578 /* patch IncSP to produce an aligned stack pointer */
2579 ir_type *between_type = env->frame.between_type;
2580 int between_size = get_type_size_bytes(between_type);
/* stack_alignment is stored as a power of two (log2). */
2581 int alignment = 1 << env->arch_env->stack_alignment;
2582 int delta = (real_bias + ofs + between_size) & (alignment - 1);
/* Pad this IncSP so the resulting sp is alignment-aligned (guarded by
 * 'delta > 0' on an elided line, presumably). */
2585 be_set_IncSP_offset(irn, ofs + alignment - delta);
2586 real_bias += alignment - delta;
2589 /* adjust so real_bias corresponds with wanted_bias */
2590 int delta = wanted_bias - real_bias;
2593 be_set_IncSP_offset(irn, ofs + delta);
/* Without stack alignment both counters must stay in lock step. */
2604 assert(real_bias == wanted_bias);
2609 * A helper struct for the bias walker.
2612 be_abi_irg_t *env; /**< The ABI irg environment. */
2613 int start_block_bias; /**< The bias at the end of the start block. */
2615 ir_node *start_block; /**< The start block of the current graph. */
2619 * Block-Walker: fix all stack offsets for all blocks
2620 * except the start block
/* The start block was already processed by be_abi_fix_stack_bias() to obtain
 * bw->start_block_bias, which every other block inherits as entry bias. */
2622 static void stack_bias_walker(ir_node *bl, void *data)
2624 struct bias_walk *bw = data;
2625 if (bl != bw->start_block) {
2626 process_stack_bias(bw->env, bl, bw->start_block_bias);
2631 * Walker: finally lower all Sels of outer frame or parameter
/**
 * irg-walker callback for inner functions: replace a Sel into the outer
 * function's frame or argument type by explicit pointer arithmetic
 * (base pointer + constant offset).
 * NOTE(review): the is_Sel() guard, local declarations and the final
 * exchange(sel, ptr) appear to be on elided lines in this excerpt.
 *
 * @param sel  the visited node (handled when it is a Sel)
 * @param ctx  the ABI environment of the OUTER graph
 */
2634 static void lower_outer_frame_sels(ir_node *sel, void *ctx) {
2635 be_abi_irg_t *env = ctx;
2643 ent = get_Sel_entity(sel);
2644 owner = get_entity_owner(ent);
2645 ptr = get_Sel_ptr(sel);
2647 if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
2648 /* found access to outer frame or arguments */
/* Offsets were finalized by the bias fix; no extra bias needed here. */
2649 int offset = get_stack_entity_offset(&env->frame, ent, 0);
2652 ir_node *bl = get_nodes_block(sel);
2653 dbg_info *dbgi = get_irn_dbg_info(sel);
2654 ir_mode *mode = get_irn_mode(sel);
2655 ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
2656 ir_node *cnst = new_r_Const_long(current_ir_graph, mode_UInt, offset);
/* Compute base + offset; presumably this Add replaces the Sel via an
 * exchange() on an elided line. */
2658 ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
/**
 * Compute and apply the final stack bias for the whole graph: process the
 * start block, then all remaining blocks, then lower outer-frame Sels left
 * inside nested (inner) functions.
 * NOTE(review): the declarations of 'i', 'frame_tp' and 'bw' members other
 * than those assigned here are partly on elided lines in this excerpt.
 *
 * @param env  the ABI environment
 */
2664 void be_abi_fix_stack_bias(be_abi_irg_t *env)
2666 ir_graph *irg = env->birg->irg;
2669 struct bias_walk bw;
2671 stack_frame_compute_initial_offset(&env->frame);
2672 // stack_layout_dump(stdout, frame);
2674 /* Determine the stack bias at the end of the start block. */
2675 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
2676 bw.between_size = get_type_size_bytes(env->frame.between_type);
2678 /* fix the bias is all other blocks */
2680 bw.start_block = get_irg_start_block(irg);
/* Every non-start block starts with the bias the start block ended with. */
2681 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
2683 /* fix now inner functions: these still have Sel node to outer
2684 frame and parameter entities */
2685 frame_tp = get_irg_frame_type(irg);
2686 for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
2687 ir_entity *ent = get_class_member(frame_tp, i);
/* Method entities in the frame type are nested functions; walk each
 * implemented one and rewrite its Sels into our frame. */
2689 if (is_method_entity(ent) && get_entity_peculiarity(ent) != peculiarity_description) {
2690 ir_graph *irg = get_entity_irg(ent);
/* Guarded by 'irg != NULL' on an elided line, presumably (external
 * methods have no graph). */
2692 irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
/**
 * Return the node that represents the given callee-save register in this
 * graph (recorded in abi->regs during ABI introduction).
 *
 * @param abi  the ABI environment
 * @param reg  a register with the callee_save type flag
 */
2697 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2699 assert(arch_register_type_is(reg, callee_save));
2700 assert(pmap_contains(abi->regs, (void *) reg));
2701 return pmap_get(abi->regs, (void *) reg);
/**
 * Return the node that represents the given ignore register in this graph
 * (recorded in abi->regs during ABI introduction).
 *
 * @param abi  the ABI environment
 * @param reg  a register with the ignore type flag
 */
2704 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2706 assert(arch_register_type_is(reg, ignore));
2707 assert(pmap_contains(abi->regs, (void *) reg));
2708 return pmap_get(abi->regs, (void *) reg);
2712 * Returns non-zero if the ABI has omitted the frame pointer in
2713 * the current graph.
/* Simple accessor for the try_omit_fp flag of the graph's call description. */
2715 int be_abi_omit_fp(const be_abi_irg_t *abi)
2717 return abi->call->flags.bits.try_omit_fp;