2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
35 #include "irgraph_t.h"
38 #include "iredges_t.h"
41 #include "irprintf_t.h"
47 #include "raw_bitset.h"
55 #include "besched_t.h"
57 #include "bessaconstr.h"
/** Descriptor of a single call argument or result in an ABI call object. */
59 typedef struct _be_abi_call_arg_t {
60 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
61 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
62 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
65 const arch_register_t *reg; /**< the register used when the argument is passed in a register */
68 unsigned alignment; /**< stack alignment */
69 unsigned space_before; /**< allocate space before */
70 unsigned space_after; /**< allocate space after */
/** An ABI call object: flags, callbacks and per-argument descriptors for one call. */
73 struct _be_abi_call_t {
74 be_abi_call_flags_t flags; /**< ABI flags (e.g. try_omit_fp, store_args_sequential) */
76 const be_abi_callbacks_t *cb; /**< backend-provided ABI callbacks */
77 ir_type *between_type; /**< the "between" type (return address, old frame pointer, ...) */
79 const arch_register_class_t *cls_addr; /**< register class of the call address */
/** Per-irg ABI environment built during ABI lowering. */
82 struct _be_abi_irg_t {
84 be_stack_layout_t *frame; /**< The stack frame model. */
85 be_irg_t *birg; /**< The back end IRG. */
86 const arch_isa_t *isa; /**< The isa. */
87 survive_dce_t *dce_survivor; /**< keeps selected nodes alive across dead code elimination */
89 be_abi_call_t *call; /**< The ABI call information. */
90 ir_type *method_type; /**< The type of the method of the IRG. */
92 ir_node *init_sp; /**< The node representing the stack pointer
93 at the start of the function. */
95 ir_node *reg_params; /**< The reg params node. */
96 pmap *regs; /**< A map of all callee-save and ignore regs to
97 their Projs to the RegParams node. */
99 int start_block_bias; /**< The stack bias at the end of the start block. */
101 void *cb; /**< ABI Callback self pointer. */
103 pmap *keep_map; /**< mapping blocks to keep nodes. */
104 pset *ignore_regs; /**< Additional registers which shall be ignored. */
106 ir_node **calls; /**< flexible array containing all be_Call nodes */
108 arch_register_req_t sp_req; /**< register requirement for the stack pointer */
109 arch_register_req_t sp_cls_req; /**< register-class-only requirement for the stack pointer */
111 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/* Dominance-height information, valid while calls are being processed. */
114 static heights_t *ir_heights;
116 /* Flag: if set, try to omit the frame pointer if called by the backend */
117 static int be_omit_fp = 1;
120 _ ____ ___ ____ _ _ _ _
121 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
122 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
123 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
124 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
126 These callbacks are used by the backend to set the parameters
127 for a specific call type.
131 * Set compare function: compares two ABI call object arguments.
133 static int cmp_call_arg(const void *a, const void *b, size_t n)
135 const be_abi_call_arg_t *p = a, *q = b;
137 return !(p->is_res == q->is_res && p->pos == q->pos);
141 * Get or set an ABI call object argument.
143 * @param call the abi call
144 * @param is_res true for call results, false for call arguments
145 * @param pos position of the argument
146 * @param do_insert true if the argument is set, false if it's retrieved
148 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
150 be_abi_call_arg_t arg;
/* Zero the key so padding/unset fields cannot perturb the set lookup. */
153 memset(&arg, 0, sizeof(arg));
/* Combine the kind and the position into a hash for the params set. */
157 hash = is_res * 128 + pos;
160 ? set_insert(call->params, &arg, sizeof(arg), hash)
161 : set_find(call->params, &arg, sizeof(arg), hash)
165 * Retrieve an ABI call object argument.
167 * @param call the ABI call object
168 * @param is_res true for call results, false for call arguments
169 * @param pos position of the argument
171 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
173 return get_or_set_call_arg(call, is_res, pos, 0);
176 /* Set the flags for a call. */
/* Stores the given ABI flags and callback set into the call object (body elided here). */
177 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/* Set the pop amount: bytes the callee itself pops off the stack (used by adjust_call
 * to reduce the caller-side cleanup; see stack_size -= call->pop). */
183 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
189 /* Set register class for call address */
190 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
192 call->cls_addr = cls;
/* Record that parameter arg_pos is passed on the stack with the given load mode,
 * alignment and padding before/after it. */
196 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
198 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
200 arg->load_mode = load_mode;
201 arg->alignment = alignment;
202 arg->space_before = space_before;
203 arg->space_after = space_after;
/* NOTE(review): the assert runs after the fields are stored; alignment is only
 * consumed later (round_up2 in adjust_call), so the late check is still effective. */
204 assert(alignment > 0 && "Alignment must be greater than 0");
/* Record that parameter arg_pos is passed in the given register (in_reg/reg set in elided lines). */
207 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
209 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Record that result arg_pos is returned in the given register (is_res == 1 entry). */
214 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
216 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
221 /* Get the flags of a ABI call object. */
222 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
228 * Constructor for a new ABI call object.
230 * @return the new ABI call object
232 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
234 be_abi_call_t *call = xmalloc(sizeof(call[0]));
/* Zero-initialize, then fill in the fields that have non-zero defaults. */
235 memset(call, 0, sizeof(call[0]));
/* Arguments/results are kept in a hash set keyed by (is_res, pos), see cmp_call_arg. */
238 call->params = new_set(cmp_call_arg, 16);
240 call->cls_addr = cls_addr;
/* Inherit the global frame-pointer-omission policy as the starting flag. */
242 call->flags.bits.try_omit_fp = be_omit_fp;
248 * Destructor for an ABI call object.
250 static void be_abi_call_free(be_abi_call_t *call)
252 del_set(call->params);
258 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
259 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
260 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
261 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
264 Handling of the stack frame. It is composed of three types:
265 1) The type of the arguments which are pushed on the stack.
266 2) The "between type" which consists of stuff the call of the
267 function pushes on the stack (like the return address and
268 the old base pointer for ia32).
269 3) The Firm frame type which consists of all local variables
/* Compute the offset of a frame entity relative to the (possibly biased) stack
 * pointer: entity offset + sizes of all frame types below it in the layout,
 * corrected by the initial frame pointer position and the current bias. */
273 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
276 ir_type *t = get_entity_owner(ent);
277 int ofs = get_entity_offset(ent);
281 /* Find the type the entity is contained in. */
282 for(index = 0; index < N_FRAME_TYPES; ++index) {
283 if(frame->order[index] == t)
287 /* Add the size of all the types below the one of the entity to the entity's offset */
288 for(i = 0; i < index; ++i)
289 ofs += get_type_size_bytes(frame->order[i]);
291 /* correct the offset by the initial position of the frame pointer */
292 ofs -= frame->initial_offset;
294 /* correct the offset with the current bias. */
301 * Retrieve the entity with given offset from a frame type.
303 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
/* Linear scan over the compound members; returns the first match (elided lines). */
307 for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
308 ir_entity *ent = get_compound_member(t, i);
309 if(get_entity_offset(ent) == offset)
/* Compute and cache the initial stack-pointer offset of the frame: the offset of
 * the entity at position 0 of the base type (between type for downward-growing
 * stacks, frame type otherwise), or 0 if no such entity exists. */
316 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
318 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
319 ir_entity *ent = search_ent_with_offset(base, 0);
321 frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;
323 return frame->initial_offset;
327 * Initializes the frame layout from parts
329 * @param frame the stack layout that will be initialized
330 * @param args the stack argument layout type
331 * @param between the between layout type
332 * @param locals the method frame type
333 * @param stack_dir the stack direction
334 * @param param_map an array mapping method argument positions to the stack argument type
336 * @return the initialized stack layout
338 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
339 ir_type *between, ir_type *locals, int stack_dir,
340 ir_entity *param_map[])
342 frame->arg_type = args;
343 frame->between_type = between;
344 frame->frame_type = locals;
345 frame->initial_offset = 0;
346 frame->stack_dir = stack_dir;
/* The between type always sits in the middle of the three frame parts. */
347 frame->order[1] = between;
348 frame->param_map = param_map;
/* Ordering of args vs. locals depends on the stack direction (branch lines elided). */
351 frame->order[0] = args;
352 frame->order[2] = locals;
355 frame->order[0] = locals;
356 frame->order[2] = args;
362 /** Dumps the stack layout to file. */
363 static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
367 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
/* Print every frame part in layout order together with all of its members. */
368 for (j = 0; j < N_FRAME_TYPES; ++j) {
369 ir_type *t = frame->order[j];
371 ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
372 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
373 ir_entity *ent = get_compound_member(t, i);
374 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
381 * Returns non-zero if the call argument at given position
382 * is transfered on the stack.
384 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
386 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
387 return arg && !arg->in_reg;
397 Adjustment of the calls inside a graph.
402 * Transform a call node into a be_Call node.
404 * @param env The ABI environment for the current irg.
405 * @param irn The call node.
406 * @param curr_sp The stack pointer node to use.
407 * @return The stack pointer after the call.
/* Lower a Firm Call node to a backend be_Call: allocate/deallocate the outgoing
 * argument area, store stack arguments, route register arguments, create result
 * and caller-save Projs, and return the stack pointer after the call. */
409 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
411 ir_graph *irg = env->birg->irg;
412 const arch_env_t *arch_env = &env->birg->main_env->arch_env;
413 const arch_isa_t *isa = arch_env->isa;
414 ir_type *call_tp = get_Call_type(irn);
415 ir_node *call_ptr = get_Call_ptr(irn);
416 int n_params = get_method_n_params(call_tp);
417 ir_node *curr_mem = get_Call_mem(irn);
418 ir_node *bl = get_nodes_block(irn);
419 pset *results = pset_new_ptr(8);
420 pset *caller_save = pset_new_ptr(8);
421 pset *states = pset_new_ptr(2);
423 int stack_dir = arch_isa_stack_dir(isa);
424 const arch_register_t *sp = arch_isa_sp(isa);
425 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
426 ir_mode *mach_mode = sp->reg_class->mode;
427 struct obstack *obst = &env->obst;
428 int no_alloc = call->flags.bits.frame_is_setup_on_call;
429 int n_res = get_method_n_ress(call_tp);
430 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
432 ir_node *res_proj = NULL;
433 int n_reg_params = 0;
434 int n_stack_params = 0;
440 int n_reg_results = 0;
441 const arch_register_t *reg;
442 const ir_edge_t *edge;
444 int *stack_param_idx;
447 /* Let the isa fill out the abi description for that call node. */
448 arch_isa_get_call_abi(isa, call_tp, call);
450 /* Insert code to put the stack arguments on the stack. */
/* First pass: sum the aligned sizes of all stack-passed parameters and collect their indices. */
451 assert(get_Call_n_params(irn) == n_params);
452 for (i = 0; i < n_params; ++i) {
453 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
456 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
458 stack_size += round_up2(arg->space_before, arg->alignment);
459 stack_size += round_up2(arg_size, arg->alignment);
460 stack_size += round_up2(arg->space_after, arg->alignment);
461 obstack_int_grow(obst, i);
465 stack_param_idx = obstack_finish(obst);
467 /* Collect all arguments which are passed in registers. */
468 for (i = 0; i < n_params; ++i) {
469 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
470 if (arg && arg->in_reg) {
471 obstack_int_grow(obst, i);
475 reg_param_idxs = obstack_finish(obst);
478 * If the stack is decreasing and we do not want to store sequentially,
479 * or someone else allocated the call frame
480 * we allocate as much space on the stack all parameters need, by
481 * moving the stack pointer along the stack's direction.
483 * Note: we also have to do this for stack_size == 0, because we may have
484 * to adjust stack alignment for the call.
486 if (stack_dir < 0 && !do_seq && !no_alloc) {
487 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size, 1);
490 /* If there are some parameters which shall be passed on the stack. */
491 if (n_stack_params > 0) {
495 * Reverse list of stack parameters if call arguments are from left to right.
496 * We must reverse them again if they are pushed (not stored) and the stack
497 * direction is downwards.
499 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
500 for (i = 0; i < n_stack_params >> 1; ++i) {
501 int other = n_stack_params - i - 1;
502 int tmp = stack_param_idx[i];
503 stack_param_idx[i] = stack_param_idx[other];
504 stack_param_idx[other] = tmp;
508 curr_mem = get_Call_mem(irn);
510 obstack_ptr_grow(obst, curr_mem);
/* Emit one Store (or CopyB for compounds) per stack parameter. */
513 for (i = 0; i < n_stack_params; ++i) {
514 int p = stack_param_idx[i];
515 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
516 ir_node *param = get_Call_param(irn, p);
517 ir_node *addr = curr_sp;
519 ir_type *param_type = get_method_param_type(call_tp, p);
520 int param_size = get_type_size_bytes(param_type) + arg->space_after;
523 * If we wanted to build the arguments sequentially,
524 * the stack pointer for the next must be incremented,
525 * and the memory value propagated.
529 addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before, 0);
530 add_irn_dep(curr_sp, curr_mem);
533 curr_ofs += arg->space_before;
534 curr_ofs = round_up2(curr_ofs, arg->alignment);
536 /* Make the expression to compute the argument's offset. */
538 ir_mode *constmode = mach_mode;
539 if(mode_is_reference(mach_mode)) {
542 addr = new_r_Const_long(irg, bl, constmode, curr_ofs);
543 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
547 /* Insert a store for primitive arguments. */
548 if (is_atomic_type(param_type)) {
550 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
551 store = new_r_Store(irg, bl, mem_input, addr, param);
552 mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
555 /* Make a mem copy for compound arguments. */
559 assert(mode_is_reference(get_irn_mode(param)));
560 copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
561 mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
564 curr_ofs += param_size;
569 obstack_ptr_grow(obst, mem);
572 in = (ir_node **) obstack_finish(obst);
574 /* We need the sync only, if we didn't build the stores sequentially. */
576 if (n_stack_params >= 1) {
577 curr_mem = new_r_Sync(irg, bl, n_stack_params + 1, in);
579 curr_mem = get_Call_mem(irn);
582 obstack_free(obst, in);
585 /* Collect caller save registers */
586 for (i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
588 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
589 for (j = 0; j < cls->n_regs; ++j) {
590 const arch_register_t *reg = arch_register_for_index(cls, j);
591 if (arch_register_type_is(reg, caller_save)) {
592 pset_insert_ptr(caller_save, (void *) reg);
594 if (arch_register_type_is(reg, state)) {
595 pset_insert_ptr(caller_save, (void*) reg);
596 pset_insert_ptr(states, (void*) reg);
601 /* search the greatest result proj number */
603 res_projs = alloca(n_res * sizeof(res_projs[0]));
604 memset(res_projs, 0, n_res * sizeof(res_projs[0]));
/* Gather the existing result Projs (hanging off the Proj_T) indexed by result number. */
606 foreach_out_edge(irn, edge) {
607 const ir_edge_t *res_edge;
608 ir_node *irn = get_edge_src_irn(edge);
610 if(!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
613 foreach_out_edge(irn, res_edge) {
615 ir_node *res = get_edge_src_irn(res_edge);
617 assert(is_Proj(res));
619 proj = get_Proj_proj(res);
620 assert(proj < n_res);
621 assert(res_projs[proj] == NULL);
622 res_projs[proj] = res;
628 /** TODO: this is not correct for cases where return values are passed
629 * on the stack, but no known ABI does this currently...
631 n_reg_results = n_res;
633 /* make the back end call node and set its register requirements. */
634 for (i = 0; i < n_reg_params; ++i) {
635 obstack_ptr_grow(obst, get_Call_param(irn, reg_param_idxs[i]));
/* State registers (e.g. status/state regs) become additional call inputs. */
637 foreach_pset(states, reg) {
638 const arch_register_class_t *cls = arch_register_get_class(reg);
640 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
641 ir_fprintf(stderr, "Adding %+F\n", regnode);
643 ir_node *regnode = new_rd_Unknown(irg, arch_register_class_mode(cls));
644 obstack_ptr_grow(obst, regnode);
646 n_ins = n_reg_params + pset_count(states);
648 in = obstack_finish(obst);
/* Direct call with immediate address vs. indirect call through call_ptr. */
650 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
652 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
654 n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
655 n_ins, in, get_Call_type(irn));
656 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
659 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
661 n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
662 n_ins, in, get_Call_type(irn));
664 be_Call_set_pop(low_call, call->pop);
665 ARR_APP1(ir_node *, env->calls, low_call);
667 /* create new stack pointer */
668 curr_sp = new_r_Proj(irg, bl, low_call, get_irn_mode(curr_sp),
670 be_set_constr_single_reg(low_call, BE_OUT_POS(pn_be_Call_sp), sp);
671 arch_set_irn_register(arch_env, curr_sp, sp);
672 be_node_set_flags(low_call, BE_OUT_POS(pn_be_Call_sp),
673 arch_irn_flags_ignore | arch_irn_flags_modify_sp);
/* Re-wire (or create) the result Projs onto the new be_Call. */
675 for(i = 0; i < n_res; ++i) {
677 ir_node *proj = res_projs[i];
678 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
680 /* returns values on stack not supported yet */
684 shift the proj number to the right, since we will drop the
685 unspeakable Proj_T from the Call. Therefore, all real argument
686 Proj numbers must be increased by pn_be_Call_first_res
688 pn = i + pn_be_Call_first_res;
691 ir_type *res_type = get_method_res_type(call_tp, i);
692 ir_mode *mode = get_type_mode(res_type);
693 proj = new_r_Proj(irg, bl, low_call, mode, pn);
696 set_Proj_pred(proj, low_call);
697 set_Proj_proj(proj, pn);
/* A register used for a result is clobbered anyway; drop it from the caller-save set. */
701 pset_remove_ptr(caller_save, arg->reg);
706 Set the register class of the call address to
707 the backend provided class (default: stack pointer class)
709 be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr);
711 DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
713 /* Set the register classes and constraints of the Call parameters. */
714 for (i = 0; i < n_reg_params; ++i) {
715 int index = reg_param_idxs[i];
716 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
717 assert(arg->reg != NULL);
719 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + i, arg->reg);
722 /* Set the register constraints of the results. */
723 for (i = 0; i < n_res; ++i) {
724 ir_node *proj = res_projs[i];
725 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
726 int pn = get_Proj_proj(proj);
729 be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
730 arch_set_irn_register(arch_env, proj, arg->reg);
732 obstack_free(obst, in);
733 exchange(irn, low_call);
735 /* kill the ProjT node */
736 if (res_proj != NULL) {
737 be_kill_node(res_proj);
740 /* Make additional projs for the caller save registers
741 and the Keep node which keeps them alive. */
742 if (1 || pset_count(caller_save) + n_reg_results > 0) {
743 const arch_register_t *reg;
748 = pn_be_Call_first_res + n_reg_results;
750 /* also keep the stack pointer */
752 set_irn_link(curr_sp, (void*) sp);
753 obstack_ptr_grow(obst, curr_sp);
755 for (reg = pset_first(caller_save); reg; reg = pset_next(caller_save), ++n) {
756 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode,
759 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
760 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
761 arch_set_irn_register(arch_env, proj, reg);
763 /* a call can produce ignore registers, in this case set the flag and register for the Proj */
764 if (arch_register_type_is(reg, ignore)) {
765 be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj),
766 arch_irn_flags_ignore);
769 set_irn_link(proj, (void*) reg);
770 obstack_ptr_grow(obst, proj);
774 for(i = 0; i < n_reg_results; ++i) {
775 ir_node *proj = res_projs[i];
776 const arch_register_t *reg = arch_get_irn_register(arch_env, proj);
777 set_irn_link(proj, (void*) reg);
778 obstack_ptr_grow(obst, proj);
782 /* create the Keep for the caller save registers */
783 in = (ir_node **) obstack_finish(obst);
784 keep = be_new_Keep(NULL, irg, bl, n, in);
785 for (i = 0; i < n; ++i) {
786 const arch_register_t *reg = get_irn_link(in[i]);
787 be_node_set_reg_class(keep, i, reg->reg_class);
789 obstack_free(obst, in);
792 /* Clean up the stack. */
793 assert(stack_size >= call->pop);
794 stack_size -= call->pop;
796 if (stack_size > 0) {
797 ir_node *mem_proj = NULL;
799 foreach_out_edge(low_call, edge) {
800 ir_node *irn = get_edge_src_irn(edge);
801 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
808 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_be_Call_M_regular);
809 keep_alive(mem_proj);
812 /* Clean up the stack frame or revert alignment fixes if we allocated it */
814 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size, 0);
817 be_abi_call_free(call);
818 obstack_free(obst, stack_param_idx);
821 del_pset(caller_save);
827 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
829 * @param alignment the minimum stack alignment
830 * @param size the node containing the non-aligned size
831 * @param irg the irg where new nodes are allocated on
832 * @param irg the block where new nodes are allocated on
833 * @param dbg debug info for new nodes
835 * @return a node representing the aligned size
/* Round a dynamic allocation size up to the minimum stack alignment:
 * size = (size + align-1) & ~(align-1), built as IR nodes. */
837 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
838 ir_graph *irg, ir_node *block, dbg_info *dbg)
840 if (stack_alignment > 1) {
845 assert(is_po2(stack_alignment));
847 mode = get_irn_mode(size);
848 tv = new_tarval_from_long(stack_alignment-1, mode);
849 mask = new_r_Const(irg, block, mode, tv);
850 size = new_rd_Add(dbg, irg, block, size, mask, mode);
/* -(long)alignment is the all-ones mask with the low log2(alignment) bits cleared. */
852 tv = new_tarval_from_long(-(long)stack_alignment, mode);
853 mask = new_r_Const(irg, block, mode, tv);
854 size = new_rd_And(dbg, irg, block, size, mask, mode);
860 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
862 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
871 const ir_edge_t *edge;
872 ir_node *new_alloc, *size, *addr, *ins[2];
873 unsigned stack_alignment;
/* Only stack allocations are lowered; heap Allocs pass through (elided early return). */
875 if (get_Alloc_where(alloc) != stack_alloc) {
880 block = get_nodes_block(alloc);
881 irg = get_irn_irg(block);
884 type = get_Alloc_type(alloc);
/* Find the result and memory Projs of the Alloc (switch body partially elided). */
886 foreach_out_edge(alloc, edge) {
887 ir_node *irn = get_edge_src_irn(edge);
889 assert(is_Proj(irn));
890 switch(get_Proj_proj(irn)) {
902 /* Beware: currently Alloc nodes without a result might happen,
903 only escape analysis kills them and this phase runs only for object
904 oriented source. We kill the Alloc here. */
905 if (alloc_res == NULL && alloc_mem) {
906 exchange(alloc_mem, get_Alloc_mem(alloc));
910 dbg = get_irn_dbg_info(alloc);
912 /* we might need to multiply the size with the element size */
913 if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
914 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
916 ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
917 ir_node *mul = new_rd_Mul(dbg, irg, block, get_Alloc_size(alloc),
921 size = get_Alloc_size(alloc);
924 /* The stack pointer will be modified in an unknown manner.
925 We cannot omit it. */
926 env->call->flags.bits.try_omit_fp = 0;
928 stack_alignment = env->isa->stack_alignment;
929 size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
930 new_alloc = be_new_AddSP(env->isa->sp, irg, block, curr_sp, size);
931 set_irn_dbg_info(new_alloc, dbg);
933 if(alloc_mem != NULL) {
937 addsp_mem = new_r_Proj(irg, block, new_alloc, mode_M, pn_be_AddSP_M);
939 /* We need to sync the output mem of the AddSP with the input mem
940 edge into the alloc node. */
941 ins[0] = get_Alloc_mem(alloc);
943 sync = new_r_Sync(irg, block, 2, ins);
945 exchange(alloc_mem, sync);
948 exchange(alloc, new_alloc);
950 /* fix projnum of alloca res */
951 set_Proj_proj(alloc_res, pn_be_AddSP_res);
/* The new stack pointer after the dynamic allocation becomes the current one. */
954 curr_sp = new_r_Proj(irg, block, new_alloc, get_irn_mode(curr_sp),
962 * The Free is transformed into a back end free node and connected to the stack nodes.
964 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
968 ir_node *subsp, *mem, *res, *size, *sync;
972 unsigned stack_alignment;
/* Only stack frees are lowered; heap Frees pass through (elided early return). */
975 if (get_Free_where(free) != stack_alloc) {
980 block = get_nodes_block(free);
981 irg = get_irn_irg(block);
982 type = get_Free_type(free);
983 sp_mode = env->isa->sp->reg_class->mode;
984 dbg = get_irn_dbg_info(free);
986 /* we might need to multiply the size with the element size */
987 if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
988 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
989 ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
990 ir_node *mul = new_rd_Mul(dbg, irg, block, get_Free_size(free),
994 size = get_Free_size(free);
/* Round the freed size to the stack alignment, like adjust_alloc does. */
997 stack_alignment = env->isa->stack_alignment;
998 size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
1000 /* The stack pointer will be modified in an unknown manner.
1001 We cannot omit it. */
1002 env->call->flags.bits.try_omit_fp = 0;
1003 subsp = be_new_SubSP(env->isa->sp, irg, block, curr_sp, size);
1004 set_irn_dbg_info(subsp, dbg);
1006 mem = new_r_Proj(irg, block, subsp, mode_M, pn_be_SubSP_M);
1007 res = new_r_Proj(irg, block, subsp, sp_mode, pn_be_SubSP_sp);
1009 /* we need to sync the memory */
1010 in[0] = get_Free_mem(free);
1012 sync = new_r_Sync(irg, block, 2, in);
1014 /* and make the AddSP dependent on the former memory */
1015 add_irn_dep(subsp, get_Free_mem(free));
1018 exchange(free, sync);
1024 /* the following function is replaced by the usage of the heights module */
1027 * Walker for dependent_on().
1028 * This function searches a node tgt recursively from a given node
1029 * but is restricted to the given block.
1030 * @return 1 if tgt was reachable from curr, 0 if not.
/* Recursive reachability test restricted to one basic block; superseded by the
 * heights module (see comment above), kept here for reference. */
1032 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
/* Leaving the block terminates the search along this path. */
1036 if (get_nodes_block(curr) != bl)
1042 /* Phi functions stop the recursion inside a basic block */
1043 if (! is_Phi(curr)) {
1044 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
1045 if (check_dependence(get_irn_n(curr, i), tgt, bl))
1055 * Check if a node is somehow data dependent on another one.
1056 * both nodes must be in the same basic block.
1057 * @param n1 The first node.
1058 * @param n2 The second node.
1059 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
1061 static int dependent_on(ir_node *n1, ir_node *n2)
/* Both nodes must live in the same block; the heights module answers the query. */
1063 assert(get_nodes_block(n1) == get_nodes_block(n2));
1065 return heights_reachable_in_block(ir_heights, n1, n2);
/* qsort comparator ordering call nodes by data dependency (dependent calls sort later). */
1068 static int cmp_call_dependency(const void *c1, const void *c2)
1070 ir_node *n1 = *(ir_node **) c1;
1071 ir_node *n2 = *(ir_node **) c2;
1074 Classical qsort() comparison function behavior:
1075 0 if both elements are equal
1076 1 if second is "smaller" than first
1077 -1 if first is "smaller" than second
1079 if (dependent_on(n1, n2))
1082 if (dependent_on(n2, n1))
1089 * Walker: links all Call/alloc/Free nodes to the Block they are contained.
1091 static void link_calls_in_block_walker(ir_node *irn, void *data)
1093 ir_opcode code = get_irn_opcode(irn);
/* Only Calls and stack Alloc/Free nodes are of interest to the ABI phase. */
1095 if (code == iro_Call ||
1096 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1097 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1098 be_abi_irg_t *env = data;
1099 ir_node *bl = get_nodes_block(irn);
1100 void *save = get_irn_link(bl);
/* Any Call means the graph is not a leaf function. */
1102 if (code == iro_Call)
1103 env->call->flags.bits.irg_is_leaf = 0;
/* Prepend irn to the block's singly-linked list threaded through the link fields. */
1105 set_irn_link(irn, save);
1106 set_irn_link(bl, irn);
1112 * Process all Call nodes inside a basic block.
1113 * Note that the link field of the block must contain a linked list of all
1114 * Call nodes inside the Block. We first order this list according to data dependency
1115 * and that connect the calls together.
1117 static void process_calls_in_block(ir_node *bl, void *data)
1119 be_abi_irg_t *env = data;
1120 ir_node *curr_sp = env->init_sp;
/* Collect the block's linked list of Call/Alloc/Free nodes into an array. */
1124 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
1125 obstack_ptr_grow(&env->obst, irn);
1127 /* If there were call nodes in the block. */
1133 nodes = obstack_finish(&env->obst);
1135 /* order the call nodes according to data dependency */
1136 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);
/* Process back-to-front so the stack pointer is threaded in dependency order. */
1138 for(i = n - 1; i >= 0; --i) {
1139 ir_node *irn = nodes[i];
1141 DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1142 switch(get_irn_opcode(irn)) {
1144 curr_sp = adjust_call(env, irn, curr_sp);
1147 curr_sp = adjust_alloc(env, irn, curr_sp);
1150 curr_sp = adjust_free(env, irn, curr_sp);
1153 panic("invalid call");
1158 obstack_free(&env->obst, nodes);
1160 /* Keep the last stack state in the block by tying it to Keep node,
1161 * the proj from calls is already kept */
1162 if(curr_sp != env->init_sp
1163 && !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1165 keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl),
1167 pmap_insert(env->keep_map, bl, keep);
/* Remember the final stack pointer of this block in the block's link field. */
1171 set_irn_link(bl, curr_sp);
1172 } /* process_calls_in_block */
1175 * Adjust all call nodes in the graph to the ABI conventions.
1177 static void process_calls(be_abi_irg_t *env)
1179 ir_graph *irg = env->birg->irg;
/* Assume leaf until link_calls_in_block_walker sees a Call. */
1181 env->call->flags.bits.irg_is_leaf = 1;
1182 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
/* Height information is needed by dependent_on() during block processing. */
1184 ir_heights = heights_new(env->birg->irg);
1185 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
1186 heights_free(ir_heights);
1190 * Computes the stack argument layout type.
1191 * Changes a possibly allocated value param type by moving
1192 * entities to the stack layout type.
1194 * @param env the ABI environment
1195 * @param call the current call ABI
1196 * @param method_type the method type
1197 * @param param_map an array mapping method arguments to the stack layout type
1199 * @return the stack argument layout type
1201 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map)
/* Walk the parameters in stack order: direction depends on left_to_right flag
 * combined with the ISA's stack direction. */
1203 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1204 int inc = env->birg->main_env->arch_env.isa->stack_dir * dir;
1205 int n = get_method_n_params(method_type);
1206 int curr = inc > 0 ? 0 : n - 1;
1212 ir_type *val_param_tp = get_method_value_param_type(method_type);
1213 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1216 *param_map = map = obstack_alloc(&env->obst, n * sizeof(ir_entity *));
1217 res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
1218 for (i = 0; i < n; ++i, curr += inc) {
1219 ir_type *param_type = get_method_param_type(method_type, curr);
1220 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1223 if (arg->on_stack) {
1225 /* the entity was already created, move it to the param type */
1226 arg->stack_ent = get_method_value_param_ent(method_type, i);
1227 remove_struct_member(val_param_tp, arg->stack_ent);
1228 set_entity_owner(arg->stack_ent, res);
1229 add_struct_member(res, arg->stack_ent);
1230 /* must be automatic to set a fixed layout */
1231 set_entity_allocation(arg->stack_ent, allocation_automatic);
1234 snprintf(buf, sizeof(buf), "param_%d", i);
1235 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
/* Lay out each entity: padding before, alignment, then advance past the parameter. */
1237 ofs += arg->space_before;
1238 ofs = round_up2(ofs, arg->alignment);
1239 set_entity_offset(arg->stack_ent, ofs);
1240 ofs += arg->space_after;
1241 ofs += get_type_size_bytes(param_type);
1242 map[i] = arg->stack_ent;
1245 set_type_size_bytes(res, ofs);
1246 set_type_state(res, layout_fixed);
/**
 * Creates one be_Perm per register class to delimit the RegParams node.
 * Only registers that have an associated node in @p regs and are not
 * ignore registers take part in the Perm; the map is then updated to
 * point at the Perm's outputs.
 */
1251 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1254 struct obstack obst;
1256 obstack_init(&obst);
1258 /* Create a Perm after the RegParams node to delimit it. */
1259 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1260 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
/* Collect the participating nodes of this class on the obstack and
 * remember each node's register via its link field. */
1265 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1266 const arch_register_t *reg = &cls->regs[j];
1267 ir_node *irn = pmap_get(regs, (void *) reg);
1269 if(irn && !arch_register_type_is(reg, ignore)) {
1271 obstack_ptr_grow(&obst, irn);
1272 set_irn_link(irn, (void *) reg);
/* NULL-terminate the pointer array before finishing the obstack. */
1276 obstack_ptr_grow(&obst, NULL);
1277 in = obstack_finish(&obst);
1279 perm = be_new_Perm(cls, irg, bl, n_regs, in);
/* Re-register each Perm output under its original register and pin
 * the output to that register. */
1280 for(j = 0; j < n_regs; ++j) {
1281 ir_node *arg = in[j];
1282 arch_register_t *reg = get_irn_link(arg);
1283 pmap_insert(regs, reg, arg);
1284 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1287 obstack_free(&obst, in);
1290 obstack_free(&obst, NULL);
1295 const arch_register_t *reg; /**< the register of this map entry (paired with its node) */
/**
 * qsort comparator for reg_node_map_t entries: orders by register class
 * first, then by register index within the class.
 * NOTE(review): the class comparison subtracts reg_class pointers, which
 * assumes all classes live in one contiguous array — confirm against the
 * arch_register_class_t layout.
 */
1299 static int cmp_regs(const void *a, const void *b)
1301 const reg_node_map_t *p = a;
1302 const reg_node_map_t *q = b;
1304 if(p->reg->reg_class == q->reg->reg_class)
1305 return p->reg->index - q->reg->index;
1307 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flattens a register->node pmap into an obstack-allocated array,
 * sorted by cmp_regs (class, then index). The caller owns the obstack
 * memory and frees it via obstack_free.
 */
1310 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1313 int n = pmap_count(reg_map);
1315 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1317 foreach_pmap(reg_map, ent) {
1318 res[i].reg = ent->key;
1319 res[i].irn = ent->value;
1323 qsort(res, n, sizeof(res[0]), cmp_regs);
1328 * Creates a barrier.
/* The barrier takes all register nodes in @p regs plus the current memory
 * as inputs, re-emits them as Projs, and updates @p regs and @p *mem to
 * point at those Projs. Registers and memory may not be live across it. */
1330 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1332 ir_graph *irg = env->birg->irg;
1333 int n_regs = pmap_count(regs);
/* Build the input array: all mapped register nodes, then the memory. */
1339 rm = reg_map_to_arr(&env->obst, regs);
1341 for(n = 0; n < n_regs; ++n)
1342 obstack_ptr_grow(&env->obst, rm[n].irn);
1345 obstack_ptr_grow(&env->obst, *mem);
1349 in = (ir_node **) obstack_finish(&env->obst);
1350 irn = be_new_Barrier(irg, bl, n, in);
1351 obstack_free(&env->obst, in);
/* Create a Proj per register input and constrain both the input and the
 * output position to that single register. */
1353 for(n = 0; n < n_regs; ++n) {
1354 const arch_register_t *reg = rm[n].reg;
1356 int pos = BE_OUT_POS(n);
1359 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1360 be_node_set_reg_class(irn, n, reg->reg_class);
/* in_req selects whether the input is pinned to the register as well
 * (presumably; the guard line is elided in this excerpt — confirm). */
1362 be_set_constr_single_reg(irn, n, reg);
1363 be_set_constr_single_reg(irn, pos, reg);
1364 be_node_set_reg_class(irn, pos, reg->reg_class);
1365 arch_set_irn_register(&env->birg->main_env->arch_env, proj, reg);
1367 /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
1368 if(arch_register_type_is(reg, ignore) || arch_irn_is(&env->birg->main_env->arch_env, in[n], ignore))
1369 flags |= arch_irn_flags_ignore;
1371 if(arch_irn_is(&env->birg->main_env->arch_env, in[n], modify_sp))
1372 flags |= arch_irn_flags_modify_sp;
1374 be_node_set_flags(irn, pos, flags);
/* The register now maps to its post-barrier Proj. */
1376 pmap_insert(regs, (void *) reg, proj);
/* The memory input becomes the last Proj; hand it back to the caller. */
1380 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1383 obstack_free(&env->obst, rm);
1388 * Creates a be_Return for a Return node.
1390 * @param env the abi environment
1391 * @param irn the Return node or NULL if there was none
1392 * @param bl the block where the be_Return should be placed
1393 * @param mem the current memory
1394 * @param n_res number of return results
1396 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
1397 ir_node *mem, int n_res)
1399 be_abi_call_t *call = env->call;
1400 const arch_isa_t *isa = env->birg->main_env->arch_env.isa;
1402 pmap *reg_map = pmap_create();
1403 ir_node *keep = pmap_get(env->keep_map, bl);
1410 const arch_register_t **regs;
1414 get the valid stack node in this block.
1415 If we had a call in that block there is a Keep constructed by process_calls()
1416 which points to the last stack modification in that block. we'll use
1417 it then. Else we use the stack from the start block and let
1418 the ssa construction fix the usage.
1420 stack = be_abi_reg_map_get(env->regs, isa->sp);
/* A keep exists: take the stack node it references and drop the keep. */
1422 stack = get_irn_n(keep, 0);
1424 remove_End_keepalive(get_irg_end(env->birg->irg), keep);
1427 /* Insert results for Return into the register map. */
1428 for(i = 0; i < n_res; ++i) {
1429 ir_node *res = get_Return_res(irn, i);
1430 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1431 assert(arg->in_reg && "return value must be passed in register");
1432 pmap_insert(reg_map, (void *) arg->reg, res);
1435 /* Add uses of the callee save registers. */
1436 foreach_pmap(env->regs, ent) {
1437 const arch_register_t *reg = ent->key;
1438 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1439 pmap_insert(reg_map, ent->key, ent->value);
1442 be_abi_reg_map_set(reg_map, isa->sp, stack);
1444 /* Make the Epilogue node and call the arch's epilogue maker. */
1445 create_barrier(env, bl, &mem, reg_map, 1);
1446 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1449 Maximum size of the in array for Return nodes is
1450 return args + callee save/ignore registers + memory + stack pointer
1452 in_max = pmap_count(reg_map) + n_res + 2;
1454 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1455 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
/* Fixed slots: in[0] is presumably memory (line elided — confirm),
 * in[1] is the stack pointer. */
1458 in[1] = be_abi_reg_map_get(reg_map, isa->sp);
1463 /* clear SP entry, since it has already been grown. */
1464 pmap_insert(reg_map, (void *) isa->sp, NULL);
/* Append the return values in declaration order. */
1465 for(i = 0; i < n_res; ++i) {
1466 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1468 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1469 regs[n++] = arg->reg;
1471 /* Clear the map entry to mark the register as processed. */
1472 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1475 /* grow the rest of the stuff. */
1476 foreach_pmap(reg_map, ent) {
1479 regs[n++] = ent->key;
1483 /* The in array for the new back end return is now ready. */
1485 dbgi = get_irn_dbg_info(irn);
1489 /* we have to pop the shadow parameter in in case of struct returns */
1491 ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
1493 /* Set the register classes of the return's parameter accordingly. */
1494 for(i = 0; i < n; ++i)
1496 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1498 /* Free the space of the Epilog's in array and the register <-> proj map. */
1499 obstack_free(&env->obst, in);
1500 pmap_destroy(reg_map);
/* Walker context for lower_frame_sels_walker: collects value-param
 * entities into a singly linked list threaded through entity links. */
1505 typedef struct lower_frame_sels_env_t {
1507 ir_entity *value_param_list; /**< the list of all value param entities */
1508 ir_entity *value_param_tail; /**< the tail of the list of all value param entities */
1509 } lower_frame_sels_env_t;
1512 * Walker: Replaces Sels of frame type and
1513 * value param type entities by FrameAddress.
1514 * Links all used entities.
1516 static void lower_frame_sels_walker(ir_node *irn, void *data) {
1517 lower_frame_sels_env_t *ctx = data;
1520 ir_graph *irg = current_ir_graph;
1521 ir_node *frame = get_irg_frame(irg);
1522 ir_node *param_base = get_irg_value_param_base(irg);
1523 ir_node *ptr = get_Sel_ptr(irn);
/* Only Sels based directly on the frame or the value-param base are
 * rewritten; other Sels are left untouched. */
1525 if (ptr == frame || ptr == param_base) {
1526 be_abi_irg_t *env = ctx->env;
1527 ir_entity *ent = get_Sel_entity(irn);
1528 ir_node *bl = get_nodes_block(irn);
1531 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
1534 /* check, if it's a param sel and if have not seen this entity before */
1535 if (ptr == param_base &&
1536 ent != ctx->value_param_tail &&
1537 get_entity_link(ent) == NULL) {
/* Prepend to the list; the entity link doubles as the "seen" marker
 * (links were cleared to NULL beforehand by the caller). */
1538 set_entity_link(ent, ctx->value_param_list);
1539 ctx->value_param_list = ent;
1540 if (ctx->value_param_tail == NULL) ctx->value_param_tail = ent;
1547 * Check if a value parameter is transmitted as a register.
1548 * This might happen if the address of a parameter is taken which is
1549 * transmitted in registers.
1551 * Note that on some architectures this case must be handled specially
1552 * because the place of the backing store is determined by their ABI.
1554 * In the default case we move the entity to the frame type and create
1555 * a backing store into the first block.
1557 static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_entity *value_param_list) {
1558 be_abi_call_t *call = env->call;
1559 ir_graph *irg = env->birg->irg;
1560 ir_entity *ent, *next_ent, *new_list;
1562 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
/* First pass: filter the value-param list down to entities whose
 * argument is passed in a register (those need a backing store). */
1565 for (ent = value_param_list; ent; ent = next_ent) {
1566 int i = get_struct_member_index(get_entity_owner(ent), ent);
1567 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1569 next_ent = get_entity_link(ent);
1571 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", i));
1572 set_entity_link(ent, new_list);
1577 /* ok, change the graph */
1578 ir_node *start_bl = get_irg_start_block(irg);
1579 ir_node *first_bl = NULL;
1580 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1581 const ir_edge_t *edge;
1582 optimization_state_t state;
/* Find the first real block (the unique successor of the start block). */
1585 foreach_block_succ(start_bl, edge) {
1586 ir_node *succ = get_edge_src_irn(edge);
1587 if (start_bl != succ) {
1593 /* we had already removed critical edges, so the following
1594 assertion should be always true. */
1595 assert(get_Block_n_cfgpreds(first_bl) == 1);
1597 /* now create backing stores */
1598 frame = get_irg_frame(irg);
1599 imem = get_irg_initial_mem(irg);
/* Build the new memory Proj with optimizations disabled (presumably to
 * avoid CSE folding it away — the intervening line is elided). */
1601 save_optimization_state(&state);
1603 nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
1604 restore_optimization_state(&state);
1606 /* reroute all edges to the new memory source */
1607 edges_reroute(imem, nmem, irg);
1611 args = get_irg_args(irg);
1612 args_bl = get_nodes_block(args);
1613 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1614 int i = get_struct_member_index(get_entity_owner(ent), ent);
1615 ir_type *tp = get_entity_type(ent);
1616 ir_mode *mode = get_type_mode(tp);
1619 /* address for the backing store */
1620 addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
/* NOTE(review): memory chains through the PREVIOUS iteration's store;
 * the guard for the first iteration is elided in this excerpt — confirm. */
1623 mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
1625 /* the backing store itself */
1626 store = new_r_Store(irg, first_bl, mem, addr,
1627 new_r_Proj(irg, args_bl, args, mode, i));
1629 /* the new memory Proj gets the last Proj from store */
1630 set_Proj_pred(nmem, store);
1631 set_Proj_proj(nmem, pn_Store_M);
1633 /* move all entities to the frame type */
1634 frame_tp = get_irg_frame_type(irg);
1635 offset = get_type_size_bytes(frame_tp);
1637 /* we will add new entities: set the layout to undefined */
1638 assert(get_type_state(frame_tp) == layout_fixed);
1639 set_type_state(frame_tp, layout_undefined);
1640 for (ent = new_list; ent; ent = get_entity_link(ent)) {
1641 ir_type *tp = get_entity_type(ent);
1642 unsigned align = get_type_alignment_bytes(tp);
/* Round offset up to the entity's alignment (align is a power of two). */
1644 offset += align - 1;
1645 offset &= ~(align - 1);
1646 set_entity_owner(ent, frame_tp);
1647 add_class_member(frame_tp, ent);
1648 /* must be automatic to set a fixed layout */
1649 set_entity_allocation(ent, allocation_automatic);
1650 set_entity_offset(ent, offset);
1651 offset += get_type_size_bytes(tp);
1653 set_type_size_bytes(frame_tp, offset);
1654 /* fix the layout again */
1655 set_type_state(frame_tp, layout_fixed);
1661 * The start block has no jump, instead it has an initial exec Proj.
1662 * The backend wants to handle all blocks the same way, so we replace
1663 * the out cfg edge with a real jump.
1665 static void fix_start_block(ir_node *block, void *env) {
1668 ir_node *start_block;
1671 /* we processed the start block, return */
1675 irg = get_irn_irg(block);
1676 start_block = get_irg_start_block(irg);
/* Look through this block's cfg predecessors for one coming from the
 * start block and replace that edge with an explicit Jmp. */
1678 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
1679 ir_node *pred = get_Block_cfgpred(block, i);
1680 ir_node *pred_block = get_nodes_block(pred);
1682 /* ok, we are in the block, having start as cfg predecessor */
1683 if (pred_block == start_block) {
1684 ir_node *jump = new_r_Jmp(irg, pred_block);
1685 set_Block_cfgpred(block, i, jump);
1693 * Modify the irg itself and the frame type.
/* Main ABI lowering pass for one graph: lowers frame/param Sels, builds
 * the stack argument type, creates the RegParams node and prologue
 * barrier, replaces parameter Projs by register Projs or stack loads,
 * and turns Return nodes into be_Return nodes. */
1695 static void modify_irg(be_abi_irg_t *env)
1697 be_abi_call_t *call = env->call;
1698 const arch_isa_t *isa = env->birg->main_env->arch_env.isa;
1699 const arch_register_t *sp = arch_isa_sp(isa);
1700 ir_graph *irg = env->birg->irg;
1701 ir_node *bl = get_irg_start_block(irg);
1702 ir_node *end = get_irg_end_block(irg);
1703 ir_node *old_mem = get_irg_initial_mem(irg);
1704 ir_node *new_mem_proj;
1706 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1707 pset *dont_save = pset_new_ptr(8);
1714 const arch_register_t *fp_reg;
1715 ir_node *frame_pointer;
1716 ir_node *reg_params_bl;
1719 ir_node *value_param_base;
1720 const ir_edge_t *edge;
1721 ir_type *arg_type, *bet_type, *tp;
1722 lower_frame_sels_env_t ctx;
1723 ir_entity **param_map;
1725 bitset_t *used_proj_nr;
1726 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1728 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1730 /* set the links of all frame entities to NULL, we use it
1731 to detect if an entity is already linked in the value_param_list */
1732 tp = get_method_value_param_type(method_type);
1734 for (i = get_struct_n_members(tp) - 1; i >= 0; --i)
1735 set_entity_link(get_struct_member(tp, i), NULL);
1738 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1740 ctx.value_param_list = NULL;
1741 ctx.value_param_tail = NULL;
1742 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1744 /* value_param_base anchor is not needed anymore now */
1745 value_param_base = get_irg_value_param_base(irg);
1746 be_kill_node(value_param_base);
1747 set_irg_value_param_base(irg, new_r_Bad(irg));
1749 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1750 env->regs = pmap_create();
1752 used_proj_nr = bitset_alloca(1024);
1753 n_params = get_method_n_params(method_type);
1754 args = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
1755 memset(args, 0, n_params * sizeof(args[0]));
1757 /* Check if a value parameter is transmitted as a register.
1758 * This might happen if the address of a parameter is taken which is
1759 * transmitted in registers.
1761 * Note that on some architectures this case must be handled specially
1762 * because the place of the backing store is determined by their ABI.
1764 * In the default case we move the entity to the frame type and create
1765 * a backing store into the first block.
1767 fix_address_of_parameter_access(env, ctx.value_param_list);
1769 /* Fill the argument vector */
1770 arg_tuple = get_irg_args(irg);
1771 foreach_out_edge(arg_tuple, edge) {
1772 ir_node *irn = get_edge_src_irn(edge);
1773 if (! is_Anchor(irn)) {
1774 int nr = get_Proj_proj(irn);
1776 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
/* NOTE(review): "¶m_map" below looks like a mojibake of "&param_map"
 * introduced by a broken encoding conversion — confirm against upstream. */
1780 arg_type = compute_arg_type(env, call, method_type, ¶m_map);
1781 bet_type = call->cb->get_between_type(env->cb);
1782 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
1784 /* Count the register params and add them to the number of Projs for the RegParams node */
1785 for(i = 0; i < n_params; ++i) {
1786 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1787 if(arg->in_reg && args[i]) {
1788 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1789 assert(i == get_Proj_proj(args[i]));
1791 /* For now, associate the register with the old Proj from Start representing that argument. */
1792 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1793 bitset_set(used_proj_nr, i);
1794 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1798 /* Collect all callee-save registers */
1799 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1800 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1801 for(j = 0; j < cls->n_regs; ++j) {
1802 const arch_register_t *reg = &cls->regs[j];
1803 if(arch_register_type_is(reg, callee_save) ||
1804 arch_register_type_is(reg, state)) {
1805 pmap_insert(env->regs, (void *) reg, NULL);
/* Stack and base pointer always take part in the RegParams node. */
1810 pmap_insert(env->regs, (void *) sp, NULL);
1811 pmap_insert(env->regs, (void *) isa->bp, NULL);
1812 reg_params_bl = get_irg_start_block(irg);
1813 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1814 add_irn_dep(env->reg_params, get_irg_start(irg));
1817 * make proj nodes for the callee save registers.
1818 * memorize them, since Return nodes get those as inputs.
1820 * Note, that if a register corresponds to an argument, the regs map contains
1821 * the old Proj from start for that argument.
1824 rm = reg_map_to_arr(&env->obst, env->regs);
1825 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1826 arch_register_t *reg = (void *) rm[i].reg;
1827 ir_mode *mode = reg->reg_class->mode;
1829 int pos = BE_OUT_POS((int) nr);
1835 bitset_set(used_proj_nr, nr);
1836 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1837 pmap_insert(env->regs, (void *) reg, proj);
1838 be_set_constr_single_reg(env->reg_params, pos, reg);
1839 arch_set_irn_register(&env->birg->main_env->arch_env, proj, reg);
1842 * If the register is an ignore register,
1843 * The Proj for that register shall also be ignored during register allocation.
1845 if(arch_register_type_is(reg, ignore))
1846 flags |= arch_irn_flags_ignore;
/* presumably guarded by "if(reg == sp)" — the guard line is elided here. */
1849 flags |= arch_irn_flags_modify_sp;
1851 be_node_set_flags(env->reg_params, pos, flags);
1853 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1855 obstack_free(&env->obst, rm);
1857 /* create a new initial memory proj */
1858 assert(is_Proj(old_mem));
1859 new_mem_proj = new_r_Proj(irg, get_nodes_block(old_mem),
1860 new_r_Unknown(irg, mode_T), mode_M,
1861 get_Proj_proj(old_mem));
1864 /* Generate the Prologue */
1865 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1867 /* do the stack allocation BEFORE the barrier, or spill code
1868 might be added before it */
1869 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1870 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
1871 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1873 create_barrier(env, bl, &mem, env->regs, 0);
/* After the barrier, the stack pointer maps to its post-barrier Proj. */
1875 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1876 arch_set_irn_register(&env->birg->main_env->arch_env, env->init_sp, sp);
1878 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1879 set_irg_frame(irg, frame_pointer);
1880 pset_insert_ptr(env->ignore_regs, fp_reg);
1882 /* rewire old mem users to new mem */
1883 set_Proj_pred(new_mem_proj, get_Proj_pred(old_mem));
1884 exchange(old_mem, mem);
1886 set_irg_initial_mem(irg, mem);
1888 /* Now, introduce stack param nodes for all parameters passed on the stack */
1889 for(i = 0; i < n_params; ++i) {
1890 ir_node *arg_proj = args[i];
1891 ir_node *repl = NULL;
1893 if(arg_proj != NULL) {
1894 be_abi_call_arg_t *arg;
1895 ir_type *param_type;
1896 int nr = get_Proj_proj(arg_proj);
1899 nr = MIN(nr, n_params);
1900 arg = get_call_arg(call, 0, nr);
1901 param_type = get_method_param_type(method_type, nr);
/* Register-passed argument: the replacement is the register's Proj
 * (the "if(arg->in_reg)" guard is elided in this excerpt). */
1904 repl = pmap_get(env->regs, (void *) arg->reg);
1905 } else if(arg->on_stack) {
1906 ir_node *addr = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1908 /* For atomic parameters which are actually used, we create a Load node. */
1909 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1910 ir_mode *mode = get_type_mode(param_type);
1911 ir_mode *load_mode = arg->load_mode;
1913 ir_node *load = new_r_Load(irg, reg_params_bl, new_NoMem(), addr, load_mode);
1914 set_irn_pinned(load, op_pin_state_floats);
1915 repl = new_r_Proj(irg, reg_params_bl, load, load_mode, pn_Load_res);
1917 if (mode != load_mode) {
1918 repl = new_r_Conv(irg, reg_params_bl, repl, mode);
1921 /* The stack parameter is not primitive (it is a struct or array),
1922 * we thus will create a node representing the parameter's address
1928 assert(repl != NULL);
1930 /* Beware: the mode of the register parameters is always the mode of the register class
1931 which may be wrong. Add Conv's then. */
1932 mode = get_irn_mode(args[i]);
1933 if (mode != get_irn_mode(repl)) {
1934 repl = new_r_Conv(irg, get_irn_n(repl, -1), repl, mode);
1936 exchange(args[i], repl);
1940 /* the arg proj is not needed anymore now and should be only used by the anchor */
1941 assert(get_irn_n_edges(arg_tuple) == 1);
1942 be_kill_node(arg_tuple);
1943 set_irg_args(irg, new_rd_Bad(irg));
1945 /* All Return nodes hang on the End node, so look for them there. */
1946 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
1947 ir_node *irn = get_Block_cfgpred(end, i);
1949 if (is_Return(irn)) {
1950 ir_node *blk = get_nodes_block(irn);
1951 ir_node *mem = get_Return_mem(irn);
1952 ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
1956 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
1957 the code is dead and will never be executed. */
1959 del_pset(dont_save);
1960 obstack_free(&env->obst, args);
1962 /* handle start block here (place a jump in the block) */
1964 irg_block_walk_graph(irg, fix_start_block, NULL, &i);
1967 /** Fix the state inputs of calls that still hang on unknowns */
1969 void fix_call_state_inputs(be_abi_irg_t *env)
1971 const arch_isa_t *isa = env->isa;
1973 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
1975 /* Collect caller save registers */
/* NOTE(review): the comment says "caller save" but the filter below is
 * for state registers — the comment may be stale; confirm. */
1976 n = arch_isa_get_n_reg_class(isa);
1977 for(i = 0; i < n; ++i) {
1979 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1980 for(j = 0; j < cls->n_regs; ++j) {
1981 const arch_register_t *reg = arch_register_for_index(cls, j);
1982 if(arch_register_type_is(reg, state)) {
1983 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
/* Rewire each recorded call: replace its trailing state inputs with the
 * graph's node for the corresponding state register. */
1988 n = ARR_LEN(env->calls);
1989 n_states = ARR_LEN(stateregs);
1990 for(i = 0; i < n; ++i) {
1992 ir_node *call = env->calls[i];
1994 arity = get_irn_arity(call);
1996 /* the state reg inputs are the last n inputs of the calls */
1997 for(s = 0; s < n_states; ++s) {
1998 int inp = arity - n_states + s;
1999 const arch_register_t *reg = stateregs[s];
2000 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
2002 set_irn_n(call, inp, regnode);
2008 * Create a trampoline entity for the given method.
/* The trampoline lives in the backend's pic_trampolines_type, keeps the
 * method's type, and gets a mangled linker ident ("L<old>$stub"). */
2010 static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
2012 ir_type *type = get_entity_type(method);
2013 ident *old_id = get_entity_ld_ident(method);
2014 ident *id = mangle3("L", old_id, "$stub");
2015 ir_type *parent = be->pic_trampolines_type;
2016 ir_entity *ent = new_entity(parent, old_id, type);
2017 set_entity_ld_ident(ent, id);
2018 set_entity_visibility(ent, visibility_local);
2019 set_entity_variability(ent, variability_uninitialized);
2025 * Returns the trampoline entity for the given method.
/* Caches trampolines in ent_trampoline_map so each method gets one. */
2027 static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
2029 ir_entity *result = pmap_get(env->ent_trampoline_map, method);
2030 if (result == NULL) {
2031 result = create_trampoline(env, method);
2032 pmap_insert(env->ent_trampoline_map, method, result);
2039 * Returns non-zero if a given entity can be accessed using a relative address.
/* True for entities defined in this module (initialized) or with local
 * visibility — both are laid out at a link-time-known relative offset. */
2041 static int can_address_relative(ir_entity *entity)
2043 return get_entity_variability(entity) == variability_initialized
2044 || get_entity_visibility(entity) == visibility_local;
2047 /** patches SymConsts to work in position independent code */
/* Walker: for every SymConst operand of @p node, either redirect calls to
 * a local trampoline, rebase the address on the PIC base register, or add
 * an extra load through the GOT-style indirection for external data. */
2048 static void fix_pic_symconsts(ir_node *node, void *data)
2058 be_abi_irg_t *env = data;
2060 be_main_env_t *be = env->birg->main_env;
2062 arity = get_irn_arity(node);
2063 for (i = 0; i < arity; ++i) {
2064 ir_node *pred = get_irn_n(node, i);
2066 if (!is_SymConst(pred))
2069 entity = get_SymConst_entity(pred);
2070 block = get_nodes_block(pred);
2071 irg = get_irn_irg(pred);
2073 /* calls can jump to relative addresses, so we can directly jump to
2074 the (relatively) known call address or the trampoline */
2075 if (is_Call(node) && i == 1) {
2077 ir_entity *trampoline;
2078 ir_node *trampoline_const;
2080 if (can_address_relative(entity))
/* Non-local callee: route the call through a local trampoline stub. */
2083 dbgi = get_irn_dbg_info(pred);
2084 trampoline = get_trampoline(be, entity);
2085 trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code, trampoline, NULL);
2086 set_irn_n(node, i, trampoline_const);
2090 /* everything else is accessed relative to EIP */
2091 mode = get_irn_mode(pred);
2092 unknown = new_r_Unknown(irg, mode);
2093 pic_base = arch_code_generator_get_pic_base(env->birg->cg);
2094 add = new_r_Add(irg, block, pic_base, pred, mode);
2096 /* make sure the walker doesn't visit this add again */
2097 mark_irn_visited(add);
2099 /* all ok now for locally constructed stuff */
2100 if (can_address_relative(entity)) {
2101 set_irn_n(node, i, add);
2105 /* we need an extra indirection for global data outside our current
2106 module. The loads are always safe and can therefore float
2107 and need no memory input */
2108 load = new_r_Load(irg, block, new_NoMem(), add, mode);
2109 load_res = new_r_Proj(irg, block, load, mode, pn_Load_res);
2110 set_irn_pinned(load, op_pin_state_floats);
2112 set_irn_n(node, i, load_res);
/**
 * Sets up the ABI environment for one graph: queries the ISA's calling
 * convention, optionally applies PIC fixups, lowers calls, runs the main
 * graph modification, and registers important nodes with survive-DCE.
 * The returned be_abi_irg_t is owned by the caller (freed via be_abi_free).
 */
2116 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
2118 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
2119 ir_node *old_frame = get_irg_frame(birg->irg);
2120 ir_graph *irg = birg->irg;
2124 optimization_state_t state;
2125 unsigned *limited_bitset;
2127 be_omit_fp = birg->main_env->options->omit_fp;
2129 obstack_init(&env->obst);
2131 env->isa = birg->main_env->arch_env.isa;
2132 env->method_type = get_entity_type(get_irg_entity(irg));
2133 env->call = be_abi_call_new(env->isa->sp->reg_class);
2134 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
2136 env->ignore_regs = pset_new_ptr_default();
2137 env->keep_map = pmap_create();
2138 env->dce_survivor = new_survive_dce();
/* sp_req: a "limited" requirement allowing exactly the stack pointer. */
2141 env->sp_req.type = arch_register_req_type_limited;
2142 env->sp_req.cls = arch_register_get_class(env->isa->sp);
2143 limited_bitset = rbitset_obstack_alloc(&env->obst, env->sp_req.cls->n_regs);
2144 rbitset_set(limited_bitset, arch_register_get_index(env->isa->sp));
2145 env->sp_req.limited = limited_bitset;
/* sp_cls_req: any register of the stack pointer's class. */
2147 env->sp_cls_req.type = arch_register_req_type_normal;
2148 env->sp_cls_req.cls = arch_register_get_class(env->isa->sp);
2150 /* Beware: later we replace this node by the real one, ensure it is not CSE'd
2151 to another Unknown or the stack pointer gets used */
2152 save_optimization_state(&state);
2154 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
2155 restore_optimization_state(&state);
2156 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
2158 env->calls = NEW_ARR_F(ir_node*, 0);
2160 if (birg->main_env->options->pic) {
2161 irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
2164 /* Lower all call nodes in the IRG. */
2168 Beware: init backend abi call object after processing calls,
2169 otherwise some information might be not yet available.
2171 env->cb = env->call->cb->init(env->call, &birg->main_env->arch_env, irg);
2173 /* Process the IRG */
2176 /* fix call inputs for state registers */
2177 fix_call_state_inputs(env);
2179 /* We don't need the keep map anymore. */
2180 pmap_destroy(env->keep_map);
2181 env->keep_map = NULL;
2183 /* calls array is not needed anymore */
2184 DEL_ARR_F(env->calls);
2187 /* reroute the stack origin of the calls to the true stack origin. */
2188 exchange(dummy, env->init_sp);
2189 exchange(old_frame, get_irg_frame(irg));
2191 /* Make some important node pointers survive the dead node elimination. */
2192 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
2193 foreach_pmap(env->regs, ent) {
2194 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
2197 env->call->cb->done(env->cb);
/** Releases all resources held by an ABI environment created by be_abi_introduce(). */
2202 void be_abi_free(be_abi_irg_t *env)
2204 be_abi_call_free(env->call);
2205 free_survive_dce(env->dce_survivor);
2206 del_pset(env->ignore_regs);
2207 pmap_destroy(env->regs);
2208 obstack_free(&env->obst, NULL);
/**
 * Sets, in @p bs, the index bit of every ignore register of class @p cls
 * recorded in the ABI environment's ignore set.
 */
2212 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
2214 arch_register_t *reg;
2216 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
2217 if(reg->reg_class == cls)
2218 bitset_set(bs, reg->index);
2221 /* Returns the stack layout from a abi environment. */
2222 const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
2229 | ___(_)_ __ / ___|| |_ __ _ ___| | __
2230 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
2231 | _| | |> < ___) | || (_| | (__| <
2232 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/* Growable array of nodes (libFirm ARR_F convention). */
2236 typedef ir_node **node_array;
/* Walker environment for collect_stack_nodes_walker. */
2238 typedef struct fix_stack_walker_env_t {
2239 node_array sp_nodes; /**< all stack-pointer-modifying nodes found so far */
2240 const arch_env_t *arch_env; /**< arch environment used to query node flags */
2241 } fix_stack_walker_env_t;
2244 * Walker. Collect all stack modifying nodes.
2246 static void collect_stack_nodes_walker(ir_node *node, void *data)
2248 fix_stack_walker_env_t *env = data;
/* Only data nodes qualify; mode_M/mode_T nodes cannot carry the sp value. */
2250 if (arch_irn_is(env->arch_env, node, modify_sp)) {
2251 assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
2252 ARR_APP1(ir_node*, env->sp_nodes, node);
/**
 * Collects all stack-pointer-modifying nodes and runs SSA construction
 * over them so that every user sees the correct stack value; newly
 * created Phis are constrained to the stack pointer register.
 */
2256 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
2258 be_ssa_construction_env_t senv;
2261 be_irg_t *birg = env->birg;
2262 be_lv_t *lv = be_get_birg_liveness(birg);
2263 fix_stack_walker_env_t walker_env;
2266 walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
2267 walker_env.arch_env = &birg->main_env->arch_env;
2268 isa = walker_env.arch_env->isa;
2270 irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
2272 /* nothing to be done if we didn't find any node, in fact we mustn't
2273 * continue, as for endless loops incsp might have had no users and is bad
2276 len = ARR_LEN(walker_env.sp_nodes);
2278 DEL_ARR_F(walker_env.sp_nodes);
2282 be_ssa_construction_init(&senv, birg);
2283 be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
2284 ARR_LEN(walker_env.sp_nodes));
2285 be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
2286 ARR_LEN(walker_env.sp_nodes));
/* If liveness information exists, keep it up to date for all sp nodes
 * and the Phis introduced by the SSA construction. */
2289 len = ARR_LEN(walker_env.sp_nodes);
2290 for(i = 0; i < len; ++i) {
2291 be_liveness_update(lv, walker_env.sp_nodes[i]);
2293 be_ssa_construction_update_liveness_phis(&senv, lv);
2296 phis = be_ssa_construction_get_new_phis(&senv);
2298 /* set register requirements for stack phis */
2299 len = ARR_LEN(phis);
2300 for(i = 0; i < len; ++i) {
2301 ir_node *phi = phis[i];
2302 be_set_phi_reg_req(walker_env.arch_env, phi, &env->sp_req);
2303 be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
2304 arch_set_irn_register(walker_env.arch_env, phi, env->isa->sp);
2306 be_ssa_construction_destroy(&senv);
2308 DEL_ARR_F(walker_env.sp_nodes);
/**
 * Walks the schedule of one block, assigning final frame-entity offsets
 * (including the running stack bias when the frame pointer is omitted)
 * and tracking how each instruction changes the stack pointer.
 *
 * @param env the ABI environment
 * @param bl the block to process
 * @param real_bias the stack bias at the entry of the block
 * @return the stack bias at the end of the block
 */
2311 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
2313 const arch_env_t *arch_env = &env->birg->main_env->arch_env;
2314 int omit_fp = env->call->flags.bits.try_omit_fp;
2316 int wanted_bias = real_bias;
2318 sched_foreach(bl, irn) {
2322 Check, if the node relates to an entity on the stack frame.
2323 If so, set the true offset (including the bias) for that
2326 ir_entity *ent = arch_get_frame_entity(arch_env, irn);
/* With a frame pointer, entity offsets are fp-relative and need no bias. */
2328 int bias = omit_fp ? real_bias : 0;
2329 int offset = get_stack_entity_offset(env->frame, ent, bias);
2330 arch_set_frame_offset(arch_env, irn, offset);
2331 DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
2332 ent, offset, bias));
2336 * If the node modifies the stack pointer by a constant offset,
2337 * record that in the bias.
2339 ofs = arch_get_sp_bias(arch_env, irn);
2341 if(be_is_IncSP(irn)) {
2342 /* fill in real stack frame size */
2343 if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
2344 ir_type *frame_type = get_irg_frame_type(env->birg->irg);
2345 ofs = (int) get_type_size_bytes(frame_type);
2346 be_set_IncSP_offset(irn, ofs);
2347 } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
2348 ir_type *frame_type = get_irg_frame_type(env->birg->irg);
2349 ofs = - (int)get_type_size_bytes(frame_type);
2350 be_set_IncSP_offset(irn, ofs);
2352 if (be_get_IncSP_align(irn)) {
2353 /* patch IncSP to produce an aligned stack pointer */
2354 ir_type *between_type = env->frame->between_type;
2355 int between_size = get_type_size_bytes(between_type);
2356 int alignment = env->isa->stack_alignment;
2357 int delta = (real_bias + ofs + between_size) % env->isa->stack_alignment;
/* Pad the IncSP so the resulting sp is a multiple of the alignment
 * (presumably guarded by delta > 0 — the guard line is elided here). */
2360 be_set_IncSP_offset(irn, ofs + alignment - delta);
2361 real_bias += alignment - delta;
2364 /* adjust so real_bias corresponds with wanted_bias */
2365 int delta = wanted_bias - real_bias;
2368 be_set_IncSP_offset(irn, ofs + delta);
2379 assert(real_bias == wanted_bias);
/* NOTE(review): the `struct bias_walk {` opener, at least one further
 * field (a `between_size` is assigned by be_abi_fix_stack_bias) and the
 * closing brace are elided from this view. */
2384  * A helper struct for the bias walker.
2387 	be_abi_irg_t *env;        /**< The ABI irg environment. */
2388 	int start_block_bias;     /**< The bias at the end of the start block. */
2390 	ir_node *start_block;     /**< The start block of the current graph. */
2394 * Block-Walker: fix all stack offsets
2396 static void stack_bias_walker(ir_node *bl, void *data)
2398 struct bias_walk *bw = data;
2399 if (bl != bw->start_block) {
2400 process_stack_bias(bw->env, bl, bw->start_block_bias);
2404 void be_abi_fix_stack_bias(be_abi_irg_t *env)
2406 ir_graph *irg = env->birg->irg;
2407 struct bias_walk bw;
2409 stack_frame_compute_initial_offset(env->frame);
2410 // stack_layout_dump(stdout, env->frame);
2412 /* Determine the stack bias at the end of the start block. */
2413 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
2414 bw.between_size = get_type_size_bytes(env->frame->between_type);
2416 /* fix the bias is all other blocks */
2418 bw.start_block = get_irg_start_block(irg);
2419 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
2422 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2424 assert(arch_register_type_is(reg, callee_save));
2425 assert(pmap_contains(abi->regs, (void *) reg));
2426 return pmap_get(abi->regs, (void *) reg);
2429 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2431 assert(arch_register_type_is(reg, ignore));
2432 assert(pmap_contains(abi->regs, (void *) reg));
2433 return pmap_get(abi->regs, (void *) reg);
2437 * Returns non-zero if the ABI has omitted the frame pointer in
2438 * the current graph.
2440 int be_abi_omit_fp(const be_abi_irg_t *abi) {
2441 return abi->call->flags.bits.try_omit_fp;