 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Backend ABI implementation.
 * @author      Sebastian Hack, Michael Beck
 */
#include "irgraph_t.h"
#include "iredges_t.h"
#include "irprintf_t.h"
#include "raw_bitset.h"
#include "bessaconstr.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef struct _be_abi_call_arg_t {
	unsigned is_res   : 1;  /**< 1: the call argument is a return value. 0: it's a call parameter. */
	unsigned in_reg   : 1;  /**< 1: this argument is transmitted in registers. */
	unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */

	const arch_register_t *reg;
	unsigned alignment;     /**< stack alignment */
	unsigned space_before;  /**< allocate space before */
	unsigned space_after;   /**< allocate space after */
struct _be_abi_call_t {
	be_abi_call_flags_t          flags;  /**< Flags describing the ABI behavior on calls */
	int                          pop;    /**< number of bytes the stack frame is shrunk by the callee on return. */
	const be_abi_callbacks_t    *cb;
	ir_type                     *between_type;
	const arch_register_class_t *cls_addr;  /**< register class of the call address */
/**
 * The ABI information for the current birg.
 */
struct _be_abi_irg_t {
	be_irg_t             *birg;         /**< The back end IRG. */
	const arch_env_t     *arch_env;
	survive_dce_t        *dce_survivor;
	be_abi_call_t        *call;         /**< The ABI call information. */
	ir_type              *method_type;  /**< The type of the method of the IRG. */
	ir_node              *init_sp;      /**< The node representing the stack pointer
	                                         at the start of the function. */
	ir_node              *start;        /**< The be_Start params node. */
	pmap                 *regs;         /**< A map of all callee-save and ignore regs to
	                                         their Projs to the RegParams node. */
	int                   start_block_bias;  /**< The stack bias at the end of the start block. */
	void                 *cb;           /**< ABI Callback self pointer. */
	pmap                 *keep_map;     /**< mapping blocks to keep nodes. */
	pset                 *ignore_regs;  /**< Additional registers which shall be ignored. */
	ir_node             **calls;        /**< flexible array containing all be_Call nodes */
	arch_register_req_t  *sp_req;
	be_stack_layout_t     frame;        /**< The stack frame model. */
static heights_t *ir_heights;

/** Flag: if set, try to omit the frame pointer in all routines. */
static int be_omit_fp = 1;

/** Flag: if set, try to omit the frame pointer in leaf routines only. */
static int be_omit_leaf_fp = 1;
/*
     _    ____ ___    ____      _ _ _                _
    / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
   / _ \ |  _ \| |  | |   / _` | | | '_ \ / _` |/ __| |/ / __|
  / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
 /_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/

  These callbacks are used by the backend to set the parameters
  for a specific call type.
*/
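/*
 * A minimal usage sketch (hypothetical backend code, not part of this file):
 * a backend's get_call_abi() hook typically fills a be_abi_call_t with the
 * setters below, e.g. for a cdecl-like convention that passes everything on
 * the stack and returns the first result in a register:
 *
 *     static void my_get_call_abi(const void *self, ir_type *method_type,
 *                                 be_abi_call_t *call)
 *     {
 *         int i, n = get_method_n_params(method_type);
 *         for (i = 0; i < n; ++i) {
 *             ir_mode *mode = get_type_mode(get_method_param_type(method_type, i));
 *             be_abi_call_param_stack(call, i, mode, 4, 0, 0);
 *         }
 *         if (get_method_n_ress(method_type) > 0)
 *             be_abi_call_res_reg(call, 0, &my_registers[MY_REG_RESULT]);
 *     }
 *
 * my_get_call_abi, my_registers and MY_REG_RESULT are illustrative names
 * only; each backend defines its own.
 */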
/**
 * Set compare function: compares two ABI call object arguments.
 */
static int cmp_call_arg(const void *a, const void *b, size_t n)
	const be_abi_call_arg_t *p = a, *q = b;
	return !(p->is_res == q->is_res && p->pos == q->pos);

/**
 * Get an ABI call object argument.
 *
 * @param call    the abi call
 * @param is_res  true for call results, false for call arguments
 * @param pos     position of the argument
 */
static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));
	hash = is_res * 128 + pos;
	return set_find(call->params, &arg, sizeof(arg), hash);
/**
 * Create an ABI call object argument and insert it into the call's argument set.
 *
 * @param call    the abi call
 * @param is_res  true for call results, false for call arguments
 * @param pos     position of the argument
 */
static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));
	hash = is_res * 128 + pos;
	return set_insert(call->params, &arg, sizeof(arg), hash);
/* Set the flags for a call. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)

/* Sets the number of bytes the stack frame is shrunk by the callee on return. */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)

/* Set the register class for the call address. */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
	call->cls_addr = cls;
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
	be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
	arg->load_mode    = load_mode;
	arg->alignment    = alignment;
	arg->space_before = space_before;
	arg->space_after  = space_after;
	assert(alignment > 0 && "Alignment must be greater than 0");
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);

void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
/* Get the flags of an ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/**
 * Constructor for a new ABI call object.
 *
 * @param cls_addr  register class of the call address
 *
 * @return the new ABI call object
 */
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
	be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
	call->params   = new_set(cmp_call_arg, 16);
	call->cls_addr = cls_addr;
	call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;

/**
 * Destructor for an ABI call object.
 */
static void be_abi_call_free(be_abi_call_t *call)
	del_set(call->params);
/*
 |  ___| __ __ _ _ __ ___   ___    | | | | __ _ _ __   __| | (_)_ __   __ _
 | |_ | '__/ _` | '_ ` _ \ / _ \   | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
 |  _|| | | (_| | | | | | |  __/   |  _  | (_| | | | | (_| | | | | | | (_| |
 |_|  |_|  \__,_|_| |_| |_|\___|   |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |

  Handling of the stack frame. It is composed of three types:
  1) The type of the arguments which are pushed on the stack.
  2) The "between type" which consists of stuff the call of the
     function pushes on the stack (like the return address and
     the old base pointer for ia32).
  3) The Firm frame type which consists of all local variables
*/
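/*
 * Illustrative sketch only: for a typical decreasing stack (stack_dir < 0,
 * cf. stack_frame_init() below) the three types appear in this order, from
 * low to high addresses:
 *
 *     low addresses
 *     +----------------+
 *     |  frame type    |  local variables
 *     +----------------+
 *     |  between type  |  e.g. return address, old base pointer (ia32)
 *     +----------------+
 *     |  argument type |  parameters pushed by the caller
 *     +----------------+
 *     high addresses
 *
 * This mirrors the order[] array set up in stack_frame_init(): locals,
 * between, args for a decreasing stack.
 */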
static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
	ir_type *t = get_entity_owner(ent);
	int ofs    = get_entity_offset(ent);

	/* Find the type the entity is contained in. */
	for (index = 0; index < N_FRAME_TYPES; ++index) {
		if (frame->order[index] == t)

	/* Add the size of all the types below the one of the entity to the entity's offset */
		ofs += get_type_size_bytes(frame->order[index]);

	/* correct the offset by the initial position of the frame pointer */
	ofs -= frame->initial_offset;

	/* correct the offset with the current bias. */
/**
 * Retrieve the entity with given offset from a frame type.
 */
static ir_entity *search_ent_with_offset(ir_type *t, int offset)
	for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
		ir_entity *ent = get_compound_member(t, i);
		if (get_entity_offset(ent) == offset)

static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
	ir_type   *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
	ir_entity *ent  = search_ent_with_offset(base, 0);

		frame->initial_offset = frame->stack_dir < 0
			? get_type_size_bytes(frame->frame_type)
			: get_type_size_bytes(frame->between_type);
		frame->initial_offset = get_stack_entity_offset(frame, ent, 0);

	return frame->initial_offset;
/**
 * Initializes the frame layout from parts.
 *
 * @param frame      the stack layout that will be initialized
 * @param args       the stack argument layout type
 * @param between    the between layout type
 * @param locals     the method frame type
 * @param stack_dir  the stack direction: < 0 decreasing, > 0 increasing addresses
 * @param param_map  an array mapping method argument positions to the stack argument type
 *
 * @return the initialized stack layout
 */
static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
                                           ir_type *between, ir_type *locals, int stack_dir,
                                           ir_entity *param_map[])
	frame->arg_type       = args;
	frame->between_type   = between;
	frame->frame_type     = locals;
	frame->initial_offset = 0;
	frame->initial_bias   = 0;
	frame->stack_dir      = stack_dir;
	frame->order[1]       = between;
	frame->param_map      = param_map;

		frame->order[0] = args;
		frame->order[2] = locals;

		/* typical decreasing stack: locals have the
		 * lowest addresses, arguments the highest */
		frame->order[0] = locals;
		frame->order[2] = args;
/**
 * Returns non-zero if the call argument at given position
 * is transferred on the stack.
 */
static inline int is_on_stack(be_abi_call_t *call, int pos)
	be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
	return arg && !arg->in_reg;
/*
  Adjustment of the calls inside a graph.
*/

/**
 * Transform a call node into a be_Call node.
 *
 * @param env      The ABI environment for the current irg.
 * @param irn      The call node.
 * @param curr_sp  The stack pointer node to use.
 * @return The stack pointer after the call.
 */
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
	ir_graph *irg              = env->birg->irg;
	const arch_env_t *arch_env = env->birg->main_env->arch_env;
	ir_type *call_tp           = get_Call_type(irn);
	ir_node *call_ptr          = get_Call_ptr(irn);
	int n_params               = get_method_n_params(call_tp);
	ir_node *curr_mem          = get_Call_mem(irn);
	ir_node *bl                = get_nodes_block(irn);
	int stack_dir              = arch_env->stack_dir;
	const arch_register_t *sp  = arch_env->sp;
	be_abi_call_t *call        = be_abi_call_new(sp->reg_class);
	ir_mode *mach_mode         = sp->reg_class->mode;
	struct obstack *obst       = be_get_birg_obst(irg);
	int no_alloc               = call->flags.bits.frame_is_setup_on_call;
	int n_res                  = get_method_n_ress(call_tp);
	int do_seq                 = call->flags.bits.store_args_sequential && !no_alloc;
	ir_node *res_proj          = NULL;
	int n_reg_params           = 0;
	int n_stack_params         = 0;
	pset_new_t destroyed_regs, states;
	pset_new_iterator_t iter;
	int n_reg_results = 0;
	const arch_register_t *reg;
	const ir_edge_t *edge;
	int *stack_param_idx;
	int i, n, destroy_all_regs;

	pset_new_init(&destroyed_regs);
	pset_new_init(&states);

	/* Let the isa fill out the abi description for that call node. */
	arch_env_get_call_abi(arch_env, call_tp, call);

	/* Insert code to put the stack arguments on the stack. */
	assert(get_Call_n_params(irn) == n_params);
	assert(obstack_object_size(obst) == 0);
	stack_param_idx = ALLOCAN(int, n_params);
	for (i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
		stack_size += round_up2(arg->space_before, arg->alignment);
		stack_size += round_up2(arg_size, arg->alignment);
		stack_size += round_up2(arg->space_after, arg->alignment);
		stack_param_idx[n_stack_params++] = i;

	/* Collect all arguments which are passed in registers. */
	reg_param_idxs = ALLOCAN(int, n_params);
	for (i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if (arg && arg->in_reg) {
			reg_param_idxs[n_reg_params++] = i;
	/*
	 * If the stack is decreasing and we do not want to store sequentially,
	 * or someone else allocated the call frame,
	 * we allocate as much space on the stack as all parameters need, by
	 * moving the stack pointer along the stack's direction.
	 *
	 * Note: we also have to do this for stack_size == 0, because we may have
	 * to adjust stack alignment for the call.
	 */
	if (stack_dir < 0 && !do_seq && !no_alloc) {
		curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
	dbgi = get_irn_dbg_info(irn);
	/* If there are some parameters which shall be passed on the stack. */
	if (n_stack_params > 0) {
		ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
		/*
		 * Reverse the list of stack parameters if call arguments are from left to right.
		 * We must reverse them again if they are pushed (not stored) and the stack
		 * direction is downwards.
		 */
		if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
			for (i = 0; i < n_stack_params >> 1; ++i) {
				int other = n_stack_params - i - 1;
				int tmp   = stack_param_idx[i];
				stack_param_idx[i]     = stack_param_idx[other];
				stack_param_idx[other] = tmp;
		curr_mem = get_Call_mem(irn);
			in[n_in++] = curr_mem;

		for (i = 0; i < n_stack_params; ++i) {
			int p                  = stack_param_idx[i];
			be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
			ir_node *param         = get_Call_param(irn, p);
			ir_node *addr          = curr_sp;
			ir_type *param_type    = get_method_param_type(call_tp, p);
			int param_size         = get_type_size_bytes(param_type) + arg->space_after;
			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next argument must be incremented,
			 * and the memory value propagated.
			 */
				addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
				                              param_size + arg->space_before, 0);
				add_irn_dep(curr_sp, curr_mem);
				curr_ofs += arg->space_before;
				curr_ofs =  round_up2(curr_ofs, arg->alignment);

				/* Make the expression to compute the argument's offset. */
					ir_mode *constmode = mach_mode;
					if (mode_is_reference(mach_mode)) {
					addr = new_r_Const_long(irg, constmode, curr_ofs);
					addr = new_r_Add(bl, curr_sp, addr, mach_mode);

			/* Insert a store for primitive arguments. */
			if (is_atomic_type(param_type)) {
				ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
				store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
				mem   = new_r_Proj(bl, store, mode_M, pn_Store_M);

			/* Make a mem copy for compound arguments. */
				assert(mode_is_reference(get_irn_mode(param)));
				copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
				mem  = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);

			curr_ofs += param_size;
		/* We need the Sync only if we didn't build the stores sequentially. */
			if (n_stack_params >= 1) {
				curr_mem = new_r_Sync(bl, n_in, in);
			curr_mem = get_Call_mem(irn);
	/* check for the return_twice property */
	destroy_all_regs = 0;
	if (is_SymConst_addr_ent(call_ptr)) {
		ir_entity *ent = get_SymConst_entity(call_ptr);
		if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
			destroy_all_regs = 1;
		ir_type *call_tp = get_Call_type(irn);
		if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
			destroy_all_regs = 1;

	/* Put caller-save registers into the destroyed set and state registers into the states set */
	for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
		const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);

			if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
				if (! arch_register_type_is(reg, ignore))
					pset_new_insert(&destroyed_regs, (void *) reg);
			if (arch_register_type_is(reg, state)) {
				pset_new_insert(&destroyed_regs, (void*) reg);
				pset_new_insert(&states, (void*) reg);

	if (destroy_all_regs) {
		/* even if "destroy all" is specified, neither SP nor FP are destroyed (else bad things will happen) */
		pset_new_remove(&destroyed_regs, arch_env->sp);
		pset_new_remove(&destroyed_regs, arch_env->bp);
	/* search the largest result proj number */
	res_projs = ALLOCANZ(ir_node*, n_res);

	foreach_out_edge(irn, edge) {
		const ir_edge_t *res_edge;
		ir_node *irn = get_edge_src_irn(edge);

		if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)

		foreach_out_edge(irn, res_edge) {
			ir_node *res = get_edge_src_irn(res_edge);

			assert(is_Proj(res));

			proj = get_Proj_proj(res);
			assert(proj < n_res);
			assert(res_projs[proj] == NULL);
			res_projs[proj] = res;

	/* TODO: this is not correct for cases where return values are passed
	 * on the stack, but no known ABI does this currently...
	 */
	n_reg_results = n_res;

	assert(obstack_object_size(obst) == 0);
	in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));

	/* make the back end call node and set its register requirements. */
	for (i = 0; i < n_reg_params; ++i) {
		in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);

	/* add state registers ins */
	foreach_pset_new(&states, reg, iter) {
		const arch_register_class_t *cls = arch_register_get_class(reg);
		ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
		ir_fprintf(stderr, "Adding %+F\n", regnode);
		ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
		in[n_ins++] = regnode;
	assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
	/* ins collected, build the call */
	if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
		low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
		                       n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
		                       n_ins, in, get_Call_type(irn));
		be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
		low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
		                       n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
		                       n_ins, in, get_Call_type(irn));
	be_Call_set_pop(low_call, call->pop);

	/* put the call into the list of all calls for later processing */
	ARR_APP1(ir_node *, env->calls, low_call);

	/* create new stack pointer */
	curr_sp = new_r_Proj(bl, low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
	be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
			arch_register_req_type_ignore | arch_register_req_type_produces_sp);
	arch_set_irn_register(curr_sp, sp);
	/* now handle results */
	for (i = 0; i < n_res; ++i) {
		ir_node           *proj = res_projs[i];
		be_abi_call_arg_t *arg  = get_call_arg(call, 1, i);

		/* return values on the stack are not supported yet */

		/*
		 * Shift the proj number to the right, since we will drop the
		 * unspeakable Proj_T from the Call. Therefore, all real argument
		 * Proj numbers must be increased by pn_be_Call_first_res.
		 */
		pn = i + pn_be_Call_first_res;

			ir_type *res_type = get_method_res_type(call_tp, i);
			ir_mode *mode     = get_type_mode(res_type);
			proj = new_r_Proj(bl, low_call, mode, pn);
			set_Proj_pred(proj, low_call);
			set_Proj_proj(proj, pn);

			pset_new_remove(&destroyed_regs, arg->reg);

	/*
	 * Set the register class of the call address to
	 * the backend provided class (default: stack pointer class)
	 */
	be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);

	DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
	/* Set the register classes and constraints of the Call parameters. */
	for (i = 0; i < n_reg_params; ++i) {
		int index = reg_param_idxs[i];
		be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
		assert(arg->reg != NULL);
		be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,

	/* Set the register constraints of the results. */
	for (i = 0; i < n_res; ++i) {
		ir_node                 *proj = res_projs[i];
		const be_abi_call_arg_t *arg  = get_call_arg(call, 1, i);
		int                      pn   = get_Proj_proj(proj);

		be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
		arch_set_irn_register(proj, arg->reg);
	exchange(irn, low_call);

	/* kill the ProjT node */
	if (res_proj != NULL) {
	/* Make additional projs for the caller save registers
	 * and the Keep node which keeps them alive. */
		const arch_register_t *reg;
		int curr_res_proj = pn_be_Call_first_res + n_reg_results;
		pset_new_iterator_t iter;

		n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
		in    = ALLOCAN(ir_node *, n_ins);

		/* also keep the stack pointer */
		set_irn_link(curr_sp, (void*) sp);

		foreach_pset_new(&destroyed_regs, reg, iter) {
			ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj);

			/* Memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
			be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
			arch_set_irn_register(proj, reg);

			set_irn_link(proj, (void*) reg);
		for (i = 0; i < n_reg_results; ++i) {
			ir_node *proj = res_projs[i];
			const arch_register_t *reg = arch_get_irn_register(proj);
			set_irn_link(proj, (void*) reg);

		/* create the Keep for the caller save registers */
		keep = be_new_Keep(bl, n, in);
		for (i = 0; i < n; ++i) {
			const arch_register_t *reg = get_irn_link(in[i]);
			be_node_set_reg_class_in(keep, i, reg->reg_class);
	/* Clean up the stack. */
	assert(stack_size >= call->pop);
	stack_size -= call->pop;

	if (stack_size > 0) {
		ir_node *mem_proj = NULL;

		foreach_out_edge(low_call, edge) {
			ir_node *irn = get_edge_src_irn(edge);
			if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {

			mem_proj = new_r_Proj(bl, low_call, mode_M, pn_be_Call_M_regular);
			keep_alive(mem_proj);

		/* Clean up the stack frame or revert alignment fixes if we allocated it */
			curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);

	be_abi_call_free(call);

	pset_new_destroy(&states);
	pset_new_destroy(&destroyed_regs);
/**
 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
 *
 * @param stack_alignment  the minimum stack alignment
 * @param size             the node containing the non-aligned size
 * @param block            the block in which new nodes are allocated
 * @param dbg              debug info for new nodes
 *
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
                                  ir_node *block, dbg_info *dbg)
	if (stack_alignment > 1) {
		assert(is_po2(stack_alignment));

		mode = get_irn_mode(size);
		tv   = new_tarval_from_long(stack_alignment-1, mode);
		irg  = get_Block_irg(block);
		mask = new_r_Const(irg, tv);
		size = new_rd_Add(dbg, block, size, mask, mode);

		tv   = new_tarval_from_long(-(long)stack_alignment, mode);
		mask = new_r_Const(irg, tv);
		size = new_rd_And(dbg, block, size, mask, mode);
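/*
 * Worked example of the rounding above: with stack_alignment == 16 and a
 * requested size of 20, the Add produces 20 + 15 = 35 and the And with
 * ~(16 - 1) clears the low four bits, yielding 32 -- the smallest multiple
 * of 16 that is >= 20. This is the usual (size + align - 1) & ~(align - 1)
 * round-up idiom for power-of-two alignments.
 */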
/**
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
	const ir_edge_t *edge;
	ir_node *new_alloc, *size, *addr, *ins[2];
	unsigned stack_alignment;

	assert(get_Alloc_where(alloc) == stack_alloc);

	block = get_nodes_block(alloc);
	irg   = get_Block_irg(block);
	type  = get_Alloc_type(alloc);

	foreach_out_edge(alloc, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		assert(is_Proj(irn));
		switch (get_Proj_proj(irn)) {
	/* Beware: currently Alloc nodes without a result can occur;
	   only escape analysis removes them, and this phase runs only for
	   object-oriented source. We kill the Alloc here. */
	if (alloc_res == NULL && alloc_mem) {
		exchange(alloc_mem, get_Alloc_mem(alloc));
	dbg  = get_irn_dbg_info(alloc);
	size = get_Alloc_size(alloc);

	/* we might need to multiply the size with the element size */
	if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
		ir_mode *mode  = get_irn_mode(size);
		tarval  *tv    = new_tarval_from_long(get_type_size_bytes(type),
		ir_node *cnst  = new_rd_Const(dbg, irg, tv);
		size = new_rd_Mul(dbg, block, size, cnst, mode);

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit it. */
	env->call->flags.bits.try_omit_fp = 0;

	stack_alignment = 1 << env->arch_env->stack_alignment;
	size            = adjust_alloc_size(stack_alignment, size, block, dbg);
	new_alloc       = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
	set_irn_dbg_info(new_alloc, dbg);

	if (alloc_mem != NULL) {
		addsp_mem = new_r_Proj(block, new_alloc, mode_M, pn_be_AddSP_M);

		/* We need to sync the output mem of the AddSP with the input mem
		   edge into the alloc node. */
		ins[0] = get_Alloc_mem(alloc);
		sync = new_r_Sync(block, 2, ins);

		exchange(alloc_mem, sync);

	exchange(alloc, new_alloc);

	/* fix projnum of alloca res */
	set_Proj_proj(alloc_res, pn_be_AddSP_res);

	curr_sp = new_r_Proj(block, new_alloc, get_irn_mode(curr_sp),
/**
 * The Free is transformed into a back end free node and connected to the stack nodes.
 */
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
	ir_node *subsp, *mem, *res, *size, *sync;
	unsigned stack_alignment;

	assert(get_Free_where(free) == stack_alloc);

	block   = get_nodes_block(free);
	irg     = get_irn_irg(block);
	type    = get_Free_type(free);
	sp_mode = env->arch_env->sp->reg_class->mode;
	dbg     = get_irn_dbg_info(free);

	/* we might need to multiply the size with the element size */
	if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
		tarval  *tv   = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
		ir_node *cnst = new_rd_Const(dbg, irg, tv);
		ir_node *mul  = new_rd_Mul(dbg, block, get_Free_size(free),
		size = get_Free_size(free);

	stack_alignment = 1 << env->arch_env->stack_alignment;
	size            = adjust_alloc_size(stack_alignment, size, block, dbg);

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit it. */
	env->call->flags.bits.try_omit_fp = 0;
	subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
	set_irn_dbg_info(subsp, dbg);

	mem = new_r_Proj(block, subsp, mode_M, pn_be_SubSP_M);
	res = new_r_Proj(block, subsp, sp_mode, pn_be_SubSP_sp);

	/* we need to sync the memory */
	in[0] = get_Free_mem(free);
	sync = new_r_Sync(block, 2, in);
	/* and make the SubSP dependent on the former memory */
	add_irn_dep(subsp, get_Free_mem(free));

	exchange(free, sync);
/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 *
 * @param n1  The first node.
 * @param n2  The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(ir_node *n1, ir_node *n2)
	assert(get_nodes_block(n1) == get_nodes_block(n2));

	return heights_reachable_in_block(ir_heights, n1, n2);
static int cmp_call_dependency(const void *c1, const void *c2)
	ir_node *n1 = *(ir_node **) c1;
	ir_node *n2 = *(ir_node **) c2;

	/*
	 * Classical qsort() comparison function behavior:
	 *  0  if both elements are equal
	 *  1  if second is "smaller" than first
	 * -1  if first is "smaller" than second
	 */
	if (dependent_on(n1, n2))
	if (dependent_on(n2, n1))

	/* The nodes have no depth order, but we need a total order because qsort()
	 * is not stable. */
	return get_irn_idx(n1) - get_irn_idx(n2);
/**
 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
 * Clears the irg_is_leaf flag if a Call is detected.
 */
static void link_ops_in_block_walker(ir_node *irn, void *data)
	be_abi_irg_t *env = data;
	ir_opcode     code = get_irn_opcode(irn);

	if (code == iro_Call ||
	   (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
	   (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
		ir_node *bl   = get_nodes_block(irn);
		void    *save = get_irn_link(bl);

		if (code == iro_Call)
			env->call->flags.bits.irg_is_leaf = 0;

		set_irn_link(irn, save);
		set_irn_link(bl, irn);

	if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
		ir_node       *param = get_Builtin_param(irn, 0);
		tarval        *tv    = get_Const_tarval(param);
		unsigned long  value = get_tarval_long(tv);
		/* use ebp, so the climbframe algo works... */
			env->call->flags.bits.try_omit_fp = 0;
/**
 * Process all Call/Alloc/Free nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the block. We first order this list according to data
 * dependency and then connect the calls together.
 */
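/*
 * Illustrative sketch of the result for one block: the stack pointer is
 * threaded through the lowered nodes, e.g. for two stack-modifying nodes
 * a and b ordered by data dependency:
 *
 *     curr_sp = env->init_sp;
 *     curr_sp = adjust_call(env, a, curr_sp);   // or adjust_alloc/adjust_free
 *     curr_sp = adjust_call(env, b, curr_sp);
 *
 * The final curr_sp is kept alive by a be_Keep unless it is already a Proj
 * of a be_Call (see the end of process_ops_in_block() below).
 */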
static void process_ops_in_block(ir_node *bl, void *data)
	be_abi_irg_t *env     = data;
	ir_node      *curr_sp = env->init_sp;

	for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {

	nodes = ALLOCAN(ir_node*, n_nodes);
	for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {

	/* If there were call nodes in the block. */

		/* order the call nodes according to data dependency */
		qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);

		for (i = n_nodes - 1; i >= 0; --i) {
			ir_node *irn = nodes[i];

			DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
			switch (get_irn_opcode(irn)) {
				/* The stack pointer will be modified due to a call. */
				env->call->flags.bits.try_omit_fp = 0;

				curr_sp = adjust_call(env, irn, curr_sp);
				if (get_Alloc_where(irn) == stack_alloc)
					curr_sp = adjust_alloc(env, irn, curr_sp);
				if (get_Free_where(irn) == stack_alloc)
					curr_sp = adjust_free(env, irn, curr_sp);
				panic("invalid call");

		/* Keep the last stack state in the block by tying it to a Keep node;
		 * the proj from calls is already kept */
		if (curr_sp != env->init_sp &&
		    !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
			keep = be_new_Keep(bl, 1, nodes);
			pmap_insert(env->keep_map, bl, keep);

	set_irn_link(bl, curr_sp);
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;

	env->call->flags.bits.irg_is_leaf = 1;
	irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);

	ir_heights = heights_new(env->birg->irg);
	irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
	heights_free(ir_heights);
/**
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param env           the ABI environment
 * @param call          the current call ABI
 * @param method_type   the method type
 * @param val_param_tp  the value parameter type, will be destroyed
 * @param param_map     an array mapping method arguments to the stack layout type
 *
 * @return the stack argument layout type
 */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
                                 ir_type *method_type, ir_type *val_param_tp,
                                 ir_entity ***param_map)
	int dir  = env->call->flags.bits.left_to_right ? 1 : -1;
	int inc  = env->birg->main_env->arch_env->stack_dir * dir;
	int n    = get_method_n_params(method_type);
	int curr = inc > 0 ? 0 : n - 1;
	struct obstack *obst = be_get_birg_obst(env->irg);
	ident *id = get_entity_ident(get_irg_entity(env->birg->irg));

	*param_map = map = OALLOCN(obst, ir_entity*, n);
	res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
	for (i = 0; i < n; ++i, curr += inc) {
		ir_type *param_type    = get_method_param_type(method_type, curr);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);

		if (arg->on_stack) {
			if (val_param_tp != NULL) {
				/* the entity was already created, create a copy in the param type */
				ir_entity *val_ent = get_method_value_param_ent(method_type, i);
				arg->stack_ent = copy_entity_own(val_ent, res);
				set_entity_link(val_ent, arg->stack_ent);
				set_entity_link(arg->stack_ent, NULL);
				/* must be automatic to set a fixed layout */
				set_entity_allocation(arg->stack_ent, allocation_automatic);
				/* create a new entity */
				snprintf(buf, sizeof(buf), "param_%d", i);
				arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
			ofs += arg->space_before;
			ofs = round_up2(ofs, arg->alignment);
			set_entity_offset(arg->stack_ent, ofs);
			ofs += arg->space_after;
			ofs += get_type_size_bytes(param_type);
			map[i] = arg->stack_ent;

	set_type_size_bytes(res, ofs);
	set_type_state(res, layout_fixed);
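/*
 * Worked example (sketch): for two stack parameters with 4-byte alignment,
 * sizes 4 and 8, and no space_before/space_after, the loop above places
 * param_0 at offset 0 and param_1 at offset 4, and the resulting arg_type
 * gets size 12 with a fixed layout.
 */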
	const arch_register_t *reg;

static int cmp_regs(const void *a, const void *b)
	const reg_node_map_t *p = a;
	const reg_node_map_t *q = b;

	if (p->reg->reg_class == q->reg->reg_class)
		return p->reg->index - q->reg->index;
	return p->reg->reg_class - q->reg->reg_class;

static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
	int n = pmap_count(reg_map);

	foreach_pmap(reg_map, ent) {
		res[i].reg = ent->key;
		res[i].irn = ent->value;

	qsort(res, n, sizeof(res[0]), cmp_regs);
/**
 * Creates a barrier.
 */
static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
	int n_regs = pmap_count(regs);

	in = ALLOCAN(ir_node*, n_regs+1);
	rm = ALLOCAN(reg_node_map_t, n_regs);
	reg_map_to_arr(rm, regs);
	for (n = 0; n < n_regs; ++n) {

	irn = be_new_Barrier(bl, n, in);

	for (n = 0; n < n_regs; ++n) {
		ir_node               *pred     = rm[n].irn;
		const arch_register_t *reg      = rm[n].reg;
		arch_register_type_t   add_type = 0;
		const backend_info_t  *info;

		/* stupid workaround for now... as not all nodes report register
		 * requirements. */
		info = be_get_info(skip_Proj(pred));
		if (info != NULL && info->out_infos != NULL) {
			const arch_register_req_t *ireq = arch_get_register_req_out(pred);
			if (ireq->type & arch_register_req_type_ignore)
				add_type |= arch_register_req_type_ignore;
			if (ireq->type & arch_register_req_type_produces_sp)
				add_type |= arch_register_req_type_produces_sp;

		proj = new_r_Proj(bl, irn, get_irn_mode(pred), n);
		be_node_set_reg_class_in(irn, n, reg->reg_class);
		be_set_constr_single_reg_in(irn, n, reg, 0);
		be_set_constr_single_reg_out(irn, n, reg, add_type);
		arch_set_irn_register(proj, reg);

		pmap_insert(regs, (void *) reg, proj);

	*mem = new_r_Proj(bl, irn, mode_M, n);
/**
 * Creates a be_Return for a Return node.
 *
 * @param env    the abi environment
 * @param irn    the Return node or NULL if there was none
 * @param bl     the block where the be_Return should be placed
 * @param mem    the current memory
 * @param n_res  number of return results
 */
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
                                 ir_node *mem, int n_res)
	be_abi_call_t    *call     = env->call;
	const arch_env_t *arch_env = env->birg->main_env->arch_env;
	pmap             *reg_map  = pmap_create();
	ir_node          *keep     = pmap_get(env->keep_map, bl);
	const arch_register_t **regs;

	/*
	 * Get the valid stack node in this block.
	 * If we had a call in that block there is a Keep constructed by
	 * process_calls() which points to the last stack modification in that
	 * block; we'll use it then. Otherwise we use the stack from the start
	 * block and let the SSA construction fix the usage.
	 */
	stack = be_abi_reg_map_get(env->regs, arch_env->sp);
		stack = get_irn_n(keep, 0);
		remove_End_keepalive(get_irg_end(env->birg->irg), keep);
	/* Insert results for Return into the register map. */
	for (i = 0; i < n_res; ++i) {
		ir_node *res           = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
		assert(arg->in_reg && "return value must be passed in register");
		pmap_insert(reg_map, (void *) arg->reg, res);

	/* Add uses of the callee save registers. */
	foreach_pmap(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			pmap_insert(reg_map, ent->key, ent->value);

	be_abi_reg_map_set(reg_map, arch_env->sp, stack);

	/* Make the Epilogue node and call the arch's epilogue maker. */
	create_barrier(bl, &mem, reg_map, 1);
	call->cb->epilogue(env->cb, bl, &mem, reg_map);

	/*
	 * Maximum size of the in array for Return nodes is
	 * return args + callee save/ignore registers + memory + stack pointer
	 */
	in_max = pmap_count(reg_map) + n_res + 2;

	in   = ALLOCAN(ir_node*, in_max);
	regs = ALLOCAN(arch_register_t const*, in_max);

	in[1]   = be_abi_reg_map_get(reg_map, arch_env->sp);
	regs[1] = arch_env->sp;
	/* clear SP entry, since it has already been grown. */
	pmap_insert(reg_map, (void *) arch_env->sp, NULL);
	for (i = 0; i < n_res; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i);

		in[n]     = be_abi_reg_map_get(reg_map, arg->reg);
		regs[n++] = arg->reg;

		/* Clear the map entry to mark the register as processed. */
		be_abi_reg_map_set(reg_map, arg->reg, NULL);

	/* grow the rest of the stuff. */
	foreach_pmap(reg_map, ent) {
		regs[n++] = ent->key;

	/* The in array for the new back end return is now ready. */
	dbgi = get_irn_dbg_info(irn);
	/* we have to pop the shadow parameter in case of struct returns */
	ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);

	/* Set the register classes of the return's parameters accordingly. */
	for (i = 0; i < n; ++i) {
		if (regs[i] == NULL)

		be_node_set_reg_class_in(ret, i, regs[i]->reg_class);

	/* Free the space of the Epilog's in array and the register <-> proj map. */
	pmap_destroy(reg_map);
typedef struct ent_pos_pair ent_pos_pair;
struct ent_pos_pair {
	ir_entity    *ent;   /**< a value param entity */
	int           pos;   /**< its parameter number */
	ent_pos_pair *next;  /**< for linking */

typedef struct lower_frame_sels_env_t {
	ent_pos_pair *value_param_list;           /**< the list of all value param entities */
	ir_node      *frame;                      /**< the current frame */
	const arch_register_class_t *sp_class;    /**< register class of the stack pointer */
	const arch_register_class_t *link_class;  /**< register class of the link pointer */
	ir_type      *value_tp;                   /**< the value type if any */
	ir_type      *frame_tp;                   /**< the frame type */
	int           static_link_pos;            /**< argument number of the hidden static link */
} lower_frame_sels_env_t;
/**
 * Return an entity from the backend for a value param entity.
 *
 * @param ent  a value param type entity
 * @param ctx  context
 */
static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
	ir_entity *argument_ent = get_entity_link(ent);

	if (argument_ent == NULL) {
		/* we have NO argument entity yet; this is bad, as we will
		 * need one for the backing store. */
		ir_type *frame_tp = ctx->frame_tp;
		unsigned offset   = get_type_size_bytes(frame_tp);
		ir_type  *tp      = get_entity_type(ent);
		unsigned align    = get_type_alignment_bytes(tp);

		offset += align - 1;
		offset &= ~(align - 1);

		argument_ent = copy_entity_own(ent, frame_tp);

		/* must be automatic to set a fixed layout */
		set_entity_allocation(argument_ent, allocation_automatic);
		set_entity_offset(argument_ent, offset);
		offset += get_type_size_bytes(tp);

		set_type_size_bytes(frame_tp, offset);
		set_entity_link(ent, argument_ent);

	return argument_ent;
/**
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddress.
 * Links all used entities.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
	lower_frame_sels_env_t *ctx = data;

		ir_node *ptr = get_Sel_ptr(irn);

		if (ptr == ctx->frame) {
			ir_entity *ent = get_Sel_entity(irn);
			ir_node   *bl  = get_nodes_block(irn);
			int is_value_param = 0;

			if (get_entity_owner(ent) == ctx->value_tp) {

				/* replace by its copy from the argument type */
				pos = get_struct_member_index(ctx->value_tp, ent);
				ent = get_argument_entity(ent, ctx);

			nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);

			/* check if it's a param Sel and if we have not seen this entity before */
			if (is_value_param && get_entity_link(ent) == NULL) {

				ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);

				set_entity_link(ent, ctx->value_param_list);
/**
 * Check if a value parameter is transmitted as a register.
 * This might happen if the address of a parameter is taken which is
 * transmitted in registers.
 *
 * Note that on some architectures this case must be handled specially
 * because the place of the backing store is determined by their ABI.
 *
 * In the default case we move the entity to the frame type and create
 * a backing store into the first block.
 */
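/*
 * Illustrative example of source code that triggers this case: a parameter
 * passed in a register whose address is taken needs a stack slot, so a
 * backing store is placed in the first block:
 *
 *     int f(int x)          // x transmitted in a register
 *     {
 *         int *p = &x;      // address taken: x needs a backing store
 *         return *p;
 *     }
 */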
static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
	be_abi_call_t *call = env->call;
	ir_graph      *irg  = env->birg->irg;
	ent_pos_pair  *entry, *new_list;
	int i, n = ARR_LEN(value_param_list);

	for (i = 0; i < n; ++i) {
		int                pos = value_param_list[i].pos;
		be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);

			DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
			value_param_list[i].next = new_list;
			new_list = &value_param_list[i];

	if (new_list != NULL) {
		/* ok, change the graph */
		ir_node *start_bl = get_irg_start_block(irg);
		ir_node *first_bl = NULL;
		ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
		const ir_edge_t *edge;
		optimization_state_t state;

		foreach_block_succ(start_bl, edge) {
			first_bl = get_edge_src_irn(edge);

		assert(first_bl && first_bl != start_bl);
		/* we had already removed critical edges, so the following
		   assertion should always be true. */
		assert(get_Block_n_cfgpreds(first_bl) == 1);

		/* now create backing stores */
		frame = get_irg_frame(irg);
		imem  = get_irg_initial_mem(irg);

		save_optimization_state(&state);
		nmem = new_r_Proj(start_bl, get_irg_start(irg), mode_M, pn_Start_M);
		restore_optimization_state(&state);
		/* reroute all edges to the new memory source */
		edges_reroute(imem, nmem, irg);

		args    = get_irg_args(irg);
		args_bl = get_nodes_block(args);
		for (entry = new_list; entry != NULL; entry = entry->next) {
			ir_type *tp   = get_entity_type(entry->ent);
			ir_mode *mode = get_type_mode(tp);

			/* address for the backing store */
			addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);

				mem = new_r_Proj(first_bl, store, mode_M, pn_Store_M);

			/* the backing store itself */
			store = new_r_Store(first_bl, mem, addr,
			                    new_r_Proj(args_bl, args, mode, i), 0);

		/* the new memory Proj gets the last Proj from store */
		set_Proj_pred(nmem, store);
		set_Proj_proj(nmem, pn_Store_M);

		/* move all entities to the frame type */
		frame_tp = get_irg_frame_type(irg);
		offset   = get_type_size_bytes(frame_tp);

		/* we will add new entities: set the layout to undefined */
		assert(get_type_state(frame_tp) == layout_fixed);
		set_type_state(frame_tp, layout_undefined);
		for (entry = new_list; entry != NULL; entry = entry->next) {
			ir_entity *ent = entry->ent;
			/* If the entity is still on the argument type, move it to the frame type.
			   This happens if the value_param type was built due to compound
			   params. */
			if (get_entity_owner(ent) != frame_tp) {
				ir_type  *tp    = get_entity_type(ent);
				unsigned  align = get_type_alignment_bytes(tp);

				offset += align - 1;
				offset &= ~(align - 1);
				set_entity_owner(ent, frame_tp);
				add_class_member(frame_tp, ent);
				/* must be automatic to set a fixed layout */
				set_entity_allocation(ent, allocation_automatic);
				set_entity_offset(ent, offset);
				offset += get_type_size_bytes(tp);

		set_type_size_bytes(frame_tp, offset);
		/* fix the layout again */
		set_type_state(frame_tp, layout_fixed);
/**
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
 */
static void fix_start_block(ir_graph *irg)
	ir_node *initial_X   = get_irg_initial_exec(irg);
	ir_node *start_block = get_irg_start_block(irg);
	const ir_edge_t *edge;

	assert(is_Proj(initial_X));

	foreach_out_edge(initial_X, edge) {
		ir_node *block = get_edge_src_irn(edge);

		if (is_Anchor(block))
		if (block != start_block) {
			ir_node *jmp = new_r_Jmp(start_block);
			set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
			set_irg_initial_exec(irg, jmp);

	panic("Initial exec has no follow block in %+F", irg);
/**
 * Update the entity of Sels to the outer value parameters.
 */
static void update_outer_frame_sels(ir_node *irn, void *env) {
	lower_frame_sels_env_t *ctx = env;

	ptr = get_Sel_ptr(irn);
	if (! is_arg_Proj(ptr))
	if (get_Proj_proj(ptr) != ctx->static_link_pos)
	ent = get_Sel_entity(irn);

	if (get_entity_owner(ent) == ctx->value_tp) {

		/* replace by its copy from the argument type */
		pos = get_struct_member_index(ctx->value_tp, ent);
		ent = get_argument_entity(ent, ctx);
		set_Sel_entity(irn, ent);

		/* check if we have not seen this entity before */
		if (get_entity_link(ent) == NULL) {

			ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);

			set_entity_link(ent, ctx->value_param_list);
/**
 * Fix access to outer local variables.
 */
static void fix_outer_variable_access(be_abi_irg_t *env,
                                      lower_frame_sels_env_t *ctx)
	for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
		ir_entity *ent = get_class_member(ctx->frame_tp, i);

		if (! is_method_entity(ent))
		if (get_entity_peculiarity(ent) == peculiarity_description)

		/*
		 * FIXME: find the number of the static link parameter;
		 * for now we assume 0 here
		 */
		ctx->static_link_pos = 0;

		irg = get_entity_irg(ent);
		irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
/**
 * Modify the irg itself and the frame type.
 */
static void modify_irg(be_abi_irg_t *env)
	be_abi_call_t *call         = env->call;
	const arch_env_t *arch_env  = env->birg->main_env->arch_env;
	const arch_register_t *sp   = arch_env->sp;
	ir_graph *irg               = env->birg->irg;
	ir_node *new_mem_proj;
	ir_type *method_type        = get_entity_type(get_irg_entity(irg));
	struct obstack *obst        = be_get_birg_obst(irg);
	unsigned frame_size;
	const arch_register_t *fp_reg;
	ir_node *frame_pointer;
	const ir_edge_t *edge;
	ir_type *arg_type, *bet_type, *tp;
	lower_frame_sels_env_t ctx;
	ir_entity **param_map;

	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));

	/* Must fetch memory here, otherwise the start Barrier gets the wrong
	 * memory, which leads to loops in the DAG. */
	old_mem = get_irg_initial_mem(irg);

	irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
	/* set the links of all frame entities to NULL; we use them
	   to detect whether an entity is already linked into the value_param_list */
	tp = get_method_value_param_type(method_type);

		/* clear the links of the clone type, let the
		   original entities point to their clones */
		for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
			ir_entity *mem = get_struct_member(tp, i);
			set_entity_link(mem, NULL);
	arg_type = compute_arg_type(env, call, method_type, tp, &param_map);

	/* Convert the Sel nodes in the irg to frame addr nodes: */
	ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
	ctx.frame            = get_irg_frame(irg);
	ctx.sp_class         = env->arch_env->sp->reg_class;
	ctx.link_class       = env->arch_env->link_class;
	ctx.frame_tp         = get_irg_frame_type(irg);
	/* we will possibly add new entities to the frame: set the layout to undefined */
	assert(get_type_state(ctx.frame_tp) == layout_fixed);
	set_type_state(ctx.frame_tp, layout_undefined);

	irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);

	/* fix the frame type layout again */
	set_type_state(ctx.frame_tp, layout_fixed);
	/* align the stack frame to 4 bytes */
	frame_size = get_type_size_bytes(ctx.frame_tp);
	if (frame_size % 4 != 0) {
		set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
	env->regs = pmap_create();

	n_params = get_method_n_params(method_type);
	args     = OALLOCNZ(obst, ir_node*, n_params);

	/*
	 * For inner functions we must now fix the access to outer frame entities.
	 */
	fix_outer_variable_access(env, &ctx);

	/* Check if a value parameter is transmitted as a register.
	 * This might happen if the address of a parameter is taken which is
	 * transmitted in registers.
	 *
	 * Note that on some architectures this case must be handled specially
	 * because the place of the backing store is determined by their ABI.
	 *
	 * In the default case we move the entity to the frame type and create
	 * a backing store into the first block.
	 */
	fix_address_of_parameter_access(env, ctx.value_param_list);

	DEL_ARR_F(ctx.value_param_list);
	irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
	/* Fill the argument vector */
	arg_tuple = get_irg_args(irg);
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		if (! is_Anchor(irn)) {
			int nr = get_Proj_proj(irn);

			DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));

	bet_type = call->cb->get_between_type(env->cb);
	stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);

	/* Count the register params and add them to the number of Projs for the RegParams node */
	for (i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if (arg->in_reg && args[i]) {
			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
			assert(i == get_Proj_proj(args[i]));

			/* For now, associate the register with the old Proj from Start representing that argument. */
			pmap_insert(env->regs, (void *) arg->reg, args[i]);
			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
	/* Collect all callee-save registers */
	for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
		const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			if (arch_register_type_is(reg, callee_save) ||
					arch_register_type_is(reg, state)) {
				pmap_insert(env->regs, (void *) reg, NULL);

	/* handle start block here (place a jump in the block) */
	fix_start_block(irg);

	pmap_insert(env->regs, (void *) sp, NULL);
	pmap_insert(env->regs, (void *) arch_env->bp, NULL);
	start_bl   = get_irg_start_block(irg);
	env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
	/*
	 * Make Proj nodes for the callee-save registers.
	 * Memorize them, since Return nodes get those as inputs.
	 *
	 * Note that if a register corresponds to an argument, the regs map contains
	 * the old Proj from Start for that argument.
	 */
	rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
	reg_map_to_arr(rm, env->regs);
	for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
		arch_register_t          *reg      = (void *) rm[i].reg;
		ir_mode                  *mode     = reg->reg_class->mode;
		arch_register_req_type_t  add_type = 0;

			add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;

		proj = new_r_Proj(start_bl, env->start, mode, nr + 1);
		pmap_insert(env->regs, (void *) reg, proj);
		be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
		arch_set_irn_register(proj, reg);

		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
	/* create a new initial memory proj */
	assert(is_Proj(old_mem));
	arch_set_out_register_req(env->start, 0, arch_no_register_req);
	new_mem_proj = new_r_Proj(start_bl, env->start, mode_M, 0);

	set_irg_initial_mem(irg, mem);

	/* Generate the Prologue */
	fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);

	/* do the stack allocation BEFORE the barrier, or spill code
	   might be added before it */
	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
	be_abi_reg_map_set(env->regs, sp, env->init_sp);

	create_barrier(start_bl, &mem, env->regs, 0);

	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	arch_set_irn_register(env->init_sp, sp);

	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
	set_irg_frame(irg, frame_pointer);
	pset_insert_ptr(env->ignore_regs, fp_reg);

	/* rewire old mem users to new mem */
	exchange(old_mem, mem);

	/* keep the mem (for functions with an endless loop = no return) */

	set_irg_initial_mem(irg, mem);
	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for (i = 0; i < n_params; ++i) {
		ir_node *arg_proj = args[i];
		ir_node *repl     = NULL;

		if (arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			ir_type *param_type;
			int     nr = get_Proj_proj(arg_proj);

			nr         = MIN(nr, n_params);
			arg        = get_call_arg(call, 0, nr);
			param_type = get_method_param_type(method_type, nr);

				repl = pmap_get(env->regs, (void *) arg->reg);
			} else if (arg->on_stack) {
				ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);

				/* For atomic parameters which are actually used, we create a Load node. */
				if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
					ir_mode *mode      = get_type_mode(param_type);
					ir_mode *load_mode = arg->load_mode;

					ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
					repl = new_r_Proj(start_bl, load, load_mode, pn_Load_res);

					if (mode != load_mode) {
						repl = new_r_Conv(start_bl, repl, mode);
					/* The stack parameter is not primitive (it is a struct or array),
					 * so we will create a node representing the parameter's address. */

			assert(repl != NULL);

			/* Beware: the mode of the register parameters is always the mode of the register class,
			   which may be wrong. Add Convs then. */
			mode = get_irn_mode(args[i]);
			if (mode != get_irn_mode(repl)) {
				repl = new_r_Conv(get_nodes_block(repl), repl, mode);
			exchange(args[i], repl);

	/* the arg proj is no longer needed and should only be used by the anchor */
	assert(get_irn_n_edges(arg_tuple) == 1);
	kill_node(arg_tuple);
	set_irg_args(irg, new_r_Bad(irg));
	/* All Return nodes hang on the End node, so look for them there. */
	end = get_irg_end_block(irg);
	for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
		ir_node *irn = get_Block_cfgpred(end, i);

		if (is_Return(irn)) {
			ir_node *blk = get_nodes_block(irn);
			ir_node *mem = get_Return_mem(irn);
			ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
	/* If we have endless loops here, n might be <= 0. Do NOT create a be_Return then;
	   the code is dead and will never be executed. */
/**
 * Fix the state inputs of calls that still hang on unknowns.
 */
void fix_call_state_inputs(be_abi_irg_t *env)
	const arch_env_t *arch_env = env->arch_env;
	arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);

	/* Collect state registers */
	n = arch_env_get_n_reg_class(arch_env);
	for (i = 0; i < n; ++i) {
		const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if (arch_register_type_is(reg, state)) {
				ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);

	n        = ARR_LEN(env->calls);
	n_states = ARR_LEN(stateregs);
	for (i = 0; i < n; ++i) {
		ir_node *call = env->calls[i];

		arity = get_irn_arity(call);

		/* the state reg inputs are the last n inputs of the calls */
		for (s = 0; s < n_states; ++s) {
			int inp = arity - n_states + s;
			const arch_register_t *reg = stateregs[s];
			ir_node *regnode = be_abi_reg_map_get(env->regs, reg);

			set_irn_n(call, inp, regnode);

	DEL_ARR_F(stateregs);

/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
	ir_type   *type   = get_entity_type(method);
	ident     *old_id = get_entity_ld_ident(method);
	ident     *id     = id_mangle3("L", old_id, "$stub");
	ir_type   *parent = be->pic_trampolines_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, visibility_local);
	set_entity_variability(ent, variability_uninitialized);

	return ent;
}

/**
 * Returns the trampoline entity for the given method, creating and caching
 * it on first use.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
	ir_entity *result = pmap_get(env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
	}

	return result;
}
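/* Example (hypothetical entity with linker name "foo"): the first call to
 * get_trampoline() creates a stub entity whose linker name is mangled to
 * "Lfoo$stub" and stores it in ent_trampoline_map; later calls for the same
 * method return the cached entity. */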

/**
 * Create a non-lazy-pointer entity for the given entity.
 */
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *old_id = get_entity_ld_ident(entity);
	ident     *id     = id_mangle3("L", old_id, "$non_lazy_ptr");
	ir_type   *e_type = get_entity_type(entity);
	ir_type   *type   = new_type_pointer(id, e_type, mode_P_data);
	ir_type   *parent = be->pic_symbols_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, visibility_local);
	set_entity_variability(ent, variability_uninitialized);

	return ent;
}

/**
 * Returns the non-lazy-pointer entity for the given entity, creating and
 * caching it on first use.
 */
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
	ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
	}

	return result;
}
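/* Analogous example (hypothetical entity "bar"): get_pic_symbol() yields a
 * pointer-typed entity with linker name "Lbar$non_lazy_ptr" in the pic
 * symbols segment, again created once and cached. */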

/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
	return get_entity_visibility(entity) != visibility_external_allocated;
}

/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
	ir_graph      *irg;
	ir_node       *pic_base;
	ir_node       *add;
	ir_node       *block;
	ir_node       *unknown;
	ir_mode       *mode;
	ir_node       *load;
	ir_node       *load_res;
	be_abi_irg_t  *env = data;
	int            arity, i;
	be_main_env_t *be  = env->birg->main_env;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		dbg_info  *dbgi;
		ir_node   *pred = get_irn_n(node, i);
		ir_entity *entity;
		ir_entity *pic_symbol;
		ir_node   *pic_symconst;

		if (!is_SymConst(pred))
			continue;

		entity = get_SymConst_entity(pred);
		block  = get_nodes_block(pred);
		irg    = get_irn_irg(pred);

		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node   *trampoline_const;

			if (can_address_relative(entity))
				continue;

			dbgi             = get_irn_dbg_info(pred);
			trampoline       = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			                                            trampoline, NULL);
			set_irn_n(node, i, trampoline_const);
			continue;
		}

		/* everything else is accessed relative to EIP */
		mode     = get_irn_mode(pred);
		unknown  = new_r_Unknown(irg, mode);
		pic_base = arch_code_generator_get_pic_base(env->birg->cg);

		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			ir_node *add = new_r_Add(block, pic_base, pred, mode);

			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
			continue;
		}

		/* get entry from pic symbol segment */
		dbgi         = get_irn_dbg_info(pred);
		pic_symbol   = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		                                        pic_symbol, NULL);
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);

		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
		load_res = new_r_Proj(block, load, mode, pn_Load_res);

		set_irn_n(node, i, load_res);
	}
}
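/* Summary of the rewrites above for a SymConst referencing entity e
 * (a sketch; the exact node shapes are produced by the code above):
 *
 *   call target, e addressable relatively:  left untouched
 *   call target, e external:                SymConst(Le$stub)
 *   data access, e addressable relatively:  Add(pic_base, SymConst(e))
 *   data access, e external:                Load(Add(pic_base, SymConst(Le$non_lazy_ptr)))
 */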

be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
	be_abi_irg_t   *env       = XMALLOC(be_abi_irg_t);
	ir_node        *old_frame = get_irg_frame(birg->irg);
	ir_graph       *irg       = birg->irg;
	struct obstack *obst      = be_get_birg_obst(irg);

	pmap_entry *ent;
	ir_node *dummy;
	optimization_state_t state;
	unsigned *limited_bitset;
	arch_register_req_t *sp_req;

	be_omit_fp      = birg->main_env->options->omit_fp;
	be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;

	env->birg        = birg;
	env->arch_env    = birg->main_env->arch_env;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call        = be_abi_call_new(env->arch_env->sp->reg_class);
	arch_env_get_call_abi(env->arch_env, env->method_type, env->call);

	env->ignore_regs  = pset_new_ptr_default();
	env->keep_map     = pmap_create();
	env->dce_survivor = new_survive_dce();

	sp_req      = OALLOCZ(obst, arch_register_req_t);
	env->sp_req = sp_req;

	sp_req->type = arch_register_req_type_limited
	             | arch_register_req_type_produces_sp;
	sp_req->cls  = arch_register_get_class(env->arch_env->sp);

	limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
	rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
	sp_req->limited = limited_bitset;
	if (env->arch_env->sp->type & arch_register_type_ignore) {
		sp_req->type |= arch_register_req_type_ignore;
	}

	/* Beware: later we replace this node by the real one, ensure it is not
	   CSE'd to another Unknown or the stack pointer gets used */
	save_optimization_state(&state);
	set_optimize(0);
	env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
	restore_optimization_state(&state);

	env->calls = NEW_ARR_F(ir_node*, 0);

	if (birg->main_env->options->pic) {
		irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
	}

	/* Lower all call nodes in the IRG. */
	process_calls(env);

	/*
	   Beware: init backend abi call object after processing calls,
	   otherwise some information might not be available yet.
	*/
	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);

	/* Process the IRG */
	modify_irg(env);

	/* fix call inputs for state registers */
	fix_call_state_inputs(env);

	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	env->keep_map = NULL;

	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);
	env->calls = NULL;

	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));

	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	foreach_pmap(env->regs, ent) {
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	}

	env->call->cb->done(env->cb);
	env->cb = NULL;
	return env;
}
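/* Rough usage sketch (an assumption about the backend driver; the phases
 * between the calls, such as instruction selection, scheduling and register
 * allocation, are omitted):
 *
 *     be_abi_irg_t *abi = be_abi_introduce(birg);  // lower calls/params/returns
 *     ...
 *     be_abi_fix_stack_nodes(abi);                 // rebuild SSA form of the sp
 *     ...
 *     be_abi_fix_stack_bias(abi);                  // assign final frame offsets
 *     ...
 *     be_abi_free(abi);
 */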

void be_abi_free(be_abi_irg_t *env)
{
	be_abi_call_free(env->call);
	free_survive_dce(env->dce_survivor);
	del_pset(env->ignore_regs);
	pmap_destroy(env->regs);
	free(env);
}

void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
	arch_register_t *reg;

	for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
		if (reg->reg_class == cls)
			bitset_set(bs, reg->index);
}

void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
	unsigned         i;
	arch_register_t *reg;

	for (i = 0; i < cls->n_regs; ++i) {
		if (arch_register_type_is(&cls->regs[i], ignore))
			continue;

		rbitset_set(raw_bitset, i);
	}

	for (reg = pset_first(abi->ignore_regs); reg != NULL;
	     reg = pset_next(abi->ignore_regs)) {
		if (reg->reg_class != cls)
			continue;

		rbitset_clear(raw_bitset, reg->index);
	}
}

/* Returns the stack layout from an abi environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
{
	return &abi->frame;
}

/*
  _____ _        ____  _             _
 |  ___(_)_  __ / ___|| |_ __ _  ___| | __
 | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
 |  _| | |>  <   ___) | || (_| | (__|   <
 |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\

*/

typedef ir_node **node_array;

typedef struct fix_stack_walker_env_t {
	node_array sp_nodes;
} fix_stack_walker_env_t;

/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
	ir_node                   *insn = node;
	fix_stack_walker_env_t    *env  = data;
	const arch_register_req_t *req;

	if (is_Proj(node)) {
		insn = get_Proj_pred(node);
	}

	if (arch_irn_get_n_outs(insn) == 0)
		return;

	req = arch_get_register_req_out(node);
	if (! (req->type & arch_register_req_type_produces_sp))
		return;

	ARR_APP1(ir_node*, env->sp_nodes, node);
}

void be_abi_fix_stack_nodes(be_abi_irg_t *env)
{
	be_ssa_construction_env_t senv;
	int i, len;
	ir_node **phis;
	be_irg_t *birg = env->birg;
	be_lv_t  *lv   = be_get_birg_liveness(birg);
	fix_stack_walker_env_t walker_env;

	walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

	irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);

	/* nothing to be done if we didn't find any node, in fact we mustn't
	 * continue, as for endless loops incsp might have had no users and is bad
	 * now.
	 */
	len = ARR_LEN(walker_env.sp_nodes);
	if (len == 0) {
		DEL_ARR_F(walker_env.sp_nodes);
		return;
	}

	be_ssa_construction_init(&senv, birg);
	be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
	                               ARR_LEN(walker_env.sp_nodes));
	be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
	                                    ARR_LEN(walker_env.sp_nodes));

	if (lv != NULL) {
		len = ARR_LEN(walker_env.sp_nodes);
		for (i = 0; i < len; ++i) {
			be_liveness_update(lv, walker_env.sp_nodes[i]);
		}
		be_ssa_construction_update_liveness_phis(&senv, lv);
	}

	phis = be_ssa_construction_get_new_phis(&senv);

	/* set register requirements for stack phis */
	len = ARR_LEN(phis);
	for (i = 0; i < len; ++i) {
		ir_node *phi = phis[i];
		be_set_phi_reg_req(phi, env->sp_req);
		arch_set_irn_register(phi, env->arch_env->sp);
	}
	be_ssa_construction_destroy(&senv);

	DEL_ARR_F(walker_env.sp_nodes);
}
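/* What the SSA construction above achieves (illustrative): if several nodes
 * redefine the stack pointer, e.g. an IncSP inside a loop, sp users after
 * the loop need a merged value. The construction inserts Phis such as
 *
 *     sp' = Phi(sp_before_loop, sp_after_incsp)
 *
 * and the loop above then pins every new Phi to the sp register via
 * env->sp_req. */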

/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param env        the abi environment
 * @param bl         the block to process
 * @param real_bias  the bias value
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
	int      omit_fp     = env->call->flags.bits.try_omit_fp;
	int      wanted_bias = real_bias;
	ir_node *irn;

	sched_foreach(bl, irn) {
		int ofs;

		/*
		   Check, if the node relates to an entity on the stack frame.
		   If so, set the true offset (including the bias) for that
		   node.
		 */
		ir_entity *ent = arch_get_frame_entity(irn);
		if (ent != NULL) {
			int bias   = omit_fp ? real_bias : 0;
			int offset = get_stack_entity_offset(&env->frame, ent, bias);
			arch_set_frame_offset(irn, offset);
			DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
			     ent, offset, bias));
		}

		/*
		 * If the node modifies the stack pointer by a constant offset,
		 * record that in the bias.
		 */
		ofs = arch_get_sp_bias(irn);

		if (be_is_IncSP(irn)) {
			/* fill in real stack frame size */
			if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
				ir_type *frame_type = get_irg_frame_type(env->birg->irg);
				ofs = (int) get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
				ir_type *frame_type = get_irg_frame_type(env->birg->irg);
				ofs = - (int) get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else {
				if (be_get_IncSP_align(irn)) {
					/* patch IncSP to produce an aligned stack pointer */
					ir_type *between_type = env->frame.between_type;
					int      between_size = get_type_size_bytes(between_type);
					int      alignment    = 1 << env->arch_env->stack_alignment;
					int      delta        = (real_bias + ofs + between_size) & (alignment - 1);

					if (delta > 0) {
						be_set_IncSP_offset(irn, ofs + alignment - delta);
						real_bias += alignment - delta;
					}
				} else {
					/* adjust so real_bias corresponds with wanted_bias */
					int delta = wanted_bias - real_bias;
					assert(delta <= 0);
					if (delta != 0) {
						be_set_IncSP_offset(irn, ofs + delta);
						real_bias += delta;
					}
				}
			}
		}

		real_bias   += ofs;
		wanted_bias += ofs;
	}

	assert(real_bias == wanted_bias);
	return real_bias;
}
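/* Worked example for the alignment patch above (illustrative numbers): with
 * env->arch_env->stack_alignment = 4 the required alignment is 1 << 4 = 16
 * bytes. If real_bias + ofs + between_size = 20, then delta = 20 & 15 = 4,
 * the IncSP offset grows by 16 - 4 = 12, and the sum becomes 32, which is
 * 16-byte aligned again. */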

/**
 * A helper struct for the bias walker.
 */
struct bias_walk {
	be_abi_irg_t *env;              /**< The ABI irg environment. */
	int          start_block_bias;  /**< The bias at the end of the start block. */
	int          between_size;      /**< The size of the between type. */
	ir_node      *start_block;      /**< The start block of the current graph. */
};

/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
	struct bias_walk *bw = data;
	if (bl != bw->start_block) {
		process_stack_bias(bw->env, bl, bw->start_block_bias);
	}
}

/**
 * Walker: finally lower all Sels of outer frame or parameter
 * entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx)
{
	be_abi_irg_t *env = ctx;
	ir_node      *ptr;
	ir_entity    *ent;
	ir_type      *owner;

	if (! is_Sel(sel))
		return;

	ent   = get_Sel_entity(sel);
	owner = get_entity_owner(ent);
	ptr   = get_Sel_ptr(sel);

	if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
		/* found access to outer frame or arguments */
		int offset = get_stack_entity_offset(&env->frame, ent, 0);

		if (offset != 0) {
			ir_node  *bl        = get_nodes_block(sel);
			dbg_info *dbgi      = get_irn_dbg_info(sel);
			ir_mode  *mode      = get_irn_mode(sel);
			ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);

			ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
		}
		exchange(sel, ptr);
	}
}
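/* Illustration (hypothetical offsets): an inner function reading a variable
 * x of its enclosing function sees Sel(outer_frame, x). With
 * get_stack_entity_offset() returning 8 this is lowered to
 *
 *     Add(outer_frame, Const(8))
 *
 * and with offset 0 the Sel is simply replaced by the outer frame pointer. */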

void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
	ir_graph *irg = env->birg->irg;
	ir_type  *frame_tp;
	int       i;
	struct bias_walk bw;

	stack_frame_compute_initial_offset(&env->frame);
	/* stack_layout_dump(stdout, frame); */

	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
	bw.between_size     = get_type_size_bytes(env->frame.between_type);

	/* fix the bias in all other blocks */
	bw.env = env;
	bw.start_block = get_irg_start_block(irg);
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

	/* fix now inner functions: these still have Sel nodes to outer
	   frame and parameter entities */
	frame_tp = get_irg_frame_type(irg);
	for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
		ir_entity *ent = get_class_member(frame_tp, i);

		if (is_method_entity(ent) && get_entity_peculiarity(ent) != peculiarity_description) {
			ir_graph *irg = get_entity_irg(ent);

			irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
		}
	}
}

ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, callee_save));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}

ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, ignore));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}

/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi)
{
	return abi->call->flags.bits.try_omit_fp;
}

void be_init_abi(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);