2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
33 #include "irgraph_t.h"
36 #include "iredges_t.h"
39 #include "irprintf_t.h"
46 #include "raw_bitset.h"
57 #include "bessaconstr.h"
60 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
62 typedef struct _be_abi_call_arg_t {
63 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
64 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
65 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
68 const arch_register_t *reg;
71 unsigned alignment; /**< stack alignment */
72 unsigned space_before; /**< allocate space before */
73 unsigned space_after; /**< allocate space after */
76 struct _be_abi_call_t {
77 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
78 int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
79 const be_abi_callbacks_t *cb;
80 ir_type *between_type;
82 const arch_register_class_t *cls_addr; /**< register class of the call address */
86 * The ABI information for the current birg.
88 struct _be_abi_irg_t {
89 be_irg_t *birg; /**< The back end IRG. */
91 const arch_env_t *arch_env;
92 survive_dce_t *dce_survivor;
94 be_abi_call_t *call; /**< The ABI call information. */
95 ir_type *method_type; /**< The type of the method of the IRG. */
97 ir_node *init_sp; /**< The node representing the stack pointer
98 at the start of the function. */
100 ir_node *start; /**< The be_Start params node. */
101 pmap *regs; /**< A map of all callee-save and ignore regs to
102 their Projs to the RegParams node. */
104 int start_block_bias; /**< The stack bias at the end of the start block. */
106 void *cb; /**< ABI Callback self pointer. */
108 pmap *keep_map; /**< mapping blocks to keep nodes. */
109 pset *ignore_regs; /**< Additional registers which shall be ignored. */
111 ir_node **calls; /**< flexible array containing all be_Call nodes */
113 arch_register_req_t *sp_req;
115 be_stack_layout_t frame; /**< The stack frame model. */
118 static heights_t *ir_heights;
120 /** Flag: if set, try to omit the frame pointer in all routines. */
121 static int be_omit_fp = 1;
123 /** Flag: if set, try to omit the frame pointer in leaf routines only. */
124 static int be_omit_leaf_fp = 1;
127 _ ____ ___ ____ _ _ _ _
128 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
129 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
130 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
131 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
133 These callbacks are used by the backend to set the parameters
134 for a specific call type.
138 * Set compare function: compares two ABI call object arguments.
140 static int cmp_call_arg(const void *a, const void *b, size_t n)
142 const be_abi_call_arg_t *p = a, *q = b;
144 return !(p->is_res == q->is_res && p->pos == q->pos);
148 * Get an ABI call object argument.
150 * @param call the abi call
151 * @param is_res true for call results, false for call arguments
152 * @param pos position of the argument
154 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
156 be_abi_call_arg_t arg;
159 memset(&arg, 0, sizeof(arg));
163 hash = is_res * 128 + pos;
165 return set_find(call->params, &arg, sizeof(arg), hash);
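/* For illustration of the lookup key used above: an argument is identified
 * by (is_res, pos), hashed as is_res * 128 + pos.  E.g. the first call
 * result (is_res == 1, pos == 0) hashes to 128 and the call parameter at
 * position 0 hashes to 0; if two different keys ever collide (say parameter
 * 128 and result 0, both hashing to 128), cmp_call_arg() above tells them
 * apart. */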
169 * Create an ABI call object argument.
171 * @param call the abi call
172 * @param is_res true for call results, false for call arguments
173 * @param pos position of the argument
175 static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
177 be_abi_call_arg_t arg;
180 memset(&arg, 0, sizeof(arg));
184 hash = is_res * 128 + pos;
186 return set_insert(call->params, &arg, sizeof(arg), hash);
189 /* Set the flags for a call. */
190 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
196 /* Sets the number of bytes the stack frame is shrunk by the callee on return */
197 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
203 /* Set register class for call address */
204 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
206 call->cls_addr = cls;
210 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
212 be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
214 arg->load_mode = load_mode;
215 arg->alignment = alignment;
216 arg->space_before = space_before;
217 arg->space_after = space_after;
218 assert(alignment > 0 && "Alignment must be greater than 0");
221 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
223 be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
228 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
230 be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
235 /* Get the flags of an ABI call object. */
236 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
242 * Constructor for a new ABI call object.
244 * @param cls_addr register class of the call address
246 * @return the new ABI call object
248 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
250 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
253 call->params = new_set(cmp_call_arg, 16);
255 call->cls_addr = cls_addr;
257 call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
263 * Destructor for an ABI call object.
265 static void be_abi_call_free(be_abi_call_t *call)
267 del_set(call->params);
273 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
274 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
275 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
276 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
279 Handling of the stack frame. It is composed of three types:
280 1) The type of the arguments which are pushed on the stack.
281 2) The "between type" which consists of stuff the call of the
282 function pushes on the stack (like the return address and
283 the old base pointer for ia32).
284 3) The Firm frame type which consists of all local variables
288 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
291 ir_type *t = get_entity_owner(ent);
292 int ofs = get_entity_offset(ent);
296 /* Find the type the entity is contained in. */
297 for (index = 0; index < N_FRAME_TYPES; ++index) {
298 if (frame->order[index] == t)
300 /* Add the sizes of all types below the entity's owner type to the entity's offset */
301 ofs += get_type_size_bytes(frame->order[index]);
304 /* correct the offset by the initial position of the frame pointer */
305 ofs -= frame->initial_offset;
307 /* correct the offset with the current bias. */
314 * Retrieve the entity with given offset from a frame type.
316 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
320 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
321 ir_entity *ent = get_compound_member(t, i);
322 if (get_entity_offset(ent) == offset)
329 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
331 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
332 ir_entity *ent = search_ent_with_offset(base, 0);
335 frame->initial_offset
336 = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
338 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
341 return frame->initial_offset;
345 * Initializes the frame layout from parts
347 * @param frame the stack layout that will be initialized
348 * @param args the stack argument layout type
349 * @param between the between layout type
350 * @param locals the method frame type
351 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
352 * @param param_map an array mapping method argument positions to the stack argument type
354 * @return the initialized stack layout
356 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
357 ir_type *between, ir_type *locals, int stack_dir,
358 ir_entity *param_map[])
360 frame->arg_type = args;
361 frame->between_type = between;
362 frame->frame_type = locals;
363 frame->initial_offset = 0;
364 frame->initial_bias = 0;
365 frame->stack_dir = stack_dir;
366 frame->order[1] = between;
367 frame->param_map = param_map;
370 frame->order[0] = args;
371 frame->order[2] = locals;
374 /* typical decreasing stack: locals have the
375 * lowest addresses, arguments the highest */
376 frame->order[0] = locals;
377 frame->order[2] = args;
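/* A rough sketch of the resulting layout for a decreasing stack (e.g. ia32),
 * higher addresses at the top:
 *
 *    | stack arguments |  <- frame->order[2] = args
 *    | between type    |     (return address, old base pointer, ...)
 *    | local variables |  <- frame->order[0] = locals
 *    v  lower addresses
 *
 * For an increasing stack the positions of args and locals are swapped,
 * as set up in the branch above. */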
383 * Returns non-zero if the call argument at the given position
384 * is transferred on the stack.
386 static inline int is_on_stack(be_abi_call_t *call, int pos)
388 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
389 return arg && !arg->in_reg;
399 Adjustment of the calls inside a graph.
404 * Transform a call node into a be_Call node.
406 * @param env The ABI environment for the current irg.
407 * @param irn The call node.
408 * @param curr_sp The stack pointer node to use.
409 * @return The stack pointer after the call.
411 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
413 ir_graph *irg = env->birg->irg;
414 const arch_env_t *arch_env = env->birg->main_env->arch_env;
415 ir_type *call_tp = get_Call_type(irn);
416 ir_node *call_ptr = get_Call_ptr(irn);
417 int n_params = get_method_n_params(call_tp);
418 ir_node *curr_mem = get_Call_mem(irn);
419 ir_node *bl = get_nodes_block(irn);
421 int stack_dir = arch_env->stack_dir;
422 const arch_register_t *sp = arch_env->sp;
423 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
424 ir_mode *mach_mode = sp->reg_class->mode;
425 struct obstack *obst = be_get_birg_obst(irg);
426 int no_alloc = call->flags.bits.frame_is_setup_on_call;
427 int n_res = get_method_n_ress(call_tp);
428 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
430 ir_node *res_proj = NULL;
431 int n_reg_params = 0;
432 int n_stack_params = 0;
435 pset_new_t destroyed_regs, states;
436 pset_new_iterator_t iter;
440 int n_reg_results = 0;
441 const arch_register_t *reg;
442 const ir_edge_t *edge;
444 int *stack_param_idx;
445 int i, n, destroy_all_regs;
448 pset_new_init(&destroyed_regs);
449 pset_new_init(&states);
451 /* Let the isa fill out the abi description for that call node. */
452 arch_env_get_call_abi(arch_env, call_tp, call);
454 /* Insert code to put the stack arguments on the stack. */
455 assert(get_Call_n_params(irn) == n_params);
456 assert(obstack_object_size(obst) == 0);
457 stack_param_idx = ALLOCAN(int, n_params);
458 for (i = 0; i < n_params; ++i) {
459 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
462 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
464 stack_size += round_up2(arg->space_before, arg->alignment);
465 stack_size += round_up2(arg_size, arg->alignment);
466 stack_size += round_up2(arg->space_after, arg->alignment);
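/* For illustration: a 6 byte argument with 4 byte alignment and no extra
 * space before/after contributes round_up2(6, 4) == 8 bytes to stack_size. */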
468 stack_param_idx[n_stack_params++] = i;
472 /* Collect all arguments which are passed in registers. */
473 reg_param_idxs = ALLOCAN(int, n_params);
474 for (i = 0; i < n_params; ++i) {
475 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
476 if (arg && arg->in_reg) {
477 reg_param_idxs[n_reg_params++] = i;
482 * If the stack is decreasing, we are not storing sequentially, and nobody
483 * else has allocated the call frame, we allocate as much space on the
484 * stack as all parameters need, by moving the stack pointer along the
485 * stack's direction.
487 * Note: we also have to do this for stack_size == 0, because we may have
488 * to adjust stack alignment for the call.
490 if (stack_dir < 0 && !do_seq && !no_alloc) {
491 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
494 dbgi = get_irn_dbg_info(irn);
495 /* If there are some parameters which shall be passed on the stack. */
496 if (n_stack_params > 0) {
498 ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
502 * Reverse list of stack parameters if call arguments are from left to right.
503 * We must reverse them again if they are pushed (not stored) and the stack
504 * direction is downwards.
506 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
507 for (i = 0; i < n_stack_params >> 1; ++i) {
508 int other = n_stack_params - i - 1;
509 int tmp = stack_param_idx[i];
510 stack_param_idx[i] = stack_param_idx[other];
511 stack_param_idx[other] = tmp;
515 curr_mem = get_Call_mem(irn);
517 in[n_in++] = curr_mem;
520 for (i = 0; i < n_stack_params; ++i) {
521 int p = stack_param_idx[i];
522 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
523 ir_node *param = get_Call_param(irn, p);
524 ir_node *addr = curr_sp;
526 ir_type *param_type = get_method_param_type(call_tp, p);
527 int param_size = get_type_size_bytes(param_type) + arg->space_after;
530 * If we wanted to build the arguments sequentially,
531 * the stack pointer for the next argument must be incremented,
532 * and the memory value propagated.
536 addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
537 param_size + arg->space_before, 0);
538 add_irn_dep(curr_sp, curr_mem);
540 curr_ofs += arg->space_before;
541 curr_ofs = round_up2(curr_ofs, arg->alignment);
543 /* Make the expression to compute the argument's offset. */
545 ir_mode *constmode = mach_mode;
546 if (mode_is_reference(mach_mode)) {
549 addr = new_r_Const_long(irg, constmode, curr_ofs);
550 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
554 /* Insert a store for primitive arguments. */
555 if (is_atomic_type(param_type)) {
557 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
558 store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
559 mem = new_r_Proj(store, mode_M, pn_Store_M);
561 /* Make a mem copy for compound arguments. */
564 assert(mode_is_reference(get_irn_mode(param)));
565 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
566 mem = new_r_Proj(copy, mode_M, pn_CopyB_M_regular);
569 curr_ofs += param_size;
577 /* We need the sync only if we didn't build the stores sequentially. */
579 if (n_stack_params >= 1) {
580 curr_mem = new_r_Sync(bl, n_in, in);
582 curr_mem = get_Call_mem(irn);
587 /* check for the return_twice property */
588 destroy_all_regs = 0;
589 if (is_SymConst_addr_ent(call_ptr)) {
590 ir_entity *ent = get_SymConst_entity(call_ptr);
592 if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
593 destroy_all_regs = 1;
595 ir_type *call_tp = get_Call_type(irn);
597 if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
598 destroy_all_regs = 1;
601 /* Put caller-save registers into the destroyed set and state registers into the states set */
602 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
604 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
605 for (j = 0; j < cls->n_regs; ++j) {
606 const arch_register_t *reg = arch_register_for_index(cls, j);
608 if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
609 if (! arch_register_type_is(reg, ignore))
610 pset_new_insert(&destroyed_regs, (void *) reg);
612 if (arch_register_type_is(reg, state)) {
613 pset_new_insert(&destroyed_regs, (void*) reg);
614 pset_new_insert(&states, (void*) reg);
619 if (destroy_all_regs) {
620 /* even if destroy_all_regs is set, neither SP nor FP is destroyed (else bad things will happen) */
621 pset_new_remove(&destroyed_regs, arch_env->sp);
622 pset_new_remove(&destroyed_regs, arch_env->bp);
625 /* search the largest result proj number */
626 res_projs = ALLOCANZ(ir_node*, n_res);
628 foreach_out_edge(irn, edge) {
629 const ir_edge_t *res_edge;
630 ir_node *irn = get_edge_src_irn(edge);
632 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
635 foreach_out_edge(irn, res_edge) {
637 ir_node *res = get_edge_src_irn(res_edge);
639 assert(is_Proj(res));
641 proj = get_Proj_proj(res);
642 assert(proj < n_res);
643 assert(res_projs[proj] == NULL);
644 res_projs[proj] = res;
650 /** TODO: this is not correct for cases where return values are passed
651 * on the stack, but no known ABI does this currently...
653 n_reg_results = n_res;
655 assert(obstack_object_size(obst) == 0);
657 in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
659 /* make the back end call node and set its register requirements. */
660 for (i = 0; i < n_reg_params; ++i) {
661 in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
664 /* add state registers ins */
665 foreach_pset_new(&states, reg, iter) {
666 const arch_register_class_t *cls = arch_register_get_class(reg);
668 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
669 ir_fprintf(stderr, "Adding %+F\n", regnode);
671 ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
672 in[n_ins++] = regnode;
674 assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
676 /* ins collected, build the call */
677 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
679 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
680 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
681 n_ins, in, get_Call_type(irn));
682 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
685 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
686 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
687 n_ins, in, get_Call_type(irn));
689 be_Call_set_pop(low_call, call->pop);
691 /* put the call into the list of all calls for later processing */
692 ARR_APP1(ir_node *, env->calls, low_call);
694 /* create new stack pointer */
695 curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
696 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
697 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
698 arch_set_irn_register(curr_sp, sp);
700 /* now handle results */
701 for (i = 0; i < n_res; ++i) {
703 ir_node *proj = res_projs[i];
704 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
706 /* return values on the stack are not supported yet */
710 shift the proj number to the right, since we will drop the
711 unspeakable Proj_T from the Call. Therefore, all real argument
712 Proj numbers must be increased by pn_be_Call_first_res
714 pn = i + pn_be_Call_first_res;
717 ir_type *res_type = get_method_res_type(call_tp, i);
718 ir_mode *mode = get_type_mode(res_type);
719 proj = new_r_Proj(low_call, mode, pn);
722 set_Proj_pred(proj, low_call);
723 set_Proj_proj(proj, pn);
727 pset_new_remove(&destroyed_regs, arg->reg);
732 Set the register class of the call address to
733 the backend provided class (default: stack pointer class)
735 be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
737 DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
739 /* Set the register classes and constraints of the Call parameters. */
740 for (i = 0; i < n_reg_params; ++i) {
741 int index = reg_param_idxs[i];
742 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
743 assert(arg->reg != NULL);
745 be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
749 /* Set the register constraints of the results. */
750 for (i = 0; i < n_res; ++i) {
751 ir_node *proj = res_projs[i];
752 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
753 int pn = get_Proj_proj(proj);
756 be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
757 arch_set_irn_register(proj, arg->reg);
759 exchange(irn, low_call);
761 /* kill the ProjT node */
762 if (res_proj != NULL) {
766 /* Make additional projs for the caller save registers
767 and the Keep node which keeps them alive. */
769 const arch_register_t *reg;
773 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
774 pset_new_iterator_t iter;
777 n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
778 in = ALLOCAN(ir_node *, n_ins);
780 /* also keep the stack pointer */
781 set_irn_link(curr_sp, (void*) sp);
784 foreach_pset_new(&destroyed_regs, reg, iter) {
785 ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
787 /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
788 be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
789 arch_set_irn_register(proj, reg);
791 set_irn_link(proj, (void*) reg);
796 for (i = 0; i < n_reg_results; ++i) {
797 ir_node *proj = res_projs[i];
798 const arch_register_t *reg = arch_get_irn_register(proj);
799 set_irn_link(proj, (void*) reg);
804 /* create the Keep for the caller save registers */
805 keep = be_new_Keep(bl, n, in);
806 for (i = 0; i < n; ++i) {
807 const arch_register_t *reg = get_irn_link(in[i]);
808 be_node_set_reg_class_in(keep, i, reg->reg_class);
812 /* Clean up the stack. */
813 assert(stack_size >= call->pop);
814 stack_size -= call->pop;
816 if (stack_size > 0) {
817 ir_node *mem_proj = NULL;
819 foreach_out_edge(low_call, edge) {
820 ir_node *irn = get_edge_src_irn(edge);
821 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
828 mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
829 keep_alive(mem_proj);
832 /* Clean up the stack frame or revert alignment fixes if we allocated it */
834 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
837 be_abi_call_free(call);
839 pset_new_destroy(&states);
840 pset_new_destroy(&destroyed_regs);
846 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
848 * @param stack_alignment the minimum stack alignment
849 * @param size the node containing the non-aligned size
850 * @param block the block in which new nodes are allocated
851 * @param dbg debug info for new nodes
853 * @return a node representing the aligned size
855 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
856 ir_node *block, dbg_info *dbg)
858 if (stack_alignment > 1) {
864 assert(is_po2(stack_alignment));
866 mode = get_irn_mode(size);
867 tv = new_tarval_from_long(stack_alignment-1, mode);
868 irg = get_Block_irg(block);
869 mask = new_r_Const(irg, tv);
870 size = new_rd_Add(dbg, block, size, mask, mode);
872 tv = new_tarval_from_long(-(long)stack_alignment, mode);
873 mask = new_r_Const(irg, tv);
874 size = new_rd_And(dbg, block, size, mask, mode);
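/* Worked example (the alignment is a power of two, as asserted above):
 * with stack_alignment == 16 and a requested size of 20, the nodes built
 * here compute (20 + 15) & -16 == 32, i.e. the size rounded up to the next
 * multiple of the alignment. */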
880 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
882 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
891 const ir_edge_t *edge;
897 unsigned stack_alignment;
899 assert(get_Alloc_where(alloc) == stack_alloc);
901 block = get_nodes_block(alloc);
902 irg = get_Block_irg(block);
905 type = get_Alloc_type(alloc);
907 foreach_out_edge(alloc, edge) {
908 ir_node *irn = get_edge_src_irn(edge);
910 assert(is_Proj(irn));
911 switch (get_Proj_proj(irn)) {
923 /* Beware: Alloc nodes without a result can currently occur; only escape
924 analysis removes them, and that phase runs only for object-oriented
925 source. We kill the Alloc here. */
926 if (alloc_res == NULL && alloc_mem) {
927 exchange(alloc_mem, get_Alloc_mem(alloc));
931 dbg = get_irn_dbg_info(alloc);
932 count = get_Alloc_count(alloc);
934 /* we might need to multiply the count by the element size */
935 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
936 ir_mode *mode = get_irn_mode(count);
937 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
939 ir_node *cnst = new_rd_Const(dbg, irg, tv);
940 size = new_rd_Mul(dbg, block, count, cnst, mode);
945 /* The stack pointer will be modified in an unknown manner.
946 We cannot omit it. */
947 env->call->flags.bits.try_omit_fp = 0;
949 stack_alignment = 1 << env->arch_env->stack_alignment;
950 size = adjust_alloc_size(stack_alignment, size, block, dbg);
951 new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
952 set_irn_dbg_info(new_alloc, dbg);
954 if (alloc_mem != NULL) {
958 addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);
960 /* We need to sync the output mem of the AddSP with the input mem
961 edge into the alloc node. */
962 ins[0] = get_Alloc_mem(alloc);
964 sync = new_r_Sync(block, 2, ins);
966 exchange(alloc_mem, sync);
969 exchange(alloc, new_alloc);
971 /* fix projnum of alloca res */
972 set_Proj_proj(alloc_res, pn_be_AddSP_res);
975 curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
982 * The Free is transformed into a back end free node and connected to the stack nodes.
984 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
988 ir_node *subsp, *mem, *res, *size, *sync;
992 unsigned stack_alignment;
995 assert(get_Free_where(free) == stack_alloc);
997 block = get_nodes_block(free);
998 irg = get_irn_irg(block);
999 type = get_Free_type(free);
1000 sp_mode = env->arch_env->sp->reg_class->mode;
1001 dbg = get_irn_dbg_info(free);
1003 /* we might need to multiply the size by the element size */
1004 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
1005 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
1006 ir_node *cnst = new_rd_Const(dbg, irg, tv);
1007 ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
1011 size = get_Free_size(free);
1014 stack_alignment = 1 << env->arch_env->stack_alignment;
1015 size = adjust_alloc_size(stack_alignment, size, block, dbg);
1017 /* The stack pointer will be modified in an unknown manner.
1018 We cannot omit it. */
1019 env->call->flags.bits.try_omit_fp = 0;
1020 subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
1021 set_irn_dbg_info(subsp, dbg);
1023 mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
1024 res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);
1026 /* we need to sync the memory */
1027 in[0] = get_Free_mem(free);
1029 sync = new_r_Sync(block, 2, in);
1031 /* and make the SubSP dependent on the former memory */
1032 add_irn_dep(subsp, get_Free_mem(free));
1035 exchange(free, sync);
1042 * Check if a node is somehow data dependent on another one.
1043 * Both nodes must be in the same basic block.
1044 * @param n1 The first node.
1045 * @param n2 The second node.
1046 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
1048 static int dependent_on(ir_node *n1, ir_node *n2)
1050 assert(get_nodes_block(n1) == get_nodes_block(n2));
1052 return heights_reachable_in_block(ir_heights, n1, n2);
1055 static int cmp_call_dependency(const void *c1, const void *c2)
1057 ir_node *n1 = *(ir_node **) c1;
1058 ir_node *n2 = *(ir_node **) c2;
1061 Classical qsort() comparison function behavior:
1062 0 if both elements are equal
1063 1 if second is "smaller" than first
1064 -1 if first is "smaller" than second
1066 if (dependent_on(n1, n2))
1069 if (dependent_on(n2, n1))
1072 /* The nodes have no depth order, but we need a total order because qsort()
1074 return get_irn_idx(n1) - get_irn_idx(n2);
1078 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
1079 * Clears the irg_is_leaf flag if a Call is detected.
1081 static void link_ops_in_block_walker(ir_node *irn, void *data)
1083 be_abi_irg_t *env = data;
1084 ir_opcode code = get_irn_opcode(irn);
1086 if (code == iro_Call ||
1087 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1088 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1089 ir_node *bl = get_nodes_block(irn);
1090 void *save = get_irn_link(bl);
1092 if (code == iro_Call)
1093 env->call->flags.bits.irg_is_leaf = 0;
1095 set_irn_link(irn, save);
1096 set_irn_link(bl, irn);
1099 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
1100 ir_node *param = get_Builtin_param(irn, 0);
1101 tarval *tv = get_Const_tarval(param);
1102 unsigned long value = get_tarval_long(tv);
1103 /* use ebp, so the climbframe algo works... */
1105 env->call->flags.bits.try_omit_fp = 0;
1112 * Process all Call/Alloc/Free nodes inside a basic block.
1113 * Note that the link field of the block must contain a linked list of all
1114 * Call nodes inside the Block. We first order this list according to data dependency
1115 * and then connect the calls together.
1117 static void process_ops_in_block(ir_node *bl, void *data)
1119 be_abi_irg_t *env = data;
1120 ir_node *curr_sp = env->init_sp;
1127 for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {
1131 nodes = ALLOCAN(ir_node*, n_nodes);
1132 for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {
1136 /* If there were call nodes in the block. */
1141 /* order the call nodes according to data dependency */
1142 qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
1144 for (i = n_nodes - 1; i >= 0; --i) {
1145 ir_node *irn = nodes[i];
1147 DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1148 switch (get_irn_opcode(irn)) {
1151 /* The stack pointer will be modified due to a call. */
1152 env->call->flags.bits.try_omit_fp = 0;
1154 curr_sp = adjust_call(env, irn, curr_sp);
1157 if (get_Alloc_where(irn) == stack_alloc)
1158 curr_sp = adjust_alloc(env, irn, curr_sp);
1161 if (get_Free_where(irn) == stack_alloc)
1162 curr_sp = adjust_free(env, irn, curr_sp);
1165 panic("invalid call");
1170 /* Keep the last stack state in the block by tying it to a Keep node;
1171 * the Proj from calls is already kept */
1172 if (curr_sp != env->init_sp &&
1173 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1175 keep = be_new_Keep(bl, 1, nodes);
1176 pmap_insert(env->keep_map, bl, keep);
1180 set_irn_link(bl, curr_sp);
1184 * Adjust all call nodes in the graph to the ABI conventions.
1186 static void process_calls(be_abi_irg_t *env)
1188 ir_graph *irg = env->birg->irg;
1190 env->call->flags.bits.irg_is_leaf = 1;
1191 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
1193 ir_heights = heights_new(env->birg->irg);
1194 irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
1195 heights_free(ir_heights);
1199 * Computes the stack argument layout type.
1200 * Changes a possibly allocated value param type by moving
1201 * entities to the stack layout type.
1203 * @param env the ABI environment
1204 * @param call the current call ABI
1205 * @param method_type the method type
1206 * @param val_param_tp the value parameter type, will be destroyed
1207 * @param param_map an array mapping method arguments to the stack layout type
1209 * @return the stack argument layout type
1211 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
1212 ir_type *method_type, ir_type *val_param_tp,
1213 ir_entity ***param_map)
1215 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1216 int inc = env->birg->main_env->arch_env->stack_dir * dir;
1217 int n = get_method_n_params(method_type);
1218 int curr = inc > 0 ? 0 : n - 1;
1219 struct obstack *obst = be_get_birg_obst(env->irg);
1225 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1228 *param_map = map = OALLOCN(obst, ir_entity*, n);
1229 res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
1230 for (i = 0; i < n; ++i, curr += inc) {
1231 ir_type *param_type = get_method_param_type(method_type, curr);
1232 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1235 if (arg->on_stack) {
1236 if (val_param_tp != NULL) {
1237 /* the entity was already created, create a copy in the param type */
1238 ir_entity *val_ent = get_method_value_param_ent(method_type, i);
1239 arg->stack_ent = copy_entity_own(val_ent, res);
1240 set_entity_link(val_ent, arg->stack_ent);
1241 set_entity_link(arg->stack_ent, NULL);
1243 /* create a new entity */
1244 snprintf(buf, sizeof(buf), "param_%d", i);
1245 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1247 ofs += arg->space_before;
1248 ofs = round_up2(ofs, arg->alignment);
1249 set_entity_offset(arg->stack_ent, ofs);
1250 ofs += arg->space_after;
1251 ofs += get_type_size_bytes(param_type);
1252 map[i] = arg->stack_ent;
1255 set_type_size_bytes(res, ofs);
1256 set_type_state(res, layout_fixed);
1261 const arch_register_t *reg;
1265 static int cmp_regs(const void *a, const void *b)
1267 const reg_node_map_t *p = a;
1268 const reg_node_map_t *q = b;
1270 if (p->reg->reg_class == q->reg->reg_class)
1271 return p->reg->index - q->reg->index;
1273 return p->reg->reg_class - q->reg->reg_class;
1276 static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
1279 int n = pmap_count(reg_map);
1282 foreach_pmap(reg_map, ent) {
1283 res[i].reg = ent->key;
1284 res[i].irn = ent->value;
1288 qsort(res, n, sizeof(res[0]), cmp_regs);
1292 * Creates a barrier.
1294 static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
1297 int n_regs = pmap_count(regs);
1303 in = ALLOCAN(ir_node*, n_regs+1);
1304 rm = ALLOCAN(reg_node_map_t, n_regs);
1305 reg_map_to_arr(rm, regs);
1306 for (n = 0; n < n_regs; ++n) {
1314 irn = be_new_Barrier(bl, n, in);
1316 for (n = 0; n < n_regs; ++n) {
1317 ir_node *pred = rm[n].irn;
1318 const arch_register_t *reg = rm[n].reg;
1319 arch_register_type_t add_type = 0;
1321 const backend_info_t *info;
1323 /* stupid workaround for now... as not all nodes report register
1325 info = be_get_info(skip_Proj(pred));
1326 if (info != NULL && info->out_infos != NULL) {
1327 const arch_register_req_t *ireq = arch_get_register_req_out(pred);
1328 if (ireq->type & arch_register_req_type_ignore)
1329 add_type |= arch_register_req_type_ignore;
1330 if (ireq->type & arch_register_req_type_produces_sp)
1331 add_type |= arch_register_req_type_produces_sp;
1334 proj = new_r_Proj(irn, get_irn_mode(pred), n);
1335 be_node_set_reg_class_in(irn, n, reg->reg_class);
1337 be_set_constr_single_reg_in(irn, n, reg, 0);
1338 be_set_constr_single_reg_out(irn, n, reg, add_type);
1339 arch_set_irn_register(proj, reg);
1341 pmap_insert(regs, (void *) reg, proj);
1345 *mem = new_r_Proj(irn, mode_M, n);
1352 * Creates a be_Return for a Return node.
1354 * @param env the abi environment
1355 * @param irn the Return node or NULL if there was none
1356 * @param bl the block where the be_Return should be placed
1357 * @param mem the current memory
1358 * @param n_res number of return results
1360 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
1361 ir_node *mem, int n_res)
1363 be_abi_call_t *call = env->call;
1364 const arch_env_t *arch_env = env->birg->main_env->arch_env;
1366 pmap *reg_map = pmap_create();
1367 ir_node *keep = pmap_get(env->keep_map, bl);
1374 const arch_register_t **regs;
1378 Get the valid stack node in this block.
1379 If we had a call in that block, there is a Keep constructed by process_calls()
1380 which points to the last stack modification in that block; we'll use
1381 it then. Otherwise we use the stack from the start block and let
1382 the SSA construction fix the usage.
1384 stack = be_abi_reg_map_get(env->regs, arch_env->sp);
1386 stack = get_irn_n(keep, 0);
1388 remove_End_keepalive(get_irg_end(env->birg->irg), keep);
1391 /* Insert results for Return into the register map. */
1392 for (i = 0; i < n_res; ++i) {
1393 ir_node *res = get_Return_res(irn, i);
1394 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1395 assert(arg->in_reg && "return value must be passed in register");
1396 pmap_insert(reg_map, (void *) arg->reg, res);
1399 /* Add uses of the callee save registers. */
1400 foreach_pmap(env->regs, ent) {
1401 const arch_register_t *reg = ent->key;
1402 if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1403 pmap_insert(reg_map, ent->key, ent->value);
1406 be_abi_reg_map_set(reg_map, arch_env->sp, stack);
1408 /* Make the Epilogue node and call the arch's epilogue maker. */
1409 create_barrier(bl, &mem, reg_map, 1);
1410 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1413 Maximum size of the in array for Return nodes is
1414 return args + callee save/ignore registers + memory + stack pointer
1416 in_max = pmap_count(reg_map) + n_res + 2;
1418 in = ALLOCAN(ir_node*, in_max);
1419 regs = ALLOCAN(arch_register_t const*, in_max);
1422 in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
1424 regs[1] = arch_env->sp;
1427 /* clear SP entry, since it has already been grown. */
1428 pmap_insert(reg_map, (void *) arch_env->sp, NULL);
1429 for (i = 0; i < n_res; ++i) {
1430 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1432 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1433 regs[n++] = arg->reg;
1435 /* Clear the map entry to mark the register as processed. */
1436 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1439 /* grow the rest of the stuff. */
1440 foreach_pmap(reg_map, ent) {
1443 regs[n++] = ent->key;
1447 /* The in array for the new back end return is now ready. */
1449 dbgi = get_irn_dbg_info(irn);
1453 /* we have to pop the shadow parameter in case of struct returns */
1455 ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
1457 /* Set the register classes of the return's parameters accordingly. */
1458 for (i = 0; i < n; ++i) {
1459 if (regs[i] == NULL)
1462 be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
1465 /* Free the space of the Epilog's in array and the register <-> proj map. */
1466 pmap_destroy(reg_map);
1471 typedef struct ent_pos_pair ent_pos_pair;
1472 struct ent_pos_pair {
1473 ir_entity *ent; /**< a value param entity */
1474 int pos; /**< its parameter number */
1475 ent_pos_pair *next; /**< for linking */
1478 typedef struct lower_frame_sels_env_t {
1479 ent_pos_pair *value_param_list; /**< the list of all value param entities */
1480 ir_node *frame; /**< the current frame */
1481 const arch_register_class_t *sp_class; /**< register class of the stack pointer */
1482 const arch_register_class_t *link_class; /**< register class of the link pointer */
1483 ir_type *value_tp; /**< the value type if any */
1484 ir_type *frame_tp; /**< the frame type */
1485 int static_link_pos; /**< argument number of the hidden static link */
1486 } lower_frame_sels_env_t;
1489 * Return an entity from the backend for a value param entity.
1491 * @param ent a value param type entity
1492 * @param ctx context
1494 static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
1496 ir_entity *argument_ent = get_entity_link(ent);
1498 if (argument_ent == NULL) {
1499 /* we have NO argument entity yet: This is bad, as we will
1500 * need one for backing store.
1503 ir_type *frame_tp = ctx->frame_tp;
1504 unsigned offset = get_type_size_bytes(frame_tp);
1505 ir_type *tp = get_entity_type(ent);
1506 unsigned align = get_type_alignment_bytes(tp);
1508 offset += align - 1;
1509 offset &= ~(align - 1);
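/* e.g. with a current frame size of 20 and an 8 byte aligned entity type,
 * the backing-store entity is placed at offset (20 + 7) & ~7 == 24 */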
1511 argument_ent = copy_entity_own(ent, frame_tp);
1513 /* must be automatic to set a fixed layout */
1514 set_entity_offset(argument_ent, offset);
1515 offset += get_type_size_bytes(tp);
1517 set_type_size_bytes(frame_tp, offset);
1518 set_entity_link(ent, argument_ent);
1520 return argument_ent;
1523 * Walker: Replaces Sels of frame type and
1524 * value param type entities by FrameAddress.
1525 * Links all used entities.
1527 static void lower_frame_sels_walker(ir_node *irn, void *data)
1529 lower_frame_sels_env_t *ctx = data;
1532 ir_node *ptr = get_Sel_ptr(irn);
1534 if (ptr == ctx->frame) {
1535 ir_entity *ent = get_Sel_entity(irn);
1536 ir_node *bl = get_nodes_block(irn);
1539 int is_value_param = 0;
1541 if (get_entity_owner(ent) == ctx->value_tp) {
1544 /* replace by its copy from the argument type */
1545 pos = get_struct_member_index(ctx->value_tp, ent);
1546 ent = get_argument_entity(ent, ctx);
1549 nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
1552 /* check if it's a param Sel and if we have not seen this entity before */
1553 if (is_value_param && get_entity_link(ent) == NULL) {
1559 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1561 set_entity_link(ent, ctx->value_param_list);
1568 * Check if a value parameter is transmitted as a register.
1569 * This might happen if the address of a parameter is taken which is
1570 * transmitted in registers.
1572 * Note that on some architectures this case must be handled specially
1573 * because the place of the backing store is determined by their ABI.
1575 * In the default case we move the entity to the frame type and create
1576 * a backing store into the first block.
1578 static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
1580 be_abi_call_t *call = env->call;
1581 ir_graph *irg = env->birg->irg;
1582 ent_pos_pair *entry, *new_list;
1584 int i, n = ARR_LEN(value_param_list);
1587 for (i = 0; i < n; ++i) {
1588 int pos = value_param_list[i].pos;
1589 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
1592 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
1593 value_param_list[i].next = new_list;
1594 new_list = &value_param_list[i];
1597 if (new_list != NULL) {
1598 /* ok, change the graph */
1599 ir_node *start_bl = get_irg_start_block(irg);
1600 ir_node *first_bl = get_first_block_succ(start_bl);
1601 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1602 optimization_state_t state;
1605 assert(first_bl && first_bl != start_bl);
1606 /* we had already removed critical edges, so the following
1607 assertion should always be true. */
1608 assert(get_Block_n_cfgpreds(first_bl) == 1);
1610 /* now create backing stores */
1611 frame = get_irg_frame(irg);
1612 imem = get_irg_initial_mem(irg);
1614 save_optimization_state(&state);
1616 nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
1617 restore_optimization_state(&state);
1619 /* reroute all edges to the new memory source */
1620 edges_reroute(imem, nmem, irg);
1624 args = get_irg_args(irg);
1625 args_bl = get_nodes_block(args);
1626 for (entry = new_list; entry != NULL; entry = entry->next) {
1628 ir_type *tp = get_entity_type(entry->ent);
1629 ir_mode *mode = get_type_mode(tp);
1632 /* address for the backing store */
1633 addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
1636 mem = new_r_Proj(store, mode_M, pn_Store_M);
1638 /* the backing store itself */
1639 store = new_r_Store(first_bl, mem, addr,
1640 new_r_Proj(args, mode, i), 0);
1642 /* the new memory Proj gets the last Proj from store */
1643 set_Proj_pred(nmem, store);
1644 set_Proj_proj(nmem, pn_Store_M);
1646 /* move all entities to the frame type */
1647 frame_tp = get_irg_frame_type(irg);
1648 offset = get_type_size_bytes(frame_tp);
1650 /* we will add new entities: set the layout to undefined */
1651 assert(get_type_state(frame_tp) == layout_fixed);
1652 set_type_state(frame_tp, layout_undefined);
1653 for (entry = new_list; entry != NULL; entry = entry->next) {
1654 ir_entity *ent = entry->ent;
1656 /* If the entity is still on the argument type, move it to the frame type.
1657 This happens if the value_param type was built due to compound
1659 if (get_entity_owner(ent) != frame_tp) {
1660 ir_type *tp = get_entity_type(ent);
1661 unsigned align = get_type_alignment_bytes(tp);
1663 offset += align - 1;
1664 offset &= ~(align - 1);
1665 set_entity_owner(ent, frame_tp);
1666 add_class_member(frame_tp, ent);
1667 /* must be automatic to set a fixed layout */
1668 set_entity_offset(ent, offset);
1669 offset += get_type_size_bytes(tp);
1672 set_type_size_bytes(frame_tp, offset);
1673 /* fix the layout again */
1674 set_type_state(frame_tp, layout_fixed);
1679 * The start block has no jump; instead it has an initial exec Proj.
1680 * The backend wants to handle all blocks the same way, so we replace
1681 * the out cfg edge with a real jump.
1683 static void fix_start_block(ir_graph *irg)
1685 ir_node *initial_X = get_irg_initial_exec(irg);
1686 ir_node *start_block = get_irg_start_block(irg);
1687 const ir_edge_t *edge;
1689 assert(is_Proj(initial_X));
1691 foreach_out_edge(initial_X, edge) {
1692 ir_node *block = get_edge_src_irn(edge);
1694 if (is_Anchor(block))
1696 if (block != start_block) {
1697 ir_node *jmp = new_r_Jmp(start_block);
1698 set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
1699 set_irg_initial_exec(irg, jmp);
1703 panic("Initial exec has no follow block in %+F", irg);
1707 * Update the entity of Sels to the outer value parameters.
1709 static void update_outer_frame_sels(ir_node *irn, void *env)
1711 lower_frame_sels_env_t *ctx = env;
1718 ptr = get_Sel_ptr(irn);
1719 if (! is_arg_Proj(ptr))
1721 if (get_Proj_proj(ptr) != ctx->static_link_pos)
1723 ent = get_Sel_entity(irn);
1725 if (get_entity_owner(ent) == ctx->value_tp) {
1726 /* replace by its copy from the argument type */
1727 pos = get_struct_member_index(ctx->value_tp, ent);
1728 ent = get_argument_entity(ent, ctx);
1729 set_Sel_entity(irn, ent);
1731 /* check if we have not seen this entity before */
1732 if (get_entity_link(ent) == NULL) {
1738 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1740 set_entity_link(ent, ctx->value_param_list);
1746 * Fix access to outer local variables.
1748 static void fix_outer_variable_access(be_abi_irg_t *env,
1749 lower_frame_sels_env_t *ctx)
1755 for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
1756 ir_entity *ent = get_class_member(ctx->frame_tp, i);
1758 if (! is_method_entity(ent))
1761 irg = get_entity_irg(ent);
1766 * FIXME: find the number of the static link parameter
1767 * for now we assume 0 here
1769 ctx->static_link_pos = 0;
1771 irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
1776 * Modify the irg itself and the frame type.
1778 static void modify_irg(be_abi_irg_t *env)
1780 be_abi_call_t *call = env->call;
1781 const arch_env_t *arch_env= env->birg->main_env->arch_env;
1782 const arch_register_t *sp = arch_env->sp;
1783 ir_graph *irg = env->birg->irg;
1786 ir_node *new_mem_proj;
1788 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1789 struct obstack *obst = be_get_birg_obst(irg);
1794 unsigned frame_size;
1797 const arch_register_t *fp_reg;
1798 ir_node *frame_pointer;
1802 const ir_edge_t *edge;
1803 ir_type *arg_type, *bet_type, *tp;
1804 lower_frame_sels_env_t ctx;
1805 ir_entity **param_map;
1807 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1809 /* Must fetch memory here, otherwise the start Barrier gets the wrong
1810 * memory, which leads to loops in the DAG. */
1811 old_mem = get_irg_initial_mem(irg);
1813 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1815 /* set the links of all frame entities to NULL; we use them
1816 to detect whether an entity is already linked in the value_param_list */
1817 tp = get_method_value_param_type(method_type);
1820 /* clear the links of the clone type, let the
1821 original entities point to their clones */
1822 for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
1823 ir_entity *mem = get_struct_member(tp, i);
1824 set_entity_link(mem, NULL);
1828 arg_type = compute_arg_type(env, call, method_type, tp, ¶m_map);
1830 /* Convert the Sel nodes in the irg to frame addr nodes: */
1831 ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
1832 ctx.frame = get_irg_frame(irg);
1833 ctx.sp_class = env->arch_env->sp->reg_class;
1834 ctx.link_class = env->arch_env->link_class;
1835 ctx.frame_tp = get_irg_frame_type(irg);
1837 /* layout the stackframe now */
1838 if (get_type_state(ctx.frame_tp) == layout_undefined) {
1839 default_layout_compound_type(ctx.frame_tp);
1842 /* we will possibly add new entities to the frame: set the layout to undefined */
1843 assert(get_type_state(ctx.frame_tp) == layout_fixed);
1844 set_type_state(ctx.frame_tp, layout_undefined);
1846 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1848 /* fix the frame type layout again */
1849 set_type_state(ctx.frame_tp, layout_fixed);
1850 /* align the stack frame to 4 bytes */
1851 frame_size = get_type_size_bytes(ctx.frame_tp);
1852 if (frame_size % 4 != 0) {
1853 set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
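/* e.g. a 13 byte frame is padded to 13 + 4 - (13 % 4) == 16 bytes */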
1856 env->regs = pmap_create();
1858 n_params = get_method_n_params(method_type);
1859 args = OALLOCNZ(obst, ir_node*, n_params);
1862 * for inner functions we must now fix access to outer frame entities.
1864 fix_outer_variable_access(env, &ctx);
1866 /* Check if a value parameter is transmitted as a register.
1867 * This might happen if the address of a parameter is taken which is
1868 * transmitted in registers.
1870 * Note that on some architectures this case must be handled specially
1871 * because the place of the backing store is determined by their ABI.
1873 * In the default case we move the entity to the frame type and create
1874 * a backing store into the first block.
1876 fix_address_of_parameter_access(env, ctx.value_param_list);
1878 DEL_ARR_F(ctx.value_param_list);
1879 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1881 /* Fill the argument vector */
1882 arg_tuple = get_irg_args(irg);
1883 foreach_out_edge(arg_tuple, edge) {
1884 ir_node *irn = get_edge_src_irn(edge);
1885 if (! is_Anchor(irn)) {
1886 int nr = get_Proj_proj(irn);
1888 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1892 bet_type = call->cb->get_between_type(env->cb);
1893 stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
1895 /* Count the register params and add them to the number of Projs for the RegParams node */
1896 for (i = 0; i < n_params; ++i) {
1897 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1898 if (arg->in_reg && args[i]) {
1899 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1900 assert(i == get_Proj_proj(args[i]));
1902 /* For now, associate the register with the old Proj from Start representing that argument. */
1903 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1904 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1908 /* Collect all callee-save registers */
1909 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
1910 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
1911 for (j = 0; j < cls->n_regs; ++j) {
1912 const arch_register_t *reg = &cls->regs[j];
1913 if (arch_register_type_is(reg, callee_save) ||
1914 arch_register_type_is(reg, state)) {
1915 pmap_insert(env->regs, (void *) reg, NULL);
1920 /* handle start block here (place a jump in the block) */
1921 fix_start_block(irg);
1923 pmap_insert(env->regs, (void *) sp, NULL);
1924 pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1925 start_bl = get_irg_start_block(irg);
1926 env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1929 * Make Proj nodes for the callee-save registers.
1930 * Memorize them, since Return nodes get those as inputs.
1932 * Note that if a register corresponds to an argument, the regs map contains
1933 * the old Proj from Start for that argument.
1936 rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
1937 reg_map_to_arr(rm, env->regs);
1938 for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1939 arch_register_t *reg = (void *) rm[i].reg;
1940 ir_mode *mode = reg->reg_class->mode;
1942 arch_register_req_type_t add_type = 0;
1946 add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
1949 proj = new_r_Proj(env->start, mode, nr + 1);
1950 pmap_insert(env->regs, (void *) reg, proj);
1951 be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
1952 arch_set_irn_register(proj, reg);
1954 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1957 /* create a new initial memory proj */
1958 assert(is_Proj(old_mem));
1959 arch_set_out_register_req(env->start, 0, arch_no_register_req);
1960 new_mem_proj = new_r_Proj(env->start, mode_M, 0);
1962 set_irg_initial_mem(irg, mem);
1964 /* Generate the Prologue */
1965 fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
1967 /* do the stack allocation BEFORE the barrier, or spill code
1968 might be added before it */
1969 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1970 env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
1971 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1973 create_barrier(start_bl, &mem, env->regs, 0);
1975 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1976 arch_set_irn_register(env->init_sp, sp);
1978 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1979 set_irg_frame(irg, frame_pointer);
1980 pset_insert_ptr(env->ignore_regs, fp_reg);
1982 /* rewire old mem users to new mem */
1983 exchange(old_mem, mem);
1985 /* keep the mem (for functions with an endless loop = no return) */
1988 set_irg_initial_mem(irg, mem);
1990 /* Now, introduce stack param nodes for all parameters passed on the stack */
1991 for (i = 0; i < n_params; ++i) {
1992 ir_node *arg_proj = args[i];
1993 ir_node *repl = NULL;
1995 if (arg_proj != NULL) {
1996 be_abi_call_arg_t *arg;
1997 ir_type *param_type;
1998 int nr = get_Proj_proj(arg_proj);
2001 nr = MIN(nr, n_params);
2002 arg = get_call_arg(call, 0, nr);
2003 param_type = get_method_param_type(method_type, nr);
2006 repl = pmap_get(env->regs, (void *) arg->reg);
2007 } else if (arg->on_stack) {
2008 ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
2010 /* For atomic parameters which are actually used, we create a Load node. */
2011 if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
2012 ir_mode *mode = get_type_mode(param_type);
2013 ir_mode *load_mode = arg->load_mode;
2015 ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
2016 repl = new_r_Proj(load, load_mode, pn_Load_res);
2018 if (mode != load_mode) {
2019 repl = new_r_Conv(start_bl, repl, mode);
2022 /* The stack parameter is not primitive (it is a struct or array),
2023 * so we will create a node representing the parameter's address
2029 assert(repl != NULL);
2031 /* Beware: the mode of the register parameters is always the mode of the register class
2032 which may be wrong; add Convs then. */
2033 mode = get_irn_mode(args[i]);
2034 if (mode != get_irn_mode(repl)) {
2035 repl = new_r_Conv(get_nodes_block(repl), repl, mode);
2037 exchange(args[i], repl);
2041 /* the arg Proj is not needed anymore and should only be used by the anchor */
2042 assert(get_irn_n_edges(arg_tuple) == 1);
2043 kill_node(arg_tuple);
2044 set_irg_args(irg, new_r_Bad(irg));
2046 /* All Return nodes hang on the End node, so look for them there. */
2047 end = get_irg_end_block(irg);
2048 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
2049 ir_node *irn = get_Block_cfgpred(end, i);
2051 if (is_Return(irn)) {
2052 ir_node *blk = get_nodes_block(irn);
2053 ir_node *mem = get_Return_mem(irn);
2054 ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
2059 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then;
2060 the code is dead and will never be executed. */
2063 /** Fix the state inputs of calls that still hang on unknowns */
2064 static void fix_call_state_inputs(be_abi_irg_t *env)
2066 const arch_env_t *arch_env = env->arch_env;
2068 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
2070 /* Collect state registers */
2071 n = arch_env_get_n_reg_class(arch_env);
2072 for (i = 0; i < n; ++i) {
2074 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
2075 for (j = 0; j < cls->n_regs; ++j) {
2076 const arch_register_t *reg = arch_register_for_index(cls, j);
2077 if (arch_register_type_is(reg, state)) {
2078 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
2083 n = ARR_LEN(env->calls);
2084 n_states = ARR_LEN(stateregs);
2085 for (i = 0; i < n; ++i) {
2087 ir_node *call = env->calls[i];
2089 arity = get_irn_arity(call);
2091 /* the state reg inputs are the last n inputs of the calls */
2092 for (s = 0; s < n_states; ++s) {
2093 int inp = arity - n_states + s;
2094 const arch_register_t *reg = stateregs[s];
2095 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
2097 set_irn_n(call, inp, regnode);
2101 DEL_ARR_F(stateregs);
2105 * Create a trampoline entity for the given method.
2107 static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
2109 ir_type *type = get_entity_type(method);
2110 ident *old_id = get_entity_ld_ident(method);
2111 ident *id = id_mangle3("", old_id, "$stub");
2112 ir_type *parent = be->pic_trampolines_type;
2113 ir_entity *ent = new_entity(parent, old_id, type);
2114 set_entity_ld_ident(ent, id);
2115 set_entity_visibility(ent, ir_visibility_private);
2121 * Returns the trampoline entity for the given method.
2123 static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
2125 ir_entity *result = pmap_get(env->ent_trampoline_map, method);
2126 if (result == NULL) {
2127 result = create_trampoline(env, method);
2128 pmap_insert(env->ent_trampoline_map, method, result);
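/* Usage sketch (illustration only; "foo" is a made-up method name): the first
 * lookup for a method with ld ident "foo" creates a stub entity with ld ident
 * "foo$stub" in be->pic_trampolines_type and caches it, so that
 *
 *     ir_entity *t1 = get_trampoline(env, method);
 *     ir_entity *t2 = get_trampoline(env, method);
 *
 * return the same entity. */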
2134 static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
2136 ident *old_id = get_entity_ld_ident(entity);
2137 ident *id = id_mangle3("", old_id, "$non_lazy_ptr");
2138 ir_type *e_type = get_entity_type(entity);
2139 ir_type *type = new_type_pointer(e_type);
2140 ir_type *parent = be->pic_symbols_type;
2141 ir_entity *ent = new_entity(parent, old_id, type);
2142 set_entity_ld_ident(ent, id);
2143 set_entity_visibility(ent, ir_visibility_private);
2148 static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
2150 ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
2151 if (result == NULL) {
2152 result = create_pic_symbol(env, entity);
2153 pmap_insert(env->ent_pic_symbol_map, entity, result);
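/* Note (illustration only; "bar" is a made-up entity name): for an entity
 * with ld ident "bar" the created pic symbol has ld ident "bar$non_lazy_ptr"
 * and a pointer type, i.e. a slot holding the real entity's address; this is
 * why the access code below loads from it instead of using it directly. */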
2162 * Returns non-zero if a given entity can be accessed using a relative address.
2164 static int can_address_relative(ir_entity *entity)
2166 return get_entity_visibility(entity) != ir_visibility_external
2167 && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
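/* Example (illustration only): an entity defined in the current compilation
 * unit with non-external visibility can be addressed relatively, whereas an
 * external entity or one with IR_LINKAGE_MERGE set cannot. */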
2170 /** patches SymConsts to work in position independent code */
2171 static void fix_pic_symconsts(ir_node *node, void *data)
2180 be_abi_irg_t *env = data;
2182 be_main_env_t *be = env->birg->main_env;
2184 arity = get_irn_arity(node);
2185 for (i = 0; i < arity; ++i) {
2187 ir_node *pred = get_irn_n(node, i);
2189 ir_entity *pic_symbol;
2190 ir_node *pic_symconst;
2192 if (!is_SymConst(pred))
2195 entity = get_SymConst_entity(pred);
2196 block = get_nodes_block(pred);
2197 irg = get_irn_irg(pred);
2199 /* calls can jump to relative addresses, so we can directly jump to
2200 the (relatively) known call address or the trampoline */
2201 if (i == 1 && is_Call(node)) {
2202 ir_entity *trampoline;
2203 ir_node *trampoline_const;
2205 if (can_address_relative(entity))
2208 dbgi = get_irn_dbg_info(pred);
2209 trampoline = get_trampoline(be, entity);
2210 trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
2212 set_irn_n(node, i, trampoline_const);
2216 /* everything else is accessed relative to EIP */
2217 mode = get_irn_mode(pred);
2218 pic_base = arch_code_generator_get_pic_base(env->birg->cg);
2220 /* entities we can address relatively only need the pic base added */
2221 if (can_address_relative(entity)) {
2222 ir_node *add = new_r_Add(block, pic_base, pred, mode);
2224 /* make sure the walker doesn't visit this add again */
2225 mark_irn_visited(add);
2226 set_irn_n(node, i, add);
2230 /* get entry from pic symbol segment */
2231 dbgi = get_irn_dbg_info(pred);
2232 pic_symbol = get_pic_symbol(be, entity);
2233 pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
2235 add = new_r_Add(block, pic_base, pic_symconst, mode);
2236 mark_irn_visited(add);
2238 /* we need an extra indirection for global data outside our current
2239 module. The loads are always safe and can therefore float
2240 and need no memory input */
2241 load = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
2242 load_res = new_r_Proj(load, mode, pn_Load_res);
2244 set_irn_n(node, i, load_res);
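/* Summary sketch (illustration only) of the rewrites performed above for a
 * SymConst referencing entity E:
 *   - call address, E addressable relatively:  left untouched
 *   - call address, E not addressable:         SymConst of the E$stub trampoline
 *   - data access, E addressable relatively:   Add(pic_base, SymConst(E))
 *   - data access, E not addressable:          Proj(Load(Add(pic_base,
 *                                                SymConst(E$non_lazy_ptr))))
 */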
2248 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
2250 be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
2251 ir_node *old_frame = get_irg_frame(birg->irg);
2252 ir_graph *irg = birg->irg;
2253 struct obstack *obst = be_get_birg_obst(irg);
2257 unsigned *limited_bitset;
2258 arch_register_req_t *sp_req;
2260 be_omit_fp = birg->main_env->options->omit_fp;
2261 be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
2265 env->arch_env = birg->main_env->arch_env;
2266 env->method_type = get_entity_type(get_irg_entity(irg));
2267 env->call = be_abi_call_new(env->arch_env->sp->reg_class);
2268 arch_env_get_call_abi(env->arch_env, env->method_type, env->call);
2270 env->ignore_regs = pset_new_ptr_default();
2271 env->keep_map = pmap_create();
2272 env->dce_survivor = new_survive_dce();
2276 sp_req = OALLOCZ(obst, arch_register_req_t);
2277 env->sp_req = sp_req;
2279 sp_req->type = arch_register_req_type_limited
2280 | arch_register_req_type_produces_sp;
2281 sp_req->cls = arch_register_get_class(env->arch_env->sp);
2283 limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
2284 rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
2285 sp_req->limited = limited_bitset;
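/* Illustration (register index chosen for the example): if the stack pointer
 * is register index 4 of a class with 8 registers, the raw bitset set up
 * above has exactly bit 4 set, so every value carrying sp_req must be
 * assigned that one register:
 *
 *     rbitset_set(limited_bitset, 4);   -> limited = { sp }
 */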
2286 if (env->arch_env->sp->type & arch_register_type_ignore) {
2287 sp_req->type |= arch_register_req_type_ignore;
2290 env->init_sp = dummy = new_r_Dummy(irg, env->arch_env->sp->reg_class->mode);
2292 env->calls = NEW_ARR_F(ir_node*, 0);
2294 if (birg->main_env->options->pic) {
2295 irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
2298 /* Lower all call nodes in the IRG. */
2302 Beware: initialize the backend ABI call object only after processing calls,
2303 otherwise some information might not be available yet.
2305 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
2307 /* Process the IRG */
2310 /* fix call inputs for state registers */
2311 fix_call_state_inputs(env);
2313 /* We don't need the keep map anymore. */
2314 pmap_destroy(env->keep_map);
2315 env->keep_map = NULL;
2317 /* the calls array is not needed anymore */
2318 DEL_ARR_F(env->calls);
2321 /* reroute the stack origin of the calls to the true stack origin. */
2322 exchange(dummy, env->init_sp);
2323 exchange(old_frame, get_irg_frame(irg));
2325 /* Make some important node pointers survive the dead node elimination. */
2326 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
2327 foreach_pmap(env->regs, ent) {
2328 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
2331 env->call->cb->done(env->cb);
2336 void be_abi_free(be_abi_irg_t *env)
2338 be_abi_call_free(env->call);
2339 free_survive_dce(env->dce_survivor);
2340 del_pset(env->ignore_regs);
2341 pmap_destroy(env->regs);
2345 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
2347 arch_register_t *reg;
2349 for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
2350 if (reg->reg_class == cls)
2351 bitset_set(bs, reg->index);
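/* Usage sketch (illustration only; bitset_alloca is assumed to be available
 * from the bitset utilities): a register allocator can mask out the ignored
 * registers of a class like this:
 *
 *     bitset_t *ignore = bitset_alloca(cls->n_regs);
 *     be_abi_put_ignore_regs(abi, cls, ignore);
 */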
2354 void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
2357 arch_register_t *reg;
2359 for (i = 0; i < cls->n_regs; ++i) {
2360 if (arch_register_type_is(&cls->regs[i], ignore))
2363 rbitset_set(raw_bitset, i);
2366 for (reg = pset_first(abi->ignore_regs); reg != NULL;
2367 reg = pset_next(abi->ignore_regs)) {
2368 if (reg->reg_class != cls)
2371 rbitset_clear(raw_bitset, reg->index);
2375 /* Returns the stack layout from an ABI environment. */
2376 const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
2384 | ___(_)_ __ / ___|| |_ __ _ ___| | __
2385 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
2386 | _| | |> < ___) | || (_| | (__| <
2387 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
2391 typedef ir_node **node_array;
2393 typedef struct fix_stack_walker_env_t {
2394 node_array sp_nodes;
2395 } fix_stack_walker_env_t;
2398 * Walker. Collect all stack modifying nodes.
2400 static void collect_stack_nodes_walker(ir_node *node, void *data)
2402 ir_node *insn = node;
2403 fix_stack_walker_env_t *env = data;
2404 const arch_register_req_t *req;
2406 if (is_Proj(node)) {
2407 insn = get_Proj_pred(node);
2410 if (arch_irn_get_n_outs(insn) == 0)
2413 req = arch_get_register_req_out(node);
2414 if (! (req->type & arch_register_req_type_produces_sp))
2417 ARR_APP1(ir_node*, env->sp_nodes, node);
2420 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
2422 be_ssa_construction_env_t senv;
2425 be_irg_t *birg = env->birg;
2426 be_lv_t *lv = be_get_birg_liveness(birg);
2427 fix_stack_walker_env_t walker_env;
2429 walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
2431 irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
2433 /* nothing to be done if we didn't find any node; in fact we must not
2434  * continue, as for endless loops the IncSP might have had no users and is dead
2437 len = ARR_LEN(walker_env.sp_nodes);
2439 DEL_ARR_F(walker_env.sp_nodes);
2443 be_ssa_construction_init(&senv, birg);
2444 be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
2445 ARR_LEN(walker_env.sp_nodes));
2446 be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
2447 ARR_LEN(walker_env.sp_nodes));
2450 len = ARR_LEN(walker_env.sp_nodes);
2451 for (i = 0; i < len; ++i) {
2452 be_liveness_update(lv, walker_env.sp_nodes[i]);
2454 be_ssa_construction_update_liveness_phis(&senv, lv);
2457 phis = be_ssa_construction_get_new_phis(&senv);
2459 /* set register requirements for stack phis */
2460 len = ARR_LEN(phis);
2461 for (i = 0; i < len; ++i) {
2462 ir_node *phi = phis[i];
2463 be_set_phi_reg_req(phi, env->sp_req);
2464 arch_set_irn_register(phi, env->arch_env->sp);
2466 be_ssa_construction_destroy(&senv);
2468 DEL_ARR_F(walker_env.sp_nodes);
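/* Sketch of the effect (illustration only): when stack pointer definitions
 * from several predecessor blocks meet, the SSA construction above inserts a
 * Phi at the join point; the loop over the new phis then pins each one to the
 * stack pointer register via
 *
 *     be_set_phi_reg_req(phi, env->sp_req);
 *     arch_set_irn_register(phi, env->arch_env->sp);
 */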
2472 * Fix all stack accessing operations in the block bl.
2474 * @param env the abi environment
2475 * @param bl the block to process
2476 * @param real_bias the bias value
2478 * @return the bias at the end of this block
2480 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
2482 int omit_fp = env->call->flags.bits.try_omit_fp;
2484 int wanted_bias = real_bias;
2486 sched_foreach(bl, irn) {
2490 Check if the node relates to an entity on the stack frame.
2491 If so, set the true offset (including the bias) for that entity.
2494 ir_entity *ent = arch_get_frame_entity(irn);
2496 int bias = omit_fp ? real_bias : 0;
2497 int offset = get_stack_entity_offset(&env->frame, ent, bias);
2498 arch_set_frame_offset(irn, offset);
2499 DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
2500 ent, offset, bias));
2504 * If the node modifies the stack pointer by a constant offset,
2505 * record that in the bias.
2507 ofs = arch_get_sp_bias(irn);
2509 if (be_is_IncSP(irn)) {
2510 /* fill in real stack frame size */
2511 if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
2512 ir_type *frame_type = get_irg_frame_type(env->birg->irg);
2513 ofs = (int) get_type_size_bytes(frame_type);
2514 be_set_IncSP_offset(irn, ofs);
2515 } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
2516 ir_type *frame_type = get_irg_frame_type(env->birg->irg);
2517 ofs = - (int)get_type_size_bytes(frame_type);
2518 be_set_IncSP_offset(irn, ofs);
2520 if (be_get_IncSP_align(irn)) {
2521 /* patch IncSP to produce an aligned stack pointer */
2522 ir_type *between_type = env->frame.between_type;
2523 int between_size = get_type_size_bytes(between_type);
2524 int alignment = 1 << env->arch_env->stack_alignment;
2525 int delta = (real_bias + ofs + between_size) & (alignment - 1);
2528 be_set_IncSP_offset(irn, ofs + alignment - delta);
2529 real_bias += alignment - delta;
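/* Worked example (numbers chosen for illustration): with a stack alignment
 * of 1 << 4 = 16 bytes, between_size = 8 and real_bias + ofs = 12, the
 * computation above yields delta = (12 + 8) & 15 = 4, so the IncSP offset
 * grows by 16 - 4 = 12 and real_bias does likewise, making the resulting
 * boundary 16-byte aligned. */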
2532 /* adjust so real_bias corresponds with wanted_bias */
2533 int delta = wanted_bias - real_bias;
2536 be_set_IncSP_offset(irn, ofs + delta);
2547 assert(real_bias == wanted_bias);
2552 * A helper struct for the bias walker.
2555 be_abi_irg_t *env; /**< The ABI irg environment. */
2556 int start_block_bias; /**< The bias at the end of the start block. */
2558 ir_node *start_block; /**< The start block of the current graph. */
2562 * Block-Walker: fix all stack offsets for all blocks
2563 * except the start block
2565 static void stack_bias_walker(ir_node *bl, void *data)
2567 struct bias_walk *bw = data;
2568 if (bl != bw->start_block) {
2569 process_stack_bias(bw->env, bl, bw->start_block_bias);
2574 * Walker: finally lower all Sels of outer frame or parameter entities
2577 static void lower_outer_frame_sels(ir_node *sel, void *ctx)
2579 be_abi_irg_t *env = ctx;
2587 ent = get_Sel_entity(sel);
2588 owner = get_entity_owner(ent);
2589 ptr = get_Sel_ptr(sel);
2591 if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
2592 /* found access to outer frame or arguments */
2593 int offset = get_stack_entity_offset(&env->frame, ent, 0);
2596 ir_node *bl = get_nodes_block(sel);
2597 dbg_info *dbgi = get_irn_dbg_info(sel);
2598 ir_mode *mode = get_irn_mode(sel);
2599 ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
2600 ir_node *cnst = new_r_Const_long(current_ir_graph, mode_UInt, offset);
2602 ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
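/* Illustration (offset value made up): for an outer-frame entity at stack
 * offset 8, the Sel is lowered into plain pointer arithmetic on the
 * transmitted frame pointer:
 *
 *     cnst = new_r_Const_long(current_ir_graph, mode_UInt, 8);
 *     ptr  = new_rd_Add(dbgi, bl, ptr, cnst, mode);
 *
 * so the inner function accesses the outer frame through an address
 * computation instead of a Sel. */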
2608 void be_abi_fix_stack_bias(be_abi_irg_t *env)
2610 ir_graph *irg = env->birg->irg;
2613 struct bias_walk bw;
2615 stack_frame_compute_initial_offset(&env->frame);
2616 // stack_layout_dump(stdout, frame);
2618 /* Determine the stack bias at the end of the start block. */
2619 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
2620 bw.between_size = get_type_size_bytes(env->frame.between_type);
2622 /* fix the bias in all other blocks */
2624 bw.start_block = get_irg_start_block(irg);
2625 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
2627 /* now fix inner functions: these still have Sel nodes referring to the outer
2628    frame and parameter entities */
2629 frame_tp = get_irg_frame_type(irg);
2630 for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
2631 ir_entity *ent = get_class_member(frame_tp, i);
2632 ir_graph *irg = get_entity_irg(ent);
2635 irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
2640 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2642 assert(arch_register_type_is(reg, callee_save));
2643 assert(pmap_contains(abi->regs, (void *) reg));
2644 return pmap_get(abi->regs, (void *) reg);
2647 ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
2649 assert(arch_register_type_is(reg, ignore));
2650 assert(pmap_contains(abi->regs, (void *) reg));
2651 return pmap_get(abi->regs, (void *) reg);
2655 * Returns non-zero if the ABI has omitted the frame pointer in
2656 * the current graph.
2658 int be_abi_omit_fp(const be_abi_irg_t *abi)
2660 return abi->call->flags.bits.try_omit_fp;
2663 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
2664 void be_init_abi(void)
2666 FIRM_DBG_REGISTER(dbg, "firm.be.abi");