/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   Backend ABI implementation.
 * @author  Sebastian Hack, Michael Beck
 */
#include "irgraph_t.h"
#include "iredges_t.h"
#include "irprintf_t.h"
#include "raw_bitset.h"
#include "bessaconstr.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef struct _be_abi_call_arg_t {
    unsigned is_res   : 1;  /**< 1: the call argument is a return value. 0: it's a call parameter. */
    unsigned in_reg   : 1;  /**< 1: this argument is transmitted in registers. */
    unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */
    unsigned callee   : 1;  /**< 1: we are the callee. 0: we are the caller. */

    int                    pos;
    const arch_register_t *reg;
    ir_mode               *load_mode;
    unsigned               alignment;    /**< stack alignment */
    unsigned               space_before; /**< allocate space before */
    unsigned               space_after;  /**< allocate space after */

    ir_entity *stack_ent;   /**< the entity in the stack argument type, if passed on the stack */
} be_abi_call_arg_t;
struct _be_abi_call_t {
    be_abi_call_flags_t          flags;  /**< Flags describing the ABI behavior on calls */
    int                          pop;    /**< number of bytes the stack frame is shrunk by the callee on return */
    const be_abi_callbacks_t    *cb;
    ir_type                     *between_type;
    set                         *params;
    const arch_register_class_t *cls_addr; /**< register class of the call address */
};
/**
 * The ABI information for the current graph.
 */
struct _be_abi_irg_t {
    survive_dce_t *dce_survivor;

    be_abi_call_t *call;         /**< The ABI call information. */

    ir_node *init_sp;            /**< The node representing the stack pointer
                                      at the start of the function. */

    ir_node *start;              /**< The be_Start params node. */
    pmap    *regs;               /**< A map of all callee-save and ignore regs to
                                      their Projs to the RegParams node. */

    int start_block_bias;        /**< The stack bias at the end of the start block. */

    void *cb;                    /**< ABI Callback self pointer. */

    pmap *keep_map;              /**< mapping blocks to keep nodes. */
    pset *ignore_regs;           /**< Additional registers which shall be ignored. */

    ir_node **calls;             /**< flexible array containing all be_Call nodes */

    arch_register_req_t *sp_req;
};
static heights_t *ir_heights;

/** Flag: if set, try to omit the frame pointer in all routines. */
static int be_omit_fp = 1;

/** Flag: if set, try to omit the frame pointer in leaf routines only. */
static int be_omit_leaf_fp = 1;
/*
     _    ____ ___    ____      _ _ _                _
    / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
   / _ \ |  _ \| |  | |   / _` | | | '_ \ / _` |/ __| |/ / __|
  / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
 /_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/

  These callbacks are used by the backend to set the parameters
  for a specific call type.
*/
/**
 * Set compare function: compares two ABI call object arguments.
 */
static int cmp_call_arg(const void *a, const void *b, size_t n)
{
    const be_abi_call_arg_t *p = a, *q = b;
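    /* set-compare convention: return 0 iff both elements are equal */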
    return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
}
/**
 * Get an ABI call object argument.
 *
 * @param call   the abi call
 * @param is_res true for call results, false for call arguments
 * @param pos    position of the argument
 * @param callee context type - whether we are the callee or the caller
 */
static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
{
    be_abi_call_arg_t arg;
    unsigned hash;

    memset(&arg, 0, sizeof(arg));
    arg.is_res = is_res;
    arg.pos    = pos;
    arg.callee = callee;
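    /* the hash combines the result flag and the argument position; it must
     * match the hash computed in remember_call_arg() below, so lookups find
     * the entries inserted there */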
    hash = is_res * 128 + pos;

    return set_find(call->params, &arg, sizeof(arg), hash);
}
/**
 * Set an ABI call object argument.
 */
static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
{
    unsigned hash = arg->is_res * 128 + arg->pos;
    if (context & ABI_CONTEXT_CALLEE) {
        arg->callee = 1;
        set_insert(call->params, arg, sizeof(*arg), hash);
    }
    if (context & ABI_CONTEXT_CALLER) {
        arg->callee = 0;
        set_insert(call->params, arg, sizeof(*arg), hash);
    }
}
/* Set the flags for a call. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
{
    call->flags = flags;
    call->cb    = cb;
}

/* Sets the number of bytes by which the stack frame is shrunk by the callee on return. */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)
{
    assert(pop >= 0);
    call->pop = pop;
}

/* Set the register class for the call address. */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
{
    call->cls_addr = cls;
}
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
                             ir_mode *load_mode, unsigned alignment,
                             unsigned space_before, unsigned space_after,
                             be_abi_context_t context)
{
    be_abi_call_arg_t arg;
    memset(&arg, 0, sizeof(arg));
    assert(alignment > 0 && "Alignment must be greater than 0");
    arg.on_stack     = 1;
    arg.load_mode    = load_mode;
    arg.alignment    = alignment;
    arg.space_before = space_before;
    arg.space_after  = space_after;
    arg.is_res       = 0;
    arg.pos          = arg_pos;

    remember_call_arg(&arg, call, context);
}
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
    be_abi_call_arg_t arg;
    memset(&arg, 0, sizeof(arg));
    arg.in_reg = 1;
    arg.reg    = reg;
    arg.is_res = 0;
    arg.pos    = arg_pos;

    remember_call_arg(&arg, call, context);
}

void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
    be_abi_call_arg_t arg;
    memset(&arg, 0, sizeof(arg));
    arg.in_reg = 1;
    arg.reg    = reg;
    arg.is_res = 1;
    arg.pos    = arg_pos;

    remember_call_arg(&arg, call, context);
}
/* Get the flags of an ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
{
    return call->flags;
}

/**
 * Constructor for a new ABI call object.
 *
 * @param cls_addr register class of the call address
 *
 * @return the new ABI call object
 */
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
{
    be_abi_call_t *call = XMALLOCZ(be_abi_call_t);

    call->params   = new_set(cmp_call_arg, 16);
    call->cls_addr = cls_addr;

    call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
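    /* this is an attempt only: the flag is cleared again whenever something
     * forces a frame pointer, e.g. a stack Alloc/Free or a real call (see
     * adjust_alloc(), adjust_free() and process_ops_in_block() below) */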
/**
 * Destructor for an ABI call object.
 */
static void be_abi_call_free(be_abi_call_t *call)
{
    del_set(call->params);
    free(call);
}
/*
  _____                          _   _                 _ _ _
 |  ___| __ __ _ _ __ ___   ___ | | | | __ _ _ __   __| | (_)_ __   __ _
 | |_ | '__/ _` | '_ ` _ \ / _ \| |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
 |  _|| | | (_| | | | | | |  __/|  _  | (_| | | | | (_| | | | | | | (_| |
 |_|  |_|  \__,_|_| |_| |_|\___||_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
                                                                   |___/

  Handling of the stack frame. It is composed of three types:
  1) The type of the arguments which are pushed on the stack.
  2) The "between type" which consists of whatever the call of the
     function pushes on the stack (like the return address and
     the old base pointer for ia32).
  3) The Firm frame type which consists of all local variables.
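  As a rough sketch for a typical decreasing stack (e.g. ia32), with
  higher addresses at the top:

       | stack arguments |   (argument type)
       | between type    |   (return address, old base pointer)
       | local variables |   (Firm frame type)
       | ...             |   <- stack grows downwards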
static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
                                   int bias)
{
    ir_type *t  = get_entity_owner(ent);
    int      ofs = get_entity_offset(ent);
    int      index;

    /* Find the type the entity is contained in. */
    for (index = 0; index < N_FRAME_TYPES; ++index) {
        if (frame->order[index] == t)
            break;
        /* Add the sizes of all the types below the entity's one to its offset. */
        ofs += get_type_size_bytes(frame->order[index]);
    }

    /* correct the offset by the initial position of the frame pointer */
    ofs -= frame->initial_offset;

    /* correct the offset with the current bias */
    ofs += bias;

    return ofs;
}
/**
 * Retrieve the entity with the given offset from a frame type.
 */
static ir_entity *search_ent_with_offset(ir_type *t, int offset)
{
    int i, n;

    for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
        ir_entity *ent = get_compound_member(t, i);
        if (get_entity_offset(ent) == offset)
            return ent;
    }

    return NULL;
}
static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
{
    ir_type   *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
    ir_entity *ent  = search_ent_with_offset(base, 0);

    if (ent == NULL) {
        frame->initial_offset
            = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
    } else {
        frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
    }

    return frame->initial_offset;
}
/**
 * Initializes the frame layout from parts.
 *
 * @param frame     the stack layout that will be initialized
 * @param args      the stack argument layout type
 * @param between   the between layout type
 * @param locals    the method frame type
 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
 * @param param_map an array mapping method argument positions to the stack argument type
 *
 * @return the initialized stack layout
 */
static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
                                           ir_type *between, ir_type *locals, int stack_dir,
                                           ir_entity *param_map[])
{
    frame->arg_type       = args;
    frame->between_type   = between;
    frame->frame_type     = locals;
    frame->initial_offset = 0;
    frame->initial_bias   = 0;
    frame->stack_dir      = stack_dir;
    frame->order[1]       = between;
    frame->param_map      = param_map;
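    /* order[] lists the three frame parts by ascending address; the if/else
     * below arranges it so, e.g. { locals, between, args } for the common
     * decreasing stack */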
    if (stack_dir > 0) {
        frame->order[0] = args;
        frame->order[2] = locals;
    } else {
        /* typical decreasing stack: locals have the
         * lowest addresses, arguments the highest */
        frame->order[0] = locals;
        frame->order[2] = args;
    }
    return frame;
}
/*
  Adjustment of the calls inside a graph.
*/

/**
 * Transform a call node into a be_Call node.
 *
 * @param env     The ABI environment for the current irg.
 * @param irn     The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
    ir_graph *irg              = get_irn_irg(irn);
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    ir_type *call_tp           = get_Call_type(irn);
    ir_node *call_ptr          = get_Call_ptr(irn);
    int n_params               = get_method_n_params(call_tp);
    ir_node *curr_mem          = get_Call_mem(irn);
    ir_node *bl                = get_nodes_block(irn);
    int stack_size             = 0;
    int stack_dir              = arch_env->stack_dir;
    const arch_register_t *sp  = arch_env->sp;
    be_abi_call_t *call        = be_abi_call_new(sp->reg_class);
    ir_mode *mach_mode         = sp->reg_class->mode;
    int no_alloc               = call->flags.bits.frame_is_setup_on_call;
    int n_res                  = get_method_n_ress(call_tp);
    int do_seq                 = call->flags.bits.store_args_sequential && !no_alloc;

    ir_node *res_proj  = NULL;
    int n_reg_params   = 0;
    int n_stack_params = 0;
    int n_ins;

    pset_new_t             destroyed_regs, states;
    pset_new_iterator_t    iter;
    dbg_info              *dbgi;
    ir_node               *low_call;
    ir_node              **in;
    ir_node              **res_projs;
    int                    n_reg_results = 0;
    const arch_register_t *reg;
    const ir_edge_t       *edge;
    int                   *reg_param_idxs;
    int                   *stack_param_idx;
    int                    i, n, destroy_all_regs;
    pset_new_init(&destroyed_regs);
    pset_new_init(&states);

    /* Let the isa fill out the abi description for that call node. */
    arch_env_get_call_abi(arch_env, call_tp, call);
    /* Insert code to put the stack arguments on the stack. */
    assert(get_Call_n_params(irn) == n_params);
    stack_param_idx = ALLOCAN(int, n_params);
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
        if (arg->on_stack) {
            int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));

            stack_size += round_up2(arg->space_before, arg->alignment);
            stack_size += round_up2(arg_size, arg->alignment);
            stack_size += round_up2(arg->space_after, arg->alignment);
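            /* e.g. space_before == 0, arg_size == 6, alignment == 4:
             * this adds 0 + 8 + 0 == 8 bytes for the argument slot */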
            stack_param_idx[n_stack_params++] = i;
        }
    }
    /* Collect all arguments which are passed in registers. */
    reg_param_idxs = ALLOCAN(int, n_params);
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
        if (arg && arg->in_reg) {
            reg_param_idxs[n_reg_params++] = i;
        }
    }
    /*
     * If the stack is decreasing and we do not want to store sequentially,
     * or someone else allocated the call frame,
     * we allocate as much space on the stack as all parameters need, by
     * moving the stack pointer along the stack's direction.
     *
     * Note: we also have to do this for stack_size == 0, because we may have
     * to adjust stack alignment for the call.
     */
    if (stack_dir < 0 && !do_seq && !no_alloc) {
        curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
    }
    dbgi = get_irn_dbg_info(irn);
    /* If there are some parameters which shall be passed on the stack. */
    if (n_stack_params > 0) {
        int       curr_ofs = 0;
        ir_node **in       = ALLOCAN(ir_node*, n_stack_params+1);
        int       n_in     = 0;
        /*
         * Reverse the list of stack parameters if call arguments are from left to right.
         * We must reverse them again if they are pushed (not stored) and the stack
         * direction is downwards.
         */
        if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
            for (i = 0; i < n_stack_params >> 1; ++i) {
                int other = n_stack_params - i - 1;
                int tmp   = stack_param_idx[i];
                stack_param_idx[i]     = stack_param_idx[other];
                stack_param_idx[other] = tmp;
            }
        }
        curr_mem = get_Call_mem(irn);
        if (! do_seq) {
            in[n_in++] = curr_mem;
        }
        for (i = 0; i < n_stack_params; ++i) {
            int p                  = stack_param_idx[i];
            be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
            ir_node *param         = get_Call_param(irn, p);
            ir_node *addr          = curr_sp;
            ir_node *mem           = NULL;
            ir_type *param_type    = get_method_param_type(call_tp, p);
            int param_size         = get_type_size_bytes(param_type) + arg->space_after;

            /*
             * If we wanted to build the arguments sequentially,
             * the stack pointer for the next argument must be incremented,
             * and the memory value propagated.
             */
            if (do_seq) {
                addr = curr_sp = be_new_IncSP(sp, bl, curr_sp,
                                              param_size + arg->space_before, 0);
                add_irn_dep(curr_sp, curr_mem);
            } else {
                curr_ofs += arg->space_before;
                curr_ofs  = round_up2(curr_ofs, arg->alignment);

                /* Make the expression to compute the argument's offset. */
                if (curr_ofs > 0) {
                    ir_mode *constmode = mach_mode;
                    if (mode_is_reference(mach_mode)) {
                        constmode = mode_Is;
                    }
                    addr = new_r_Const_long(irg, constmode, curr_ofs);
                    addr = new_r_Add(bl, curr_sp, addr, mach_mode);
                }
            }

            /* Insert a store for primitive arguments. */
            if (is_atomic_type(param_type)) {
                ir_node *store;
                ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
                store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
                mem   = new_r_Proj(store, mode_M, pn_Store_M);
            } else {
                /* Make a mem copy for compound arguments. */
                ir_node *copy;

                assert(mode_is_reference(get_irn_mode(param)));
                copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
                mem  = new_r_Proj(copy, mode_M, pn_CopyB_M_regular);
            }

            curr_ofs += param_size;

            if (do_seq)
                curr_mem = mem;
            else
                in[n_in++] = mem;
        }
        /* We need the sync only if we didn't build the stores sequentially. */
        if (! do_seq) {
            if (n_stack_params >= 1) {
                curr_mem = new_r_Sync(bl, n_in, in);
            } else {
                curr_mem = get_Call_mem(irn);
            }
        }
    }
    /* check for the return_twice property */
    destroy_all_regs = 0;
    if (is_SymConst_addr_ent(call_ptr)) {
        ir_entity *ent = get_SymConst_entity(call_ptr);

        if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
            destroy_all_regs = 1;
    } else {
        ir_type *call_tp = get_Call_type(irn);

        if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
            destroy_all_regs = 1;
    }
    /* Put caller-save registers into the destroyed set and state registers into the states set */
    for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
        unsigned j;
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = arch_register_for_index(cls, j);

            if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
                if (! arch_register_type_is(reg, ignore))
                    pset_new_insert(&destroyed_regs, (void *) reg);
            }
            if (arch_register_type_is(reg, state)) {
                pset_new_insert(&destroyed_regs, (void*) reg);
                pset_new_insert(&states, (void*) reg);
            }
        }
    }
    if (destroy_all_regs) {
        /* even if "destroy all" is specified, neither SP nor FP are destroyed (else bad things will happen) */
        pset_new_remove(&destroyed_regs, arch_env->sp);
        pset_new_remove(&destroyed_regs, arch_env->bp);
    }
    /* search the largest result proj number */
    res_projs = ALLOCANZ(ir_node*, n_res);

    foreach_out_edge(irn, edge) {
        const ir_edge_t *res_edge;
        ir_node         *irn = get_edge_src_irn(edge);

        if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
            continue;

        foreach_out_edge(irn, res_edge) {
            int      proj;
            ir_node *res = get_edge_src_irn(res_edge);

            assert(is_Proj(res));

            proj = get_Proj_proj(res);
            assert(proj < n_res);
            assert(res_projs[proj] == NULL);
            res_projs[proj] = res;
        }
        res_proj = irn;
    }
    /* TODO: this is not correct for cases where return values are passed
     * on the stack, but no known ABI does this currently... */
    n_reg_results = n_res;
    n_ins = 0;
    in    = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));

    /* make the back end call node and set its register requirements. */
    for (i = 0; i < n_reg_params; ++i) {
        in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
    }

    /* add state registers ins */
    foreach_pset_new(&states, reg, iter) {
        const arch_register_class_t *cls = arch_register_get_class(reg);
#if 0
        ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
        ir_fprintf(stderr, "Adding %+F\n", regnode);
#endif
        ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
        in[n_ins++]      = regnode;
    }
    assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
    /* ins collected, build the call */
    if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
        /* direct call */
        low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
                               n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
                               n_ins, in, get_Call_type(irn));
        be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
    } else {
        /* indirect call */
        low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
                               n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
                               n_ins, in, get_Call_type(irn));
    }
    be_Call_set_pop(low_call, call->pop);
    /* put the call into the list of all calls for later processing */
    ARR_APP1(ir_node *, env->calls, low_call);

    /* create new stack pointer */
    curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
    be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
            arch_register_req_type_ignore | arch_register_req_type_produces_sp);
    arch_set_irn_register(curr_sp, sp);
    /* now handle results */
    for (i = 0; i < n_res; ++i) {
        int                pn;
        ir_node           *proj = res_projs[i];
        be_abi_call_arg_t *arg  = get_call_arg(call, 1, i, 0);

        /* returning values on the stack is not supported yet */
        assert(arg->in_reg);

        /*
         * shift the proj number to the right, since we will drop the
         * unspeakable Proj_T from the Call. Therefore, all real argument
         * Proj numbers must be increased by pn_be_Call_first_res
         */
        pn = i + pn_be_Call_first_res;

        if (proj == NULL) {
            ir_type *res_type = get_method_res_type(call_tp, i);
            ir_mode *mode     = get_type_mode(res_type);
            proj              = new_r_Proj(low_call, mode, pn);
            res_projs[i]      = proj;
        } else {
            set_Proj_pred(proj, low_call);
            set_Proj_proj(proj, pn);
        }

        if (arg->in_reg) {
            pset_new_remove(&destroyed_regs, arg->reg);
        }
    }
    /*
     * Set the register class of the call address to
     * the backend provided class (default: stack pointer class)
     */
    be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);

    DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
    /* Set the register classes and constraints of the Call parameters. */
    for (i = 0; i < n_reg_params; ++i) {
        int index              = reg_param_idxs[i];
        be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
        assert(arg->reg != NULL);

        be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
                                    arg->reg, 0);
    }
    /* Set the register constraints of the results. */
    for (i = 0; i < n_res; ++i) {
        ir_node                 *proj = res_projs[i];
        const be_abi_call_arg_t *arg  = get_call_arg(call, 1, i, 0);
        int                      pn   = get_Proj_proj(proj);

        assert(arg->in_reg);
        be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
        arch_set_irn_register(proj, arg->reg);
    }
    exchange(irn, low_call);
    /* kill the ProjT node */
    if (res_proj != NULL) {
        kill_node(res_proj);
    }
    /* Make additional projs for the caller save registers
       and the Keep node which keeps them alive. */
    {
        const arch_register_t *reg;
        ir_node              **in, *keep;
        int                    i;
        int                    n = 0;
        int                    curr_res_proj = pn_be_Call_first_res + n_reg_results;
        pset_new_iterator_t    iter;
        int                    n_ins;

        n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
        in    = ALLOCAN(ir_node *, n_ins);

        /* also keep the stack pointer */
        set_irn_link(curr_sp, (void*) sp);
        in[n++] = curr_sp;

        foreach_pset_new(&destroyed_regs, reg, iter) {
            ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);

            /* memorize the register in the link field. We need it afterwards to set the register class of the keep correctly. */
            be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
            arch_set_irn_register(proj, reg);

            set_irn_link(proj, (void*) reg);
            in[n++] = proj;
            ++curr_res_proj;
        }

        for (i = 0; i < n_reg_results; ++i) {
            ir_node               *proj = res_projs[i];
            const arch_register_t *reg  = arch_get_irn_register(proj);
            set_irn_link(proj, (void*) reg);
            in[n++] = proj;
        }

        /* create the Keep for the caller save registers */
        keep = be_new_Keep(bl, n, in);
        for (i = 0; i < n; ++i) {
            const arch_register_t *reg = get_irn_link(in[i]);
            be_node_set_reg_class_in(keep, i, reg->reg_class);
        }
    }
    /* Clean up the stack. */
    assert(stack_size >= call->pop);
    stack_size -= call->pop;

    if (stack_size > 0) {
        ir_node *mem_proj = NULL;

        foreach_out_edge(low_call, edge) {
            ir_node *irn = get_edge_src_irn(edge);
            if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
                mem_proj = irn;
                break;
            }
        }

        if (mem_proj == NULL) {
            mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
            keep_alive(mem_proj);
        }
    }

    /* Clean up the stack frame or revert alignment fixes if we allocated it */
    if (! no_alloc) {
        curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
    }
    be_abi_call_free(call);

    pset_new_destroy(&states);
    pset_new_destroy(&destroyed_regs);

    return curr_sp;
}
/**
 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
 *
 * @param stack_alignment the minimum stack alignment
 * @param size            the node containing the non-aligned size
 * @param block           the block where new nodes are allocated on
 * @param dbg             debug info for new nodes
 *
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
                                  ir_node *block, dbg_info *dbg)
{
    if (stack_alignment > 1) {
        ir_mode  *mode;
        tarval   *tv;
        ir_node  *mask;
        ir_graph *irg;

        assert(is_po2(stack_alignment));
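        /* round up to the next multiple of the (power-of-two) alignment:
         * (size + a-1) & ~(a-1); e.g. a == 8, size == 13 yields 16 */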
        mode = get_irn_mode(size);
        tv   = new_tarval_from_long(stack_alignment-1, mode);
        irg  = get_Block_irg(block);
        mask = new_r_Const(irg, tv);
        size = new_rd_Add(dbg, block, size, mask, mode);

        tv   = new_tarval_from_long(-(long)stack_alignment, mode);
        mask = new_r_Const(irg, tv);
        size = new_rd_And(dbg, block, size, mask, mode);
    }
    return size;
}
/**
 * Adjust an alloca.
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
{
    ir_node          *block     = get_nodes_block(alloc);
    ir_graph         *irg       = get_Block_irg(block);
    const arch_env_t *arch_env  = be_get_irg_arch_env(irg);
    ir_node          *alloc_mem = NULL;
    ir_node          *alloc_res = NULL;
    ir_type          *type      = get_Alloc_type(alloc);
    dbg_info         *dbg;

    const ir_edge_t *edge;
    ir_node  *new_alloc;
    ir_node  *count;
    ir_node  *size;
    ir_node  *ins[2];
    unsigned  stack_alignment;
    /* all non-stack Alloc nodes should already be lowered before the backend */
    assert(get_Alloc_where(alloc) == stack_alloc);

    foreach_out_edge(alloc, edge) {
        ir_node *irn = get_edge_src_irn(edge);

        assert(is_Proj(irn));
        switch (get_Proj_proj(irn)) {
        case pn_Alloc_M:
            alloc_mem = irn;
            break;
        case pn_Alloc_res:
            alloc_res = irn;
            break;
        default:
            break;
        }
    }

    /* Beware: currently Alloc nodes without a result might occur; only
       escape analysis removes them, and it runs only for object-oriented
       source. We kill the Alloc here. */
    if (alloc_res == NULL && alloc_mem) {
        exchange(alloc_mem, get_Alloc_mem(alloc));
        return curr_sp;
    }
    dbg   = get_irn_dbg_info(alloc);
    count = get_Alloc_count(alloc);

    /* we might need to multiply the count with the element size */
    if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
        ir_mode *mode = get_irn_mode(count);
        tarval  *tv   = new_tarval_from_long(get_type_size_bytes(type),
                                             mode);
        ir_node *cnst = new_rd_Const(dbg, irg, tv);
        size = new_rd_Mul(dbg, block, count, cnst, mode);
    } else {
        size = count;
    }
    /* The stack pointer will be modified in an unknown manner.
       We cannot omit it. */
    env->call->flags.bits.try_omit_fp = 0;
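    /* arch_env->stack_alignment holds the alignment's log2, hence the shift */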
    stack_alignment = 1 << arch_env->stack_alignment;
    size            = adjust_alloc_size(stack_alignment, size, block, dbg);
    new_alloc       = be_new_AddSP(arch_env->sp, block, curr_sp, size);
    set_irn_dbg_info(new_alloc, dbg);
    if (alloc_mem != NULL) {
        ir_node *addsp_mem;
        ir_node *sync;

        addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);

        /* We need to sync the output mem of the AddSP with the input mem
           edge into the alloc node. */
        ins[0] = get_Alloc_mem(alloc);
        ins[1] = addsp_mem;
        sync = new_r_Sync(block, 2, ins);

        exchange(alloc_mem, sync);
    }

    exchange(alloc, new_alloc);

    /* fix projnum of alloca res */
    set_Proj_proj(alloc_res, pn_be_AddSP_res);

    curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);

    return curr_sp;
}
/**
 * Adjust a Free.
 * The Free is transformed into a back end free node and connected to the stack nodes.
 */
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
{
    ir_node          *block    = get_nodes_block(free);
    ir_graph         *irg      = get_irn_irg(free);
    ir_type          *type     = get_Free_type(free);
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    ir_mode          *sp_mode  = arch_env->sp->reg_class->mode;
    dbg_info         *dbg      = get_irn_dbg_info(free);
    ir_node  *subsp, *mem, *res, *size, *sync;
    ir_node  *in[2];
    unsigned  stack_alignment;

    /* all non-stack-alloc Free nodes should already be lowered before the
     * backend phase */
    assert(get_Free_where(free) == stack_alloc);

    /* we might need to multiply the size with the element size */
    if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
        tarval  *tv   = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
        ir_node *cnst = new_rd_Const(dbg, irg, tv);
        ir_node *mul  = new_rd_Mul(dbg, block, get_Free_size(free),
                                   cnst, mode_Iu);
        size = mul;
    } else {
        size = get_Free_size(free);
    }

    stack_alignment = 1 << arch_env->stack_alignment;
    size            = adjust_alloc_size(stack_alignment, size, block, dbg);

    /* The stack pointer will be modified in an unknown manner.
       We cannot omit it. */
    env->call->flags.bits.try_omit_fp = 0;
    subsp = be_new_SubSP(arch_env->sp, block, curr_sp, size);
    set_irn_dbg_info(subsp, dbg);

    mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
    res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);

    /* we need to sync the memory */
    in[0] = get_Free_mem(free);
    in[1] = mem;
    sync = new_r_Sync(block, 2, in);

    /* and make the AddSP dependent on the former memory */
    add_irn_dep(subsp, get_Free_mem(free));

    /* kill the free */
    exchange(free, sync);
    curr_sp = res;

    return curr_sp;
}
/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(ir_node *n1, ir_node *n2)
{
    assert(get_nodes_block(n1) == get_nodes_block(n2));

    return heights_reachable_in_block(ir_heights, n1, n2);
}
static int cmp_call_dependency(const void *c1, const void *c2)
{
    ir_node *n1 = *(ir_node **) c1;
    ir_node *n2 = *(ir_node **) c2;

    /*
     * Classical qsort() comparison function behavior:
     *  0 if both elements are equal
     *  1 if second is "smaller" than first
     * -1 if first is "smaller" than second
     */
    if (dependent_on(n1, n2))
        return -1;

    if (dependent_on(n2, n1))
        return 1;

    /* The nodes have no depth order, but we need a total order because qsort()
     * is not stable. */
    return get_irn_idx(n1) - get_irn_idx(n2);
}
/**
 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
 * Clears the irg_is_leaf flag if a Call is detected.
 */
static void link_ops_in_block_walker(ir_node *irn, void *data)
{
    be_abi_irg_t *env  = data;
    ir_opcode     code = get_irn_opcode(irn);

    if (code == iro_Call ||
        (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
        (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
        ir_node *bl   = get_nodes_block(irn);
        void    *save = get_irn_link(bl);

        if (code == iro_Call)
            env->call->flags.bits.irg_is_leaf = 0;

        set_irn_link(irn, save);
        set_irn_link(bl, irn);
    }

    if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
        ir_node       *param = get_Builtin_param(irn, 0);
        tarval        *tv    = get_Const_tarval(param);
        unsigned long  value = get_tarval_long(tv);
        /* use ebp, so the climbframe algo works... */
        if (value > 0) {
            env->call->flags.bits.try_omit_fp = 0;
        }
    }
}
/**
 * Process all Call/Alloc/Free nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the Block. We first order this list according to data dependency
 * and then connect the calls together.
 */
static void process_ops_in_block(ir_node *bl, void *data)
{
    be_abi_irg_t *env     = data;
    ir_node      *curr_sp = env->init_sp;
    ir_node      *irn;
    ir_node     **nodes;
    int           n;
    int           n_nodes;

    n_nodes = 0;
    for (irn = get_irn_link(bl); irn != NULL; irn = get_irn_link(irn)) {
        ++n_nodes;
    }

    nodes = ALLOCAN(ir_node*, n_nodes);
    for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n) {
        nodes[n] = irn;
    }

    /* If there were call nodes in the block. */
    if (n_nodes > 0) {
        ir_node *keep;
        int      i;

        /* order the call nodes according to data dependency */
        qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);

        for (i = n_nodes - 1; i >= 0; --i) {
            ir_node *irn = nodes[i];

            DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
            switch (get_irn_opcode(irn)) {
            case iro_Call:
                /* The stack pointer will be modified due to a call. */
                env->call->flags.bits.try_omit_fp = 0;
                curr_sp = adjust_call(env, irn, curr_sp);
                break;
            case iro_Alloc:
                if (get_Alloc_where(irn) == stack_alloc)
                    curr_sp = adjust_alloc(env, irn, curr_sp);
                break;
            case iro_Free:
                if (get_Free_where(irn) == stack_alloc)
                    curr_sp = adjust_free(env, irn, curr_sp);
                break;
            default:
                panic("invalid call");
            }
        }

        /* Keep the last stack state in the block by tying it to a Keep node;
         * the proj from calls is already kept */
        if (curr_sp != env->init_sp &&
            !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
            nodes[0] = curr_sp;
            keep     = be_new_Keep(bl, 1, nodes);
            pmap_insert(env->keep_map, bl, keep);
        }
    }

    set_irn_link(bl, curr_sp);
}
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(ir_graph *irg)
{
    be_abi_irg_t *abi = be_get_irg_abi(irg);

    abi->call->flags.bits.irg_is_leaf = 1;
    irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi);

    ir_heights = heights_new(irg);
    irg_block_walk_graph(irg, NULL, process_ops_in_block, abi);
    heights_free(ir_heights);
}
/**
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param env          the ABI environment
 * @param call         the current call ABI
 * @param method_type  the method type
 * @param val_param_tp the value parameter type, will be destroyed
 * @param param_map    an array mapping method arguments to the stack layout type
 *
 * @return the stack argument layout type
 */
static ir_type *compute_arg_type(be_abi_irg_t *env, ir_graph *irg,
                                 be_abi_call_t *call,
                                 ir_type *method_type, ir_type *val_param_tp,
                                 ir_entity ***param_map)
{
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    int  dir  = env->call->flags.bits.left_to_right ? 1 : -1;
    int  inc  = arch_env->stack_dir * dir;
    int  n    = get_method_n_params(method_type);
    int  curr = inc > 0 ? 0 : n - 1;
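    /* walk the parameters in stack-layout order: e.g. a decreasing stack
     * (stack_dir == -1) with right-to-left argument order (dir == -1)
     * gives inc == 1, i.e. we start at parameter 0 and count upwards */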
    struct obstack *obst = be_get_be_obst(irg);
    int         ofs = 0;
    char        buf[128];
    ir_type    *res;
    int         i;
    ident      *id = get_entity_ident(get_irg_entity(irg));
    ir_entity **map;

    *param_map = map = OALLOCN(obst, ir_entity*, n);
    res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
    for (i = 0; i < n; ++i, curr += inc) {
        ir_type *param_type    = get_method_param_type(method_type, curr);
        be_abi_call_arg_t *arg = get_call_arg(call, 0, curr, 1);

        map[i] = NULL;
        if (arg->on_stack) {
            if (val_param_tp != NULL) {
                /* the entity was already created, create a copy in the param type */
                ir_entity *val_ent = get_method_value_param_ent(method_type, i);
                arg->stack_ent = copy_entity_own(val_ent, res);
                set_entity_link(val_ent, arg->stack_ent);
                set_entity_link(arg->stack_ent, NULL);
            } else {
                /* create a new entity */
                snprintf(buf, sizeof(buf), "param_%d", i);
                arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
            }
            ofs += arg->space_before;
            ofs  = round_up2(ofs, arg->alignment);
            set_entity_offset(arg->stack_ent, ofs);
            ofs += arg->space_after;
            ofs += get_type_size_bytes(param_type);
            map[i] = arg->stack_ent;
        }
    }
    set_type_size_bytes(res, ofs);
    set_type_state(res, layout_fixed);

    return res;
}
typedef struct {
    const arch_register_t *reg;
    ir_node               *irn;
} reg_node_map_t;
static int cmp_regs(const void *a, const void *b)
{
    const reg_node_map_t *p = a;
    const reg_node_map_t *q = b;

    if (p->reg->reg_class == q->reg->reg_class)
        return p->reg->index - q->reg->index;
    else
        return p->reg->reg_class - q->reg->reg_class;
}
static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
{
    pmap_entry *ent;
    int n = pmap_count(reg_map);
    int i = 0;

    foreach_pmap(reg_map, ent) {
        res[i].reg = ent->key;
        res[i].irn = ent->value;
        ++i;
    }
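    /* cmp_regs orders by register class, then index, so the resulting array
     * order is deterministic and independent of the pmap iteration order */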
    qsort(res, n, sizeof(res[0]), cmp_regs);
}
/**
 * Creates a barrier.
 */
static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
                               int in_req)
{
    int             n_regs = pmap_count(regs);
    int             n;
    ir_node        *irn;
    ir_node       **in;
    reg_node_map_t *rm;

    in = ALLOCAN(ir_node*, n_regs+1);
    rm = ALLOCAN(reg_node_map_t, n_regs);
    reg_map_to_arr(rm, regs);
    for (n = 0; n < n_regs; ++n) {
        in[n] = rm[n].irn;
    }

    if (mem) {
        in[n++] = *mem;
    }

    irn = be_new_Barrier(bl, n, in);

    for (n = 0; n < n_regs; ++n) {
        ir_node               *pred     = rm[n].irn;
        const arch_register_t *reg      = rm[n].reg;
        arch_register_type_t   add_type = 0;
        ir_node               *proj;
        const backend_info_t  *info;

        /* stupid workaround for now... as not all nodes report register
         * requirements. */
        info = be_get_info(skip_Proj(pred));
        if (info != NULL && info->out_infos != NULL) {
            const arch_register_req_t *ireq = arch_get_register_req_out(pred);
            if (ireq->type & arch_register_req_type_ignore)
                add_type |= arch_register_req_type_ignore;
            if (ireq->type & arch_register_req_type_produces_sp)
                add_type |= arch_register_req_type_produces_sp;
        }

        proj = new_r_Proj(irn, get_irn_mode(pred), n);
        be_node_set_reg_class_in(irn, n, reg->reg_class);
        if (in_req)
            be_set_constr_single_reg_in(irn, n, reg, 0);
        be_set_constr_single_reg_out(irn, n, reg, add_type);
        arch_set_irn_register(proj, reg);

        pmap_insert(regs, (void *) reg, proj);
    }

    if (mem) {
        *mem = new_r_Proj(irn, mode_M, n);
    }

    return irn;
}
/**
 * Creates a be_Return for a Return node.
 *
 * @param env   the abi environment
 * @param irn   the Return node or NULL if there was none
 * @param bl    the block where the be_Return should be placed
 * @param mem   the current memory
 * @param n_res number of return results
 */
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
                                 ir_node *mem, int n_res)
{
    be_abi_call_t    *call     = env->call;
    ir_graph         *irg      = get_Block_irg(bl);
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    dbg_info   *dbgi;
    pmap       *reg_map = pmap_create();
    ir_node    *keep    = pmap_get(env->keep_map, bl);
    int         in_max;
    ir_node    *ret;
    int         i, n;
    unsigned    pop;
    ir_node   **in;
    ir_node    *stack;
    const arch_register_t **regs;
    pmap_entry *ent;

    /*
     * get the valid stack node in this block.
     * If we had a call in that block there is a Keep constructed by process_calls()
     * which points to the last stack modification in that block. We'll use
     * it then. Else we use the stack from the start block and let
     * the ssa construction fix the usage.
     */
    stack = be_abi_reg_map_get(env->regs, arch_env->sp);
    if (keep) {
        stack = get_irn_n(keep, 0);
        kill_node(keep);
        remove_End_keepalive(get_irg_end(irg), keep);
    }

    /* Insert results for Return into the register map. */
    for (i = 0; i < n_res; ++i) {
        ir_node           *res = get_Return_res(irn, i);
        be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
        assert(arg->in_reg && "return value must be passed in register");
        pmap_insert(reg_map, (void *) arg->reg, res);
    }

    /* Add uses of the callee save registers. */
    foreach_pmap(env->regs, ent) {
        const arch_register_t *reg = ent->key;
        if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
            pmap_insert(reg_map, ent->key, ent->value);
    }

    be_abi_reg_map_set(reg_map, arch_env->sp, stack);

    /* Make the Epilogue node and call the arch's epilogue maker. */
    create_barrier(bl, &mem, reg_map, 1);
    call->cb->epilogue(env->cb, bl, &mem, reg_map);

    /*
     * Maximum size of the in array for Return nodes is
     * return args + callee save/ignore registers + memory + stack pointer
     */
    in_max = pmap_count(reg_map) + n_res + 2;

    in   = ALLOCAN(ir_node*, in_max);
    regs = ALLOCAN(arch_register_t const*, in_max);

    in[0]   = mem;
    in[1]   = be_abi_reg_map_get(reg_map, arch_env->sp);
    regs[0] = NULL;
    regs[1] = arch_env->sp;
    n       = 2;

    /* clear SP entry, since it has already been grown. */
    pmap_insert(reg_map, (void *) arch_env->sp, NULL);
    for (i = 0; i < n_res; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);

        in[n]     = be_abi_reg_map_get(reg_map, arg->reg);
        regs[n++] = arg->reg;

        /* Clear the map entry to mark the register as processed. */
        be_abi_reg_map_set(reg_map, arg->reg, NULL);
    }

    /* grow the rest of the stuff. */
    foreach_pmap(reg_map, ent) {
        if (ent->value) {
            in[n]     = ent->value;
            regs[n++] = ent->key;
        }
    }

    /* The in array for the new back end return is now ready. */
    if (irn != NULL) {
        dbgi = get_irn_dbg_info(irn);
    } else {
        dbgi = NULL;
    }
    /* we have to pop the shadow parameter in case of struct returns */
    pop = call->pop;
    ret = be_new_Return(dbgi, irg, bl, n_res, pop, n, in);

    /* Set the register classes of the return's parameters accordingly. */
    for (i = 0; i < n; ++i) {
        if (regs[i] == NULL)
            continue;

        be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
    }

    /* Free the space of the Epilog's in array and the register <-> proj map. */
    pmap_destroy(reg_map);

    return ret;
}
typedef struct ent_pos_pair ent_pos_pair;
struct ent_pos_pair {
    ir_entity    *ent;   /**< a value param entity */
    int           pos;   /**< its parameter number */
    ent_pos_pair *next;  /**< for linking */
};

typedef struct lower_frame_sels_env_t {
    ent_pos_pair *value_param_list;          /**< the list of all value param entities */
    ir_node      *frame;                     /**< the current frame */
    const arch_register_class_t *sp_class;   /**< register class of the stack pointer */
    const arch_register_class_t *link_class; /**< register class of the link pointer */
    ir_type      *value_tp;                  /**< the value type if any */
    ir_type      *frame_tp;                  /**< the frame type */
    int           static_link_pos;           /**< argument number of the hidden static link */
} lower_frame_sels_env_t;
/**
 * Return an entity from the backend for a value param entity.
 *
 * @param ent a value param type entity
 * @param ctx context
 */
static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
{
    ir_entity *argument_ent = get_entity_link(ent);

    if (argument_ent == NULL) {
        /* we have NO argument entity yet: this is bad, as we will
         * need one for the backing store. Create one here.
         */
        ir_type *frame_tp = ctx->frame_tp;
        unsigned offset   = get_type_size_bytes(frame_tp);
        ir_type *tp       = get_entity_type(ent);
        unsigned align    = get_type_alignment_bytes(tp);

        offset += align - 1;
        offset &= ~(align - 1);
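        /* rounds offset up to the next multiple of align (a power of two),
         * the same trick as in adjust_alloc_size() */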
        argument_ent = copy_entity_own(ent, frame_tp);

        /* must be automatic to set a fixed layout */
        set_entity_offset(argument_ent, offset);
        offset += get_type_size_bytes(tp);

        set_type_size_bytes(frame_tp, offset);
        set_entity_link(ent, argument_ent);
    }
    return argument_ent;
}
/**
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddress.
 * Links all used entities.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
{
    lower_frame_sels_env_t *ctx = data;

    if (is_Sel(irn)) {
        ir_node *ptr = get_Sel_ptr(irn);

        if (ptr == ctx->frame) {
            ir_entity *ent = get_Sel_entity(irn);
            ir_node   *bl  = get_nodes_block(irn);
            ir_node   *nw;
            int        pos            = 0;
            int        is_value_param = 0;

            if (get_entity_owner(ent) == ctx->value_tp) {
                is_value_param = 1;

                /* replace by its copy from the argument type */
                pos = get_struct_member_index(ctx->value_tp, ent);
                ent = get_argument_entity(ent, ctx);
            }

            nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
            exchange(irn, nw);

            /* check if it's a param Sel and if we have not seen this entity before */
            if (is_value_param && get_entity_link(ent) == NULL) {
                ent_pos_pair pair;

                pair.ent  = ent;
                pair.pos  = pos;
                pair.next = NULL;
                ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
                /* just a mark */
                set_entity_link(ent, ctx->value_param_list);
            }
        }
    }
}
/**
 * Check if a value parameter is transmitted as a register.
 * This might happen if the address of a parameter is taken which is
 * transmitted in registers.
 *
 * Note that on some architectures this case must be handled specially
 * because the place of the backing store is determined by their ABI.
 *
 * In the default case we move the entity to the frame type and create
 * a backing store into the first block.
 */
static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_graph *irg,
                                            ent_pos_pair *value_param_list)
{
    be_abi_call_t    *call     = env->call;
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    ent_pos_pair     *entry, *new_list;
    ir_type          *frame_tp;
    int               i, n = ARR_LEN(value_param_list);

    new_list = NULL;
    for (i = 0; i < n; ++i) {
        int                pos = value_param_list[i].pos;
        be_abi_call_arg_t *arg = get_call_arg(call, 0, pos, 1);

        if (arg->in_reg) {
            DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
            value_param_list[i].next = new_list;
            new_list = &value_param_list[i];
        }
    }
    if (new_list != NULL) {
        /* ok, change the graph */
        ir_node *start_bl = get_irg_start_block(irg);
        ir_node *first_bl = get_first_block_succ(start_bl);
        ir_node *frame, *imem, *nmem, *store, *mem, *args;
        optimization_state_t state;
        unsigned offset;

        assert(first_bl && first_bl != start_bl);
        /* we had already removed critical edges, so the following
           assertion should always be true. */
        assert(get_Block_n_cfgpreds(first_bl) == 1);

        /* now create backing stores */
        frame = get_irg_frame(irg);
        imem  = get_irg_initial_mem(irg);

        save_optimization_state(&state);
        set_optimize(0);
        nmem = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
        restore_optimization_state(&state);

        /* reroute all edges to the new memory source */
        edges_reroute(imem, nmem, irg);

        store = NULL;
        mem   = imem;
        args = get_irg_args(irg);
        for (entry = new_list; entry != NULL; entry = entry->next) {
            int      i    = entry->pos;
            ir_type *tp   = get_entity_type(entry->ent);
            ir_mode *mode = get_type_mode(tp);
            ir_node *addr;

            /* address for the backing store */
            addr = be_new_FrameAddr(arch_env->sp->reg_class, first_bl, frame, entry->ent);

            if (store)
                mem = new_r_Proj(store, mode_M, pn_Store_M);

            /* the backing store itself */
            store = new_r_Store(first_bl, mem, addr,
                                new_r_Proj(args, mode, i), 0);
        }
        /* the new memory Proj gets the last Proj from store */
        set_Proj_pred(nmem, store);
        set_Proj_proj(nmem, pn_Store_M);
        /* move all entities to the frame type */
        frame_tp = get_irg_frame_type(irg);
        offset   = get_type_size_bytes(frame_tp);

        /* we will add new entities: set the layout to undefined */
        assert(get_type_state(frame_tp) == layout_fixed);
        set_type_state(frame_tp, layout_undefined);
        for (entry = new_list; entry != NULL; entry = entry->next) {
            ir_entity *ent = entry->ent;

            /* If the entity is still on the argument type, move it to the
             * frame type.
             * This happens if the value_param type was built due to compound
             * params. */
            if (get_entity_owner(ent) != frame_tp) {
                ir_type  *tp    = get_entity_type(ent);
                unsigned  align = get_type_alignment_bytes(tp);

                offset += align - 1;
                offset &= ~(align - 1);
                set_entity_owner(ent, frame_tp);
                /* must be automatic to set a fixed layout */
                set_entity_offset(ent, offset);
                offset += get_type_size_bytes(tp);
            }
        }
        set_type_size_bytes(frame_tp, offset);
        /* fix the layout again */
        set_type_state(frame_tp, layout_fixed);
    }
}
/**
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
 */
static void fix_start_block(ir_graph *irg)
{
    ir_node *initial_X   = get_irg_initial_exec(irg);
    ir_node *start_block = get_irg_start_block(irg);
    const ir_edge_t *edge;

    assert(is_Proj(initial_X));

    foreach_out_edge(initial_X, edge) {
        ir_node *block = get_edge_src_irn(edge);

        if (is_Anchor(block))
            continue;
        if (block != start_block) {
            ir_node *jmp = new_r_Jmp(start_block);
            set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
            set_irg_initial_exec(irg, jmp);
            return;
        }
    }

    panic("Initial exec has no follow block in %+F", irg);
}
/**
 * Update the entities of Sels that access the outer value parameters.
 */
static void update_outer_frame_sels(ir_node *irn, void *env)
{
    lower_frame_sels_env_t *ctx = env;
    ir_node                *ptr;
    ir_entity              *ent;
    int                     pos = 0;

    if (! is_Sel(irn))
        return;
    ptr = get_Sel_ptr(irn);
    if (! is_arg_Proj(ptr))
        return;
    if (get_Proj_proj(ptr) != ctx->static_link_pos)
        return;
    ent = get_Sel_entity(irn);

    if (get_entity_owner(ent) == ctx->value_tp) {
        /* replace by its copy from the argument type */
        pos = get_struct_member_index(ctx->value_tp, ent);
        ent = get_argument_entity(ent, ctx);
        set_Sel_entity(irn, ent);

        /* check if we have not seen this entity before */
        if (get_entity_link(ent) == NULL) {
            ent_pos_pair pair;

            pair.ent  = ent;
            pair.pos  = pos;
            pair.next = NULL;
            ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
            /* just a mark */
            set_entity_link(ent, ctx->value_param_list);
        }
    }
}
/**
 * Fix access to outer local variables.
 */
static void fix_outer_variable_access(be_abi_irg_t *env,
                                      lower_frame_sels_env_t *ctx)
{
    int       i;
    ir_graph *irg;
    (void) env;

    for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
        ir_entity *ent = get_class_member(ctx->frame_tp, i);

        if (! is_method_entity(ent))
            continue;

        irg = get_entity_irg(ent);
        if (irg == NULL)
            continue;

        /*
         * FIXME: find the number of the static link parameter;
         * for now we assume 0 here
         */
        ctx->static_link_pos = 0;

        irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
    }
}
1767 * Modify the irg itself and the frame type.
1769 static void modify_irg(ir_graph *irg)
1771 be_abi_irg_t *env = be_get_irg_abi(irg);
1772 be_abi_call_t *call = env->call;
1773 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1774 const arch_register_t *sp = arch_env->sp;
1775 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1776 struct obstack *obst = be_get_be_obst(irg);
1777 be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
1780 ir_node *new_mem_proj;
1786 unsigned frame_size;
1789 const arch_register_t *fp_reg;
1790 ir_node *frame_pointer;
1794 const ir_edge_t *edge;
1795 ir_type *arg_type, *bet_type, *tp;
1796 lower_frame_sels_env_t ctx;
1797 ir_entity **param_map;
1799 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1801 /* Must fetch memory here, otherwise the start Barrier gets the wrong
1802 * memory, which leads to loops in the DAG. */
1803 old_mem = get_irg_initial_mem(irg);
1805 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1807 /* set the links of all frame entities to NULL, we use it
1808 to detect if an entity is already linked in the value_param_list */
1809 tp = get_method_value_param_type(method_type);
1812 /* clear the links of the clone type, let the
1813 original entities point to its clones */
1814 for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
1815 ir_entity *mem = get_struct_member(tp, i);
1816 set_entity_link(mem, NULL);
    arg_type = compute_arg_type(env, irg, call, method_type, tp, &param_map);

    /* Convert the Sel nodes in the irg to frame addr nodes: */
    ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
    ctx.frame            = get_irg_frame(irg);
    ctx.sp_class         = arch_env->sp->reg_class;
    ctx.link_class       = arch_env->link_class;
    ctx.frame_tp         = get_irg_frame_type(irg);

    /* layout the stackframe now */
    if (get_type_state(ctx.frame_tp) == layout_undefined) {
        default_layout_compound_type(ctx.frame_tp);
    }

    /* we will possibly add new entities to the frame: set the layout to undefined */
    assert(get_type_state(ctx.frame_tp) == layout_fixed);
    set_type_state(ctx.frame_tp, layout_undefined);

    irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);

    /* fix the frame type layout again */
    set_type_state(ctx.frame_tp, layout_fixed);
    /* align the stack frame to 4 bytes */
    frame_size = get_type_size_bytes(ctx.frame_tp);
    if (frame_size % 4 != 0) {
        set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
    }
    env->regs = pmap_create();

    n_params = get_method_n_params(method_type);
    args     = OALLOCNZ(obst, ir_node*, n_params);

    /*
     * for inner functions we must now fix access to outer frame entities.
     */
    fix_outer_variable_access(env, &ctx);

    /* Check if a value parameter is transmitted as a register.
     * This might happen if the address of a parameter is taken which is
     * transmitted in registers.
     *
     * Note that on some architectures this case must be handled specially
     * because the place of the backing store is determined by their ABI.
     *
     * In the default case we move the entity to the frame type and create
     * a backing store into the first block.
     */
    fix_address_of_parameter_access(env, irg, ctx.value_param_list);

    DEL_ARR_F(ctx.value_param_list);
    irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
    /* Fill the argument vector */
    arg_tuple = get_irg_args(irg);
    foreach_out_edge(arg_tuple, edge) {
        ir_node *irn = get_edge_src_irn(edge);
        if (! is_Anchor(irn)) {
            int nr   = get_Proj_proj(irn);
            args[nr] = irn;
            DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
        }
    }

    bet_type = call->cb->get_between_type(env->cb);
    stack_frame_init(stack_layout, arg_type, bet_type,
                     get_irg_frame_type(irg), arch_env->stack_dir, param_map);
    /* Count the register params and add them to the number of Projs for the RegParams node */
    for (i = 0; i < n_params; ++i) {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
        if (arg->in_reg && args[i]) {
            assert(arg->reg != sp && "cannot use stack pointer as parameter register");
            assert(i == get_Proj_proj(args[i]));

            /* For now, associate the register with the old Proj from Start representing that argument. */
            pmap_insert(env->regs, (void *) arg->reg, args[i]);
            DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
        }
    }
    /* Collect all callee-save registers */
    for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = &cls->regs[j];
            if (arch_register_type_is(reg, callee_save) ||
                arch_register_type_is(reg, state)) {
                pmap_insert(env->regs, (void *) reg, NULL);
            }
        }
    }
    /* handle start block here (place a jump in the block) */
    fix_start_block(irg);

    pmap_insert(env->regs, (void *) sp, NULL);
    pmap_insert(env->regs, (void *) arch_env->bp, NULL);
    start_bl   = get_irg_start_block(irg);
    env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
    /*
     * make proj nodes for the callee save registers.
     * memorize them, since Return nodes get those as inputs.
     *
     * Note that if a register corresponds to an argument, the regs map contains
     * the old Proj from start for that argument.
     */
    rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
    reg_map_to_arr(rm, env->regs);
    for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
        arch_register_t          *reg      = (void *) rm[i].reg;
        ir_mode                  *mode     = reg->reg_class->mode;
        long                      nr       = i;
        arch_register_req_type_t  add_type = 0;
        ir_node                  *proj;

        if (reg == sp)
            add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;

        proj = new_r_Proj(env->start, mode, nr + 1);
        pmap_insert(env->regs, (void *) reg, proj);
        be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
        arch_set_irn_register(proj, reg);

        DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
    }
    /* create a new initial memory proj */
    assert(is_Proj(old_mem));
    arch_set_out_register_req(env->start, 0, arch_no_register_req);
    new_mem_proj = new_r_Proj(env->start, mode_M, 0);
    mem          = new_mem_proj;
    set_irg_initial_mem(irg, mem);
    /* Generate the Prologue */
    fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &stack_layout->initial_bias);

    /* do the stack allocation BEFORE the barrier, or spill code
       might be added before it */
    env->init_sp = be_abi_reg_map_get(env->regs, sp);
    env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
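    /* note: BE_STACK_FRAME_SIZE_EXPAND is a placeholder offset; the real
     * frame size is only known after spilling and frame layout, and the
     * IncSP is fixed up then */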
    be_abi_reg_map_set(env->regs, sp, env->init_sp);

    create_barrier(start_bl, &mem, env->regs, 0);

    env->init_sp = be_abi_reg_map_get(env->regs, sp);
    arch_set_irn_register(env->init_sp, sp);

    frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
    set_irg_frame(irg, frame_pointer);
    pset_insert_ptr(env->ignore_regs, fp_reg);
    /* rewire old mem users to new mem */
    exchange(old_mem, mem);

    /* keep the mem (for functions with an endless loop = no return) */
    keep_alive(mem);

    set_irg_initial_mem(irg, mem);
    /* Now, introduce stack param nodes for all parameters passed on the stack */
    for (i = 0; i < n_params; ++i) {
        ir_node *arg_proj = args[i];
        ir_node *repl     = NULL;

        if (arg_proj != NULL) {
            be_abi_call_arg_t *arg;
            ir_type *param_type;
            int      nr = get_Proj_proj(arg_proj);
            ir_mode *mode;

            nr         = MIN(nr, n_params);
            arg        = get_call_arg(call, 0, nr, 1);
            param_type = get_method_param_type(method_type, nr);

            if (arg->in_reg) {
                repl = pmap_get(env->regs, (void *) arg->reg);
            } else if (arg->on_stack) {
                ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);

                /* For atomic parameters which are actually used, we create a Load node. */
                if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
                    ir_mode *mode      = get_type_mode(param_type);
                    ir_mode *load_mode = arg->load_mode;

                    ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
                    repl = new_r_Proj(load, load_mode, pn_Load_res);

                    if (mode != load_mode) {
                        repl = new_r_Conv(start_bl, repl, mode);
                    }
                } else {
                    /* The stack parameter is not primitive (it is a struct or array),
                     * so we create a node representing the parameter's address
                     * on the stack. */
                    repl = addr;
                }
            }

            assert(repl != NULL);

            /* Beware: the mode of the register parameters is always the mode of the register class
               which may be wrong. Add Conv's then. */
            mode = get_irn_mode(args[i]);
            if (mode != get_irn_mode(repl)) {
                repl = new_r_Conv(get_nodes_block(repl), repl, mode);
            }
            exchange(args[i], repl);
        }
    }
    /* the arg proj is not needed anymore and should only be used by the anchor */
    assert(get_irn_n_edges(arg_tuple) == 1);
    kill_node(arg_tuple);
    set_irg_args(irg, new_r_Bad(irg));
    /* All Return nodes hang on the End node, so look for them there. */
    end = get_irg_end_block(irg);
    for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
        ir_node *irn = get_Block_cfgpred(end, i);

        if (is_Return(irn)) {
            ir_node *blk = get_nodes_block(irn);
            ir_node *mem = get_Return_mem(irn);
            ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
            exchange(irn, ret);
        }
    }

    /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then;
       the code is dead and will never be executed. */
}
/** Fix the state inputs of calls that still hang on unknowns */
static void fix_call_state_inputs(ir_graph *irg)
{
    be_abi_irg_t     *env      = be_get_irg_abi(irg);
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    int               i, n, n_states;
    arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);

    /* Collect caller save registers */
    n = arch_env_get_n_reg_class(arch_env);
    for (i = 0; i < n; ++i) {
        unsigned j;
        const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
        for (j = 0; j < cls->n_regs; ++j) {
            const arch_register_t *reg = arch_register_for_index(cls, j);
            if (arch_register_type_is(reg, state)) {
                ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
            }
        }
    }

    n        = ARR_LEN(env->calls);
    n_states = ARR_LEN(stateregs);
    for (i = 0; i < n; ++i) {
        int      s, arity;
        ir_node *call = env->calls[i];

        arity = get_irn_arity(call);

        /* the state reg inputs are the last n inputs of the calls */
        for (s = 0; s < n_states; ++s) {
            int inp = arity - n_states + s;
            const arch_register_t *reg = stateregs[s];
            ir_node *regnode = be_abi_reg_map_get(env->regs, reg);

            set_irn_n(call, inp, regnode);
        }
    }

    DEL_ARR_F(stateregs);
}
/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
	ir_type   *type   = get_entity_type(method);
	ident     *old_id = get_entity_ld_ident(method);
	ident     *id     = id_mangle3("", old_id, "$stub");
	ir_type   *parent = be->pic_trampolines_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

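/* Example: a method with linker name "foo" gets a private stub entity with
 * linker name "foo$stub"; the stub lives in the trampoline segment and
 * shares the method's type. */
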
/**
 * Returns the trampoline entity for the given method.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
	ir_entity *result = pmap_get(env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
	}

	return result;
}

/**
 * Create a pic symbol entity (a pointer slot) for the given entity.
 */
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *old_id = get_entity_ld_ident(entity);
	ident     *id     = id_mangle3("", old_id, "$non_lazy_ptr");
	ir_type   *e_type = get_entity_type(entity);
	ir_type   *type   = new_type_pointer(e_type);
	ir_type   *parent = be->pic_symbols_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

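/* Example: for a global "bar" this creates a private entity "bar$non_lazy_ptr"
 * whose type is pointer-to-type-of-bar; conceptually, the dynamic linker fills
 * the slot with bar's final address at load time. */
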
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
	ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
	}

	return result;
}

/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
	return get_entity_visibility(entity) != ir_visibility_external
	    && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}

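/* Example: a static function defined in the current compilation unit is
 * relatively addressable; an external symbol, or one with merge linkage
 * (e.g. a weak symbol), is not, since the linker may bind it to a
 * definition outside the current module. */
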
/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
	ir_node       *pic_base;
	ir_node       *add;
	ir_node       *block;
	ir_mode       *mode;
	ir_node       *load;
	ir_node       *load_res;
	ir_graph      *irg = get_irn_irg(node);
	int            arity, i;
	be_main_env_t *be  = be_get_irg_main_env(irg);
	(void) data;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		dbg_info  *dbgi;
		ir_node   *pred = get_irn_n(node, i);
		ir_entity *entity;
		ir_entity *pic_symbol;
		ir_node   *pic_symconst;

		if (!is_SymConst(pred))
			continue;

		entity = get_SymConst_entity(pred);
		block  = get_nodes_block(pred);

		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node   *trampoline_const;

			if (can_address_relative(entity))
				continue;

			dbgi             = get_irn_dbg_info(pred);
			trampoline       = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			                                            trampoline);
			set_irn_n(node, i, trampoline_const);
			continue;
		}

		/* everything else is accessed relative to EIP */
		mode     = get_irn_mode(pred);
		pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(irg));

		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			ir_node *add = new_r_Add(block, pic_base, pred, mode);

			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
			continue;
		}

		/* get entry from pic symbol segment */
		dbgi         = get_irn_dbg_info(pred);
		pic_symbol   = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		                                        pic_symbol);
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);

		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
		load_res = new_r_Proj(load, mode, pn_Load_res);

		set_irn_n(node, i, load_res);
	}
}

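/* Sketch of the rewrites performed above (illustrative):
 *
 *   Call(SymConst(foo), ...)  ->  Call(SymConst(foo$stub), ...)
 *       if foo is not relatively addressable
 *   SymConst(local)           ->  Add(pic_base, SymConst(local))
 *   SymConst(global)          ->  Load(Add(pic_base, SymConst(global$non_lazy_ptr)))
 */
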
be_abi_irg_t *be_abi_introduce(ir_graph *irg)
{
	be_abi_irg_t        *env         = XMALLOC(be_abi_irg_t);
	ir_node             *old_frame   = get_irg_frame(irg);
	struct obstack      *obst        = be_get_be_obst(irg);
	be_options_t        *options     = be_get_irg_options(irg);
	const arch_env_t    *arch_env    = be_get_irg_arch_env(irg);
	ir_entity           *entity      = get_irg_entity(irg);
	ir_type             *method_type = get_entity_type(entity);
	pmap_entry          *ent;
	ir_node             *dummy;
	unsigned            *limited_bitset;
	arch_register_req_t *sp_req;

	be_omit_fp      = options->omit_fp;
	be_omit_leaf_fp = options->omit_leaf_fp;

	env->call = be_abi_call_new(arch_env->sp->reg_class);
	arch_env_get_call_abi(arch_env, method_type, env->call);

	env->ignore_regs  = pset_new_ptr_default();
	env->keep_map     = pmap_create();
	env->dce_survivor = new_survive_dce();

	sp_req = OALLOCZ(obst, arch_register_req_t);
	env->sp_req = sp_req;

	sp_req->type = arch_register_req_type_limited
	             | arch_register_req_type_produces_sp;
	sp_req->cls  = arch_register_get_class(arch_env->sp);

	limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
	rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
	sp_req->limited = limited_bitset;
	if (arch_env->sp->type & arch_register_type_ignore) {
		sp_req->type |= arch_register_req_type_ignore;
	}

	env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);

	env->calls = NEW_ARR_F(ir_node*, 0);
	be_set_irg_abi(irg, env);

	if (options->pic) {
		irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
	}

	/* Lower all call nodes in the IRG. */
	process_calls(irg);

	/*
	 * Beware: initialize the backend ABI call object only after processing
	 * calls, otherwise some information might not be available yet.
	 */
	env->cb = env->call->cb->init(env->call, arch_env, irg);

	/* Process the IRG */
	modify_irg(irg);

	/* fix call inputs for state registers */
	fix_call_state_inputs(irg);

	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	env->keep_map = NULL;

	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);
	env->calls = NULL;

	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));

	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	foreach_pmap(env->regs, ent) {
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	}

	env->call->cb->done(env->cb);

	return env;
}

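/* Typical driver sequence (a sketch; the exact order is determined by the
 * backend driver): be_abi_introduce() runs before scheduling and register
 * allocation, be_abi_fix_stack_nodes() and be_abi_fix_stack_bias() run once
 * stack-modifying nodes and frame offsets are final, and be_abi_free()
 * releases the environment at the end of code generation. */
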
void be_abi_free(ir_graph *irg)
{
	be_abi_irg_t *env = be_get_irg_abi(irg);

	be_abi_call_free(env->call);
	free_survive_dce(env->dce_survivor);
	del_pset(env->ignore_regs);
	pmap_destroy(env->regs);
	free(env);

	be_set_irg_abi(irg, NULL);
}

void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
	arch_register_t *reg;

	for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
		if (reg->reg_class == cls)
			bitset_set(bs, reg->index);
}

void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
	unsigned         i;
	arch_register_t *reg;

	for (i = 0; i < cls->n_regs; ++i) {
		if (arch_register_type_is(&cls->regs[i], ignore))
			continue;

		rbitset_set(raw_bitset, i);
	}

	for (reg = pset_first(abi->ignore_regs); reg != NULL;
	     reg = pset_next(abi->ignore_regs)) {
		if (reg->reg_class != cls)
			continue;

		rbitset_clear(raw_bitset, reg->index);
	}
}

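/* Example (hypothetical class of 8 registers): if r3 has the ignore type
 * and r5 is in abi->ignore_regs, the first loop sets bits {0,1,2,4,5,6,7}
 * and the second loop clears bit 5, so exactly the allocatable registers
 * {0,1,2,4,6,7} remain set in raw_bitset. */
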
/*
  _____ _        ____  _             _
 |  ___(_)_  __ / ___|| |_ __ _  ___| | __
 | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
 |  _| | |>  <   ___) | || (_| | (__| <
 |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\

*/

typedef ir_node **node_array;

typedef struct fix_stack_walker_env_t {
	node_array sp_nodes;
} fix_stack_walker_env_t;

/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
	ir_node                   *insn = node;
	fix_stack_walker_env_t    *env  = data;
	const arch_register_req_t *req;

	/* for Projs, the register requirements live on the producing instruction */
	if (is_Proj(node)) {
		insn = get_Proj_pred(node);
	}

	if (arch_irn_get_n_outs(insn) == 0)
		return;
	if (get_irn_mode(node) == mode_T)
		return;

	req = arch_get_register_req_out(node);
	if (! (req->type & arch_register_req_type_produces_sp))
		return;

	ARR_APP1(ir_node*, env->sp_nodes, node);
}

void be_abi_fix_stack_nodes(ir_graph *irg)
{
	be_abi_irg_t     *abi      = be_get_irg_abi(irg);
	be_lv_t          *lv       = be_get_irg_liveness(irg);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	be_ssa_construction_env_t senv;
	int        i, len;
	ir_node  **phis;
	fix_stack_walker_env_t walker_env;

	walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

	irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);

	/* nothing to be done if we didn't find any node, in fact we mustn't
	 * continue, as for endless loops incsp might have had no users and is bad
	 * now.
	 */
	len = ARR_LEN(walker_env.sp_nodes);
	if (len == 0) {
		DEL_ARR_F(walker_env.sp_nodes);
		return;
	}

	be_ssa_construction_init(&senv, irg);
	be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
	                               ARR_LEN(walker_env.sp_nodes));
	be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
	                                    ARR_LEN(walker_env.sp_nodes));

	if (lv != NULL) {
		len = ARR_LEN(walker_env.sp_nodes);
		for (i = 0; i < len; ++i) {
			be_liveness_update(lv, walker_env.sp_nodes[i]);
		}
		be_ssa_construction_update_liveness_phis(&senv, lv);
	}

	phis = be_ssa_construction_get_new_phis(&senv);

	/* set register requirements for stack phis */
	len = ARR_LEN(phis);
	for (i = 0; i < len; ++i) {
		ir_node *phi = phis[i];
		be_set_phi_reg_req(phi, abi->sp_req);
		arch_set_irn_register(phi, arch_env->sp);
	}
	be_ssa_construction_destroy(&senv);

	DEL_ARR_F(walker_env.sp_nodes);
}

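/* Design note: each IncSP (and every other sp-producing node) is a new
 * definition of the one stack pointer value. Rerouting all users through
 * SSA construction inserts Phis at control flow joins, so every user ends
 * up with the sp definition that actually dominates it. */
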
/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param env        the abi environment
 * @param bl         the block to process
 * @param real_bias  the stack bias at the entry of this block
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
	int                omit_fp     = env->call->flags.bits.try_omit_fp;
	int                wanted_bias = real_bias;
	ir_graph          *irg         = get_Block_irg(bl);
	be_stack_layout_t *layout      = be_get_irg_stack_layout(irg);
	const arch_env_t  *arch_env    = be_get_irg_arch_env(irg);
	ir_node           *irn;

	sched_foreach(bl, irn) {
		int ofs;

		/*
		   Check if the node relates to an entity on the stack frame.
		   If so, set the true offset (including the bias) for that
		   node.
		 */
		ir_entity *ent = arch_get_frame_entity(irn);
		if (ent != NULL) {
			int bias   = omit_fp ? real_bias : 0;
			int offset = get_stack_entity_offset(layout, ent, bias);
			arch_set_frame_offset(irn, offset);
			DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
			     ent, offset, bias));
		}

		/*
		 * If the node modifies the stack pointer by a constant offset,
		 * record that in the bias.
		 */
		ofs = arch_get_sp_bias(irn);

		if (be_is_IncSP(irn)) {
			/* fill in real stack frame size */
			if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
				ir_type *frame_type = get_irg_frame_type(irg);
				ofs = (int) get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
				ir_type *frame_type = get_irg_frame_type(irg);
				ofs = - (int)get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else {
				if (be_get_IncSP_align(irn)) {
					/* patch IncSP to produce an aligned stack pointer */
					ir_type *between_type = layout->between_type;
					int      between_size = get_type_size_bytes(between_type);
					int      alignment    = 1 << arch_env->stack_alignment;
					int      delta        = (real_bias + ofs + between_size) & (alignment - 1);
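					/* Worked example (illustrative numbers): with
					 * stack_alignment = 4 the stack must be 16-byte aligned.
					 * For real_bias = 8, ofs = 20, between_size = 8 we get
					 * delta = 36 & 15 = 4, so the IncSP offset is padded by
					 * 16 - 4 = 12 bytes to reach the next aligned boundary. */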
					if (delta > 0) {
						be_set_IncSP_offset(irn, ofs + alignment - delta);
						real_bias += alignment - delta;
					}
				} else {
					/* adjust so real_bias corresponds with wanted_bias */
					int delta = wanted_bias - real_bias;
					assert(delta <= 0);
					if (delta != 0) {
						be_set_IncSP_offset(irn, ofs + delta);
						real_bias += delta;
					}
				}
			}
		}

		real_bias   += ofs;
		wanted_bias += ofs;
	}

	assert(real_bias == wanted_bias);

	return real_bias;
}

/**
 * A helper struct for the bias walker.
 */
struct bias_walk {
	be_abi_irg_t *env;              /**< The ABI irg environment. */
	int          start_block_bias;  /**< The bias at the end of the start block. */
	int          between_size;      /**< The size of the between type. */
	ir_node      *start_block;      /**< The start block of the current graph. */
};

/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
	struct bias_walk *bw = data;
	if (bl != bw->start_block) {
		process_stack_bias(bw->env, bl, bw->start_block_bias);
	}
}

/**
 * Walker: finally lower all Sels of outer frame or parameter
 * entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx)
{
	ir_node           *ptr;
	ir_entity         *ent;
	ir_type           *owner;
	be_stack_layout_t *layout;
	ir_graph          *irg;
	(void) ctx;

	if (!is_Sel(sel))
		return;

	ent    = get_Sel_entity(sel);
	owner  = get_entity_owner(ent);
	ptr    = get_Sel_ptr(sel);
	irg    = get_irn_irg(sel);
	layout = be_get_irg_stack_layout(irg);

	if (owner == layout->frame_type || owner == layout->arg_type) {
		/* found access to outer frame or arguments */
		int offset = get_stack_entity_offset(layout, ent, 0);

		if (offset != 0) {
			ir_node  *bl        = get_nodes_block(sel);
			dbg_info *dbgi      = get_irn_dbg_info(sel);
			ir_mode  *mode      = get_irn_mode(sel);
			ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);

			ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
		}
		exchange(sel, ptr);
	}
}

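/* Example: a nested function reading a parameter of its enclosing function
 * sees Sel(outer_frame_ptr, param_entity); after lowering this becomes
 * Add(outer_frame_ptr, offset), or just the frame pointer itself when the
 * entity's offset is 0. */
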
void be_abi_fix_stack_bias(ir_graph *irg)
{
	be_abi_irg_t      *env          = be_get_irg_abi(irg);
	be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
	ir_type           *frame_tp;
	int                i;
	struct bias_walk   bw;

	stack_frame_compute_initial_offset(stack_layout);
	// stack_layout_dump(stdout, stack_layout);

	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg),
	                                         stack_layout->initial_bias);
	bw.between_size     = get_type_size_bytes(stack_layout->between_type);

	/* fix the bias in all other blocks */
	bw.env = env;
	bw.start_block = get_irg_start_block(irg);
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

	/* now fix inner functions: these still have Sel nodes to the outer
	   frame and parameter entities */
	frame_tp = get_irg_frame_type(irg);
	for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
		ir_entity *ent = get_class_member(frame_tp, i);
		ir_graph  *irg = get_entity_irg(ent);

		if (irg != NULL) {
			irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
		}
	}
}

ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, callee_save));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}

ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, ignore));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}

/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi)
{
	return abi->call->flags.bits.try_omit_fp;
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
void be_init_abi(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}