2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
33 #include "irgraph_t.h"
36 #include "iredges_t.h"
39 #include "irprintf_t.h"
45 #include "raw_bitset.h"
56 #include "bessaconstr.h"
59 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
61 typedef struct _be_abi_call_arg_t {
62 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
63 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
64 unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
67 const arch_register_t *reg;
70 unsigned alignment; /**< stack alignment */
71 unsigned space_before; /**< allocate space before */
72 unsigned space_after; /**< allocate space after */
75 struct _be_abi_call_t {
76 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
77 int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
78 const be_abi_callbacks_t *cb;
79 ir_type *between_type;
81 const arch_register_class_t *cls_addr; /**< register class of the call address */
85 * The ABI information for the current birg.
87 struct _be_abi_irg_t {
89 be_irg_t *birg; /**< The back end IRG. */
90 const arch_env_t *arch_env;
91 survive_dce_t *dce_survivor;
93 be_abi_call_t *call; /**< The ABI call information. */
94 ir_type *method_type; /**< The type of the method of the IRG. */
96 ir_node *init_sp; /**< The node representing the stack pointer
97 at the start of the function. */
99 ir_node *start; /**< The be_Start params node. */
100 pmap *regs; /**< A map of all callee-save and ignore regs to
101 their Projs to the RegParams node. */
103 int start_block_bias; /**< The stack bias at the end of the start block. */
105 void *cb; /**< ABI Callback self pointer. */
107 pmap *keep_map; /**< mapping blocks to keep nodes. */
108 pset *ignore_regs; /**< Additional registers which shall be ignored. */
110 ir_node **calls; /**< flexible array containing all be_Call nodes */
112 arch_register_req_t *sp_req;
114 be_stack_layout_t frame; /**< The stack frame model. */
117 static heights_t *ir_heights;
119 /** Flag: if set, try to omit the frame pointer in all routines. */
120 static int be_omit_fp = 1;
122 /** Flag: if set, try to omit the frame pointer in leaf routines only. */
123 static int be_omit_leaf_fp = 1;
126 _ ____ ___ ____ _ _ _ _
127 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
128 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
129 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
130 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
132 These callbacks are used by the backend to set the parameters
133 for a specific call type.
137 * Set compare function: compares two ABI call object arguments.
139 static int cmp_call_arg(const void *a, const void *b, size_t n)
141 const be_abi_call_arg_t *p = a, *q = b;
143 return !(p->is_res == q->is_res && p->pos == q->pos);
147 * Get an ABI call object argument.
149 * @param call the abi call
150 * @param is_res true for call results, false for call arguments
151 * @param pos position of the argument
153 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
155 be_abi_call_arg_t arg;
158 memset(&arg, 0, sizeof(arg));
162 hash = is_res * 128 + pos;
164 return set_find(call->params, &arg, sizeof(arg), hash);
168 * Create an ABI call object argument.
170 * @param call the abi call
171 * @param is_res true for call results, false for call arguments
172 * @param pos position of the argument
174 static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
176 be_abi_call_arg_t arg;
179 memset(&arg, 0, sizeof(arg));
183 hash = is_res * 128 + pos;
185 return set_insert(call->params, &arg, sizeof(arg), hash);
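/*
 * Illustrative note: get_call_arg() and create_call_arg() must derive the
 * same hash from (is_res, pos), so a later lookup finds exactly what was
 * inserted.  E.g. (with 'reg' as a placeholder register):
 *
 *     be_abi_call_res_reg(call, 0, reg);   inserts with hash 1*128 + 0 = 128
 *     get_call_arg(call, 1, 0);            finds it with the same hash
 */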
188 /* Set the flags for a call. */
189 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
195 /* Sets the number of bytes the stack frame is shrunk by the callee on return */
196 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
202 /* Set register class for call address */
203 void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
205 call->cls_addr = cls;
209 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
211 be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
213 arg->load_mode = load_mode;
214 arg->alignment = alignment;
215 arg->space_before = space_before;
216 arg->space_after = space_after;
217 assert(alignment > 0 && "Alignment must be greater than 0");
220 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
222 be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
227 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
229 be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
234 /* Get the flags of an ABI call object. */
235 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
241 * Constructor for a new ABI call object.
243 * @param cls_addr register class of the call address
245 * @return the new ABI call object
247 static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
249 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
252 call->params = new_set(cmp_call_arg, 16);
254 call->cls_addr = cls_addr;
256 call->flags.bits.try_omit_fp = be_omit_fp | be_omit_leaf_fp;
262 * Destructor for an ABI call object.
264 static void be_abi_call_free(be_abi_call_t *call)
266 del_set(call->params);
272 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
273 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
274 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
275 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
278 Handling of the stack frame. It is composed of three types:
279 1) The type of the arguments which are pushed on the stack.
280 2) The "between type" which consists of the things a call to the
281 function pushes on the stack (like the return address and
282 the old base pointer for ia32).
283 3) The Firm frame type which consists of all local variables
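/*
 * Rough picture for a typical decreasing stack (ia32-like), matching the
 * order[] array set up in stack_frame_init() below:
 *
 *     high addresses
 *       +----------------------+
 *       | argument type        |  order[2]: stack arguments
 *       +----------------------+
 *       | between type         |  order[1]: return address, old frame pointer
 *       +----------------------+
 *       | frame type           |  order[0]: local variables
 *       +----------------------+
 *     low addresses
 */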
287 static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
290 ir_type *t = get_entity_owner(ent);
291 int ofs = get_entity_offset(ent);
295 /* Find the type the entity is contained in. */
296 for (index = 0; index < N_FRAME_TYPES; ++index) {
297 if (frame->order[index] == t)
299 /* Add the sizes of all types below the entity's type to the entity's offset */
300 ofs += get_type_size_bytes(frame->order[index]);
303 /* correct the offset by the initial position of the frame pointer */
304 ofs -= frame->initial_offset;
306 /* correct the offset with the current bias. */
313 * Retrieve the entity with given offset from a frame type.
315 static ir_entity *search_ent_with_offset(ir_type *t, int offset)
319 for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
320 ir_entity *ent = get_compound_member(t, i);
321 if (get_entity_offset(ent) == offset)
328 static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
330 ir_type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
331 ir_entity *ent = search_ent_with_offset(base, 0);
334 frame->initial_offset
335 = frame->stack_dir < 0 ? get_type_size_bytes(frame->frame_type) : get_type_size_bytes(frame->between_type);
337 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
340 return frame->initial_offset;
344 * Initializes the frame layout from parts
346 * @param frame the stack layout that will be initialized
347 * @param args the stack argument layout type
348 * @param between the between layout type
349 * @param locals the method frame type
350 * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
351 * @param param_map an array mapping method argument positions to the stack argument type
353 * @return the initialized stack layout
355 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
356 ir_type *between, ir_type *locals, int stack_dir,
357 ir_entity *param_map[])
359 frame->arg_type = args;
360 frame->between_type = between;
361 frame->frame_type = locals;
362 frame->initial_offset = 0;
363 frame->initial_bias = 0;
364 frame->stack_dir = stack_dir;
365 frame->order[1] = between;
366 frame->param_map = param_map;
369 frame->order[0] = args;
370 frame->order[2] = locals;
373 /* typical decreasing stack: locals have the
374 * lowest addresses, arguments the highest */
375 frame->order[0] = locals;
376 frame->order[2] = args;
382 * Returns non-zero if the call argument at the given position
383 * is transferred on the stack.
385 static inline int is_on_stack(be_abi_call_t *call, int pos)
387 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
388 return arg && !arg->in_reg;
398 Adjustment of the calls inside a graph.
403 * Transform a call node into a be_Call node.
405 * @param env The ABI environment for the current irg.
406 * @param irn The call node.
407 * @param curr_sp The stack pointer node to use.
408 * @return The stack pointer after the call.
410 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
412 ir_graph *irg = env->birg->irg;
413 const arch_env_t *arch_env = env->birg->main_env->arch_env;
414 ir_type *call_tp = get_Call_type(irn);
415 ir_node *call_ptr = get_Call_ptr(irn);
416 int n_params = get_method_n_params(call_tp);
417 ir_node *curr_mem = get_Call_mem(irn);
418 ir_node *bl = get_nodes_block(irn);
420 int stack_dir = arch_env->stack_dir;
421 const arch_register_t *sp = arch_env->sp;
422 be_abi_call_t *call = be_abi_call_new(sp->reg_class);
423 ir_mode *mach_mode = sp->reg_class->mode;
424 struct obstack *obst = &env->obst;
425 int no_alloc = call->flags.bits.frame_is_setup_on_call;
426 int n_res = get_method_n_ress(call_tp);
427 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
429 ir_node *res_proj = NULL;
430 int n_reg_params = 0;
431 int n_stack_params = 0;
434 pset_new_t destroyed_regs, states;
435 pset_new_iterator_t iter;
439 int n_reg_results = 0;
440 const arch_register_t *reg;
441 const ir_edge_t *edge;
443 int *stack_param_idx;
444 int i, n, destroy_all_regs;
447 pset_new_init(&destroyed_regs);
448 pset_new_init(&states);
450 /* Let the isa fill out the abi description for that call node. */
451 arch_env_get_call_abi(arch_env, call_tp, call);
453 /* Insert code to put the stack arguments on the stack. */
454 assert(get_Call_n_params(irn) == n_params);
455 for (i = 0; i < n_params; ++i) {
456 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
459 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
461 stack_size += round_up2(arg->space_before, arg->alignment);
462 stack_size += round_up2(arg_size, arg->alignment);
463 stack_size += round_up2(arg->space_after, arg->alignment);
464 obstack_int_grow(obst, i);
468 stack_param_idx = obstack_finish(obst);
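/*
 * For illustration, assuming round_up2(x, a) rounds x up to the next
 * multiple of the power-of-two a: a 6-byte stack argument with alignment 4
 * and no space_before/space_after contributes
 *     round_up2(0, 4) + round_up2(6, 4) + round_up2(0, 4) = 0 + 8 + 0 = 8
 * bytes to stack_size.
 */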
470 /* Collect all arguments which are passed in registers. */
471 for (i = 0; i < n_params; ++i) {
472 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
473 if (arg && arg->in_reg) {
474 obstack_int_grow(obst, i);
478 reg_param_idxs = obstack_finish(obst);
481 * If the stack is decreasing, we do not want to store sequentially,
482 * and nobody else allocated the call frame,
483 * we allocate as much space on the stack as all parameters need, by
484 * moving the stack pointer along the stack's direction.
486 * Note: we also have to do this for stack_size == 0, because we may have
487 * to adjust stack alignment for the call.
489 if (stack_dir < 0 && !do_seq && !no_alloc) {
490 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
493 dbgi = get_irn_dbg_info(irn);
494 /* If there are some parameters which shall be passed on the stack. */
495 if (n_stack_params > 0) {
499 * Reverse the list of stack parameters if call arguments are passed from left to right.
500 * We must reverse them again if they are pushed (not stored) and the stack
501 * direction is downwards.
503 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
504 for (i = 0; i < n_stack_params >> 1; ++i) {
505 int other = n_stack_params - i - 1;
506 int tmp = stack_param_idx[i];
507 stack_param_idx[i] = stack_param_idx[other];
508 stack_param_idx[other] = tmp;
512 curr_mem = get_Call_mem(irn);
514 obstack_ptr_grow(obst, curr_mem);
517 for (i = 0; i < n_stack_params; ++i) {
518 int p = stack_param_idx[i];
519 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
520 ir_node *param = get_Call_param(irn, p);
521 ir_node *addr = curr_sp;
523 ir_type *param_type = get_method_param_type(call_tp, p);
524 int param_size = get_type_size_bytes(param_type) + arg->space_after;
527 * If we wanted to build the arguments sequentially,
528 * the stack pointer for the next argument must be incremented,
529 * and the memory value propagated.
533 addr = curr_sp = be_new_IncSP(sp, bl, curr_sp, param_size + arg->space_before, 0);
534 add_irn_dep(curr_sp, curr_mem);
537 curr_ofs += arg->space_before;
538 curr_ofs = round_up2(curr_ofs, arg->alignment);
540 /* Make the expression to compute the argument's offset. */
542 ir_mode *constmode = mach_mode;
543 if (mode_is_reference(mach_mode)) {
546 addr = new_r_Const_long(irg, constmode, curr_ofs);
547 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
551 /* Insert a store for primitive arguments. */
552 if (is_atomic_type(param_type)) {
554 ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
555 store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
556 mem = new_r_Proj(bl, store, mode_M, pn_Store_M);
559 /* Make a mem copy for compound arguments. */
563 assert(mode_is_reference(get_irn_mode(param)));
564 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
565 mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);
568 curr_ofs += param_size;
573 obstack_ptr_grow(obst, mem);
576 in = (ir_node **) obstack_finish(obst);
578 /* We only need the sync if we didn't build the stores sequentially. */
580 if (n_stack_params >= 1) {
581 curr_mem = new_r_Sync(bl, n_stack_params + 1, in);
583 curr_mem = get_Call_mem(irn);
586 obstack_free(obst, in);
589 /* check for the return_twice property */
590 destroy_all_regs = 0;
591 if (is_SymConst_addr_ent(call_ptr)) {
592 ir_entity *ent = get_SymConst_entity(call_ptr);
594 if (get_entity_additional_properties(ent) & mtp_property_returns_twice)
595 destroy_all_regs = 1;
597 ir_type *call_tp = get_Call_type(irn);
599 if (get_method_additional_properties(call_tp) & mtp_property_returns_twice)
600 destroy_all_regs = 1;
603 /* Put caller-save registers into the destroyed set and state registers into the states set */
604 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
606 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
607 for (j = 0; j < cls->n_regs; ++j) {
608 const arch_register_t *reg = arch_register_for_index(cls, j);
610 if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
611 if (! arch_register_type_is(reg, ignore))
612 pset_new_insert(&destroyed_regs, (void *) reg);
614 if (arch_register_type_is(reg, state)) {
615 pset_new_insert(&destroyed_regs, (void*) reg);
616 pset_new_insert(&states, (void*) reg);
621 if (destroy_all_regs) {
622 /* even if destroy_all_regs is specified, neither SP nor FP is destroyed (else bad things will happen) */
623 pset_new_remove(&destroyed_regs, arch_env->sp);
624 pset_new_remove(&destroyed_regs, arch_env->bp);
627 /* collect all result Projs of the call */
628 res_projs = ALLOCANZ(ir_node*, n_res);
630 foreach_out_edge(irn, edge) {
631 const ir_edge_t *res_edge;
632 ir_node *irn = get_edge_src_irn(edge);
634 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
637 foreach_out_edge(irn, res_edge) {
639 ir_node *res = get_edge_src_irn(res_edge);
641 assert(is_Proj(res));
643 proj = get_Proj_proj(res);
644 assert(proj < n_res);
645 assert(res_projs[proj] == NULL);
646 res_projs[proj] = res;
652 /** TODO: this is not correct for cases where return values are passed
653 * on the stack, but no known ABI does this currently...
655 n_reg_results = n_res;
657 /* make the back end call node and set its register requirements. */
658 for (i = 0; i < n_reg_params; ++i) {
659 obstack_ptr_grow(obst, get_Call_param(irn, reg_param_idxs[i]));
662 /* add state registers ins */
663 foreach_pset_new(&states, reg, iter) {
664 const arch_register_class_t *cls = arch_register_get_class(reg);
666 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
667 ir_fprintf(stderr, "Adding %+F\n", regnode);
669 ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
670 obstack_ptr_grow(obst, regnode);
672 n_ins = n_reg_params + pset_new_size(&states);
674 in = obstack_finish(obst);
676 /* ins collected, build the call */
677 if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
679 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
680 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
681 n_ins, in, get_Call_type(irn));
682 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
685 low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
686 n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
687 n_ins, in, get_Call_type(irn));
689 be_Call_set_pop(low_call, call->pop);
691 /* put the call into the list of all calls for later processing */
692 ARR_APP1(ir_node *, env->calls, low_call);
694 /* create new stack pointer */
695 curr_sp = new_r_Proj(bl, low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
696 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
697 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
698 arch_set_irn_register(curr_sp, sp);
700 /* now handle results */
701 for (i = 0; i < n_res; ++i) {
703 ir_node *proj = res_projs[i];
704 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
706 /* return values on the stack are not supported yet */
710 shift the proj number to the right, since we will drop the
711 unspeakable Proj_T from the Call. Therefore, all real argument
712 Proj numbers must be increased by pn_be_Call_first_res
714 pn = i + pn_be_Call_first_res;
717 ir_type *res_type = get_method_res_type(call_tp, i);
718 ir_mode *mode = get_type_mode(res_type);
719 proj = new_r_Proj(bl, low_call, mode, pn);
722 set_Proj_pred(proj, low_call);
723 set_Proj_proj(proj, pn);
727 pset_new_remove(&destroyed_regs, arg->reg);
732 Set the register class of the call address to
733 the backend provided class (default: stack pointer class)
735 be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
737 DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
739 /* Set the register classes and constraints of the Call parameters. */
740 for (i = 0; i < n_reg_params; ++i) {
741 int index = reg_param_idxs[i];
742 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
743 assert(arg->reg != NULL);
745 be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
749 /* Set the register constraints of the results. */
750 for (i = 0; i < n_res; ++i) {
751 ir_node *proj = res_projs[i];
752 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
753 int pn = get_Proj_proj(proj);
756 be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
757 arch_set_irn_register(proj, arg->reg);
759 obstack_free(obst, in);
760 exchange(irn, low_call);
762 /* kill the ProjT node */
763 if (res_proj != NULL) {
767 /* Make additional projs for the caller save registers
768 and the Keep node which keeps them alive. */
770 const arch_register_t *reg;
774 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
775 pset_new_iterator_t iter;
777 /* also keep the stack pointer */
779 set_irn_link(curr_sp, (void*) sp);
780 obstack_ptr_grow(obst, curr_sp);
782 foreach_pset_new(&destroyed_regs, reg, iter) {
783 ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj);
785 /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
786 be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
787 arch_set_irn_register(proj, reg);
789 set_irn_link(proj, (void*) reg);
790 obstack_ptr_grow(obst, proj);
795 for (i = 0; i < n_reg_results; ++i) {
796 ir_node *proj = res_projs[i];
797 const arch_register_t *reg = arch_get_irn_register(proj);
798 set_irn_link(proj, (void*) reg);
799 obstack_ptr_grow(obst, proj);
803 /* create the Keep for the caller save registers */
804 in = (ir_node **) obstack_finish(obst);
805 keep = be_new_Keep(bl, n, in);
806 for (i = 0; i < n; ++i) {
807 const arch_register_t *reg = get_irn_link(in[i]);
808 be_node_set_reg_class_in(keep, i, reg->reg_class);
810 obstack_free(obst, in);
813 /* Clean up the stack. */
814 assert(stack_size >= call->pop);
815 stack_size -= call->pop;
817 if (stack_size > 0) {
818 ir_node *mem_proj = NULL;
820 foreach_out_edge(low_call, edge) {
821 ir_node *irn = get_edge_src_irn(edge);
822 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
829 mem_proj = new_r_Proj(bl, low_call, mode_M, pn_be_Call_M_regular);
830 keep_alive(mem_proj);
833 /* Clean up the stack frame or revert alignment fixes if we allocated it */
835 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
838 be_abi_call_free(call);
839 obstack_free(obst, stack_param_idx);
841 pset_new_destroy(&states);
842 pset_new_destroy(&destroyed_regs);
848 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
850 * @param alignment the minimum stack alignment
851 * @param size the node containing the non-aligned size
852 * @param block the block where new nodes are allocated on
853 * @param dbg debug info for new nodes
855 * @return a node representing the aligned size
857 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
858 ir_node *block, dbg_info *dbg)
860 if (stack_alignment > 1) {
866 assert(is_po2(stack_alignment));
868 mode = get_irn_mode(size);
869 tv = new_tarval_from_long(stack_alignment-1, mode);
870 irg = get_Block_irg(block);
871 mask = new_r_Const(irg, tv);
872 size = new_rd_Add(dbg, block, size, mask, mode);
874 tv = new_tarval_from_long(-(long)stack_alignment, mode);
875 mask = new_r_Const(irg, tv);
876 size = new_rd_And(dbg, block, size, mask, mode);
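/*
 * Example of the rounding above: with stack_alignment == 16 a dynamic size
 * of 20 becomes (20 + 15) & ~15 == 32, i.e. the Alloc size is padded up to
 * the next multiple of the stack alignment.
 */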
882 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
884 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
893 const ir_edge_t *edge;
894 ir_node *new_alloc, *size, *addr, *ins[2];
895 unsigned stack_alignment;
897 assert(get_Alloc_where(alloc) == stack_alloc);
899 block = get_nodes_block(alloc);
900 irg = get_Block_irg(block);
903 type = get_Alloc_type(alloc);
905 foreach_out_edge(alloc, edge) {
906 ir_node *irn = get_edge_src_irn(edge);
908 assert(is_Proj(irn));
909 switch (get_Proj_proj(irn)) {
921 /* Beware: currently Alloc nodes without a result might occur;
922 only escape analysis kills them, and it runs only for object-
923 oriented source. We kill the Alloc here. */
924 if (alloc_res == NULL && alloc_mem) {
925 exchange(alloc_mem, get_Alloc_mem(alloc));
929 dbg = get_irn_dbg_info(alloc);
930 size = get_Alloc_size(alloc);
932 /* we might need to multiply the size by the element size */
933 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
934 ir_mode *mode = get_irn_mode(size);
935 tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
937 ir_node *cnst = new_rd_Const(dbg, irg, tv);
938 size = new_rd_Mul(dbg, block, size, cnst, mode);
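/* For illustration: an Alloc of n elements of a 4-byte element type ends up
 * with size = Mul(n, Const 4) here. */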
941 /* The stack pointer will be modified in an unknown manner.
942 We cannot omit the frame pointer. */
943 env->call->flags.bits.try_omit_fp = 0;
945 stack_alignment = 1 << env->arch_env->stack_alignment;
946 size = adjust_alloc_size(stack_alignment, size, block, dbg);
947 new_alloc = be_new_AddSP(env->arch_env->sp, block, curr_sp, size);
948 set_irn_dbg_info(new_alloc, dbg);
950 if (alloc_mem != NULL) {
954 addsp_mem = new_r_Proj(block, new_alloc, mode_M, pn_be_AddSP_M);
956 /* We need to sync the output mem of the AddSP with the input mem
957 edge into the alloc node. */
958 ins[0] = get_Alloc_mem(alloc);
960 sync = new_r_Sync(block, 2, ins);
962 exchange(alloc_mem, sync);
965 exchange(alloc, new_alloc);
967 /* fix projnum of alloca res */
968 set_Proj_proj(alloc_res, pn_be_AddSP_res);
971 curr_sp = new_r_Proj(block, new_alloc, get_irn_mode(curr_sp),
979 * The Free is transformed into a back end free node and connected to the stack nodes.
981 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
985 ir_node *subsp, *mem, *res, *size, *sync;
989 unsigned stack_alignment;
992 assert(get_Free_where(free) == stack_alloc);
994 block = get_nodes_block(free);
995 irg = get_irn_irg(block);
996 type = get_Free_type(free);
997 sp_mode = env->arch_env->sp->reg_class->mode;
998 dbg = get_irn_dbg_info(free);
1000 /* we might need to multiply the size by the element size */
1001 if (type != firm_unknown_type && get_type_size_bytes(type) != 1) {
1002 tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
1003 ir_node *cnst = new_rd_Const(dbg, irg, tv);
1004 ir_node *mul = new_rd_Mul(dbg, block, get_Free_size(free),
1008 size = get_Free_size(free);
1011 stack_alignment = 1 << env->arch_env->stack_alignment;
1012 size = adjust_alloc_size(stack_alignment, size, block, dbg);
1014 /* The stack pointer will be modified in an unknown manner.
1015 We cannot omit the frame pointer. */
1016 env->call->flags.bits.try_omit_fp = 0;
1017 subsp = be_new_SubSP(env->arch_env->sp, block, curr_sp, size);
1018 set_irn_dbg_info(subsp, dbg);
1020 mem = new_r_Proj(block, subsp, mode_M, pn_be_SubSP_M);
1021 res = new_r_Proj(block, subsp, sp_mode, pn_be_SubSP_sp);
1023 /* we need to sync the memory */
1024 in[0] = get_Free_mem(free);
1026 sync = new_r_Sync(block, 2, in);
1029 /* and make the SubSP dependent on the former memory */
1029 add_irn_dep(subsp, get_Free_mem(free));
1032 exchange(free, sync);
1038 /* the following function is replaced by the usage of the heights module */
1041 * Walker for dependent_on().
1042 * This function searches for the node tgt recursively, starting from a given node,
1043 * but is restricted to the given block.
1044 * @return 1 if tgt was reachable from curr, 0 if not.
1046 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
1050 if (get_nodes_block(curr) != bl)
1056 /* Phi functions stop the recursion inside a basic block */
1057 if (! is_Phi(curr)) {
1058 for (i = 0, n = get_irn_arity(curr); i < n; ++i) {
1059 if (check_dependence(get_irn_n(curr, i), tgt, bl))
1069 * Check if a node is somehow data dependent on another one.
1070 * both nodes must be in the same basic block.
1071 * @param n1 The first node.
1072 * @param n2 The second node.
1073 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
1075 static int dependent_on(ir_node *n1, ir_node *n2)
1077 assert(get_nodes_block(n1) == get_nodes_block(n2));
1079 return heights_reachable_in_block(ir_heights, n1, n2);
1082 static int cmp_call_dependency(const void *c1, const void *c2)
1084 ir_node *n1 = *(ir_node **) c1;
1085 ir_node *n2 = *(ir_node **) c2;
1088 Classical qsort() comparison function behavior:
1089 0 if both elements are equal
1090 1 if second is "smaller" than first
1091 -1 if first is "smaller" than second
1093 if (dependent_on(n1, n2))
1096 if (dependent_on(n2, n1))
1099 /* The nodes have no depth order, but we need a total order because qsort()
1101 return get_irn_idx(n1) - get_irn_idx(n2);
1105 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
1106 * Clears the irg_is_leaf flag if a Call is detected.
1108 static void link_ops_in_block_walker(ir_node *irn, void *data)
1110 be_abi_irg_t *env = data;
1111 ir_opcode code = get_irn_opcode(irn);
1113 if (code == iro_Call ||
1114 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
1115 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
1116 ir_node *bl = get_nodes_block(irn);
1117 void *save = get_irn_link(bl);
1119 if (code == iro_Call)
1120 env->call->flags.bits.irg_is_leaf = 0;
1122 set_irn_link(irn, save);
1123 set_irn_link(bl, irn);
1126 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
1127 ir_node *param = get_Builtin_param(irn, 0);
1128 tarval *tv = get_Const_tarval(param);
1129 unsigned long value = get_tarval_long(tv);
1130 /* use ebp, so the climbframe algo works... */
1132 env->call->flags.bits.try_omit_fp = 0;
1139 * Process all Call/Alloc/Free nodes inside a basic block.
1140 * Note that the link field of the block must contain a linked list of all
1141 * Call nodes inside the Block. We first order this list according to data dependency
1142 * and then connect the calls together.
1144 static void process_ops_in_block(ir_node *bl, void *data)
1146 be_abi_irg_t *env = data;
1147 ir_node *curr_sp = env->init_sp;
1151 for (irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
1152 obstack_ptr_grow(&env->obst, irn);
1154 /* If there were call nodes in the block. */
1160 nodes = obstack_finish(&env->obst);
1162 /* order the call nodes according to data dependency */
1163 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);
1165 for (i = n - 1; i >= 0; --i) {
1166 ir_node *irn = nodes[i];
1168 DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1169 switch (get_irn_opcode(irn)) {
1172 /* The stack pointer will be modified due to a call. */
1173 env->call->flags.bits.try_omit_fp = 0;
1175 curr_sp = adjust_call(env, irn, curr_sp);
1178 if (get_Alloc_where(irn) == stack_alloc)
1179 curr_sp = adjust_alloc(env, irn, curr_sp);
1182 if (get_Free_where(irn) == stack_alloc)
1183 curr_sp = adjust_free(env, irn, curr_sp);
1186 panic("invalid call");
1191 obstack_free(&env->obst, nodes);
1193 /* Keep the last stack state in the block by tying it to a Keep node;
1194 * the Proj from calls is already kept */
1195 if (curr_sp != env->init_sp &&
1196 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1198 keep = be_new_Keep(bl, 1, nodes);
1199 pmap_insert(env->keep_map, bl, keep);
1203 set_irn_link(bl, curr_sp);
1204 } /* process_ops_in_block */
1207 * Adjust all call nodes in the graph to the ABI conventions.
1209 static void process_calls(be_abi_irg_t *env)
1211 ir_graph *irg = env->birg->irg;
1213 env->call->flags.bits.irg_is_leaf = 1;
1214 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
1216 ir_heights = heights_new(env->birg->irg);
1217 irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
1218 heights_free(ir_heights);
1222 * Computes the stack argument layout type.
1223 * Changes a possibly allocated value param type by moving
1224 * entities to the stack layout type.
1226 * @param env the ABI environment
1227 * @param call the current call ABI
1228 * @param method_type the method type
1229 * @param val_param_tp the value parameter type, will be destroyed
1230 * @param param_map an array mapping method arguments to the stack layout type
1232 * @return the stack argument layout type
1234 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
1235 ir_type *method_type, ir_type *val_param_tp,
1236 ir_entity ***param_map)
1238 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1239 int inc = env->birg->main_env->arch_env->stack_dir * dir;
1240 int n = get_method_n_params(method_type);
1241 int curr = inc > 0 ? 0 : n - 1;
1247 ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
1250 *param_map = map = OALLOCN(&env->obst, ir_entity*, n);
1251 res = new_type_struct(id_mangle_u(id, new_id_from_chars("arg_type", 8)));
1252 for (i = 0; i < n; ++i, curr += inc) {
1253 ir_type *param_type = get_method_param_type(method_type, curr);
1254 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1257 if (arg->on_stack) {
1258 if (val_param_tp != NULL) {
1259 /* the entity was already created, create a copy in the param type */
1260 ir_entity *val_ent = get_method_value_param_ent(method_type, i);
1261 arg->stack_ent = copy_entity_own(val_ent, res);
1262 set_entity_link(val_ent, arg->stack_ent);
1263 set_entity_link(arg->stack_ent, NULL);
1264 /* must be automatic to set a fixed layout */
1265 set_entity_allocation(arg->stack_ent, allocation_automatic);
1267 /* create a new entity */
1268 snprintf(buf, sizeof(buf), "param_%d", i);
1269 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1271 ofs += arg->space_before;
1272 ofs = round_up2(ofs, arg->alignment);
1273 set_entity_offset(arg->stack_ent, ofs);
1274 ofs += arg->space_after;
1275 ofs += get_type_size_bytes(param_type);
1276 map[i] = arg->stack_ent;
1279 set_type_size_bytes(res, ofs);
1280 set_type_state(res, layout_fixed);
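/*
 * Sketch of the resulting arg_type, assuming two stack parameters of 4 bytes
 * each with alignment 4 and no space_before/space_after:
 *
 *     param_0 at offset 0
 *     param_1 at offset 4
 *     type size = 8, layout fixed
 */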
1285 const arch_register_t *reg;
1289 static int cmp_regs(const void *a, const void *b)
1291 const reg_node_map_t *p = a;
1292 const reg_node_map_t *q = b;
1294 if (p->reg->reg_class == q->reg->reg_class)
1295 return p->reg->index - q->reg->index;
1297 return p->reg->reg_class - q->reg->reg_class;
1300 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1303 int n = pmap_count(reg_map);
1305 reg_node_map_t *res = OALLOCN(obst, reg_node_map_t, n);
1307 foreach_pmap(reg_map, ent) {
1308 res[i].reg = ent->key;
1309 res[i].irn = ent->value;
1313 qsort(res, n, sizeof(res[0]), cmp_regs);
1318 * Creates a barrier.
1320 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1322 int n_regs = pmap_count(regs);
1328 rm = reg_map_to_arr(&env->obst, regs);
1330 for (n = 0; n < n_regs; ++n)
1331 obstack_ptr_grow(&env->obst, rm[n].irn);
1334 obstack_ptr_grow(&env->obst, *mem);
1338 in = (ir_node **) obstack_finish(&env->obst);
1339 irn = be_new_Barrier(bl, n, in);
1340 obstack_free(&env->obst, in);
1342 for (n = 0; n < n_regs; ++n) {
1343 ir_node *pred = rm[n].irn;
1344 const arch_register_t *reg = rm[n].reg;
1345 arch_register_type_t add_type = 0;
1347 const backend_info_t *info;
1349 /* stupid workaround for now... as not all nodes report register
1351 info = be_get_info(skip_Proj(pred));
1352 if (info != NULL && info->out_infos != NULL) {
1353 const arch_register_req_t *ireq = arch_get_register_req_out(pred);
1354 if (ireq->type & arch_register_req_type_ignore)
1355 add_type |= arch_register_req_type_ignore;
1356 if (ireq->type & arch_register_req_type_produces_sp)
1357 add_type |= arch_register_req_type_produces_sp;
1360 proj = new_r_Proj(bl, irn, get_irn_mode(pred), n);
1361 be_node_set_reg_class_in(irn, n, reg->reg_class);
1363 be_set_constr_single_reg_in(irn, n, reg, 0);
1364 be_set_constr_single_reg_out(irn, n, reg, add_type);
1365 arch_set_irn_register(proj, reg);
1367 pmap_insert(regs, (void *) reg, proj);
1371 *mem = new_r_Proj(bl, irn, mode_M, n);
1374 obstack_free(&env->obst, rm);
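/*
 * In short: every register value in the map (plus the memory) goes into the
 * Barrier as input n and comes back out as Proj n; both sides are pinned to
 * the same single register, ignore/produces_sp flags found on the original
 * definition are carried over to the new Proj, and the register map is
 * updated to point at the Projs behind the Barrier.
 */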
1379 * Creates a be_Return for a Return node.
1381 * @param env the abi environment
1382 * @param irn the Return node or NULL if there was none
1383 * @param bl the block where the be_Return should be placed
1384 * @param mem the current memory
1385 * @param n_res number of return results
1387 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
1388 ir_node *mem, int n_res)
1390 be_abi_call_t *call = env->call;
1391 const arch_env_t *arch_env = env->birg->main_env->arch_env;
1393 pmap *reg_map = pmap_create();
1394 ir_node *keep = pmap_get(env->keep_map, bl);
1401 const arch_register_t **regs;
1405 Get the valid stack node in this block.
1406 If we had a call in that block, there is a Keep constructed by process_calls()
1407 which points to the last stack modification in that block; we'll use
1408 it then. Otherwise we use the stack from the start block and let
1409 the SSA construction fix the usage.
1411 stack = be_abi_reg_map_get(env->regs, arch_env->sp);
1413 stack = get_irn_n(keep, 0);
1415 remove_End_keepalive(get_irg_end(env->birg->irg), keep);
1418 /* Insert results for Return into the register map. */
1419 for (i = 0; i < n_res; ++i) {
1420 ir_node *res = get_Return_res(irn, i);
1421 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1422 assert(arg->in_reg && "return value must be passed in register");
1423 pmap_insert(reg_map, (void *) arg->reg, res);
1426 /* Add uses of the callee save registers. */
1427 foreach_pmap(env->regs, ent) {
1428 const arch_register_t *reg = ent->key;
1429 if (arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1430 pmap_insert(reg_map, ent->key, ent->value);
1433 be_abi_reg_map_set(reg_map, arch_env->sp, stack);
1435 /* Make the Epilogue node and call the arch's epilogue maker. */
1436 create_barrier(env, bl, &mem, reg_map, 1);
1437 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1440 Maximum size of the in array for Return nodes is
1441 return args + callee save/ignore registers + memory + stack pointer
1443 in_max = pmap_count(reg_map) + n_res + 2;
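/* E.g. with one return value and seven entries in reg_map this reserves
 * 7 + 1 + 2 = 10 slots.  The result registers are already contained in
 * reg_map, so in_max is a safe upper bound rather than an exact count
 * (hence "Maximum size" above). */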
1445 in = OALLOCN(&env->obst, ir_node*, in_max);
1446 regs = OALLOCN(&env->obst, arch_register_t const*, in_max);
1449 in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
1451 regs[1] = arch_env->sp;
1454 /* clear SP entry, since it has already been grown. */
1455 pmap_insert(reg_map, (void *) arch_env->sp, NULL);
1456 for (i = 0; i < n_res; ++i) {
1457 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1459 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1460 regs[n++] = arg->reg;
1462 /* Clear the map entry to mark the register as processed. */
1463 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1466 /* grow the rest of the stuff. */
1467 foreach_pmap(reg_map, ent) {
1470 regs[n++] = ent->key;
1474 /* The in array for the new back end return is now ready. */
1476 dbgi = get_irn_dbg_info(irn);
1480 /* we have to pop the shadow parameter in case of struct returns */
1482 ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
1484 /* Set the register classes of the return's parameters accordingly. */
1485 for (i = 0; i < n; ++i) {
1486 if (regs[i] == NULL)
1489 be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
1492 /* Free the space of the Epilog's in array and the register <-> proj map. */
1493 obstack_free(&env->obst, in);
1494 pmap_destroy(reg_map);
1499 typedef struct ent_pos_pair ent_pos_pair;
1500 struct ent_pos_pair {
1501 ir_entity *ent; /**< a value param entity */
1502 int pos; /**< its parameter number */
1503 ent_pos_pair *next; /**< for linking */
1506 typedef struct lower_frame_sels_env_t {
1507 ent_pos_pair *value_param_list; /**< the list of all value param entities */
1508 ir_node *frame; /**< the current frame */
1509 const arch_register_class_t *sp_class; /**< register class of the stack pointer */
1510 const arch_register_class_t *link_class; /**< register class of the link pointer */
1511 ir_type *value_tp; /**< the value type if any */
1512 ir_type *frame_tp; /**< the frame type */
1513 int static_link_pos; /**< argument number of the hidden static link */
1514 } lower_frame_sels_env_t;
1517 * Return an entity from the backend for a value param entity.
1519 * @param ent a value param type entity
1520 * @param ctx context
1522 static ir_entity *get_argument_entity(ir_entity *ent, lower_frame_sels_env_t *ctx)
1524 ir_entity *argument_ent = get_entity_link(ent);
1526 if (argument_ent == NULL) {
1527 /* we have NO argument entity yet: This is bad, as we will
1528 * need one for backing store.
1531 ir_type *frame_tp = ctx->frame_tp;
1532 unsigned offset = get_type_size_bytes(frame_tp);
1533 ir_type *tp = get_entity_type(ent);
1534 unsigned align = get_type_alignment_bytes(tp);
1536 offset += align - 1;
1537 offset &= ~(align - 1);
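/* e.g. a current frame size of 13 and an alignment of 8 places the new
 * entity at offset (13 + 7) & ~7 == 16 */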
1539 argument_ent = copy_entity_own(ent, frame_tp);
1541 /* must be automatic to set a fixed layout */
1542 set_entity_allocation(argument_ent, allocation_automatic);
1543 set_entity_offset(argument_ent, offset);
1544 offset += get_type_size_bytes(tp);
1546 set_type_size_bytes(frame_tp, offset);
1547 set_entity_link(ent, argument_ent);
1549 return argument_ent;
1552 * Walker: Replaces Sels of frame type and
1553 * value param type entities by FrameAddress.
1554 * Links all used entities.
1556 static void lower_frame_sels_walker(ir_node *irn, void *data)
1558 lower_frame_sels_env_t *ctx = data;
1561 ir_node *ptr = get_Sel_ptr(irn);
1563 if (ptr == ctx->frame) {
1564 ir_entity *ent = get_Sel_entity(irn);
1565 ir_node *bl = get_nodes_block(irn);
1568 int is_value_param = 0;
1570 if (get_entity_owner(ent) == ctx->value_tp) {
1573 /* replace by its copy from the argument type */
1574 pos = get_struct_member_index(ctx->value_tp, ent);
1575 ent = get_argument_entity(ent, ctx);
1578 nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
1581 /* check if it's a param Sel and if we have not seen this entity before */
1582 if (is_value_param && get_entity_link(ent) == NULL) {
1588 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1590 set_entity_link(ent, ctx->value_param_list);
1597 * Check if a value parameter is transmitted in a register.
1598 * This might happen if the address of a parameter is taken which is
1599 * transmitted in registers.
1601 * Note that on some architectures this case must be handled specially
1602 * because the place of the backing store is determined by their ABI.
1604 * In the default case we move the entity to the frame type and create
1605 * a backing store into the first block.
1607 static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
1609 be_abi_call_t *call = env->call;
1610 ir_graph *irg = env->birg->irg;
1611 ent_pos_pair *entry, *new_list;
1613 int i, n = ARR_LEN(value_param_list);
1616 for (i = 0; i < n; ++i) {
1617 int pos = value_param_list[i].pos;
1618 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
1621 DBG((dbg, LEVEL_2, "\targ #%d need backing store\n", pos));
1622 value_param_list[i].next = new_list;
1623 new_list = &value_param_list[i];
1626 if (new_list != NULL) {
1627 /* ok, change the graph */
1628 ir_node *start_bl = get_irg_start_block(irg);
1629 ir_node *first_bl = NULL;
1630 ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
1631 const ir_edge_t *edge;
1632 optimization_state_t state;
1635 foreach_block_succ(start_bl, edge) {
1636 first_bl = get_edge_src_irn(edge);
1639 assert(first_bl && first_bl != start_bl);
1640 /* we had already removed critical edges, so the following
1641 assertion should always be true. */
1642 assert(get_Block_n_cfgpreds(first_bl) == 1);
1644 /* now create backing stores */
1645 frame = get_irg_frame(irg);
1646 imem = get_irg_initial_mem(irg);
1648 save_optimization_state(&state);
1650 nmem = new_r_Proj(start_bl, get_irg_start(irg), mode_M, pn_Start_M);
1651 restore_optimization_state(&state);
1653 /* reroute all edges to the new memory source */
1654 edges_reroute(imem, nmem, irg);
1658 args = get_irg_args(irg);
1659 args_bl = get_nodes_block(args);
1660 for (entry = new_list; entry != NULL; entry = entry->next) {
1662 ir_type *tp = get_entity_type(entry->ent);
1663 ir_mode *mode = get_type_mode(tp);
1666 /* address for the backing store */
1667 addr = be_new_FrameAddr(env->arch_env->sp->reg_class, first_bl, frame, entry->ent);
1670 mem = new_r_Proj(first_bl, store, mode_M, pn_Store_M);
1672 /* the backing store itself */
1673 store = new_r_Store(first_bl, mem, addr,
1674 new_r_Proj(args_bl, args, mode, i), 0);
1676 /* the new memory Proj gets the last Proj from store */
1677 set_Proj_pred(nmem, store);
1678 set_Proj_proj(nmem, pn_Store_M);
1680 /* move all entities to the frame type */
1681 frame_tp = get_irg_frame_type(irg);
1682 offset = get_type_size_bytes(frame_tp);
1684 /* we will add new entities: set the layout to undefined */
1685 assert(get_type_state(frame_tp) == layout_fixed);
1686 set_type_state(frame_tp, layout_undefined);
1687 for (entry = new_list; entry != NULL; entry = entry->next) {
1688 ir_entity *ent = entry->ent;
1690 /* If the entity is still on the argument type, move it to the frame type.
1691 This happens if the value_param type was built due to compound
1693 if (get_entity_owner(ent) != frame_tp) {
1694 ir_type *tp = get_entity_type(ent);
1695 unsigned align = get_type_alignment_bytes(tp);
1697 offset += align - 1;
1698 offset &= ~(align - 1);
1699 set_entity_owner(ent, frame_tp);
1700 add_class_member(frame_tp, ent);
1701 /* must be automatic to set a fixed layout */
1702 set_entity_allocation(ent, allocation_automatic);
1703 set_entity_offset(ent, offset);
1704 offset += get_type_size_bytes(tp);
1707 set_type_size_bytes(frame_tp, offset);
1708 /* fix the layout again */
1709 set_type_state(frame_tp, layout_fixed);
1714 * The start block has no jump; instead it has an initial exec Proj.
1715 * The backend wants to handle all blocks the same way, so we replace
1716 * the out cfg edge with a real jump.
1718 static void fix_start_block(ir_graph *irg)
1720 ir_node *initial_X = get_irg_initial_exec(irg);
1721 ir_node *start_block = get_irg_start_block(irg);
1722 const ir_edge_t *edge;
1724 assert(is_Proj(initial_X));
1726 foreach_out_edge(initial_X, edge) {
1727 ir_node *block = get_edge_src_irn(edge);
1729 if (is_Anchor(block))
1731 if (block != start_block) {
1732 ir_node *jmp = new_r_Jmp(start_block);
1733 set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
1734 set_irg_initial_exec(irg, jmp);
1738 panic("Initial exec has no follow block in %+F", irg);
1742 * Update the entity of Sels that access the outer value parameters.
1744 static void update_outer_frame_sels(ir_node *irn, void *env) {
1745 lower_frame_sels_env_t *ctx = env;
1752 ptr = get_Sel_ptr(irn);
1753 if (! is_arg_Proj(ptr))
1755 if (get_Proj_proj(ptr) != ctx->static_link_pos)
1757 ent = get_Sel_entity(irn);
1759 if (get_entity_owner(ent) == ctx->value_tp) {
1760 /* replace by its copy from the argument type */
1761 pos = get_struct_member_index(ctx->value_tp, ent);
1762 ent = get_argument_entity(ent, ctx);
1763 set_Sel_entity(irn, ent);
1765 /* check, if we have not seen this entity before */
1766 if (get_entity_link(ent) == NULL) {
1772 ARR_APP1(ent_pos_pair, ctx->value_param_list, pair);
1774 set_entity_link(ent, ctx->value_param_list);
1780 * Fix access to outer local variables.
1782 static void fix_outer_variable_access(be_abi_irg_t *env,
1783 lower_frame_sels_env_t *ctx)
1789 for (i = get_class_n_members(ctx->frame_tp) - 1; i >= 0; --i) {
1790 ir_entity *ent = get_class_member(ctx->frame_tp, i);
1792 if (! is_method_entity(ent))
1794 if (get_entity_peculiarity(ent) == peculiarity_description)
1798 * FIXME: find the number of the static link parameter
1799 * for now we assume 0 here
1801 ctx->static_link_pos = 0;
1803 irg = get_entity_irg(ent);
1804 irg_walk_graph(irg, NULL, update_outer_frame_sels, ctx);
1809 * Modify the irg itself and the frame type.
1811 static void modify_irg(be_abi_irg_t *env)
1813 be_abi_call_t *call = env->call;
1814 const arch_env_t *arch_env= env->birg->main_env->arch_env;
1815 const arch_register_t *sp = arch_env->sp;
1816 ir_graph *irg = env->birg->irg;
1819 ir_node *new_mem_proj;
1821 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1826 unsigned frame_size;
1829 const arch_register_t *fp_reg;
1830 ir_node *frame_pointer;
1834 const ir_edge_t *edge;
1835 ir_type *arg_type, *bet_type, *tp;
1836 lower_frame_sels_env_t ctx;
1837 ir_entity **param_map;
1839 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1841 /* Must fetch memory here, otherwise the start Barrier gets the wrong
1842 * memory, which leads to loops in the DAG. */
1843 old_mem = get_irg_initial_mem(irg);
1845 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1847 /* set the links of all frame entities to NULL; we use them
1848 to detect if an entity is already linked in the value_param_list */
1849 tp = get_method_value_param_type(method_type);
1852 /* clear the links of the clone type, let the
1853 original entities point to their clones */
1854 for (i = get_struct_n_members(tp) - 1; i >= 0; --i) {
1855 ir_entity *mem = get_struct_member(tp, i);
1856 set_entity_link(mem, NULL);
1860 arg_type = compute_arg_type(env, call, method_type, tp, ¶m_map);
1862 /* Convert the Sel nodes in the irg to frame addr nodes: */
1863 ctx.value_param_list = NEW_ARR_F(ent_pos_pair, 0);
1864 ctx.frame = get_irg_frame(irg);
1865 ctx.sp_class = env->arch_env->sp->reg_class;
1866 ctx.link_class = env->arch_env->link_class;
1867 ctx.frame_tp = get_irg_frame_type(irg);
1869 /* we will possibly add new entities to the frame: set the layout to undefined */
1870 assert(get_type_state(ctx.frame_tp) == layout_fixed);
1871 set_type_state(ctx.frame_tp, layout_undefined);
1873 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1875 /* fix the frame type layout again */
1876 set_type_state(ctx.frame_tp, layout_fixed);
1877 /* align stack frame to 4 bytes */
1878 frame_size = get_type_size_bytes(ctx.frame_tp);
1879 if (frame_size % 4 != 0) {
1880 set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
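/* e.g. a 10-byte frame grows to 10 + 4 - (10 % 4) == 12 bytes */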
1883 env->regs = pmap_create();
1885 n_params = get_method_n_params(method_type);
1886 args = OALLOCNZ(&env->obst, ir_node*, n_params);
1889 * for inner functions we must now fix access to outer frame entities.
1891 fix_outer_variable_access(env, &ctx);
1893 /* Check if a value parameter is transmitted in a register.
1894 * This might happen if the address of a parameter is taken which is
1895 * transmitted in registers.
1897 * Note that on some architectures this case must be handled specially
1898 * because the place of the backing store is determined by their ABI.
1900 * In the default case we move the entity to the frame type and create
1901 * a backing store into the first block.
1903 fix_address_of_parameter_access(env, ctx.value_param_list);
1905 DEL_ARR_F(ctx.value_param_list);
1906 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1908 /* Fill the argument vector */
1909 arg_tuple = get_irg_args(irg);
1910 foreach_out_edge(arg_tuple, edge) {
1911 ir_node *irn = get_edge_src_irn(edge);
1912 if (! is_Anchor(irn)) {
1913 int nr = get_Proj_proj(irn);
1915 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1919 bet_type = call->cb->get_between_type(env->cb);
1920 stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
1922 /* Count the register params and add them to the number of Projs for the RegParams node */
1923 for (i = 0; i < n_params; ++i) {
1924 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1925 if (arg->in_reg && args[i]) {
1926 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1927 assert(i == get_Proj_proj(args[i]));
1929 /* For now, associate the register with the old Proj from Start representing that argument. */
1930 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1931 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1935 /* Collect all callee-save registers */
1936 for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
1937 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
1938 for (j = 0; j < cls->n_regs; ++j) {
1939 const arch_register_t *reg = &cls->regs[j];
1940 if (arch_register_type_is(reg, callee_save) ||
1941 arch_register_type_is(reg, state)) {
1942 pmap_insert(env->regs, (void *) reg, NULL);
1947 /* handle start block here (place a jump in the block) */
1948 fix_start_block(irg);
1950 pmap_insert(env->regs, (void *) sp, NULL);
1951 pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1952 start_bl = get_irg_start_block(irg);
1953 env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1956 * Make proj nodes for the callee-save registers.
1957 * Memorize them, since Return nodes get those as inputs.
1959 * Note that if a register corresponds to an argument, the regs map contains
1960 * the old Proj from start for that argument.
1963 rm = reg_map_to_arr(&env->obst, env->regs);
1964 for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1965 arch_register_t *reg = (void *) rm[i].reg;
1966 ir_mode *mode = reg->reg_class->mode;
1968 arch_register_req_type_t add_type = 0;
1972 add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
1975 proj = new_r_Proj(start_bl, env->start, mode, nr + 1);
1976 pmap_insert(env->regs, (void *) reg, proj);
1977 be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
1978 arch_set_irn_register(proj, reg);
1980 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1982 obstack_free(&env->obst, rm);
1984 /* create a new initial memory proj */
1985 assert(is_Proj(old_mem));
1986 arch_set_out_register_req(env->start, 0, arch_no_register_req);
1987 new_mem_proj = new_r_Proj(start_bl, env->start, mode_M, 0);
1989 set_irg_initial_mem(irg, mem);
1991 /* Generate the Prologue */
1992 fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
1994 /* do the stack allocation BEFORE the barrier, or spill code
1995 might be added before it */
1996 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1997 env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
1998 be_abi_reg_map_set(env->regs, sp, env->init_sp);
2000 create_barrier(env, start_bl, &mem, env->regs, 0);
2002 env->init_sp = be_abi_reg_map_get(env->regs, sp);
2003 arch_set_irn_register(env->init_sp, sp);
2005 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
2006 set_irg_frame(irg, frame_pointer);
2007 pset_insert_ptr(env->ignore_regs, fp_reg);
2009 /* rewire old mem users to new mem */
2010 exchange(old_mem, mem);
2012 set_irg_initial_mem(irg, mem);
2014 /* Now, introduce stack param nodes for all parameters passed on the stack */
2015 for (i = 0; i < n_params; ++i) {
2016 ir_node *arg_proj = args[i];
2017 ir_node *repl = NULL;
2019 if (arg_proj != NULL) {
2020 be_abi_call_arg_t *arg;
2021 ir_type *param_type;
2022 int nr = get_Proj_proj(arg_proj);
2025 nr = MIN(nr, n_params);
2026 arg = get_call_arg(call, 0, nr);
2027 param_type = get_method_param_type(method_type, nr);
2030 repl = pmap_get(env->regs, (void *) arg->reg);
2031 } else if (arg->on_stack) {
2032 ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
2034 /* For atomic parameters which are actually used, we create a Load node. */
2035 if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
2036 ir_mode *mode = get_type_mode(param_type);
2037 ir_mode *load_mode = arg->load_mode;
2039 ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
2040 repl = new_r_Proj(start_bl, load, load_mode, pn_Load_res);
2042 if (mode != load_mode) {
2043 repl = new_r_Conv(start_bl, repl, mode);
2046 /* The stack parameter is not primitive (it is a struct or array),
2047 * so we will create a node representing the parameter's address
2053 assert(repl != NULL);
2055 /* Beware: the mode of the register parameters is always the mode of the register class
2056 which may be wrong. Add Conv's then. */
2057 mode = get_irn_mode(args[i]);
2058 if (mode != get_irn_mode(repl)) {
2059 repl = new_r_Conv(get_nodes_block(repl), repl, mode);
2061 exchange(args[i], repl);
2065 /* the arg proj is not needed anymore and should only be used by the anchor */
2066 assert(get_irn_n_edges(arg_tuple) == 1);
2067 kill_node(arg_tuple);
2068 set_irg_args(irg, new_r_Bad(irg));
2070 /* All Return nodes hang on the End node, so look for them there. */
2071 end = get_irg_end_block(irg);
2072 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
2073 ir_node *irn = get_Block_cfgpred(end, i);
2075 if (is_Return(irn)) {
2076 ir_node *blk = get_nodes_block(irn);
2077 ir_node *mem = get_Return_mem(irn);
2078 ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
2082 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
2083 the code is dead and will never be executed. */
2085 obstack_free(&env->obst, args);
2088 /** Fix the state inputs of calls that still hang on unknowns */
2090 void fix_call_state_inputs(be_abi_irg_t *env)
2092 const arch_env_t *arch_env = env->arch_env;
2094 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
2096 /* Collect the state registers */
2097 n = arch_env_get_n_reg_class(arch_env);
2098 for (i = 0; i < n; ++i) {
2100 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
2101 for (j = 0; j < cls->n_regs; ++j) {
2102 const arch_register_t *reg = arch_register_for_index(cls, j);
2103 if (arch_register_type_is(reg, state)) {
2104 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
2109 n = ARR_LEN(env->calls);
2110 n_states = ARR_LEN(stateregs);
2111 for (i = 0; i < n; ++i) {
2113 ir_node *call = env->calls[i];
2115 arity = get_irn_arity(call);
2117 /* the state reg inputs are the last n inputs of the calls */
2118 for (s = 0; s < n_states; ++s) {
2119 int inp = arity - n_states + s;
2120 const arch_register_t *reg = stateregs[s];
2121 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
2123 set_irn_n(call, inp, regnode);
2127 DEL_ARR_F(stateregs);
/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
	ir_type   *type   = get_entity_type(method);
	ident     *old_id = get_entity_ld_ident(method);
	ident     *id     = id_mangle3("L", old_id, "$stub");
	ir_type   *parent = be->pic_trampolines_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, visibility_local);
	set_entity_variability(ent, variability_uninitialized);

	return ent;
}
/**
 * Returns the trampoline entity for the given method.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
	ir_entity *result = pmap_get(env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
	}

	return result;
}
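/**
 * Create a PIC symbol entity: an indirection slot of pointer type in the
 * backend's pic_symbols_type segment that will hold the address of the
 * given entity.
 */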
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *old_id = get_entity_ld_ident(entity);
	ident     *id     = id_mangle3("L", old_id, "$non_lazy_ptr");
	ir_type   *e_type = get_entity_type(entity);
	ir_type   *type   = new_type_pointer(id, e_type, mode_P_data);
	ir_type   *parent = be->pic_symbols_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, visibility_local);
	set_entity_variability(ent, variability_uninitialized);

	return ent;
}
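/**
 * Returns the PIC symbol entity for the given entity, creating it on demand.
 */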
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
	ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
	}

	return result;
}
/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
	return get_entity_visibility(entity) != visibility_external_allocated;
}
/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
	ir_graph     *irg;
	ir_node      *pic_base;
	ir_node      *add;
	ir_node      *block;
	ir_mode      *mode;
	ir_node      *load;
	ir_node      *load_res;
	ir_node      *unknown;
	dbg_info     *dbgi;
	int           arity, i;
	be_abi_irg_t *env = data;

	be_main_env_t *be = env->birg->main_env;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node   *pred = get_irn_n(node, i);
		ir_entity *entity;
		ir_entity *pic_symbol;
		ir_node   *pic_symconst;

		if (!is_SymConst(pred))
			continue;

		entity = get_SymConst_entity(pred);
		block  = get_nodes_block(pred);
		irg    = get_irn_irg(pred);

		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node   *trampoline_const;

			if (can_address_relative(entity))
				continue;

			dbgi             = get_irn_dbg_info(pred);
			trampoline       = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			                                            trampoline, NULL);
			set_irn_n(node, i, trampoline_const);
			continue;
		}

		/* everything else is accessed relative to EIP */
		mode     = get_irn_mode(pred);
		unknown  = new_r_Unknown(irg, mode);
		pic_base = arch_code_generator_get_pic_base(env->birg->cg);

		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			ir_node *add = new_r_Add(block, pic_base, pred, mode);

			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
			continue;
		}

		/* get entry from pic symbol segment */
		dbgi         = get_irn_dbg_info(pred);
		pic_symbol   = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		                                        pic_symbol, NULL);
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);

		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load     = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
		load_res = new_r_Proj(block, load, mode, pn_Load_res);

		set_irn_n(node, i, load_res);
	}
}
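/**
 * Construct the ABI environment for a graph: set up the stack pointer
 * register requirement, rewrite SymConsts for position independent code,
 * lower all call nodes and fix the state register inputs of the calls.
 */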
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
	be_abi_irg_t *env  = XMALLOC(be_abi_irg_t);
	ir_node *old_frame = get_irg_frame(birg->irg);
	ir_graph *irg      = birg->irg;

	pmap_entry *ent;
	ir_node *dummy;
	optimization_state_t state;
	unsigned *limited_bitset;
	arch_register_req_t *sp_req;

	be_omit_fp      = birg->main_env->options->omit_fp;
	be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;

	obstack_init(&env->obst);

	env->arch_env    = birg->main_env->arch_env;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call        = be_abi_call_new(env->arch_env->sp->reg_class);
	arch_env_get_call_abi(env->arch_env, env->method_type, env->call);

	env->ignore_regs  = pset_new_ptr_default();
	env->keep_map     = pmap_create();
	env->dce_survivor = new_survive_dce();
	env->birg         = birg;

	sp_req = OALLOCZ(&env->obst, arch_register_req_t);
	env->sp_req = sp_req;

	sp_req->type = arch_register_req_type_limited
	             | arch_register_req_type_produces_sp;
	sp_req->cls  = arch_register_get_class(env->arch_env->sp);

	limited_bitset = rbitset_obstack_alloc(&env->obst, sp_req->cls->n_regs);
	rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
	sp_req->limited = limited_bitset;
	if (env->arch_env->sp->type & arch_register_type_ignore) {
		sp_req->type |= arch_register_req_type_ignore;
	}

	/* Beware: later we replace this node by the real one, ensure it is not CSE'd
	   to another Unknown or the stack pointer gets used */
	save_optimization_state(&state);
	set_optimize(0);
	env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
	restore_optimization_state(&state);

	env->calls = NEW_ARR_F(ir_node*, 0);

	if (birg->main_env->options->pic) {
		irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
	}

	/* Lower all call nodes in the IRG. */
	process_calls(env);

	/*
		Beware: init backend abi call object after processing calls,
		otherwise some information might not be available yet.
	*/
	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);

	/* Process the IRG */
	modify_irg(env);

	/* fix call inputs for state registers */
	fix_call_state_inputs(env);

	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	env->keep_map = NULL;

	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);

	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));

	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	foreach_pmap(env->regs, ent) {
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	}

	env->call->cb->done(env->cb);

	return env;
}
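/** Frees all resources allocated by an ABI environment. */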
void be_abi_free(be_abi_irg_t *env)
{
	be_abi_call_free(env->call);
	free_survive_dce(env->dce_survivor);
	del_pset(env->ignore_regs);
	pmap_destroy(env->regs);
	obstack_free(&env->obst, NULL);
	free(env);
}
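/**
 * Sets in the given bitset all registers of the class which the ABI
 * treats as ignore registers.
 */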
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
{
	arch_register_t *reg;

	for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
		if (reg->reg_class == cls)
			bitset_set(bs, reg->index);
}
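/**
 * Fills the raw bitset with all registers of the class that are not ignore
 * registers, then clears the bits of the registers the ABI additionally
 * ignores.
 */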
void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
{
	unsigned         i;
	arch_register_t *reg;

	for (i = 0; i < cls->n_regs; ++i) {
		if (arch_register_type_is(&cls->regs[i], ignore))
			continue;

		rbitset_set(raw_bitset, i);
	}

	for (reg = pset_first(abi->ignore_regs); reg != NULL;
	     reg = pset_next(abi->ignore_regs)) {
		if (reg->reg_class != cls)
			continue;

		rbitset_clear(raw_bitset, reg->index);
	}
}
/* Returns the stack layout from an ABI environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi)
{
	return &abi->frame;
}
/*
  _____ _        ____  _             _
 |  ___(_)_  __ / ___|| |_ __ _  ___| | __
 | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
 |  _| | |>  <   ___) | || (_| | (__|   <
 |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\
*/
typedef ir_node **node_array;

typedef struct fix_stack_walker_env_t {
	node_array sp_nodes;
} fix_stack_walker_env_t;

/**
 * Walker. Collect all stack modifying nodes.
 */
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
	ir_node                   *insn = node;
	fix_stack_walker_env_t    *env  = data;
	const arch_register_req_t *req;

	if (is_Proj(node)) {
		insn = get_Proj_pred(node);
	}
	if (arch_irn_get_n_outs(insn) == 0)
		return;

	req = arch_get_register_req_out(node);
	if (! (req->type & arch_register_req_type_produces_sp))
		return;

	ARR_APP1(ir_node*, env->sp_nodes, node);
}
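/**
 * Collects all nodes producing the stack pointer and rebuilds SSA form for
 * them; the newly created Phi nodes get the stack pointer register
 * requirement assigned.
 */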
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
{
	be_ssa_construction_env_t senv;
	int i, len;
	ir_node **phis;
	be_irg_t *birg = env->birg;
	be_lv_t  *lv   = be_get_birg_liveness(birg);
	fix_stack_walker_env_t walker_env;

	walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);

	irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);

	/* nothing to be done if we didn't find any node, in fact we mustn't
	 * continue, as for endless loops incsp might have had no users and is bad
	 * now.
	 */
	len = ARR_LEN(walker_env.sp_nodes);
	if (len == 0) {
		DEL_ARR_F(walker_env.sp_nodes);
		return;
	}

	be_ssa_construction_init(&senv, birg);
	be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
	                               ARR_LEN(walker_env.sp_nodes));
	be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
	                                    ARR_LEN(walker_env.sp_nodes));

	len = ARR_LEN(walker_env.sp_nodes);
	for (i = 0; i < len; ++i) {
		be_liveness_update(lv, walker_env.sp_nodes[i]);
	}
	be_ssa_construction_update_liveness_phis(&senv, lv);

	phis = be_ssa_construction_get_new_phis(&senv);

	/* set register requirements for stack phis */
	len = ARR_LEN(phis);
	for (i = 0; i < len; ++i) {
		ir_node *phi = phis[i];
		be_set_phi_reg_req(phi, env->sp_req);
		arch_set_irn_register(phi, env->arch_env->sp);
	}
	be_ssa_construction_destroy(&senv);

	DEL_ARR_F(walker_env.sp_nodes);
}
/**
 * Fix all stack accessing operations in the block bl.
 *
 * @param env        the abi environment
 * @param bl         the block to process
 * @param real_bias  the bias value
 *
 * @return the bias at the end of this block
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
	int      omit_fp     = env->call->flags.bits.try_omit_fp;
	int      wanted_bias = real_bias;
	ir_node *irn;

	sched_foreach(bl, irn) {
		int ofs;

		/*
		   Check, if the node relates to an entity on the stack frame.
		   If so, set the true offset (including the bias) for that
		   node.
		 */
		ir_entity *ent = arch_get_frame_entity(irn);
		if (ent != NULL) {
			int bias   = omit_fp ? real_bias : 0;
			int offset = get_stack_entity_offset(&env->frame, ent, bias);
			arch_set_frame_offset(irn, offset);
			DBG((dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
			     ent, offset, bias));
		}

		/*
		 * If the node modifies the stack pointer by a constant offset,
		 * record that in the bias.
		 */
		ofs = arch_get_sp_bias(irn);

		if (be_is_IncSP(irn)) {
			/* fill in real stack frame size */
			if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
				ir_type *frame_type = get_irg_frame_type(env->birg->irg);
				ofs = (int) get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
				ir_type *frame_type = get_irg_frame_type(env->birg->irg);
				ofs = - (int)get_type_size_bytes(frame_type);
				be_set_IncSP_offset(irn, ofs);
			}

			if (be_get_IncSP_align(irn)) {
				/* patch IncSP to produce an aligned stack pointer */
				ir_type *between_type = env->frame.between_type;
				int      between_size = get_type_size_bytes(between_type);
				int      alignment    = 1 << env->arch_env->stack_alignment;
				int      delta        = (real_bias + ofs + between_size) & (alignment - 1);

				if (delta > 0) {
					be_set_IncSP_offset(irn, ofs + alignment - delta);
					real_bias += alignment - delta;
				}
			} else {
				/* adjust so real_bias corresponds with wanted_bias */
				int delta = wanted_bias - real_bias;
				if (delta != 0) {
					be_set_IncSP_offset(irn, ofs + delta);
					real_bias += delta;
				}
			}
		}

		real_bias   += ofs;
		wanted_bias += ofs;
	}

	assert(real_bias == wanted_bias);
	return real_bias;
}
/**
 * A helper struct for the bias walker.
 */
struct bias_walk {
	be_abi_irg_t *env;              /**< The ABI irg environment. */
	int          start_block_bias;  /**< The bias at the end of the start block. */
	int          between_size;
	ir_node      *start_block;      /**< The start block of the current graph. */
};

/**
 * Block-Walker: fix all stack offsets for all blocks
 * except the start block
 */
static void stack_bias_walker(ir_node *bl, void *data)
{
	struct bias_walk *bw = data;
	if (bl != bw->start_block) {
		process_stack_bias(bw->env, bl, bw->start_block_bias);
	}
}
/**
 * Walker: finally lower all Sels of outer frame or parameter
 * entities.
 */
static void lower_outer_frame_sels(ir_node *sel, void *ctx) {
	be_abi_irg_t *env = ctx;
	ir_node      *ptr;
	ir_entity    *ent;
	ir_type      *owner;

	if (!is_Sel(sel))
		return;

	ent   = get_Sel_entity(sel);
	owner = get_entity_owner(ent);
	ptr   = get_Sel_ptr(sel);

	if (owner == env->frame.frame_type || owner == env->frame.arg_type) {
		/* found access to outer frame or arguments */
		int offset = get_stack_entity_offset(&env->frame, ent, 0);
		if (offset != 0) {
			ir_node  *bl        = get_nodes_block(sel);
			dbg_info *dbgi      = get_irn_dbg_info(sel);
			ir_mode  *mode      = get_irn_mode(sel);
			ir_mode  *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_node  *cnst      = new_r_Const_long(current_ir_graph, mode_UInt, offset);

			ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
		}
		exchange(sel, ptr);
	}
}
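/**
 * Computes the initial frame offsets and fixes the stack bias in all blocks
 * of the graph, including the Sels of inner functions that still reference
 * outer frame or parameter entities.
 */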
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
	ir_graph *irg = env->birg->irg;
	ir_type  *frame_tp;
	int       i;
	struct bias_walk bw;

	stack_frame_compute_initial_offset(&env->frame);
	// stack_layout_dump(stdout, frame);

	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
	bw.between_size     = get_type_size_bytes(env->frame.between_type);

	/* fix the bias in all other blocks */
	bw.env = env;
	bw.start_block = get_irg_start_block(irg);
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);

	/* now fix inner functions: these still have Sel nodes to outer
	   frame and parameter entities */
	frame_tp = get_irg_frame_type(irg);
	for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
		ir_entity *ent = get_class_member(frame_tp, i);

		if (is_method_entity(ent) && get_entity_peculiarity(ent) != peculiarity_description) {
			ir_graph *irg = get_entity_irg(ent);

			irg_walk_graph(irg, NULL, lower_outer_frame_sels, env);
		}
	}
}
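/** Returns the node representing the value of the given callee-save register in this function. */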
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, callee_save));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}
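/** Returns the node representing the value of the given ignore register in this function. */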
ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
	assert(arch_register_type_is(reg, ignore));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
}
/**
 * Returns non-zero if the ABI has omitted the frame pointer in
 * the current graph.
 */
int be_abi_omit_fp(const be_abi_irg_t *abi)
{
	return abi->call->flags.bits.try_omit_fp;
}
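/** Module initializer: registers the firm.be.abi debug module. */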
void be_init_abi(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);