4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
32 #include "besched_t.h"
34 #define MAX(x, y) ((x) > (y) ? (x) : (y))
35 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/* Record for a single ABI call argument/result (stored in the call's param set). */
typedef struct _be_abi_call_arg_t {
	unsigned on_stack : 1;        /* 1 if this argument is passed on the stack — TODO confirm against callers */
	const arch_register_t *reg;   /* Register used to pass the value, if any. */
	unsigned space_before;        /* Padding bytes before the argument on the stack. */
/* Descriptor for one ABI call: convention flags plus backend callbacks. */
struct _be_abi_call_t {
	be_abi_call_flags_t flags;    /* Call convention flags (see be_abi_call_set_flags). */
	const be_abi_callbacks_t *cb; /* Backend callbacks used to build prologue/epilogue etc. */
/* The stack frame is composed of three stacked types (arg, between, frame). */
#define N_FRAME_TYPES 3

/* Model of the stack frame layout. */
typedef struct _be_stack_frame_t {
	type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */

/* A single slot inside a stack frame. */
struct _be_stack_slot_t {
	struct _be_stack_frame_t *frame; /* The frame this slot belongs to. */
/* Per-irg ABI environment: everything the ABI lowering phase needs for one graph. */
struct _be_abi_irg_t {
	be_stack_frame_t *frame; /**< The stack frame model. */
	const be_irg_t *birg; /**< The back end IRG. */
	const arch_isa_t *isa; /**< The isa. */
	survive_dce_t *dce_survivor; /* Keeps selected nodes alive across dead code elimination. */
	be_abi_call_t *call; /**< The ABI call information. */
	type *method_type; /**< The type of the method of the IRG. */
	ir_node *init_sp; /**< The node representing the stack pointer
						 at the start of the function. */
	ir_node *reg_params; /**< The reg params node. */
	pmap *regs; /**< A map of all callee-save and ignore regs to
					their Projs to the RegParams node. */
	pset *stack_phis; /**< The set of all Phi nodes inserted due to
						 stack pointer modifying nodes. */
	int start_block_bias; /**< The stack bias at the end of the start block. */
	void *cb; /**< ABI Callback self pointer. */
	arch_irn_handler_t irn_handler; /* Handler hooked into the arch env for ABI nodes. */
	arch_irn_ops_t irn_ops; /* Node ops for ABI-introduced nodes. */
	DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/* Recover the enclosing be_abi_irg_t from a pointer to one of its embedded members. */
#define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
#define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)

/* Forward, since we need it in be_abi_introduce(). */
static const arch_irn_ops_if_t abi_irn_ops;
static const arch_irn_handler_t abi_irn_handler;
/* Flag: if set, try to omit the frame pointer if called by the backend */
115 _ ____ ___ ____ _ _ _ _
116 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
117 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
118 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
119 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
121 These callbacks are used by the backend to set the parameters
122 for a specific call type.
126 * Set compare function: compares two ABI call object arguments.
128 static int cmp_call_arg(const void *a, const void *b, size_t n)
130 const be_abi_call_arg_t *p = a, *q = b;
131 return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Get or set an ABI call object argument.
 *
 * @param call the abi call
 * @param is_res true for call results, false for call arguments
 * @param pos position of the argument
 * @param do_insert true if the argument is set, false if it's retrieved
 */
static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
	be_abi_call_arg_t arg;
	/* Zero the template so untouched fields compare/insert deterministically. */
	memset(&arg, 0, sizeof(arg));
	/* Simple hash: result flag separates the two namespaces, position disambiguates. */
	hash = is_res * 128 + pos;
		? set_insert(call->params, &arg, sizeof(arg), hash)
		: set_find(call->params, &arg, sizeof(arg), hash);
/**
 * Retrieve an ABI call object argument.
 *
 * @param call the ABI call object
 * @param is_res true for call results, false for call arguments
 * @param pos position of the argument
 * @return the argument record, or NULL if none was registered (lookup only, no insert)
 */
static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
	/* do_insert = 0: pure lookup. */
	return get_or_set_call_arg(call, is_res, pos, 0);
/* Set the flags for a call. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)

/* Register a call argument as passed on the stack with the given layout. */
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
	arg->alignment = alignment;
	arg->space_before = space_before;
	arg->space_after = space_after;
	/* NOTE(review): this assert fires only after alignment was already stored;
	 * consider checking it before the assignments above. */
	assert(alignment > 0 && "Alignment must be greater than 0");

/* Register a call argument as passed in the given register. */
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);

/* Register a call result as returned in the given register (is_res = 1). */
void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/* Get the flags of a ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)

/**
 * Constructor for a new ABI call object.
 *
 * @return the new ABI call object
 */
static be_abi_call_t *be_abi_call_new()
	be_abi_call_t *call = xmalloc(sizeof(call[0]));
	/* Hash set of be_abi_call_arg_t, keyed by (is_res, pos); see cmp_call_arg(). */
	call->params = new_set(cmp_call_arg, 16);
	/* Inherit the global frame pointer omission policy as the default. */
	call->flags.bits.try_omit_fp = be_omit_fp;

/**
 * Destructor for an ABI call object.
 */
static void be_abi_call_free(be_abi_call_t *call)
	del_set(call->params);
234 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
235 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
236 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
237 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
240 Handling of the stack frame. It is composed of three types:
241 1) The type of the arguments which are pushed on the stack.
242 2) The "between type" which consists of stuff the call of the
243 function pushes on the stack (like the return address and
244 the old base pointer for ia32).
245 3) The Firm frame type which consists of all local variables
/* Compute the offset of a frame entity relative to the frame pointer,
 * honouring the frame type order and the given stack bias. */
static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
	type *t = get_entity_owner(ent);
	int ofs = get_entity_offset_bytes(ent);

	/* Find the type the entity is contained in. */
	for(index = 0; index < N_FRAME_TYPES; ++index) {
		if(frame->order[index] == t)

	/* Add the size of all the types below the one of the entity to the entity's offset */
	for(i = 0; i < index; ++i)
		ofs += get_type_size_bytes(frame->order[i]);

	/* correct the offset by the initial position of the frame pointer */
	ofs -= frame->initial_offset;

	/* correct the offset with the current bias. */
/**
 * Retrieve the entity with given offset from a frame type.
 * Linear search over the class members; returns the first match.
 */
static entity *search_ent_with_offset(type *t, int offset)
	for(i = 0, n = get_class_n_members(t); i < n; ++i) {
		entity *ent = get_class_member(t, i);
		if(get_entity_offset_bytes(ent) == offset)
/* Compute and cache the initial frame-pointer offset of the frame.
 * The base type depends on the stack growth direction. */
static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
	type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
	entity *ent = search_ent_with_offset(base, 0);
	/* Reset first so get_stack_entity_offset() computes an unbiased value. */
	frame->initial_offset = 0;
	frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
	return frame->initial_offset;
/* Initialise a stack frame model from its three component types.
 * The 'between' type always sits in the middle; arg/locals swap ends
 * depending on the stack direction. */
static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
	frame->arg_type = args;
	frame->between_type = between;
	frame->frame_type = locals;
	frame->initial_offset = 0;
	frame->stack_dir = stack_dir;
	frame->order[1] = between;
		/* Downward-growing stack: args first, locals last. */
		frame->order[0] = args;
		frame->order[2] = locals;
		/* Upward-growing stack: reverse the outer order. */
		frame->order[0] = locals;
		frame->order[2] = args;
/* Debug helper: dump the frame layout (types, sizes, member offsets) to a file. */
static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
	ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
	for(j = 0; j < N_FRAME_TYPES; ++j) {
		type *t = frame->order[j];

		ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
		for(i = 0, n = get_class_n_members(t); i < n; ++i) {
			entity *ent = get_class_member(t, i);
			ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
339 * If irn is a Sel node computes the address of an entity
340 * on the frame type return the entity, else NULL.
342 static INLINE entity *get_sel_ent(ir_node *irn)
344 if(is_Sel(irn) && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
345 return get_Sel_entity(irn);
/**
 * Walker: Replaces Loads, Stores and Sels of frame type entities
 * by FrameLoad, FrameStore and FrameAddress.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
	entity *ent = get_sel_ent(irn);
		/* Only frame Sels (ent != NULL) are rewritten. */
		be_abi_irg_t *env = data;
		ir_node *bl = get_nodes_block(irn);
		ir_graph *irg = get_irn_irg(bl);
		ir_node *frame = get_irg_frame(irg);

		/* Build a backend FrameAddr in the stack pointer's register class. */
		nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
372 * Returns non-zero if the call argument at given position
373 * is transfered on the stack.
375 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
377 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
378 return arg && !arg->in_reg;
388 Adjustment of the calls inside a graph.
/**
 * Transform a call node.
 * @param env The ABI environment for the current irg.
 * @param irn The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
	ir_graph *irg = env->birg->irg;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	be_abi_call_t *call = be_abi_call_new();
	ir_type *mt = get_Call_type(irn);
	ir_node *call_ptr = get_Call_ptr(irn);
	int n_params = get_method_n_params(mt);
	ir_node *curr_mem = get_Call_mem(irn);
	ir_node *bl = get_nodes_block(irn);
	pset *results = pset_new_ptr(8);
	pset *caller_save = pset_new_ptr(8);
	int stack_dir = arch_isa_stack_dir(isa);
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_mode *mach_mode = sp->reg_class->mode;
	struct obstack *obst = &env->obst;
	ir_node *no_mem = get_irg_no_mem(irg);
	int no_alloc = call->flags.bits.frame_is_setup_on_call;
	ir_node *res_proj = NULL;
	/* Result Projs are renumbered past pn_Call_max; track the largest one. */
	int curr_res_proj = pn_Call_max;
	const ir_edge_t *edge;

	/* Let the isa fill out the abi description for that call node. */
	arch_isa_get_call_abi(isa, mt, call);

	/* Insert code to put the stack arguments on the stack. */
	assert(get_Call_n_params(irn) == n_params);
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
			/* Accumulate the total stack size including padding and alignment. */
			stack_size += arg->space_before;
			stack_size = round_up2(stack_size, arg->alignment);
			stack_size += get_type_size_bytes(get_method_param_type(mt, i));
			stack_size += arg->space_after;
			obstack_int_grow(obst, i);
	pos = obstack_finish(obst);

	/* Collect all arguments which are passed in registers. */
	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg && arg->in_reg) {
			obstack_int_grow(obst, i);
	low_args = obstack_finish(obst);

	/* If there are some parameters which shall be passed on the stack. */
		int do_seq = call->flags.bits.store_args_sequential && !no_alloc;

		/* Reverse list of stack parameters if call arguments are from left to right */
		if(call->flags.bits.left_to_right) {
			for(i = 0; i < n_pos / 2; ++i) {
				int other = n_pos - i - 1;

		/*
		 * If the stack is decreasing and we do not want to store sequentially,
		 * or someone else allocated the call frame
		 * we allocate as much space on the stack all parameters need, by
		 * moving the stack pointer along the stack's direction.
		 */
		if(stack_dir < 0 && !do_seq && !no_alloc) {
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);

		assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
		for(i = 0; i < n_pos; ++i) {
			be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
			ir_node *param = get_Call_param(irn, p);
			ir_node *addr = curr_sp;
			type *param_type = get_method_param_type(mt, p);
			int param_size = get_type_size_bytes(param_type) + arg->space_after;

			curr_ofs += arg->space_before;
			curr_ofs = round_up2(curr_ofs, arg->alignment);

			/* Make the expression to compute the argument's offset. */
				addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
				addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);

			/* Insert a store for primitive arguments. */
			if(is_atomic_type(param_type)) {
				mem = new_r_Store(irg, bl, curr_mem, addr, param);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);

			/* Make a mem copy for compound arguments. */
				assert(mode_is_reference(get_irn_mode(param)));
				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);

			obstack_ptr_grow(obst, mem);

			curr_ofs += param_size;

			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next must be incremented,
			 * and the memory value propagated.
			 */
				curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_expand);

		in = (ir_node **) obstack_finish(obst);

		/* We need the sync only, if we didn't build the stores sequentially. */
			curr_mem = new_r_Sync(irg, bl, n_pos, in);
		obstack_free(obst, in);

	/* Collect caller save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if(arch_register_type_is(reg, caller_save))
				pset_insert_ptr(caller_save, (void *) reg);

	/* search the greatest result proj number */
	/* TODO: what if the result is NOT used? Currently there is
	 * no way to detect this later, especially there is no way to
	 * see this in the proj numbers.
	 * While this is ok for the register allocator, it is bad for
	 * backends which need to change the be_Call further (x87 simulator
	 * for instance. However for this particular case the call_type is
	 */
	foreach_out_edge(irn, edge) {
		const ir_edge_t *res_edge;
		ir_node *irn = get_edge_src_irn(edge);

		if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
			foreach_out_edge(irn, res_edge) {
				be_abi_call_arg_t *arg;
				ir_node *res = get_edge_src_irn(res_edge);

				assert(is_Proj(res));

				proj = get_Proj_proj(res);
				arg = get_call_arg(call, 1, proj);

				/*
				 shift the proj number to the right, since we will drop the
				 unspeakable Proj_T from the Call. Therefore, all real argument
				 Proj numbers must be increased by pn_be_Call_first_res
				 */
				proj += pn_be_Call_first_res;
				set_Proj_proj(res, proj);
				obstack_ptr_grow(obst, res);

				if(proj > curr_res_proj)
					curr_res_proj = proj;
					/* Register results survive the call: not caller-save here. */
					pset_remove_ptr(caller_save, arg->reg);
				//pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))

	obstack_ptr_grow(obst, NULL);
	res_projs = obstack_finish(obst);

	/* make the back end call node and set its register requirements. */
	for(i = 0; i < n_low_args; ++i)
		obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));

	in = obstack_finish(obst);

	/* Immediate call: the target is a SymConst, so the address operand is folded away. */
	if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
		low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
		                       curr_res_proj + pset_count(caller_save), n_low_args, in,
		be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
		low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
		                       curr_res_proj + pset_count(caller_save), n_low_args, in,

	/*
	 Set the register class of the call address to the same as the stack pointer's.
	 That's probably buggy for some architectures.
	 */
	be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);

	/* Set the register classes and constraints of the Call parameters. */
	for(i = 0; i < n_low_args; ++i) {
		int index = low_args[i];
		be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
		assert(arg->reg != NULL);

		be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);

	/* Set the register constraints of the results. */
	for(i = 0; res_projs[i]; ++i) {
		ir_node *irn = res_projs[i];
		int proj = get_Proj_proj(irn);

		/* Correct Proj number since it has been adjusted! (see above) */
		const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);

		be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
	obstack_free(obst, in);
	exchange(irn, low_call);

	/* redirect the result projs to the lowered call instead of the Proj_T */
	for(i = 0; res_projs[i]; ++i)
		set_Proj_pred(res_projs[i], low_call);

	/* Make additional projs for the caller save registers
	   and the Keep node which keeps them alive. */
	if(pset_count(caller_save) > 0) {
		const arch_register_t *reg;

		for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
			ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);

			/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
			be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
			set_irn_link(proj, (void *) reg);
			obstack_ptr_grow(obst, proj);

		in = (ir_node **) obstack_finish(obst);
		keep = be_new_Keep(NULL, irg, bl, n, in);
		for(i = 0; i < n; ++i) {
			const arch_register_t *reg = get_irn_link(in[i]);
			be_node_set_reg_class(keep, i, reg->reg_class);
		obstack_free(obst, in);

	/* Clean up the stack. */
		ir_node *mem_proj = NULL;

		foreach_out_edge(low_call, edge) {
			ir_node *irn = get_edge_src_irn(edge);
			if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {

			mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);

		/* Clean up the stack frame if we allocated it */
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);

	be_abi_call_free(call);
	obstack_free(obst, pos);
	del_pset(caller_save);
/**
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
	if (get_Alloc_where(alloc) == stack_alloc) {
		ir_node *bl = get_nodes_block(alloc);
		ir_graph *irg = get_irn_irg(bl);
		ir_node *alloc_mem = NULL;
		ir_node *alloc_res = NULL;

		const ir_edge_t *edge;

		/* Find the result and memory Projs of the Alloc. */
		foreach_out_edge(alloc, edge) {
			ir_node *irn = get_edge_src_irn(edge);

			assert(is_Proj(irn));
			switch(get_Proj_proj(irn)) {

		/* Beware: currently Alloc nodes without a result might happen,
		   only escape analysis kills them and this phase runs only for object
		   oriented source. We kill the Alloc here. */
		if (alloc_res == NULL) {
			exchange(alloc_mem, get_Alloc_mem(alloc));

		/* The stack pointer will be modified in an unknown manner.
		   We cannot omit it. */
		env->call->flags.bits.try_omit_fp = 0;
		new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));

		exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);

		if(alloc_mem != NULL)
			exchange(alloc_mem, new_r_NoMem(irg));
/**
 * Walker for dependent_on().
 * This function searches a node tgt recursively from a given node
 * but is restricted to the given block.
 * @return 1 if tgt was reachable from curr, 0 if not.
 */
static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
	/* Already visited in this search round: stop. */
	if(get_irn_visited(curr) >= visited_nr)

	set_irn_visited(curr, visited_nr);
	/* Do not leave the block the search is restricted to. */
	if(get_nodes_block(curr) != bl)

	/* Recurse into all operands. */
	for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
		if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
792 * Check if a node is somehow data dependent on another one.
793 * both nodes must be in the same basic block.
794 * @param n1 The first node.
795 * @param n2 The second node.
796 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
798 static int dependent_on(ir_node *n1, ir_node *n2)
800 ir_node *bl = get_nodes_block(n1);
801 ir_graph *irg = get_irn_irg(bl);
802 long vis_nr = get_irg_visited(irg) + 1;
804 assert(bl == get_nodes_block(n2));
805 set_irg_visited(irg, vis_nr);
806 return check_dependence(n1, n2, bl, vis_nr);
/* qsort comparator ordering call nodes by data dependency.
 * NOTE(review): name carries a historical typo ("dependecy"); kept because
 * callers elsewhere reference it. */
static int cmp_call_dependecy(const void *c1, const void *c2)
	ir_node *n1 = *(ir_node **) c1;
	ir_node *n2 = *(ir_node **) c2;

	/*
	 Classical qsort() comparison function behavior:
	 0 if both elements are equal
	 1 if second is "smaller" than first
	 -1 if first is "smaller" than second
	 */
	return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
/* Walker: chains all Call nodes of a block into the block's link field,
 * and clears the leaf flag as soon as any call is seen. */
static void link_calls_in_block_walker(ir_node *irn, void *data)
		be_abi_irg_t *env = data;
		ir_node *bl = get_nodes_block(irn);
		void *save = get_irn_link(bl);

		/* A graph containing a call is no leaf. */
		env->call->flags.bits.irg_is_leaf = 0;

		/* Prepend irn to the block's list of calls. */
		set_irn_link(irn, save);
		set_irn_link(bl, irn);
/**
 * Process all call nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the block. We first order this list according to data dependency
 * and then connect the calls together.
 */
static void process_calls_in_block(ir_node *bl, void *data)
	be_abi_irg_t *env = data;
	ir_node *curr_sp = env->init_sp;

	/* Gather the linked calls into an array on the obstack. */
	for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
		obstack_ptr_grow(&env->obst, irn);

	/* If there were call nodes in the block. */
		nodes = obstack_finish(&env->obst);

		/* order the call nodes according to data dependency */
		qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);

		/* Process from least to most dependent (array was sorted descending). */
		for(i = n - 1; i >= 0; --i) {
			ir_node *irn = nodes[i];

			switch(get_irn_opcode(irn)) {
				curr_sp = adjust_call(env, irn, curr_sp);
				curr_sp = adjust_alloc(env, irn, curr_sp);

		obstack_free(&env->obst, nodes);

		/* Keep the last stack state in the block by tying it to Keep node */
			be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);

	/* Publish the final stack pointer of the block via its link field. */
	set_irn_link(bl, curr_sp);
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;

	/* Assume leaf until link_calls_in_block_walker() finds a call. */
	env->call->flags.bits.irg_is_leaf = 1;
	irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
	irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
900 static void collect_return_walker(ir_node *irn, void *data)
902 if(get_irn_opcode(irn) == iro_Return) {
903 struct obstack *obst = data;
904 obstack_ptr_grow(obst, irn);
/* Build the function's frame setup in the start block: expand the stack,
 * establish the frame pointer (or omit it), and reroute frame users. */
static ir_node *setup_frame(be_abi_irg_t *env)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	be_abi_call_flags_bits_t flags = env->call->flags.bits;
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *old_frame = get_irg_frame(irg);
	ir_node *stack = pmap_get(env->regs, (void *) sp);
	ir_node *frame = pmap_get(env->regs, (void *) bp);

	int stack_nr = get_Proj_proj(stack);

	if(flags.try_omit_fp) {
		/* No frame pointer: just expand the stack. */
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);

		/* Copy the stack pointer into the frame pointer register. */
		frame = be_new_Copy(bp->reg_class, irg, bl, stack);

			be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);

			/* Pin the copy to the frame pointer register. */
			be_set_constr_single_reg(frame, -1, bp);
			be_node_set_flags(frame, -1, arch_irn_flags_ignore);
			arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);

		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);

	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
	env->init_sp = stack;
	set_irg_frame(irg, frame);
	/* All former users of the old frame now use the new frame pointer. */
	edges_reroute(old_frame, frame, irg);
/* Tear the frame down before a Return: restore the stack pointer and
 * collect the register values the Return must carry. */
static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	ir_graph *irg = env->birg->irg;
	ir_node *ret_mem = get_Return_mem(ret);
	ir_node *frame = get_irg_frame(irg);
	ir_node *bl = get_nodes_block(ret);
	/* The block's final stack state was stored in its link field. */
	ir_node *stack = get_irn_link(bl);

	if(env->call->flags.bits.try_omit_fp) {
		/* No frame pointer: shrink the stack back by the frame size. */
		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);

		/* Restore the stack pointer from the frame pointer. */
		stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
		be_set_constr_single_reg(stack, -1, sp);
		be_node_set_flags(stack, -1, arch_irn_flags_ignore);

	/* Collect the Return inputs: sp, bp and all callee-save/ignore registers. */
	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		ir_node *irn = ent->value;

			obstack_ptr_grow(&env->obst, stack);
			obstack_ptr_grow(&env->obst, frame);
		else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			obstack_ptr_grow(obst, irn);
/* Build the frame type holding all stack-passed arguments, assigning each
 * argument an entity with its stack offset. Traversal direction depends on
 * the stack direction and the left-to-right flag. */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
	int dir = env->call->flags.bits.left_to_right ? 1 : -1;
	int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
	int n = get_method_n_params(method_type);
	/* Start at the first or last parameter depending on traversal direction. */
	int curr = inc > 0 ? 0 : n - 1;

	snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
	res = new_type_class(new_id_from_str(buf));

	for(i = 0; i < n; ++i, curr += inc) {
		type *param_type = get_method_param_type(method_type, curr);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);

			snprintf(buf, sizeof(buf), "param_%d", i);
			arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
			/* Lay out: padding before, align, then the value and padding after. */
			ofs += arg->space_before;
			ofs = round_up2(ofs, arg->alignment);
			set_entity_offset_bytes(arg->stack_ent, ofs);
			ofs += arg->space_after;
			ofs += get_type_size_bytes(param_type);

	set_type_size_bytes(res, ofs);
/* Insert one Perm per register class after the RegParams node to delimit it,
 * rebinding the regs map entries to the Perm outputs. */
static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
	struct obstack obst;

	obstack_init(&obst);

	/* Create a Perm after the RegParams node to delimit it. */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);

		/* Collect the live, non-ignore registers of this class. */
		for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			ir_node *irn = pmap_get(regs, (void *) reg);

			if(irn && !arch_register_type_is(reg, ignore)) {
				obstack_ptr_grow(&obst, irn);
				/* Remember the register via the link field for later rebinding. */
				set_irn_link(irn, (void *) reg);

		obstack_ptr_grow(&obst, NULL);
		in = obstack_finish(&obst);

			perm = be_new_Perm(cls, irg, bl, n_regs, in);
			for(j = 0; j < n_regs; ++j) {
				ir_node *arg = in[j];
				arch_register_t *reg = get_irn_link(arg);

				pmap_insert(regs, reg, arg);
				be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);

		obstack_free(&obst, in);

	obstack_free(&obst, NULL);
	const arch_register_t *reg; /* Register of a reg→node map entry. */

/* qsort comparator: order reg/node pairs by register class, then index.
 * NOTE(review): the class comparison subtracts two unrelated pointers,
 * which is formally undefined in ISO C — works on the targeted platforms. */
static int cmp_regs(const void *a, const void *b)
	const reg_node_map_t *p = a;
	const reg_node_map_t *q = b;

	if(p->reg->reg_class == q->reg->reg_class)
		return p->reg->index - q->reg->index;

	return p->reg->reg_class - q->reg->reg_class;
/* Flatten a register→node pmap into a sorted obstack-allocated array
 * (sorted by cmp_regs for a deterministic order). Caller frees via obstack. */
static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
	int n = pmap_count(reg_map);

	reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));

	pmap_foreach(reg_map, ent) {
		res[i].reg = ent->key;
		res[i].irn = ent->value;

	qsort(res, n, sizeof(res[0]), cmp_regs);
/* Build a Barrier node over all registers in regs (plus memory), producing
 * a Proj per register and rebinding the regs map to those Projs. */
static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
	ir_graph *irg = env->birg->irg;
	int n_regs = pmap_count(regs);

	/* Deterministically ordered view of the register map. */
	rm = reg_map_to_arr(&env->obst, regs);

	for(n = 0; n < n_regs; ++n)
		obstack_ptr_grow(&env->obst, rm[n].irn);

		/* The memory value is the last input of the Barrier. */
		obstack_ptr_grow(&env->obst, *mem);

	in = (ir_node **) obstack_finish(&env->obst);
	irn = be_new_Barrier(irg, bl, n, in);
	obstack_free(&env->obst, in);

	for(n = 0; n < n_regs; ++n) {
		int pos = BE_OUT_POS(n);

		const arch_register_t *reg = rm[n].reg;

		proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
		be_node_set_reg_class(irn, n, reg->reg_class);
			/* Optionally constrain the input as well (in_req). */
			be_set_constr_single_reg(irn, n, reg);
		be_set_constr_single_reg(irn, pos, reg);
		be_node_set_reg_class(irn, pos, reg->reg_class);
		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
		if(arch_register_type_is(reg, ignore))
			be_node_set_flags(irn, pos, arch_irn_flags_ignore);

		/* From now on, the register's value lives in this Proj. */
		pmap_insert(regs, (void *) reg, proj);

	/* New memory value: the Barrier's memory Proj. */
	*mem = new_r_Proj(irg, bl, irn, mode_M, n);

	obstack_free(&env->obst, rm);
/**
 * Modify the irg itself and the frame type.
 * Lowers frame Sels, builds the arg type, creates the RegParams node with
 * Projs for all callee-save/ignore registers, emits the prologue barrier and
 * rewires stack-passed parameters. (Continues past this excerpt.)
 */
static void modify_irg(be_abi_irg_t *env)
	be_abi_call_t *call = env->call;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *end = get_irg_end_block(irg);
	ir_node *arg_tuple = get_irg_args(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *mem = get_irg_initial_mem(irg);
	type *method_type = get_entity_type(get_irg_entity(irg));
	pset *dont_save = pset_new_ptr(8);
	int n_params = get_method_n_params(method_type);

	const arch_register_t *fp_reg;
	ir_node *frame_pointer;

	ir_node *reg_params_bl;

	const ir_edge_t *edge;
	ir_type *arg_type, *bet_type;

	bitset_t *used_proj_nr;
	DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)

	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));

	/* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
	irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);

	env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
	env->regs = pmap_create();

	/* Find the maximum proj number of the argument tuple proj */
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		int nr = get_Proj_proj(irn);
		max_arg = MAX(max_arg, nr);

	used_proj_nr = bitset_alloca(1024);
	max_arg = MAX(max_arg + 1, n_params);
	args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
	memset(args, 0, max_arg * sizeof(args[0]));

	/* Fill the argument vector */
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		int nr = get_Proj_proj(irn);
		DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));

	arg_type = compute_arg_type(env, call, method_type);
	bet_type = call->cb->get_between_type(env->cb);
	stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);

	/* Count the register params and add them to the number of Projs for the RegParams node */
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg->in_reg && args[i]) {
			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
			assert(i == get_Proj_proj(args[i]));

			/* For now, associate the register with the old Proj from Start representing that argument. */
			pmap_insert(env->regs, (void *) arg->reg, args[i]);
			bitset_set(used_proj_nr, i);
			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));

	/* Collect all callee-save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
				pmap_insert(env->regs, (void *) reg, NULL);

	/* sp and bp always get a Proj, even if not callee-save. */
	pmap_insert(env->regs, (void *) sp, NULL);
	pmap_insert(env->regs, (void *) isa->bp, NULL);
	reg_params_bl = get_irg_start_block(irg);
	env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));

	/*
	 * make proj nodes for the callee save registers.
	 * memorize them, since Return nodes get those as inputs.
	 *
	 * Note, that if a register corresponds to an argument, the regs map contains
	 * the old Proj from start for that argument.
	 */
	rm = reg_map_to_arr(&env->obst, env->regs);
	for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
		arch_register_t *reg = (void *) rm[i].reg;
		ir_node *arg_proj = rm[i].irn;

		ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;

		int pos = BE_OUT_POS((int) nr);

		bitset_set(used_proj_nr, nr);
		proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
		pmap_insert(env->regs, (void *) reg, proj);
		be_set_constr_single_reg(env->reg_params, pos, reg);
		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);

		/*
		 * If the register is an ignore register,
		 * The Proj for that register shall also be ignored during register allocation.
		 */
		if(arch_register_type_is(reg, ignore))
			be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);

		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
	obstack_free(&env->obst, rm);

	/* Generate the Prologue */
	fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
	barrier = create_barrier(env, bl, &mem, env->regs, 0);

	env->init_sp = be_abi_reg_map_get(env->regs, sp);
	env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
	arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
	be_abi_reg_map_set(env->regs, sp, env->init_sp);
	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
	set_irg_frame(irg, frame_pointer);

	assert(is_Proj(frame_pointer));
	be_node_set_flags(barrier, BE_OUT_POS(get_Proj_proj(frame_pointer)), arch_irn_flags_ignore);

	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for(i = 0; i < max_arg; ++i) {
		ir_node *arg_proj = args[i];
		ir_node *repl = NULL;

		if(arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			ir_type *param_type;
			int nr = get_Proj_proj(arg_proj);

			/* NOTE(review): clamps to n_params, not n_params - 1 — looks like an
			 * off-by-one (valid indices are 0..n_params-1); verify intent. */
			nr = MIN(nr, n_params);
			arg = get_call_arg(call, 0, nr);
			param_type = get_method_param_type(method_type, nr);

				repl = pmap_get(env->regs, (void *) arg->reg);
1308 else if(arg->on_stack) {
1309 /* For atomic parameters which are actually used, we create a StackParam node. */
1310 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1311 ir_mode *mode = get_type_mode(param_type);
1312 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1313 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1316 /* The stack parameter is not primitive (it is a struct or array),
1317 we thus will create a node representing the parameter's address
1320 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1324 assert(repl != NULL);
1325 edges_reroute(args[i], repl, irg);
1329 /* All Return nodes hang on the End node, so look for them there. */
1330 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1331 ir_node *irn = get_irn_n(end, i);
1333 if(get_irn_opcode(irn) == iro_Return) {
1334 ir_node *bl = get_nodes_block(irn);
1335 int n_res = get_Return_n_ress(irn);
1336 pmap *reg_map = pmap_create();
1337 ir_node *mem = get_Return_mem(irn);
1342 const arch_register_t **regs;
1344 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1346 /* Insert results for Return into the register map. */
1347 for(i = 0; i < n_res; ++i) {
1348 ir_node *res = get_Return_res(irn, i);
1349 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1350 assert(arg->in_reg && "return value must be passed in register");
1351 pmap_insert(reg_map, (void *) arg->reg, res);
1354 /* Add uses of the callee save registers. */
1355 pmap_foreach(env->regs, ent) {
1356 const arch_register_t *reg = ent->key;
1357 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1358 pmap_insert(reg_map, ent->key, ent->value);
1361 /* Make the Epilogue node and call the arch's epilogue maker. */
1362 create_barrier(env, bl, &mem, reg_map, 1);
1363 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1366 Maximum size of the in array for Return nodes is
1367 return args + callee save/ignore registers + memory + stack pointer
1369 in_max = pmap_count(reg_map) + get_Return_n_ress(irn) + 2;
1371 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1372 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1375 in[1] = be_abi_reg_map_get(reg_map, sp);
1380 /* clear SP entry, since it has already been grown. */
1381 pmap_insert(reg_map, (void *) sp, NULL);
1382 for(i = 0; i < n_res; ++i) {
1383 ir_node *res = get_Return_res(irn, i);
1384 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1386 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1387 regs[n++] = arg->reg;
1389 /* Clear the map entry to mark the register as processed. */
1390 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1393 /* grow the rest of the stuff. */
1394 pmap_foreach(reg_map, ent) {
1397 regs[n++] = ent->key;
1401 /* The in array for the new back end return is now ready. */
1402 ret = be_new_Return(get_irn_dbg_info(irn), irg, bl, n, in);
1404 /* Set the register classes of the return's parameter accordingly. */
1405 for(i = 0; i < n; ++i)
1407 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1409 /* Free the space of the Epilog's in array and the register <-> proj map. */
1410 obstack_free(&env->obst, in);
1412 pmap_destroy(reg_map);
1416 del_pset(dont_save);
1417 obstack_free(&env->obst, args);
1421 * Walker: puts all Alloc(stack_alloc) on a obstack
1423 static void collect_alloca_walker(ir_node *irn, void *data)
1425 be_abi_irg_t *env = data;
1426 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1427 obstack_ptr_grow(&env->obst, irn);
1430 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1432 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1433 ir_node *old_frame = get_irg_frame(birg->irg);
1434 ir_graph *irg = birg->irg;
1439 env->isa = birg->main_env->arch_env->isa;
1440 env->method_type = get_entity_type(get_irg_entity(irg));
1441 env->call = be_abi_call_new();
1442 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1444 env->dce_survivor = new_survive_dce();
1446 env->stack_phis = pset_new_ptr(16);
1447 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1448 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
1450 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1452 obstack_init(&env->obst);
1454 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1455 env->irn_ops.impl = &abi_irn_ops;
1457 /* Lower all call nodes in the IRG. */
1460 /* Process the IRG */
1463 /* reroute the stack origin of the calls to the true stack origin. */
1464 edges_reroute(dummy, env->init_sp, irg);
1465 edges_reroute(old_frame, get_irg_frame(irg), irg);
1467 /* Make some important node pointers survive the dead node elimination. */
1468 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1469 pmap_foreach(env->regs, ent)
1470 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1472 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
1474 env->call->cb->done(env->cb);
1479 void be_abi_free(be_abi_irg_t *env)
1481 free_survive_dce(env->dce_survivor);
1482 del_pset(env->stack_phis);
1483 pmap_destroy(env->regs);
1484 obstack_free(&env->obst, NULL);
1485 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1493 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1494 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1495 | _| | |> < ___) | || (_| | (__| <
1496 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
1501 * Walker. Collect all stack modifying nodes.
1503 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1507 if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
1508 pset_insert_ptr(s, irn);
1511 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1513 dom_front_info_t *df;
1516 /* We need dominance frontiers for fix up */
1517 df = be_compute_dominance_frontiers(env->birg->irg);
1518 stack_nodes = pset_new_ptr(16);
1519 pset_insert_ptr(stack_nodes, env->init_sp);
1520 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
1521 be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1522 del_pset(stack_nodes);
1524 /* Liveness could have changed due to Phi nodes. */
1525 be_liveness(env->birg->irg);
1527 /* free these dominance frontiers */
1528 be_free_dominance_frontiers(df);
1532 * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
1533 * into -1 or 1, respectively.
1534 * @param irn The node.
1535 * @return 1, if the direction of the IncSP was along, -1 if against.
1537 static int get_dir(ir_node *irn)
1539 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
1542 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1544 const arch_env_t *aenv = env->birg->main_env->arch_env;
1545 int omit_fp = env->call->flags.bits.try_omit_fp;
1548 sched_foreach(bl, irn) {
1551 If the node modifies the stack pointer by a constant offset,
1552 record that in the bias.
1554 if(be_is_IncSP(irn)) {
1555 int ofs = be_get_IncSP_offset(irn);
1556 int dir = get_dir(irn);
1558 if(ofs == BE_STACK_FRAME_SIZE) {
1559 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1560 be_set_IncSP_offset(irn, ofs);
1568 Else check, if the node relates to an entity on the stack frame.
1569 If so, set the true offset (including the bias) for that
1573 entity *ent = arch_get_frame_entity(aenv, irn);
1575 int offset = get_stack_entity_offset(env->frame, ent, bias);
1576 arch_set_frame_offset(aenv, irn, offset);
1577 DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1586 * A helper struct for the bias walker.
1589 be_abi_irg_t *env; /**< The ABI irg environment. */
1590 int start_block_bias; /**< The bias at the end of the start block. */
1594 * Block-Walker: fix all stack offsets
1596 static void stack_bias_walker(ir_node *bl, void *data)
1598 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1599 struct bias_walk *bw = data;
1600 process_stack_bias(bw->env, bl, bw->start_block_bias);
1604 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1606 ir_graph *irg = env->birg->irg;
1607 struct bias_walk bw;
1609 stack_frame_compute_initial_offset(env->frame);
1610 // stack_frame_dump(stdout, env->frame);
1612 /* Determine the stack bias at the end of the start block. */
1613 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1615 /* fix the bias is all other blocks */
1617 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1620 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1622 assert(arch_register_type_is(reg, callee_save));
1623 assert(pmap_contains(abi->regs, (void *) reg));
1624 return pmap_get(abi->regs, (void *) reg);
1628 _____ _____ _ _ _ _ _ _
1629 |_ _| __ \| \ | | | | | | | | |
1630 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1631 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1632 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1633 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1635 for Phi nodes which are created due to stack modifying nodes
1636 such as IncSP, AddSP and SetSP.
1638 These Phis are always to be ignored by the reg alloc and are
1639 fixed on the SP register of the ISA.
1642 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1644 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1645 const void *res = NULL;
1647 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
1648 res = &abi->irn_ops;
1653 static void be_abi_limited(void *data, bitset_t *bs)
1655 be_abi_irg_t *abi = data;
1656 bitset_clear_all(bs);
1657 bitset_set(bs, abi->isa->sp->index);
1660 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1662 be_abi_irg_t *abi = get_abi_from_ops(self);
1663 const arch_register_t *reg = abi->isa->sp;
1665 memset(req, 0, sizeof(req[0]));
1667 if(pos == BE_OUT_POS(0)) {
1668 req->cls = reg->reg_class;
1669 req->type = arch_register_req_type_limited;
1670 req->limited = be_abi_limited;
1671 req->limited_env = abi;
1674 else if(pos >= 0 && pos < get_irn_arity(irn)) {
1675 req->cls = reg->reg_class;
1676 req->type = arch_register_req_type_normal;
1682 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1686 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1688 const be_abi_irg_t *abi = get_abi_from_ops(self);
1689 return abi->isa->sp;
1692 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1694 return arch_irn_class_normal;
1697 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1699 return arch_irn_flags_ignore;
1702 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1707 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
1711 static const arch_irn_ops_if_t abi_irn_ops = {
1712 abi_get_irn_reg_req,
1717 abi_get_frame_entity,
1721 static const arch_irn_handler_t abi_irn_handler = {