4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
32 #include "besched_t.h"
/* NOTE(review): MAX/MIN evaluate each argument twice — never pass
 * expressions with side effects. */
34 #define MAX(x, y) ((x) > (y) ? (x) : (y))
35 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/**
 * Description of one call argument or result: whether it is passed on
 * the stack, which register carries it, and padding before it.
 * NOTE(review): further fields (is_res, pos, in_reg, alignment,
 * space_after, stack_ent — all referenced below) are not visible in this
 * extraction; confirm against the full file.
 */
37 typedef struct _be_abi_call_arg_t {
40 unsigned on_stack : 1;
43 const arch_register_t *reg;
46 unsigned space_before;
/**
 * An ABI call description: flags, backend callbacks and (per the uses
 * below) a set of be_abi_call_arg_t records keyed by (is_res, pos).
 */
50 struct _be_abi_call_t {
51 be_abi_call_flags_t flags;
52 const be_abi_callbacks_t *cb;
/* Number of type regions a stack frame is composed of (args / between / locals). */
57 #define N_FRAME_TYPES 3
/**
 * Model of the stack frame layout for one graph.
 */
59 typedef struct _be_stack_frame_t {
64 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
/** A slot inside a stack frame (fragmentary declaration here). */
70 struct _be_stack_slot_t {
71 struct _be_stack_frame_t *frame;
/**
 * Per-irg environment of the ABI lowering phase.
 */
75 struct _be_abi_irg_t {
77 be_stack_frame_t *frame; /**< The stack frame model. */
78 const be_irg_t *birg; /**< The back end IRG. */
79 const arch_isa_t *isa; /**< The isa. */
80 survive_dce_t *dce_survivor;
82 be_abi_call_t *call; /**< The ABI call information. */
83 type *method_type; /**< The type of the method of the IRG. */
85 ir_node *init_sp; /**< The node representing the stack pointer
86 at the start of the function. */
88 ir_node *reg_params; /**< The reg params node. */
89 pmap *regs; /**< A map of all callee-save and ignore regs to
90 their Projs to the RegParams node. */
92 pset *stack_phis; /**< The set of all Phi nodes inserted due to
93 stack pointer modifying nodes. */
95 int start_block_bias; /**< The stack bias at the end of the start block. */
97 void *cb; /**< ABI Callback self pointer. */
99 arch_irn_handler_t irn_handler;
100 arch_irn_ops_t irn_ops;
101 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/* Recover the enclosing be_abi_irg_t from a pointer to one of its embedded members. */
104 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
105 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
107 /* Forward, since be need it in be_abi_introduce(). */
108 static const arch_irn_ops_if_t abi_irn_ops;
109 static const arch_irn_handler_t abi_irn_handler;
112 _ ____ ___ ____ _ _ _ _
113 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
114 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
115 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
116 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
118 These callbacks are used by the backend to set the parameters
119 for a specific call type.
123 * Set compare function: compares two ABI call object arguments.
 *
 * Returns 0 (equal) iff both entries have the same is_res flag and the
 * same position; the size parameter required by the set API is unused.
125 static int cmp_call_arg(const void *a, const void *b, size_t n)
127 const be_abi_call_arg_t *p = a, *q = b;
128 return !(p->is_res == q->is_res && p->pos == q->pos);
132 * Get or set an ABI call object argument.
134 * @param call the abi call
135 * @param is_res true for call results, false for call arguments
136 * @param pos position of the argument
137 * @param do_insert true if the argument is set, false if it's retrieved
 *
 * @return the (possibly newly inserted) argument record, or NULL on a
 *         failed lookup when do_insert is false.
139 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
141 be_abi_call_arg_t arg;
144 memset(&arg, 0, sizeof(arg));
/* Cheap hash mixing the result flag and the position; collisions are
 * resolved by cmp_call_arg(). */
148 hash = is_res * 128 + pos;
151 ? set_insert(call->params, &arg, sizeof(arg), hash)
152 : set_find(call->params, &arg, sizeof(arg), hash);
156 * Retrieve an ABI call object argument.
158 * @param call the ABI call object
159 * @param is_res true for call results, false for call arguments
160 * @param pos position of the argument
 *
 * @return the argument record, or NULL if it was never set.
162 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
/* Read-only lookup: do_insert == 0. */
164 return get_or_set_call_arg(call, is_res, pos, 0);
167 /* Set the flags for a call. */
168 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/**
 * Record that the argument at arg_pos is passed on the stack with the
 * given alignment and surrounding padding.
 * NOTE(review): the assert runs after the fields are already written;
 * presumably intentional (record is discarded on failure) — confirm.
 */
174 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
176 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
178 arg->alignment = alignment;
179 arg->space_before = space_before;
180 arg->space_after = space_after;
181 assert(alignment > 0 && "Alignment must be greater than 0");
/** Record that the argument at arg_pos is passed in register reg. */
184 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
186 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/** Record that the result at arg_pos is returned in register reg. */
191 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
193 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
198 /* Get the flags of a ABI call object. */
199 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
205 * Constructor for a new ABI call object.
207 * @return the new ABI call object
 *
 * Ownership: the caller must release the object with be_abi_call_free().
209 static be_abi_call_t *be_abi_call_new(void)
211 be_abi_call_t *call = xmalloc(sizeof(call[0]));
213 call->params = new_set(cmp_call_arg, 16);
219 * Destructor for an ABI call object.
221 static void be_abi_call_free(be_abi_call_t *call)
223 del_set(call->params);
229 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
230 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
231 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
232 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
235 Handling of the stack frame. It is composed of three types:
236 1) The type of the arguments which are pushed on the stack.
237 2) The "between type" which consists of stuff the call of the
238 function pushes on the stack (like the return address and
239 the old base pointer for ia32).
240 3) The Firm frame type which consists of all local variables
/**
 * Compute the offset of a stack entity relative to the frame pointer.
 *
 * @param frame  the stack frame model
 * @param ent    the entity whose offset is sought
 * @param bias   the current stack bias to correct the offset with
 * @return the entity offset: its offset inside its own type region, plus
 *         the sizes of all regions below it, minus the initial frame
 *         pointer offset (bias correction not visible in this extract).
 */
244 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
246 type *t = get_entity_owner(ent);
247 int ofs = get_entity_offset_bytes(ent);
251 /* Find the type the entity is contained in. */
252 for(index = 0; index < N_FRAME_TYPES; ++index) {
253 if(frame->order[index] == t)
257 /* Add the size of all the types below the one of the entity to the entity's offset */
258 for(i = 0; i < index; ++i)
259 ofs += get_type_size_bytes(frame->order[i]);
261 /* correct the offset by the initial position of the frame pointer */
262 ofs -= frame->initial_offset;
264 /* correct the offset with the current bias. */
271 * Retrieve the entity with given offset from a frame type.
 *
 * Linear scan over the class members; returns the first entity whose
 * offset matches (NULL-return path not visible in this extract).
273 static entity *search_ent_with_offset(type *t, int offset)
277 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
278 entity *ent = get_class_member(t, i);
279 if(get_entity_offset_bytes(ent) == offset)
/**
 * Compute and cache the initial frame-pointer offset of a stack frame.
 * The base region depends on the stack growth direction: the "between"
 * type for downward-growing stacks, the locals type otherwise.
 */
286 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
288 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
289 entity *ent = search_ent_with_offset(base, 0);
/* Reset first so get_stack_entity_offset() computes an uncorrected offset. */
290 frame->initial_offset = 0;
291 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
292 return frame->initial_offset;
/**
 * Initialize a stack frame model from its three type regions.
 * The region order depends on the stack direction: the "between" type is
 * always in the middle; args and locals swap ends (the branch selecting
 * between the two orderings is not visible in this extract).
 */
295 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
297 frame->arg_type = args;
298 frame->between_type = between;
299 frame->frame_type = locals;
300 frame->initial_offset = 0;
301 frame->stack_dir = stack_dir;
302 frame->order[1] = between;
305 frame->order[0] = args;
306 frame->order[2] = locals;
310 frame->order[0] = locals;
311 frame->order[2] = args;
/**
 * Debug helper: dump the layout of a stack frame (initial offset, then
 * for each region its size and every member entity with its internal and
 * global offsets) to the given file.
 */
317 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
321 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
322 for(j = 0; j < N_FRAME_TYPES; ++j) {
323 type *t = frame->order[j];
325 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
326 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
327 entity *ent = get_class_member(t, i);
328 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
334 * If irn is a Sel node computes the address of an entity
335 * on the frame type return the entity, else NULL.
337 static INLINE entity *get_sel_ent(ir_node *irn)
/* Only Sels based on the graph's frame pointer address frame entities. */
339 if(is_Sel(irn) && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
340 return get_Sel_entity(irn);
347 * Walker: Replaces Loads, Stores and Sels of frame type entities
348 * by FrameLoad, FrameStore and FrameAdress.
 *
 * @param irn   the node currently visited
 * @param data  the be_abi_irg_t environment
350 static void lower_frame_sels_walker(ir_node *irn, void *data)
353 entity *ent = get_sel_ent(irn);
356 be_abi_irg_t *env = data;
357 ir_node *bl = get_nodes_block(irn);
358 ir_graph *irg = get_irn_irg(bl);
359 ir_node *frame = get_irg_frame(irg);
/* Replace the Sel by a backend FrameAddr in the stack pointer's register class. */
361 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
367 * Returns non-zero if the call argument at given position
368 * is transfered on the stack.
370 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
372 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
/* An argument record exists and it is not register-allocated. */
373 return arg && !arg->in_reg;
383 Adjustment of the calls inside a graph.
388 * Transform a call node.
389 * @param env The ABI environment for the current irg.
390 * @param irn The call node.
391 * @param curr_sp The stack pointer node to use.
392 * @return The stack pointer after the call.
 *
 * Lowers a Firm Call into a backend be_Call: stack arguments are stored
 * relative to the stack pointer, register arguments become constrained
 * inputs, result Projs are renumbered past pn_be_Call_first_res, and
 * caller-save registers get Projs kept alive by a Keep node.
394 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
396 ir_graph *irg = env->birg->irg;
397 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
398 be_abi_call_t *call = be_abi_call_new();
399 ir_type *mt = get_Call_type(irn);
400 ir_node *call_ptr = get_Call_ptr(irn);
401 int n_params = get_method_n_params(mt);
402 ir_node *curr_mem = get_Call_mem(irn);
403 ir_node *bl = get_nodes_block(irn);
404 pset *results = pset_new_ptr(8);
405 pset *caller_save = pset_new_ptr(8);
407 int stack_dir = arch_isa_stack_dir(isa);
408 const arch_register_t *sp = arch_isa_sp(isa);
409 ir_mode *mach_mode = sp->reg_class->mode;
410 struct obstack *obst = &env->obst;
411 ir_node *no_mem = get_irg_no_mem(irg);
412 int no_alloc = call->flags.bits.frame_is_setup_on_call;
414 ir_node *res_proj = NULL;
415 int curr_res_proj = pn_Call_max;
422 const ir_edge_t *edge;
427 /* Let the isa fill out the abi description for that call node. */
428 arch_isa_get_call_abi(isa, mt, call);
430 /* Insert code to put the stack arguments on the stack. */
431 assert(get_Call_n_params(irn) == n_params);
432 for(i = 0; i < n_params; ++i) {
433 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
/* Accumulate the total frame size: padding, aligned size, trailing padding. */
436 stack_size += arg->space_before;
437 stack_size = round_up2(stack_size, arg->alignment);
438 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
439 stack_size += arg->space_after;
440 obstack_int_grow(obst, i);
444 pos = obstack_finish(obst);
446 /* Collect all arguments which are passed in registers. */
447 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
448 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
449 if(arg && arg->in_reg) {
450 obstack_int_grow(obst, i);
454 low_args = obstack_finish(obst);
456 /* If there are some parameters which shall be passed on the stack. */
459 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
461 /* Reverse list of stack parameters if call arguments are from left to right */
462 if(call->flags.bits.left_to_right) {
463 for(i = 0; i < n_pos / 2; ++i) {
464 int other = n_pos - i - 1;
472 * If the stack is decreasing and we do not want to store sequentially,
473 * or someone else allocated the call frame
474 * we allocate as much space on the stack all parameters need, by
475 * moving the stack pointer along the stack's direction.
477 if(stack_dir < 0 && !do_seq && !no_alloc) {
478 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
481 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
482 for(i = 0; i < n_pos; ++i) {
484 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
485 ir_node *param = get_Call_param(irn, p);
486 ir_node *addr = curr_sp;
488 type *param_type = get_method_param_type(mt, p);
489 int param_size = get_type_size_bytes(param_type) + arg->space_after;
491 curr_ofs += arg->space_before;
492 curr_ofs = round_up2(curr_ofs, arg->alignment);
494 /* Make the expression to compute the argument's offset. */
496 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
497 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
500 /* Insert a store for primitive arguments. */
501 if(is_atomic_type(param_type)) {
502 mem = new_r_Store(irg, bl, curr_mem, addr, param);
503 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
506 /* Make a mem copy for compound arguments. */
508 assert(mode_is_reference(get_irn_mode(param)));
509 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
510 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
513 obstack_ptr_grow(obst, mem);
515 curr_ofs += param_size;
518 * If we wanted to build the arguments sequentially,
519 * the stack pointer for the next must be incremented,
520 * and the memory value propagated.
524 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_expand);
529 in = (ir_node **) obstack_finish(obst);
531 /* We need the sync only, if we didn't build the stores sequentially. */
533 curr_mem = new_r_Sync(irg, bl, n_pos, in);
534 obstack_free(obst, in);
537 /* Collect caller save registers */
538 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
540 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
541 for(j = 0; j < cls->n_regs; ++j) {
542 const arch_register_t *reg = arch_register_for_index(cls, j);
543 if(arch_register_type_is(reg, caller_save))
544 pset_insert_ptr(caller_save, (void *) reg);
548 /* search the greatest result proj number */
550 /* TODO: what if the result is NOT used? Currently there is
551 * no way to detect this later, especially there is no way to
552 * see this in the proj numbers.
553 * While this is ok for the register allocator, it is bad for
554 * backends which need to change the be_Call further (x87 simulator
555 * for instance. However for this particular case the call_type is
558 foreach_out_edge(irn, edge) {
559 const ir_edge_t *res_edge;
560 ir_node *irn = get_edge_src_irn(edge);
562 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
564 foreach_out_edge(irn, res_edge) {
566 be_abi_call_arg_t *arg;
567 ir_node *res = get_edge_src_irn(res_edge);
569 assert(is_Proj(res));
571 proj = get_Proj_proj(res);
572 arg = get_call_arg(call, 1, proj);
575 shift the proj number to the right, since we will drop the
576 unspeakable Proj_T from the Call. Therefore, all real argument
577 Proj numbers must be increased by pn_be_Call_first_res
579 proj += pn_be_Call_first_res;
580 set_Proj_proj(res, proj);
581 obstack_ptr_grow(obst, res);
583 if(proj > curr_res_proj)
584 curr_res_proj = proj;
/* A register carrying a result is no longer clobber-only: drop it
 * from the caller-save set so no extra Proj/Keep is generated. */
586 pset_remove_ptr(caller_save, arg->reg);
587 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
594 obstack_ptr_grow(obst, NULL);
595 res_projs = obstack_finish(obst);
597 /* make the back end call node and set its register requirements. */
598 for(i = 0; i < n_low_args; ++i)
599 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
601 in = obstack_finish(obst);
/* Calls to a SymConst address may be emitted with an immediate callee. */
603 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
604 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
605 curr_res_proj + pset_count(caller_save), n_low_args, in,
607 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
611 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
612 curr_res_proj + pset_count(caller_save), n_low_args, in,
617 Set the register class of the call address to the same as the stack pointer's.
618 That' probably buggy for some architectures.
620 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
622 /* Set the register classes and constraints of the Call parameters. */
623 for(i = 0; i < n_low_args; ++i) {
624 int index = low_args[i];
625 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
626 assert(arg->reg != NULL);
628 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
631 /* Set the register constraints of the results. */
632 for(i = 0; res_projs[i]; ++i) {
633 ir_node *irn = res_projs[i];
634 int proj = get_Proj_proj(irn);
636 /* Correct Proj number since it has been adjusted! (see above) */
637 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
640 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
642 obstack_free(obst, in);
643 exchange(irn, low_call);
645 /* redirect the result projs to the lowered call instead of the Proj_T */
646 for(i = 0; res_projs[i]; ++i)
647 set_Proj_pred(res_projs[i], low_call);
649 /* Make additional projs for the caller save registers
650 and the Keep node which keeps them alive. */
651 if(pset_count(caller_save) > 0) {
652 const arch_register_t *reg;
656 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
657 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
659 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
660 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
661 set_irn_link(proj, (void *) reg);
662 obstack_ptr_grow(obst, proj);
666 in = (ir_node **) obstack_finish(obst);
667 keep = be_new_Keep(NULL, irg, bl, n, in);
668 for(i = 0; i < n; ++i) {
669 const arch_register_t *reg = get_irn_link(in[i]);
670 be_node_set_reg_class(keep, i, reg->reg_class);
672 obstack_free(obst, in);
675 /* Clean up the stack. */
677 ir_node *mem_proj = NULL;
679 foreach_out_edge(low_call, edge) {
680 ir_node *irn = get_edge_src_irn(edge);
681 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
688 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
690 /* Clean up the stack frame if we allocated it */
692 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
/* Release the per-call ABI description and temporary obstack data. */
695 be_abi_call_free(call);
696 obstack_free(obst, pos);
698 del_pset(caller_save);
705 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 *
 * @param env     the ABI environment
 * @param alloc   the Alloc node to lower
 * @param curr_sp the current stack pointer node
 * Only stack allocations are lowered; doing so forces a frame pointer
 * (try_omit_fp is cleared).
707 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
709 if (get_Alloc_where(alloc) == stack_alloc) {
710 ir_node *bl = get_nodes_block(alloc);
711 ir_graph *irg = get_irn_irg(bl);
712 ir_node *alloc_mem = NULL;
713 ir_node *alloc_res = NULL;
715 const ir_edge_t *edge;
/* A dynamic stack allocation makes frame-pointer omission impossible. */
718 env->call->flags.bits.try_omit_fp = 0;
720 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
722 foreach_out_edge(alloc, edge) {
723 ir_node *irn = get_edge_src_irn(edge);
725 assert(is_Proj(irn));
726 switch(get_Proj_proj(irn)) {
738 /* TODO: Beware: currently Alloc nodes without a result might happen,
739 only escape analysis kills them and this phase runs only for object
740 oriented source. So this must be fixed. */
741 assert(alloc_res != NULL);
742 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
744 if(alloc_mem != NULL)
745 exchange(alloc_mem, new_r_NoMem(irg));
754 * Walker for dependent_on().
755 * This function searches a node tgt recursively from a given node
756 * but is restricted to the given block.
757 * @return 1 if tgt was reachable from curr, 0 if not.
759 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
/* Prune nodes already seen in this traversal (visited counter scheme). */
763 if(get_irn_visited(curr) >= visited_nr)
766 set_irn_visited(curr, visited_nr);
/* Do not leave the basic block under consideration. */
767 if(get_nodes_block(curr) != bl)
773 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
774 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
782 * Check if a node is somehow data dependent on another one.
783 * both nodes must be in the same basic block.
784 * @param n1 The first node.
785 * @param n2 The second node.
786 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
788 static int dependent_on(ir_node *n1, ir_node *n2)
790 ir_node *bl = get_nodes_block(n1);
791 ir_graph *irg = get_irn_irg(bl);
/* Bump the graph's visited counter so this traversal has a fresh mark. */
792 long vis_nr = get_irg_visited(irg) + 1;
794 assert(bl == get_nodes_block(n2));
795 set_irg_visited(irg, vis_nr);
796 return check_dependence(n1, n2, bl, vis_nr);
/**
 * qsort() comparator ordering call nodes by data dependency:
 * a node that depends on another sorts before it.
 * NOTE(review): this ordering is only consistent (transitive/antisymmetric
 * as qsort requires) when the dependency relation over the compared nodes
 * forms a total order — confirm callers guarantee that.
 */
799 static int cmp_call_dependecy(const void *c1, const void *c2)
801 ir_node *n1 = *(ir_node **) c1;
802 ir_node *n2 = *(ir_node **) c2;
805 Classical qsort() comparison function behavior:
806 0 if both elements are equal
807 1 if second is "smaller" that first
808 -1 if first is "smaller" that second
810 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
/**
 * Walker: chain interesting nodes (per the surrounding code: Calls, and
 * graphs containing them are marked non-leaf) into a per-block linked
 * list using the nodes' link fields; the block's link points at the head.
 */
813 static void link_calls_in_block_walker(ir_node *irn, void *data)
816 be_abi_irg_t *env = data;
817 ir_node *bl = get_nodes_block(irn);
818 void *save = get_irn_link(bl);
/* Seeing a call means this graph is no leaf function. */
820 env->call->flags.bits.irg_is_leaf = 0;
/* Push irn onto the block's list: irn -> previous head. */
822 set_irn_link(irn, save);
823 set_irn_link(bl, irn);
828 * Process all call nodes inside a basic block.
829 * Note that the link field of the block must contain a linked list of all
830 * Call nodes inside the block. We first order this list according to data dependency
831 * and that connect the calls together.
833 static void process_calls_in_block(ir_node *bl, void *data)
835 be_abi_irg_t *env = data;
836 ir_node *curr_sp = env->init_sp;
/* Gather the block's linked list (built by link_calls_in_block_walker)
 * into an array on the obstack. */
840 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
841 obstack_ptr_grow(&env->obst, irn);
843 /* If there were call nodes in the block. */
848 nodes = obstack_finish(&env->obst);
850 /* order the call nodes according to data dependency */
851 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* Walk from least to most dependent, threading the stack pointer through. */
853 for(i = n - 1; i >= 0; --i) {
854 ir_node *irn = nodes[i];
856 switch(get_irn_opcode(irn)) {
858 curr_sp = adjust_call(env, irn, curr_sp);
861 curr_sp = adjust_alloc(env, irn, curr_sp);
868 obstack_free(&env->obst, nodes);
870 /* Keep the last stack state in the block by tying it to Keep node */
872 be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
875 set_irn_link(bl, curr_sp);
879 * Adjust all call nodes in the graph to the ABI conventions.
 *
 * First pass collects the calls per block (and clears the leaf flag when
 * any call exists); second pass lowers them block by block.
881 static void process_calls(be_abi_irg_t *env)
883 ir_graph *irg = env->birg->irg;
/* Assume a leaf function until a call is encountered. */
885 env->call->flags.bits.irg_is_leaf = 1;
886 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
887 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
/**
 * Walker: collect all Return nodes of the graph on the obstack passed
 * as walker data.
 */
890 static void collect_return_walker(ir_node *irn, void *data)
892 if(get_irn_opcode(irn) == iro_Return) {
893 struct obstack *obst = data;
894 obstack_ptr_grow(obst, irn);
/**
 * Build the function prologue stack handling in the start block:
 * either a single IncSP when the frame pointer is omitted, or a Copy of
 * the stack pointer into the frame pointer register followed by the
 * frame-size IncSP. Reroutes the old frame node to the new one.
 */
898 static ir_node *setup_frame(be_abi_irg_t *env)
900 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
901 const arch_register_t *sp = isa->sp;
902 const arch_register_t *bp = isa->bp;
903 be_abi_call_flags_bits_t flags = env->call->flags.bits;
904 ir_graph *irg = env->birg->irg;
905 ir_node *bl = get_irg_start_block(irg);
906 ir_node *no_mem = get_irg_no_mem(irg);
907 ir_node *old_frame = get_irg_frame(irg);
908 ir_node *stack = pmap_get(env->regs, (void *) sp);
909 ir_node *frame = pmap_get(env->regs, (void *) bp);
911 int stack_nr = get_Proj_proj(stack);
913 if(flags.try_omit_fp) {
914 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
/* Frame pointer in use: copy the incoming SP into bp and pin it there. */
919 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
921 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
923 be_set_constr_single_reg(frame, -1, bp);
924 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
925 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
928 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
931 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
932 env->init_sp = stack;
933 set_irg_frame(irg, frame);
934 edges_reroute(old_frame, frame, irg);
/**
 * Build the epilogue stack handling before a Return: shrink the stack by
 * the frame size (frame-pointer-omitted case) or restore SP from the
 * frame pointer via SetSP. Collects the Return's new register inputs
 * (stack pointer, frame pointer, callee-save/ignore registers) on obst.
 */
939 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
941 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
942 const arch_register_t *sp = isa->sp;
943 const arch_register_t *bp = isa->bp;
944 ir_graph *irg = env->birg->irg;
945 ir_node *ret_mem = get_Return_mem(ret);
946 ir_node *frame = get_irg_frame(irg);
947 ir_node *bl = get_nodes_block(ret);
/* The block's link holds its final stack state (set in process_calls_in_block). */
948 ir_node *stack = get_irn_link(bl);
952 if(env->call->flags.bits.try_omit_fp) {
953 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
957 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
958 be_set_constr_single_reg(stack, -1, sp);
959 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
962 pmap_foreach(env->regs, ent) {
963 const arch_register_t *reg = ent->key;
964 ir_node *irn = ent->value;
967 obstack_ptr_grow(&env->obst, stack);
969 obstack_ptr_grow(&env->obst, frame);
970 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
971 obstack_ptr_grow(obst, irn);
/**
 * Build the class type describing the stack argument area of the current
 * method: one entity per stack-passed parameter, laid out with the
 * per-argument padding/alignment recorded in the call description.
 * Traversal direction follows stack direction and argument order
 * (left-to-right flag), so offsets grow along the stack layout.
 */
975 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
977 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
978 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
979 int n = get_method_n_params(method_type);
980 int curr = inc > 0 ? 0 : n - 1;
987 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
988 res = new_type_class(new_id_from_str(buf));
990 for(i = 0; i < n; ++i, curr += inc) {
991 type *param_type = get_method_param_type(method_type, curr);
992 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
995 snprintf(buf, sizeof(buf), "param_%d", i);
996 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
997 ofs += arg->space_before;
998 ofs = round_up2(ofs, arg->alignment);
999 set_entity_offset_bytes(arg->stack_ent, ofs);
1000 ofs += arg->space_after;
1001 ofs += get_type_size_bytes(param_type);
1005 set_type_size_bytes(res, ofs);
/**
 * For every register class, insert a Perm over all non-ignore registers
 * currently mapped in regs, re-mapping each register to its Perm result
 * and pinning each output to its register. Serves to delimit the
 * RegParams node (see comment at line 1016).
 */
1009 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1012 struct obstack obst;
1014 obstack_init(&obst);
1016 /* Create a Perm after the RegParams node to delimit it. */
1017 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1018 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1023 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1024 const arch_register_t *reg = &cls->regs[j];
1025 ir_node *irn = pmap_get(regs, (void *) reg);
1027 if(irn && !arch_register_type_is(reg, ignore)) {
/* Remember the register of each input via the node's link field. */
1029 obstack_ptr_grow(&obst, irn);
1030 set_irn_link(irn, (void *) reg);
1034 obstack_ptr_grow(&obst, NULL);
1035 in = obstack_finish(&obst);
1037 perm = be_new_Perm(cls, irg, bl, n_regs, in);
1038 for(j = 0; j < n_regs; ++j) {
1039 ir_node *arg = in[j];
1040 arch_register_t *reg = get_irn_link(arg);
1041 pmap_insert(regs, reg, arg);
1042 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1045 obstack_free(&obst, in);
1048 obstack_free(&obst, NULL);
1052 const arch_register_t *reg;
1056 static int cmp_regs(const void *a, const void *b)
1058 const reg_node_map_t *p = a;
1059 const reg_node_map_t *q = b;
1061 if(p->reg->reg_class == q->reg->reg_class)
1062 return p->reg->index - q->reg->index;
1064 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flatten a register->node pmap into an obstack-allocated array of
 * reg_node_map_t entries, sorted by cmp_regs (class, then index).
 * Ownership: the array lives on the caller's obstack; free it there.
 */
1067 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1070 int n = pmap_count(reg_map);
1072 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1074 pmap_foreach(reg_map, ent) {
1075 res[i].reg = ent->key;
1076 res[i].irn = ent->value;
1080 qsort(res, n, sizeof(res[0]), cmp_regs);
/**
 * Insert a be_Barrier taking all register nodes in regs plus the current
 * memory. Each register value is routed through the barrier: the Proj of
 * output n replaces the register's entry in regs, with register class,
 * single-register constraint and (for ignore registers) the ignore flag
 * set on the barrier positions. *mem is replaced by the barrier's memory
 * Proj.
 */
1084 static void create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1086 ir_graph *irg = env->birg->irg;
1088 int n_regs = pmap_count(regs);
1093 rm = reg_map_to_arr(&env->obst, regs);
1095 for(n = 0; n < n_regs; ++n)
1096 obstack_ptr_grow(&env->obst, rm[n].irn);
/* The memory value is the barrier's last input. */
1099 obstack_ptr_grow(&env->obst, *mem);
1103 in = (ir_node **) obstack_finish(&env->obst);
1104 irn = be_new_Barrier(irg, bl, n, in);
1105 obstack_free(&env->obst, in);
1107 for(n = 0; n < n_regs; ++n) {
1108 int pos = BE_OUT_POS(n);
1110 const arch_register_t *reg = rm[n].reg;
1112 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1113 be_node_set_reg_class(irn, n, reg->reg_class);
1115 be_set_constr_single_reg(irn, n, reg);
1116 be_set_constr_single_reg(irn, pos, reg);
1117 be_node_set_reg_class(irn, pos, reg->reg_class);
1118 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1119 if(arch_register_type_is(reg, ignore))
1120 be_node_set_flags(irn, pos, arch_irn_flags_ignore);
/* From now on the barrier's Proj stands for this register's value. */
1122 pmap_insert(regs, (void *) reg, proj);
1126 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1129 obstack_free(&env->obst, rm);
1133 * Modify the irg itself and the frame type.
 *
 * Central ABI transformation of one graph: lowers frame Sels, builds the
 * argument/between/frame stack model, creates the RegParams node with
 * Projs for all callee-save/ignore registers, runs the backend prologue
 * callback, inserts the prologue barrier and IncSP, and replaces
 * parameter Projs by register Projs or StackParam nodes.
 * NOTE(review): this definition continues past the visible end of this
 * chunk; the epilogue/Return handling is not shown here.
1135 static void modify_irg(be_abi_irg_t *env)
1137 be_abi_call_t *call = env->call;
1138 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1139 const arch_register_t *sp = arch_isa_sp(isa);
1140 ir_graph *irg = env->birg->irg;
1141 ir_node *bl = get_irg_start_block(irg);
1142 ir_node *end = get_irg_end_block(irg);
1143 ir_node *arg_tuple = get_irg_args(irg);
1144 ir_node *no_mem = get_irg_no_mem(irg);
1145 ir_node *mem = get_irg_initial_mem(irg);
1146 type *method_type = get_entity_type(get_irg_entity(irg));
1147 pset *dont_save = pset_new_ptr(8);
1148 int n_params = get_method_n_params(method_type);
1150 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1155 const arch_register_t *fp_reg;
1156 ir_node *frame_pointer;
1157 ir_node *reg_params_bl;
1159 const ir_edge_t *edge;
1160 ir_type *arg_type, *bet_type;
1163 bitset_t *used_proj_nr;
1165 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1167 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1168 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
1170 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1171 env->regs = pmap_create();
1173 /* Find the maximum proj number of the argument tuple proj */
1174 foreach_out_edge(arg_tuple, edge) {
1175 ir_node *irn = get_edge_src_irn(edge);
1176 int nr = get_Proj_proj(irn);
1177 max_arg = MAX(max_arg, nr);
1180 used_proj_nr = bitset_alloca(1024);
1181 max_arg = MAX(max_arg + 1, n_params);
1182 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
1183 memset(args, 0, max_arg * sizeof(args[0]));
1185 /* Fill the argument vector */
1186 foreach_out_edge(arg_tuple, edge) {
1187 ir_node *irn = get_edge_src_irn(edge);
1188 int nr = get_Proj_proj(irn);
1190 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
/* Build the stack frame model from args / between / locals regions. */
1193 arg_type = compute_arg_type(env, call, method_type);
1194 bet_type = call->cb->get_between_type(env->cb);
1195 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1197 /* Count the register params and add them to the number of Projs for the RegParams node */
1198 for(i = 0; i < n_params; ++i) {
1199 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1200 if(arg->in_reg && args[i]) {
1201 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1202 assert(i == get_Proj_proj(args[i]));
1204 /* For now, associate the register with the old Proj from Start representing that argument. */
1205 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1206 bitset_set(used_proj_nr, i);
1207 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1211 /* Collect all callee-save registers */
1212 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1213 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1214 for(j = 0; j < cls->n_regs; ++j) {
1215 const arch_register_t *reg = &cls->regs[j];
1216 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1217 pmap_insert(env->regs, (void *) reg, NULL);
/* The stack and frame pointer always get RegParams Projs. */
1221 pmap_insert(env->regs, (void *) sp, NULL);
1222 pmap_insert(env->regs, (void *) isa->bp, NULL);
1223 reg_params_bl = get_irg_start_block(irg);
1224 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1227 * make proj nodes for the callee save registers.
1228 * memorize them, since Return nodes get those as inputs.
1230 * Note, that if a register corresponds to an argument, the regs map contains
1231 * the old Proj from start for that argument.
1234 rm = reg_map_to_arr(&env->obst, env->regs);
1235 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1236 arch_register_t *reg = (void *) rm[i].reg;
1237 ir_node *arg_proj = rm[i].irn;
1239 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1241 int pos = BE_OUT_POS((int) nr);
1244 bitset_set(used_proj_nr, nr);
1245 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1246 pmap_insert(env->regs, (void *) reg, proj);
1247 be_set_constr_single_reg(env->reg_params, pos, reg);
1248 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1251 * If the register is an ignore register,
1252 * The Proj for that register shall also be ignored during register allocation.
1254 if(arch_register_type_is(reg, ignore))
1255 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1257 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1259 obstack_free(&env->obst, rm);
1261 /* Generate the Prologue */
1262 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1263 create_barrier(env, bl, &mem, env->regs, 0);
1265 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1266 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
1267 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1268 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1269 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1270 set_irg_frame(irg, frame_pointer);
1272 /* Now, introduce stack param nodes for all parameters passed on the stack */
1273 for(i = 0; i < max_arg; ++i) {
1274 ir_node *arg_proj = args[i];
1275 ir_node *repl = NULL;
1277 if(arg_proj != NULL) {
1278 be_abi_call_arg_t *arg;
1279 ir_type *param_type;
1280 int nr = get_Proj_proj(arg_proj);
1282 nr = MIN(nr, n_params);
1283 arg = get_call_arg(call, 0, nr);
1284 param_type = get_method_param_type(method_type, nr);
1287 repl = pmap_get(env->regs, (void *) arg->reg);
1290 else if(arg->on_stack) {
1291 /* For atomic parameters which are actually used, we create a StackParam node. */
1292 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1293 ir_mode *mode = get_type_mode(param_type);
1294 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1295 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1298 /* The stack parameter is not primitive (it is a struct or array),
1299 we thus will create a node representing the parameter's address
1302 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1306 assert(repl != NULL);
1307 edges_reroute(args[i], repl, irg);
1311 /* All Return nodes hang on the End node, so look for them there. */
1312 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1313 ir_node *irn = get_irn_n(end, i);
1315 if(get_irn_opcode(irn) == iro_Return) {
1316 ir_node *bl = get_nodes_block(irn);
1317 int n_res = get_Return_n_ress(irn);
1318 pmap *reg_map = pmap_create();
1319 ir_node *mem = get_Return_mem(irn);
1324 const arch_register_t **regs;
1326 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1328 /* Insert results for Return into the register map. */
1329 for(i = 0; i < n_res; ++i) {
1330 ir_node *res = get_Return_res(irn, i);
1331 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1332 assert(arg->in_reg && "return value must be passed in register");
1333 pmap_insert(reg_map, (void *) arg->reg, res);
1336 /* Add uses of the callee save registers. */
1337 pmap_foreach(env->regs, ent) {
1338 const arch_register_t *reg = ent->key;
1339 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1340 pmap_insert(reg_map, ent->key, ent->value);
1343 /* Make the Epilogue node and call the arch's epilogue maker. */
1344 create_barrier(env, bl, &mem, reg_map, 1);
1345 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1348 Maximum size of the in array for Return nodes is
1349 return args + callee save/ignore registers + memory + stack pointer
1351 in_max = pmap_count(reg_map) + get_Return_n_ress(irn) + 2;
1353 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1354 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1357 in[1] = be_abi_reg_map_get(reg_map, sp);
1362 /* clear SP entry, since it has already been grown. */
1363 pmap_insert(reg_map, (void *) sp, NULL);
1364 for(i = 0; i < n_res; ++i) {
1365 ir_node *res = get_Return_res(irn, i);
1366 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1368 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1369 regs[n++] = arg->reg;
1371 /* Clear the map entry to mark the register as processed. */
1372 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1375 /* grow the rest of the stuff. */
1376 pmap_foreach(reg_map, ent) {
1379 regs[n++] = ent->key;
1383 /* The in array for the new back end return is now ready. */
1384 ret = be_new_Return(get_irn_dbg_info(irn), irg, bl, n, in);
1386 /* Set the register classes of the return's parameter accordingly. */
1387 for(i = 0; i < n; ++i)
1389 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1391 /* Free the space of the Epilog's in array and the register <-> proj map. */
1392 obstack_free(&env->obst, in);
1394 pmap_destroy(reg_map);
1398 del_pset(dont_save);
1399 obstack_free(&env->obst, args);
1403 * Walker: puts all Alloc(stack_alloc) on a obstack
1405 static void collect_alloca_walker(ir_node *irn, void *data)
1407 be_abi_irg_t *env = data;
1408 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1409 obstack_ptr_grow(&env->obst, irn);
/**
 * Set up the ABI environment for a graph: create the call model, install the
 * ABI's own irn handler/ops, and reroute the placeholder stack pointer and
 * old frame node to their real counterparts.
 * @param birg The back end irg.
 * @return A freshly allocated ABI environment; the caller owns it and
 *         releases it with be_abi_free().
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
be_abi_irg_t *env = xmalloc(sizeof(env[0]));
ir_node *old_frame = get_irg_frame(birg->irg); /* remembered so its users can be rerouted below */
ir_graph *irg = birg->irg;
env->isa = birg->main_env->arch_env->isa;
env->method_type = get_entity_type(get_irg_entity(irg));
env->call = be_abi_call_new();
/* Ask the ISA how calls with this method type are to be made. */
arch_isa_get_call_abi(env->isa, env->method_type, env->call);
env->dce_survivor = new_survive_dce();
env->stack_phis = pset_new_ptr(16);
/* The real initial stack pointer is not built yet; use an Unknown as a
 * placeholder (kept in `dummy`) and reroute its edges later. */
env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
/* Let the ABI callbacks create their private state. */
env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
obstack_init(&env->obst);
/* Install our handler/ops so stack Phis get stack pointer constraints. */
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
/* Process the IRG */
/* reroute the stack origin of the calls to the true stack origin. */
edges_reroute(dummy, env->init_sp, irg);
edges_reroute(old_frame, get_irg_frame(irg), irg);
/* Make some important node pointers survive the dead node elimination. */
survive_dce_register_irn(env->dce_survivor, &env->init_sp);
/* NOTE(review): env->regs is presumably filled while processing the IRG
 * (code outside this excerpt) — confirm before relying on it here. */
pmap_foreach(env->regs, ent)
survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
/* Give the ABI callbacks a chance to finalize their state. */
env->call->cb->done(env->cb);
1461 void be_abi_free(be_abi_irg_t *env)
1463 free_survive_dce(env->dce_survivor);
1464 del_pset(env->stack_phis);
1465 pmap_destroy(env->regs);
1466 obstack_free(&env->obst, NULL);
1467 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1475 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1476 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1477 | _| | |> < ___) | || (_| | (__| <
1478 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/**
 * Walker. Collect all stack modifying nodes (AddSP, IncSP, SetSP) into the
 * pset passed as walker data.
 * NOTE(review): the pset `s` is presumably derived from `data`; its
 * declaration lies outside this excerpt — confirm.
 */
static void collect_stack_nodes_walker(ir_node *irn, void *data)
if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
pset_insert_ptr(s, irn);
/**
 * Put the stack pointer into SSA form: gather init_sp and all stack
 * modifying nodes, let the SSA constructor insert the needed Phis
 * (recorded in env->stack_phis), then recompute liveness.
 * @param env The ABI environment.
 */
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
dom_front_info_t *df;
/* We need dominance frontiers for fix up */
df = be_compute_dominance_frontiers(env->birg->irg);
/* NOTE(review): the declaration of `stack_nodes` is outside this excerpt. */
stack_nodes = pset_new_ptr(16);
pset_insert_ptr(stack_nodes, env->init_sp);
irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
del_pset(stack_nodes);
/* Liveness could have changed due to Phi nodes. */
be_liveness(env->birg->irg);
/* free these dominance frontiers */
be_free_dominance_frontiers(df);
1514 * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
1515 * into -1 or 1, respectively.
1516 * @param irn The node.
1517 * @return 1, if the direction of the IncSP was along, -1 if against.
1519 static int get_dir(ir_node *irn)
1521 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
/**
 * Walk one block's schedule and fix up stack offsets: IncSP nodes with the
 * BE_STACK_FRAME_SIZE placeholder get the real frame size, and nodes that
 * refer to a stack frame entity get their true (biased) offset.
 * @param env  The ABI environment.
 * @param bl   The block to process.
 * @param bias The stack bias at the entry of the block.
 * NOTE(review): bias accumulation and the function's return statement lie
 * outside this excerpt.
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
const arch_env_t *aenv = env->birg->main_env->arch_env;
int omit_fp = env->call->flags.bits.try_omit_fp; /* the call ABI's try_omit_fp flag */
sched_foreach(bl, irn) {
/*
 * If the node modifies the stack pointer by a constant offset,
 * record that in the bias.
 */
if(be_is_IncSP(irn)) {
int ofs = be_get_IncSP_offset(irn);
int dir = get_dir(irn); /* +1 for expand, -1 for shrink */
/* BE_STACK_FRAME_SIZE is a placeholder; patch in the real frame size. */
if(ofs == BE_STACK_FRAME_SIZE) {
ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
/*
 * Else check, if the node relates to an entity on the stack frame.
 * If so, set the true offset (including the bias) for that
 */
entity *ent = arch_get_frame_entity(aenv, irn);
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(aenv, irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
/**
 * A helper struct carrying the data for stack_bias_walker().
 */
be_abi_irg_t *env; /**< The ABI irg environment. */
int start_block_bias; /**< The bias at the end of the start block. */
1575 static void stack_bias_walker(ir_node *bl, void *data)
1577 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1578 struct bias_walk *bw = data;
1579 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Fix the stack bias for the whole graph: compute the frame's initial
 * offsets, process the start block, then visit all remaining blocks with
 * the bias recorded at the start block's end.
 * @param env The ABI environment.
 */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
ir_graph *irg = env->birg->irg;
struct bias_walk bw;
stack_frame_compute_initial_offset(env->frame);
// stack_frame_dump(stdout, env->frame);
/* Determine the stack bias at the end of the start block. */
bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
/* fix the bias in all other blocks */
/* NOTE(review): bw.env appears to be assigned outside this excerpt — confirm. */
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1599 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1601 assert(arch_register_type_is(reg, callee_save));
1602 assert(pmap_contains(abi->regs, (void *) reg));
1603 return pmap_get(abi->regs, (void *) reg);
1607 _____ _____ _ _ _ _ _ _
1608 |_ _| __ \| \ | | | | | | | | |
1609 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1610 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1611 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1612 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1614 for Phi nodes which are created due to stack modifying nodes
1615 such as IncSP, AddSP and SetSP.
1617 These Phis are always to be ignored by the reg alloc and are
1618 fixed on the SP register of the ISA.
/**
 * irn handler hook: return the ABI's special irn ops, but only for Phi
 * nodes that were inserted for stack pointer fixup (members of
 * abi->stack_phis); all other nodes get NULL (default handling).
 * NOTE(review): the return statement lies outside this excerpt.
 */
static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
const be_abi_irg_t *abi = get_abi_from_handler(handler);
const void *res = NULL;
if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
res = &abi->irn_ops;
1632 static void be_abi_limited(void *data, bitset_t *bs)
1634 be_abi_irg_t *abi = data;
1635 bitset_clear_all(bs);
1636 bitset_set(bs, abi->isa->sp->index);
/**
 * Register requirements for stack-fixup Phis: the result (OUT position 0)
 * is limited to the stack pointer register via be_abi_limited(); the inputs
 * merely require the stack pointer's register class.
 * NOTE(review): the closing braces of the branches and the return statement
 * lie outside this excerpt.
 */
static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
be_abi_irg_t *abi = get_abi_from_ops(self);
const arch_register_t *reg = abi->isa->sp;
memset(req, 0, sizeof(req[0]));
if(pos == BE_OUT_POS(0)) {
/* The Phi's result must end up in the stack pointer register. */
req->cls = reg->reg_class;
req->type = arch_register_req_type_limited;
req->limited = be_abi_limited;
req->limited_env = abi;
else if(pos >= 0 && pos < get_irn_arity(irn)) {
/* Inputs only need to be in the stack pointer's register class. */
req->cls = reg->reg_class;
req->type = arch_register_req_type_normal;
1661 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1665 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1667 const be_abi_irg_t *abi = get_abi_from_ops(self);
1668 return abi->isa->sp;
1671 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1673 return arch_irn_class_normal;
1676 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1678 return arch_irn_flags_ignore;
1681 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1686 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* The irn ops installed for stack-fixup Phi nodes (handed out by
 * abi_get_irn_ops()).  NOTE(review): several member initializers fall
 * outside this excerpt. */
static const arch_irn_ops_if_t abi_irn_ops = {
abi_get_irn_reg_req,
abi_get_frame_entity,
1700 static const arch_irn_handler_t abi_irn_handler = {