4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
32 #include "besched_t.h"
/* NOTE(review): classic function-like min/max macros -- both arguments are
   evaluated twice, so never pass expressions with side effects
   (e.g. MAX(i++, j)). */
34 #define MAX(x, y) ((x) > (y) ? (x) : (y))
35 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/* Description of one argument (or result) of an ABI call: whether it is
   passed on the stack or in a register, plus its stack-layout properties.
   (Several fields of this struct are elided in this listing.) */
37 typedef struct _be_abi_call_arg_t {
40 unsigned on_stack : 1;
43 const arch_register_t *reg;
46 unsigned space_before;
/* An ABI call description: call flags, backend callbacks and -- presumably,
   in the elided part -- the set of per-argument records ("params"). */
50 struct _be_abi_call_t {
51 be_abi_call_flags_t flags;
52 const be_abi_callbacks_t *cb;
/* The stack frame is modeled as three stacked types: argument type,
   "between" type and the local frame type (see order[] below). */
57 #define N_FRAME_TYPES 3
59 typedef struct _be_stack_frame_t {
64 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
70 struct _be_stack_slot_t {
71 struct _be_stack_frame_t *frame;
/* Per-irg ABI environment: everything the ABI lowering phase carries
   around for one graph. */
75 struct _be_abi_irg_t {
77 be_stack_frame_t *frame; /**< The stack frame model. */
78 const be_irg_t *birg; /**< The back end IRG. */
79 const arch_isa_t *isa; /**< The isa. */
80 survive_dce_t *dce_survivor;
82 be_abi_call_t *call; /**< The ABI call information. */
83 type *method_type; /**< The type of the method of the IRG. */
85 ir_node *init_sp; /**< The node representing the stack pointer
86 at the start of the function. */
88 ir_node *reg_params; /**< The reg params node. */
89 pmap *regs; /**< A map of all callee-save and ignore regs to
90 their Projs to the RegParams node. */
92 pset *stack_phis; /**< The set of all Phi nodes inserted due to
93 stack pointer modifying nodes. */
95 int start_block_bias; /**< The stack bias at the end of the start block. */
97 void *cb; /**< ABI Callback self pointer. */
99 arch_irn_handler_t irn_handler;
100 arch_irn_ops_t irn_ops;
101 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
/* Recover the enclosing be_abi_irg_t from a pointer to one of its
   embedded handler/ops members (container_of idiom). */
104 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
105 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
107 /* Forward, since we need it in be_abi_introduce(). */
108 static const arch_irn_ops_if_t abi_irn_ops;
109 static const arch_irn_handler_t abi_irn_handler;
111 /* Flag: if set, try to omit the frame pointer if called by the backend */
115 _ ____ ___ ____ _ _ _ _
116 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
117 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
118 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
119 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
121 These callbacks are used by the backend to set the parameters
122 for a specific call type.
126 * Set compare function: compares two ABI call object arguments.
/* Two entries are equal iff both the is_res flag and the position match;
   the third (size) parameter is required by the set API but unused here. */
128 static int cmp_call_arg(const void *a, const void *b, size_t n)
130 const be_abi_call_arg_t *p = a, *q = b;
131 return !(p->is_res == q->is_res && p->pos == q->pos);
135 * Get or set an ABI call object argument.
137 * @param call the abi call
138 * @param is_res true for call results, false for call arguments
139 * @param pos position of the argument
140 * @param do_insert true if the argument is set, false if it's retrieved
142 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
144 be_abi_call_arg_t arg;
147 memset(&arg, 0, sizeof(arg));
/* Simple hash mixing result flag and position; adequate since pos is
   a small argument index. */
151 hash = is_res * 128 + pos;
/* set_insert returns the existing entry if one compares equal, so a
   lookup after insert always yields the canonical record. */
154 ? set_insert(call->params, &arg, sizeof(arg), hash)
155 : set_find(call->params, &arg, sizeof(arg), hash);
159 * Retrieve an ABI call object argument.
161 * @param call the ABI call object
162 * @param is_res true for call results, false for call arguments
163 * @param pos position of the argument
/* Read-only lookup; returns NULL if no entry was ever set (see callers
   which test the result before dereferencing). */
165 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
167 return get_or_set_call_arg(call, is_res, pos, 0);
170 /* Set the flags for a call. */
171 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/* Record that argument arg_pos is passed on the stack with the given
   alignment and padding before/after it. */
177 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
179 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
181 arg->alignment = alignment;
182 arg->space_before = space_before;
183 arg->space_after = space_after;
/* NOTE(review): this assert runs only after the fields were already
   assigned; consider validating alignment before use. Debug builds only. */
184 assert(alignment > 0 && "Alignment must be greater than 0");
/* Record that argument arg_pos is passed in the given register. */
187 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
189 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Record that result arg_pos is returned in the given register. */
194 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
196 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
201 /* Get the flags of a ABI call object. */
202 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
208 * Constructor for a new ABI call object.
210 * @return the new ABI call object
212 static be_abi_call_t *be_abi_call_new()
/* xmalloc presumably aborts on OOM, hence no NULL check here. */
214 be_abi_call_t *call = xmalloc(sizeof(call[0]));
216 call->params = new_set(cmp_call_arg, 16);
/* Seed the per-call omit-frame-pointer decision from the global flag. */
219 call->flags.bits.try_omit_fp = be_omit_fp;
224 * Destructor for an ABI call object.
/* Frees the argument set; the call object itself is presumably freed in
   the elided part of this function. */
226 static void be_abi_call_free(be_abi_call_t *call)
228 del_set(call->params);
234 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
235 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
236 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
237 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
240 Handling of the stack frame. It is composed of three types:
241 1) The type of the arguments which are pushed on the stack.
242 2) The "between type" which consists of stuff the call of the
243 function pushes on the stack (like the return address and
244 the old base pointer for ia32).
245 3) The Firm frame type which consists of all local variables
/* Compute the offset of a frame entity relative to the frame pointer's
   initial position: its offset inside its owning type, plus the sizes of
   all frame sub-types laid out below it, minus the initial offset.
   The bias parameter corrects for the current stack bias (use of bias is
   in an elided line -- TODO confirm). */
249 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
251 type *t = get_entity_owner(ent);
252 int ofs = get_entity_offset_bytes(ent);
256 /* Find the type the entity is contained in. */
257 for(index = 0; index < N_FRAME_TYPES; ++index) {
258 if(frame->order[index] == t)
262 /* Add the size of all the types below the one of the entity to the entity's offset */
263 for(i = 0; i < index; ++i)
264 ofs += get_type_size_bytes(frame->order[i]);
266 /* correct the offset by the initial position of the frame pointer */
267 ofs -= frame->initial_offset;
269 /* correct the offset with the current bias. */
276 * Retrieve the entity with given offset from a frame type.
/* Linear scan over the class members; returns NULL when no member has
   exactly the requested offset (the elided tail presumably returns NULL). */
278 static entity *search_ent_with_offset(type *t, int offset)
282 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
283 entity *ent = get_class_member(t, i);
284 if(get_entity_offset_bytes(ent) == offset)
/* Determine the frame's initial offset: the global offset of the entity
   at local offset 0 of the base type (between type for downward-growing
   stacks, otherwise the local frame type).
   NOTE(review): search_ent_with_offset() can return NULL; whether
   get_stack_entity_offset() tolerates a NULL entity is not visible here
   -- TODO confirm. */
291 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
293 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
294 entity *ent = search_ent_with_offset(base, 0);
/* Reset first so get_stack_entity_offset() computes without any prior
   initial_offset correction. */
295 frame->initial_offset = 0;
296 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
297 return frame->initial_offset;
/* Initialize the stack frame model. The between type always sits in the
   middle; the order of arg and local types depends on the stack direction
   (condition elided between lines 307 and 310). */
300 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
302 frame->arg_type = args;
303 frame->between_type = between;
304 frame->frame_type = locals;
305 frame->initial_offset = 0;
306 frame->stack_dir = stack_dir;
307 frame->order[1] = between;
310 frame->order[0] = args;
311 frame->order[2] = locals;
315 frame->order[0] = locals;
316 frame->order[2] = args;
/* Debug helper: dump the frame layout (all types, member entities and
   their local/global offsets) to the given file. */
322 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
326 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
327 for(j = 0; j < N_FRAME_TYPES; ++j) {
328 type *t = frame->order[j];
330 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
331 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
332 entity *ent = get_class_member(t, i);
333 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
339 * If irn is a Sel node computing the address of an entity
340 * on the frame type, return the entity, else NULL.
/* Only Sels whose pointer operand is the graph's frame node qualify. */
342 static INLINE entity *get_sel_ent(ir_node *irn)
344 if(is_Sel(irn) && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
345 return get_Sel_entity(irn);
352 * Walker: Replaces Loads, Stores and Sels of frame type entities
353 * by FrameLoad, FrameStore and FrameAddr.
/* Graph walker; data is the be_abi_irg_t environment. Frame Sels are
   replaced by backend FrameAddr nodes in the stack pointer's register
   class (the exchange of irn with nw happens in an elided line). */
355 static void lower_frame_sels_walker(ir_node *irn, void *data)
358 entity *ent = get_sel_ent(irn);
361 be_abi_irg_t *env = data;
362 ir_node *bl = get_nodes_block(irn);
363 ir_graph *irg = get_irn_irg(bl);
364 ir_node *frame = get_irg_frame(irg);
366 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
372 * Returns non-zero if the call argument at given position
373 * is transferred on the stack.
/* An argument is on the stack if it has a record and is not register
   bound; arguments without a record count as not-on-stack. */
375 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
377 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
378 return arg && !arg->in_reg;
388 Adjustment of the calls inside a graph.
393 * Transform a call node.
394 * @param env The ABI environment for the current irg.
395 * @param irn The call node.
396 * @param curr_sp The stack pointer node to use.
397 * @return The stack pointer after the call.
/* Lowers a Firm Call to a backend be_Call: pushes/stores stack arguments,
   wires register arguments, renumbers result Projs, adds Projs + a Keep
   for caller-save registers, and adjusts the stack pointer around the
   call. Large parts of the body are elided in this listing. */
399 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
401 ir_graph *irg = env->birg->irg;
402 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
403 be_abi_call_t *call = be_abi_call_new();
404 ir_type *mt = get_Call_type(irn);
405 ir_node *call_ptr = get_Call_ptr(irn);
406 int n_params = get_method_n_params(mt);
407 ir_node *curr_mem = get_Call_mem(irn);
408 ir_node *bl = get_nodes_block(irn);
409 pset *results = pset_new_ptr(8);
410 pset *caller_save = pset_new_ptr(8);
412 int stack_dir = arch_isa_stack_dir(isa);
413 const arch_register_t *sp = arch_isa_sp(isa);
414 ir_mode *mach_mode = sp->reg_class->mode;
415 struct obstack *obst = &env->obst;
416 ir_node *no_mem = get_irg_no_mem(irg);
417 int no_alloc = call->flags.bits.frame_is_setup_on_call;
419 ir_node *res_proj = NULL;
/* Result Proj numbers are allocated past the original Call's maximum. */
420 int curr_res_proj = pn_Call_max;
427 const ir_edge_t *edge;
432 /* Let the isa fill out the abi description for that call node. */
433 arch_isa_get_call_abi(isa, mt, call);
435 /* Insert code to put the stack arguments on the stack. */
436 assert(get_Call_n_params(irn) == n_params);
437 for(i = 0; i < n_params; ++i) {
438 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
/* Accumulate total stack size: padding before, alignment round-up,
   the parameter itself, padding after. Remember the index on the obstack. */
441 stack_size += arg->space_before;
442 stack_size = round_up2(stack_size, arg->alignment);
443 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
444 stack_size += arg->space_after;
445 obstack_int_grow(obst, i);
449 pos = obstack_finish(obst);
451 /* Collect all arguments which are passed in registers. */
452 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
453 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
454 if(arg && arg->in_reg) {
455 obstack_int_grow(obst, i);
459 low_args = obstack_finish(obst);
461 /* If there are some parameters which shall be passed on the stack. */
464 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
467 * Reverse list of stack parameters if call arguments are from left to right.
468 * We must reverse them again if they are pushed (not stored) and the stack
469 * direction is downwards.
471 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
472 for(i = 0; i < n_pos >> 1; ++i) {
473 int other = n_pos - i - 1;
481 * If the stack is decreasing and we do not want to store sequentially,
482 * or someone else allocated the call frame
483 * we allocate as much space on the stack all parameters need, by
484 * moving the stack pointer along the stack's direction.
486 if(stack_dir < 0 && !do_seq && !no_alloc) {
487 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
490 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
491 for(i = 0; i < n_pos; ++i) {
493 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
494 ir_node *param = get_Call_param(irn, p);
495 ir_node *addr = curr_sp;
497 type *param_type = get_method_param_type(mt, p);
498 int param_size = get_type_size_bytes(param_type) + arg->space_after;
500 curr_ofs += arg->space_before;
501 curr_ofs = round_up2(curr_ofs, arg->alignment);
503 /* Make the expression to compute the argument's offset. */
505 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
506 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
509 /* Insert a store for primitive arguments. */
510 if(is_atomic_type(param_type)) {
511 mem = new_r_Store(irg, bl, curr_mem, addr, param);
512 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
515 /* Make a mem copy for compound arguments. */
517 assert(mode_is_reference(get_irn_mode(param)));
518 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
519 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
/* Remember the memory result of each store/copy for the Sync below. */
522 obstack_ptr_grow(obst, mem);
524 curr_ofs += param_size;
527 * If we wanted to build the arguments sequentially,
528 * the stack pointer for the next must be incremented,
529 * and the memory value propagated.
533 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem, param_size, be_stack_dir_expand);
538 in = (ir_node **) obstack_finish(obst);
540 /* We need the sync only, if we didn't build the stores sequentially. */
542 curr_mem = new_r_Sync(irg, bl, n_pos, in);
543 obstack_free(obst, in);
546 /* Collect caller save registers */
547 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
549 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
550 for(j = 0; j < cls->n_regs; ++j) {
551 const arch_register_t *reg = arch_register_for_index(cls, j);
552 if(arch_register_type_is(reg, caller_save))
553 pset_insert_ptr(caller_save, (void *) reg);
557 /* search the greatest result proj number */
559 /* TODO: what if the result is NOT used? Currently there is
560 * no way to detect this later, especially there is no way to
561 * see this in the proj numbers.
562 * While this is ok for the register allocator, it is bad for
563 * backends which need to change the be_Call further (x87 simulator
564 * for instance. However for this particular case the call_type is
/* Walk the Proj_T result tuple and renumber each result Proj. */
567 foreach_out_edge(irn, edge) {
568 const ir_edge_t *res_edge;
/* NOTE(review): this inner 'irn' shadows the function parameter 'irn'. */
569 ir_node *irn = get_edge_src_irn(edge);
571 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
573 foreach_out_edge(irn, res_edge) {
575 be_abi_call_arg_t *arg;
576 ir_node *res = get_edge_src_irn(res_edge);
578 assert(is_Proj(res));
580 proj = get_Proj_proj(res);
581 arg = get_call_arg(call, 1, proj);
584 shift the proj number to the right, since we will drop the
585 unspeakable Proj_T from the Call. Therefore, all real argument
586 Proj numbers must be increased by pn_be_Call_first_res
588 proj += pn_be_Call_first_res;
589 set_Proj_proj(res, proj);
590 obstack_ptr_grow(obst, res);
592 if(proj > curr_res_proj)
593 curr_res_proj = proj;
/* A result register is no longer caller-save for this call. */
595 pset_remove_ptr(caller_save, arg->reg);
596 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
/* NULL-terminate the result Proj array. */
603 obstack_ptr_grow(obst, NULL);
604 res_projs = obstack_finish(obst);
606 /* make the back end call node and set its register requirements. */
607 for(i = 0; i < n_low_args; ++i)
608 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
610 in = obstack_finish(obst);
/* Direct calls to a SymConst address may be emitted as immediate calls
   if the backend supports it. */
612 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
613 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
614 curr_res_proj + pset_count(caller_save), n_low_args, in,
616 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
620 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
621 curr_res_proj + pset_count(caller_save), n_low_args, in,
626 Set the register class of the call address to the same as the stack pointer's.
627 That's probably buggy for some architectures.
629 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
631 /* Set the register classes and constraints of the Call parameters. */
632 for(i = 0; i < n_low_args; ++i) {
633 int index = low_args[i];
634 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
635 assert(arg->reg != NULL);
637 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
640 /* Set the register constraints of the results. */
641 for(i = 0; res_projs[i]; ++i) {
642 ir_node *irn = res_projs[i];
643 int proj = get_Proj_proj(irn);
645 /* Correct Proj number since it has been adjusted! (see above) */
646 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
649 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
651 obstack_free(obst, in);
652 exchange(irn, low_call);
654 /* redirect the result projs to the lowered call instead of the Proj_T */
655 for(i = 0; res_projs[i]; ++i)
656 set_Proj_pred(res_projs[i], low_call);
658 /* Make additional projs for the caller save registers
659 and the Keep node which keeps them alive. */
660 if(pset_count(caller_save) > 0) {
661 const arch_register_t *reg;
665 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
666 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
668 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
669 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
670 set_irn_link(proj, (void *) reg);
671 obstack_ptr_grow(obst, proj);
675 in = (ir_node **) obstack_finish(obst);
676 keep = be_new_Keep(NULL, irg, bl, n, in);
677 for(i = 0; i < n; ++i) {
678 const arch_register_t *reg = get_irn_link(in[i]);
679 be_node_set_reg_class(keep, i, reg->reg_class);
681 obstack_free(obst, in);
684 /* Clean up the stack. */
686 ir_node *mem_proj = NULL;
688 foreach_out_edge(low_call, edge) {
689 ir_node *irn = get_edge_src_irn(edge);
690 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
697 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
699 /* Clean up the stack frame if we allocated it */
701 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
704 be_abi_call_free(call);
705 obstack_free(obst, pos);
/* NOTE(review): 'results' (allocated above) is not visibly freed here;
   its del_pset may be in an elided line -- TODO confirm. */
707 del_pset(caller_save);
714 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
/* Only stack Allocs are lowered; heap Allocs pass through unchanged
   (fall-through is in the elided part). Returns the new stack pointer. */
716 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
718 if (get_Alloc_where(alloc) == stack_alloc) {
719 ir_node *bl = get_nodes_block(alloc);
720 ir_graph *irg = get_irn_irg(bl);
721 ir_node *alloc_mem = NULL;
722 ir_node *alloc_res = NULL;
724 const ir_edge_t *edge;
/* Find the memory and result Projs of the Alloc. */
727 foreach_out_edge(alloc, edge) {
728 ir_node *irn = get_edge_src_irn(edge);
730 assert(is_Proj(irn));
731 switch(get_Proj_proj(irn)) {
743 /* Beware: currently Alloc nodes without a result might happen,
744 only escape analysis kills them and this phase runs only for object
745 oriented source. We kill the Alloc here. */
746 if (alloc_res == NULL) {
747 exchange(alloc_mem, get_Alloc_mem(alloc));
751 /* The stack pointer will be modified in an unknown manner.
752 We cannot omit it. */
753 env->call->flags.bits.try_omit_fp = 0;
754 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
/* On a downward stack the AddSP result is the allocated address;
   otherwise the old stack pointer is. */
756 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
758 if(alloc_mem != NULL)
759 exchange(alloc_mem, new_r_NoMem(irg));
768 * Walker for dependent_on().
769 * This function searches a node tgt recursively from a given node
770 * but is restricted to the given block.
771 * @return 1 if tgt was reachable from curr, 0 if not.
/* Depth-first search over the in-edges; the visited counter prevents
   revisiting nodes and the block test prunes the search. */
773 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
777 if(get_irn_visited(curr) >= visited_nr)
780 set_irn_visited(curr, visited_nr);
781 if(get_nodes_block(curr) != bl)
787 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
788 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
796 * Check if a node is somehow data dependent on another one.
797 * both nodes must be in the same basic block.
798 * @param n1 The first node.
799 * @param n2 The second node.
800 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
802 static int dependent_on(ir_node *n1, ir_node *n2)
804 ir_node *bl = get_nodes_block(n1);
805 ir_graph *irg = get_irn_irg(bl);
/* Bump the graph's visited counter so this search starts fresh. */
806 long vis_nr = get_irg_visited(irg) + 1;
808 assert(bl == get_nodes_block(n2));
809 set_irg_visited(irg, vis_nr);
810 return check_dependence(n1, n2, bl, vis_nr);
/* qsort comparator ordering calls by data dependency (name contains a
   historical typo: "dependecy"). NOTE(review): dependency is only checked
   in one direction, so unrelated nodes always compare as "greater" --
   this yields a valid order only if dependency forms the intended
   partial order within a block. */
813 static int cmp_call_dependecy(const void *c1, const void *c2)
815 ir_node *n1 = *(ir_node **) c1;
816 ir_node *n2 = *(ir_node **) c2;
819 Classical qsort() comparison function behavior:
820 0 if both elements are equal
821 1 if second is "smaller" that first
822 -1 if first is "smaller" that second
824 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
/* Walker: collect Call (and presumably Alloc) nodes of each block into a
   linked list hung off the block's link field; also clears the
   leaf-function flag because a call was seen. */
827 static void link_calls_in_block_walker(ir_node *irn, void *data)
830 be_abi_irg_t *env = data;
831 ir_node *bl = get_nodes_block(irn);
832 void *save = get_irn_link(bl);
834 env->call->flags.bits.irg_is_leaf = 0;
/* Push irn onto the block's list (block link -> irn -> previous head). */
836 set_irn_link(irn, save);
837 set_irn_link(bl, irn);
842 * Process all call nodes inside a basic block.
843 * Note that the link field of the block must contain a linked list of all
844 * Call nodes inside the block. We first order this list according to data dependency
845 * and that connect the calls together.
847 static void process_calls_in_block(ir_node *bl, void *data)
849 be_abi_irg_t *env = data;
850 ir_node *curr_sp = env->init_sp;
/* Gather the block's linked list of calls into an array on the obstack. */
854 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
855 obstack_ptr_grow(&env->obst, irn);
857 /* If there were call nodes in the block. */
862 nodes = obstack_finish(&env->obst);
864 /* order the call nodes according to data dependency */
865 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* Process in reverse sorted order, threading the stack pointer through
   each adjusted call/alloc. */
867 for(i = n - 1; i >= 0; --i) {
868 ir_node *irn = nodes[i];
870 switch(get_irn_opcode(irn)) {
872 curr_sp = adjust_call(env, irn, curr_sp);
875 curr_sp = adjust_alloc(env, irn, curr_sp);
882 obstack_free(&env->obst, nodes);
884 /* Keep the last stack state in the block by tying it to Keep node */
886 be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
/* Remember the final stack pointer of this block in the block's link. */
889 set_irn_link(bl, curr_sp);
893 * Adjust all call nodes in the graph to the ABI conventions.
895 static void process_calls(be_abi_irg_t *env)
897 ir_graph *irg = env->birg->irg;
/* Assume leaf until link_calls_in_block_walker sees a call. */
899 env->call->flags.bits.irg_is_leaf = 1;
900 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
901 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
/* Walker: collect all Return nodes onto the obstack passed via data. */
904 static void collect_return_walker(ir_node *irn, void *data)
906 if(get_irn_opcode(irn) == iro_Return) {
907 struct obstack *obst = data;
908 obstack_ptr_grow(obst, irn);
/* Build the function prologue stack handling in the start block:
   either a plain IncSP (frame pointer omitted) or a Copy of the stack
   pointer into the frame pointer register followed by the IncSP.
   Returns (presumably) the frame node; the tail is elided. */
913 static ir_node *setup_frame(be_abi_irg_t *env)
915 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
916 const arch_register_t *sp = isa->sp;
917 const arch_register_t *bp = isa->bp;
918 be_abi_call_flags_bits_t flags = env->call->flags.bits;
919 ir_graph *irg = env->birg->irg;
920 ir_node *bl = get_irg_start_block(irg);
921 ir_node *no_mem = get_irg_no_mem(irg);
922 ir_node *old_frame = get_irg_frame(irg);
/* The RegParams Projs for SP and BP were registered in env->regs. */
923 ir_node *stack = pmap_get(env->regs, (void *) sp);
924 ir_node *frame = pmap_get(env->regs, (void *) bp);
926 int stack_nr = get_Proj_proj(stack);
928 if(flags.try_omit_fp) {
929 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
/* Frame pointer kept: copy SP into BP, pin it to the BP register and
   mark it unspillable/ignored for the register allocator. */
934 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
936 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
938 be_set_constr_single_reg(frame, -1, bp);
939 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
940 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
943 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
946 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
947 env->init_sp = stack;
948 set_irg_frame(irg, frame);
/* Reroute all users of the old frame node to the new one. */
949 edges_reroute(old_frame, frame, irg);
/* Build the epilogue stack handling before a Return: undo the prologue
   (IncSP shrink, or restore SP from the frame pointer via SetSP) and
   collect the Return's register inputs on the obstack. */
954 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
956 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
957 const arch_register_t *sp = isa->sp;
958 const arch_register_t *bp = isa->bp;
959 ir_graph *irg = env->birg->irg;
960 ir_node *ret_mem = get_Return_mem(ret);
961 ir_node *frame = get_irg_frame(irg);
962 ir_node *bl = get_nodes_block(ret);
/* The block's link holds the last stack state (see process_calls_in_block). */
963 ir_node *stack = get_irn_link(bl);
967 if(env->call->flags.bits.try_omit_fp) {
968 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
972 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
973 be_set_constr_single_reg(stack, -1, sp);
974 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
/* Gather Return inputs: the stack/frame values plus every callee-save
   or ignore register's current value. */
977 pmap_foreach(env->regs, ent) {
978 const arch_register_t *reg = ent->key;
979 ir_node *irn = ent->value;
982 obstack_ptr_grow(&env->obst, stack);
984 obstack_ptr_grow(&env->obst, frame);
985 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
986 obstack_ptr_grow(obst, irn);
/* Build the class type describing the stack-argument area of the method:
   one entity per stack-passed parameter, laid out with the per-argument
   padding and alignment, walking the parameters in stack direction. */
992 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
/* dir/inc determine whether parameters are laid out first-to-last or
   last-to-first, depending on calling convention and stack direction. */
994 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
995 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
996 int n = get_method_n_params(method_type);
997 int curr = inc > 0 ? 0 : n - 1;
1004 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
1005 res = new_type_class(new_id_from_str(buf));
1007 for(i = 0; i < n; ++i, curr += inc) {
1008 type *param_type = get_method_param_type(method_type, curr);
1009 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1012 snprintf(buf, sizeof(buf), "param_%d", i);
1013 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1014 ofs += arg->space_before;
1015 ofs = round_up2(ofs, arg->alignment);
1016 set_entity_offset_bytes(arg->stack_ent, ofs);
/* NOTE(review): space_after is added before the parameter size here,
   while adjust_call sums size before space_after; the total is the same
   but the per-entity offsets assume space_after trails -- TODO confirm
   against the elided lines. */
1017 ofs += arg->space_after;
1018 ofs += get_type_size_bytes(param_type);
1022 set_type_size_bytes(res, ofs);
/* Insert a Perm per register class after the RegParams node to delimit
   the live ranges of all non-ignore argument registers. */
1026 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1029 struct obstack obst;
1031 obstack_init(&obst);
1033 /* Create a Perm after the RegParams node to delimit it. */
1034 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1035 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1040 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1041 const arch_register_t *reg = &cls->regs[j];
1042 ir_node *irn = pmap_get(regs, (void *) reg);
1044 if(irn && !arch_register_type_is(reg, ignore)) {
/* Stash the register in the node's link so the Perm outputs can be
   constrained to the same registers below. */
1046 obstack_ptr_grow(&obst, irn);
1047 set_irn_link(irn, (void *) reg);
1051 obstack_ptr_grow(&obst, NULL);
1052 in = obstack_finish(&obst);
1054 perm = be_new_Perm(cls, irg, bl, n_regs, in);
1055 for(j = 0; j < n_regs; ++j) {
1056 ir_node *arg = in[j];
1057 arch_register_t *reg = get_irn_link(arg);
/* Replace the map entry with the Perm result (pmap_insert overwrites;
   'arg' here is presumably replaced by the Perm's Proj in an elided
   line -- TODO confirm). */
1058 pmap_insert(regs, reg, arg);
1059 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1062 obstack_free(&obst, in);
1065 obstack_free(&obst, NULL);
/* Pairing of a register with the ir_node currently holding its value
   (rest of this struct/typedef is elided in this listing). */
1069 const arch_register_t *reg;
/* qsort comparator: order register/node pairs by register class first,
   then by register index, giving a deterministic ordering of pmap
   contents. */
1073 static int cmp_regs(const void *a, const void *b)
1075 const reg_node_map_t *p = a;
1076 const reg_node_map_t *q = b;
1078 if(p->reg->reg_class == q->reg->reg_class)
1079 return p->reg->index - q->reg->index;
1081 return p->reg->reg_class - q->reg->reg_class;
/* Flatten a register->node pmap into a sorted obstack-allocated array
   (sorted with cmp_regs for deterministic iteration order). */
1084 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1087 int n = pmap_count(reg_map);
1089 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1091 pmap_foreach(reg_map, ent) {
1092 res[i].reg = ent->key;
1093 res[i].irn = ent->value;
1097 qsort(res, n, sizeof(res[0]), cmp_regs);
/* Create a be_Barrier in block bl taking all register values in 'regs'
   plus the current memory; re-emit each value as a Proj of the barrier
   (updating 'regs' and '*mem') so nothing is moved across it. */
1101 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1103 ir_graph *irg = env->birg->irg;
1105 int n_regs = pmap_count(regs);
1110 rm = reg_map_to_arr(&env->obst, regs);
1112 for(n = 0; n < n_regs; ++n)
1113 obstack_ptr_grow(&env->obst, rm[n].irn);
/* Memory is the last barrier input. */
1116 obstack_ptr_grow(&env->obst, *mem);
1120 in = (ir_node **) obstack_finish(&env->obst);
1121 irn = be_new_Barrier(irg, bl, n, in);
1122 obstack_free(&env->obst, in);
1124 for(n = 0; n < n_regs; ++n) {
1125 int pos = BE_OUT_POS(n);
1127 const arch_register_t *reg = rm[n].reg;
1129 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1130 be_node_set_reg_class(irn, n, reg->reg_class);
/* Constrain both the input (use of in_req is presumably in an elided
   conditional -- TODO confirm) and the matching output to the register. */
1132 be_set_constr_single_reg(irn, n, reg);
1133 be_set_constr_single_reg(irn, pos, reg);
1134 be_node_set_reg_class(irn, pos, reg->reg_class);
1135 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1136 if(arch_register_type_is(reg, ignore))
1137 be_node_set_flags(irn, pos, arch_irn_flags_ignore);
/* The map now points at the barrier's Proj for this register. */
1139 pmap_insert(regs, (void *) reg, proj);
/* Fresh memory Proj behind the barrier. */
1143 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1146 obstack_free(&env->obst, rm);
1151 * Modify the irg itself and the frame type.
1153 static void modify_irg(be_abi_irg_t *env)
1155 be_abi_call_t *call = env->call;
1156 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1157 const arch_register_t *sp = arch_isa_sp(isa);
1158 ir_graph *irg = env->birg->irg;
1159 ir_node *bl = get_irg_start_block(irg);
1160 ir_node *end = get_irg_end_block(irg);
1161 ir_node *arg_tuple = get_irg_args(irg);
1162 ir_node *no_mem = get_irg_no_mem(irg);
1163 ir_node *mem = get_irg_initial_mem(irg);
1164 type *method_type = get_entity_type(get_irg_entity(irg));
1165 pset *dont_save = pset_new_ptr(8);
1166 int n_params = get_method_n_params(method_type);
1172 const arch_register_t *fp_reg;
1173 ir_node *frame_pointer;
1175 ir_node *reg_params_bl;
1177 const ir_edge_t *edge;
1178 ir_type *arg_type, *bet_type;
1181 bitset_t *used_proj_nr;
1182 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
1184 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1186 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1187 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
1189 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1190 env->regs = pmap_create();
1192 /* Find the maximum proj number of the argument tuple proj */
1193 foreach_out_edge(arg_tuple, edge) {
1194 ir_node *irn = get_edge_src_irn(edge);
1195 int nr = get_Proj_proj(irn);
1196 max_arg = MAX(max_arg, nr);
1199 used_proj_nr = bitset_alloca(1024);
1200 max_arg = MAX(max_arg + 1, n_params);
1201 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
1202 memset(args, 0, max_arg * sizeof(args[0]));
1204 /* Fill the argument vector */
1205 foreach_out_edge(arg_tuple, edge) {
1206 ir_node *irn = get_edge_src_irn(edge);
1207 int nr = get_Proj_proj(irn);
1209 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1212 arg_type = compute_arg_type(env, call, method_type);
1213 bet_type = call->cb->get_between_type(env->cb);
1214 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1216 /* Count the register params and add them to the number of Projs for the RegParams node */
1217 for(i = 0; i < n_params; ++i) {
1218 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1219 if(arg->in_reg && args[i]) {
1220 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1221 assert(i == get_Proj_proj(args[i]));
1223 /* For now, associate the register with the old Proj from Start representing that argument. */
1224 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1225 bitset_set(used_proj_nr, i);
1226 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1230 /* Collect all callee-save registers */
1231 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1232 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1233 for(j = 0; j < cls->n_regs; ++j) {
1234 const arch_register_t *reg = &cls->regs[j];
1235 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1236 pmap_insert(env->regs, (void *) reg, NULL);
1240 pmap_insert(env->regs, (void *) sp, NULL);
1241 pmap_insert(env->regs, (void *) isa->bp, NULL);
1242 reg_params_bl = get_irg_start_block(irg);
1243 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1246 * make proj nodes for the callee save registers.
1247 * memorize them, since Return nodes get those as inputs.
1249 * Note, that if a register corresponds to an argument, the regs map contains
1250 * the old Proj from start for that argument.
1253 rm = reg_map_to_arr(&env->obst, env->regs);
1254 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1255 arch_register_t *reg = (void *) rm[i].reg;
1256 ir_node *arg_proj = rm[i].irn;
1258 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1260 int pos = BE_OUT_POS((int) nr);
1263 bitset_set(used_proj_nr, nr);
1264 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1265 pmap_insert(env->regs, (void *) reg, proj);
1266 be_set_constr_single_reg(env->reg_params, pos, reg);
1267 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1270 * If the register is an ignore register,
1271 * The Proj for that register shall also be ignored during register allocation.
1273 if(arch_register_type_is(reg, ignore))
1274 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1276 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1278 obstack_free(&env->obst, rm);
1280 /* Generate the Prologue */
1281 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1282 barrier = create_barrier(env, bl, &mem, env->regs, 0);
1284 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1285 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
1286 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1287 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1288 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1289 set_irg_frame(irg, frame_pointer);
1291 if (is_Proj(frame_pointer) && get_Proj_pred(frame_pointer) == barrier)
1292 be_node_set_flags(barrier, BE_OUT_POS(get_Proj_proj(frame_pointer)), arch_irn_flags_ignore);
1294 /* Now, introduce stack param nodes for all parameters passed on the stack */
1295 for(i = 0; i < max_arg; ++i) {
1296 ir_node *arg_proj = args[i];
1297 ir_node *repl = NULL;
1299 if(arg_proj != NULL) {
1300 be_abi_call_arg_t *arg;
1301 ir_type *param_type;
1302 int nr = get_Proj_proj(arg_proj);
1304 nr = MIN(nr, n_params);
1305 arg = get_call_arg(call, 0, nr);
1306 param_type = get_method_param_type(method_type, nr);
1309 repl = pmap_get(env->regs, (void *) arg->reg);
1312 else if(arg->on_stack) {
1313 /* For atomic parameters which are actually used, we create a StackParam node. */
1314 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1315 ir_mode *mode = get_type_mode(param_type);
1316 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1317 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1320 /* The stack parameter is not primitive (it is a struct or array),
1321 we thus will create a node representing the parameter's address
1324 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1328 assert(repl != NULL);
1329 edges_reroute(args[i], repl, irg);
1333 /* All Return nodes hang on the End node, so look for them there. */
1334 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1335 ir_node *irn = get_irn_n(end, i);
1337 if(get_irn_opcode(irn) == iro_Return) {
1338 ir_node *bl = get_nodes_block(irn);
1339 int n_res = get_Return_n_ress(irn);
1340 pmap *reg_map = pmap_create();
1341 ir_node *mem = get_Return_mem(irn);
1346 const arch_register_t **regs;
1348 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1350 /* Insert results for Return into the register map. */
1351 for(i = 0; i < n_res; ++i) {
1352 ir_node *res = get_Return_res(irn, i);
1353 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1354 assert(arg->in_reg && "return value must be passed in register");
1355 pmap_insert(reg_map, (void *) arg->reg, res);
1358 /* Add uses of the callee save registers. */
1359 pmap_foreach(env->regs, ent) {
1360 const arch_register_t *reg = ent->key;
1361 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1362 pmap_insert(reg_map, ent->key, ent->value);
1365 /* Make the Epilogue node and call the arch's epilogue maker. */
1366 create_barrier(env, bl, &mem, reg_map, 1);
1367 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1370 Maximum size of the in array for Return nodes is
1371 return args + callee save/ignore registers + memory + stack pointer
1373 in_max = pmap_count(reg_map) + get_Return_n_ress(irn) + 2;
1375 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1376 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1379 in[1] = be_abi_reg_map_get(reg_map, sp);
1384 /* clear SP entry, since it has already been grown. */
1385 pmap_insert(reg_map, (void *) sp, NULL);
1386 for(i = 0; i < n_res; ++i) {
1387 ir_node *res = get_Return_res(irn, i);
1388 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1390 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1391 regs[n++] = arg->reg;
1393 /* Clear the map entry to mark the register as processed. */
1394 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1397 /* grow the rest of the stuff. */
1398 pmap_foreach(reg_map, ent) {
1401 regs[n++] = ent->key;
1405 /* The in array for the new back end return is now ready. */
1406 ret = be_new_Return(get_irn_dbg_info(irn), irg, bl, n, in);
1408 /* Set the register classes of the return's parameter accordingly. */
1409 for(i = 0; i < n; ++i)
1411 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1413 /* Free the space of the Epilog's in array and the register <-> proj map. */
1414 obstack_free(&env->obst, in);
1416 pmap_destroy(reg_map);
1420 del_pset(dont_save);
1421 obstack_free(&env->obst, args);
1425 * Walker: puts all Alloc(stack_alloc) on a obstack
1427 static void collect_alloca_walker(ir_node *irn, void *data)
1429 be_abi_irg_t *env = data;
1430 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1431 obstack_ptr_grow(&env->obst, irn);
/*
 * Build the per-graph ABI environment: query the calling convention for
 * this method's type, lower the graph's calls and introduce the concrete
 * stack pointer. (Several statements of this function are elided in this
 * listing — the call lowering, graph processing and the final return.)
 */
1434 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1436 	be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1437 	ir_node *old_frame = get_irg_frame(birg->irg);
1438 	ir_graph *irg = birg->irg;
	/* Ask the ISA how this method's type maps to the calling convention. */
1443 	env->isa = birg->main_env->arch_env->isa;
1444 	env->method_type = get_entity_type(get_irg_entity(irg));
1445 	env->call = be_abi_call_new();
1446 	arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1448 	env->dce_survivor = new_survive_dce();
1450 	env->stack_phis = pset_new_ptr(16);
	/* Use an Unknown node as a stack-pointer placeholder; its uses are
	 * rerouted to the real init_sp below. */
1451 	env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1452 	FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
	/* Give the ABI callbacks a chance to set up their self pointer. */
1454 	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1456 	obstack_init(&env->obst);
	/* Install the ABI's own irn handler/ops for the stack-fix Phis. */
1458 	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1459 	env->irn_ops.impl = &abi_irn_ops;
1461 	/* Lower all call nodes in the IRG. */
1464 	/* Process the IRG */
1467 	/* reroute the stack origin of the calls to the true stack origin. */
1468 	edges_reroute(dummy, env->init_sp, irg);
1469 	edges_reroute(old_frame, get_irg_frame(irg), irg);
1471 	/* Make some important node pointers survive the dead node elimination. */
1472 	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1473 	pmap_foreach(env->regs, ent)
1474 		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1476 	arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
	/* Let the ABI callbacks clean up their per-call state. */
1478 	env->call->cb->done(env->cb);
/*
 * Tear down the ABI environment built by be_abi_introduce():
 * releases all owned containers and pops the ABI's irn handler.
 */
1483 void be_abi_free(be_abi_irg_t *env)
1485 	free_survive_dce(env->dce_survivor);
1486 	del_pset(env->stack_phis);
1487 	pmap_destroy(env->regs);
	/* obstack_free(obst, NULL) releases everything on the obstack. */
1488 	obstack_free(&env->obst, NULL);
	/* Undo the arch_env_push_irn_handler() from be_abi_introduce(). */
1489 	arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1497 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1498 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1499 | _| | |> < ___) | || (_| | (__| <
1500 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
1505 * Walker. Collect all stack modifying nodes.
1507 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1511 if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
1512 pset_insert_ptr(s, irn);
/*
 * Rebuild SSA form for the stack pointer: collect all stack-modifying
 * nodes (plus init_sp) and let the SSA constructor insert Phi nodes,
 * which are recorded in env->stack_phis so the ABI irn handler can
 * recognize them later.
 */
1515 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1517 	dom_front_info_t *df;
1520 	/* We need dominance frontiers for fix up */
1521 	df = be_compute_dominance_frontiers(env->birg->irg);
1522 	stack_nodes = pset_new_ptr(16);
	/* The initial stack pointer is a definition as well. */
1523 	pset_insert_ptr(stack_nodes, env->init_sp);
1524 	irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
	/* SSA construction; new Phis end up in env->stack_phis. */
1525 	be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1526 	del_pset(stack_nodes);
1528 	/* Liveness could have changed due to Phi nodes. */
1529 	be_liveness(env->birg->irg);
1531 	/* free these dominance frontiers */
1532 	be_free_dominance_frontiers(df);
1536 * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
1537 * into -1 or 1, respectively.
1538 * @param irn The node.
1539 * @return 1, if the direction of the IncSP was along, -1 if against.
1541 static int get_dir(ir_node *irn)
1543 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
/*
 * Walks the schedule of a block tracking the running stack-pointer bias:
 * IncSP nodes change the bias, nodes with a frame entity get their real
 * offset (entity offset plus current bias) assigned. Per its use in
 * be_abi_fix_stack_bias(), it returns the bias after the block.
 * NOTE(review): the bias accumulation and the return statement are on
 * lines elided in this listing — confirm against the full source.
 */
1546 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1548 	const arch_env_t *aenv = env->birg->main_env->arch_env;
1549 	int omit_fp = env->call->flags.bits.try_omit_fp;
1552 	sched_foreach(bl, irn) {
1555 		If the node modifies the stack pointer by a constant offset,
1556 		record that in the bias.
1558 		if(be_is_IncSP(irn)) {
1559 			int ofs = be_get_IncSP_offset(irn);
1560 			int dir = get_dir(irn);
			/* BE_STACK_FRAME_SIZE is a placeholder: patch in the
			 * real frame size now that it is known. */
1562 			if(ofs == BE_STACK_FRAME_SIZE) {
1563 				ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1564 				be_set_IncSP_offset(irn, ofs);
1572 		Else check, if the node relates to an entity on the stack frame.
1573 		If so, set the true offset (including the bias) for that
1577 			entity *ent = arch_get_frame_entity(aenv, irn);
1579 			int offset = get_stack_entity_offset(env->frame, ent, bias);
1580 			arch_set_frame_offset(aenv, irn, offset);
1581 			DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1590 * A helper struct for the bias walker.
	/* NOTE(review): the `struct bias_walk {` header line is elided in
	 * this listing; the two members below belong to it. */
1593 be_abi_irg_t *env; /**< The ABI irg environment. */
1594 int start_block_bias; /**< The bias at the end of the start block. */
1598 * Block-Walker: fix all stack offsets
1600 static void stack_bias_walker(ir_node *bl, void *data)
1602 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1603 struct bias_walk *bw = data;
1604 process_stack_bias(bw->env, bl, bw->start_block_bias);
/*
 * Computes the frame layout's initial offsets, determines the stack bias
 * produced by the start block, and propagates it to all other blocks via
 * stack_bias_walker().
 * NOTE(review): the assignment of bw.env appears to be on a line elided
 * in this listing — confirm against the full source.
 */
1608 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1610 	ir_graph *irg = env->birg->irg;
1611 	struct bias_walk bw;
1613 	stack_frame_compute_initial_offset(env->frame);
1614 	// stack_frame_dump(stdout, env->frame);
1616 	/* Determine the stack bias at the end of the start block. */
1617 	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1619 	/* fix the bias in all other blocks */
1621 	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1624 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1626 assert(arch_register_type_is(reg, callee_save));
1627 assert(pmap_contains(abi->regs, (void *) reg));
1628 return pmap_get(abi->regs, (void *) reg);
1632 _____ _____ _ _ _ _ _ _
1633 |_ _| __ \| \ | | | | | | | | |
1634 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1635 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1636 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1637 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1639 for Phi nodes which are created due to stack modifying nodes
1640 such as IncSP, AddSP and SetSP.
1642 These Phis are always to be ignored by the reg alloc and are
1643 fixed on the SP register of the ISA.
1646 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1648 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1649 const void *res = NULL;
1651 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
1652 res = &abi->irn_ops;
1657 static void be_abi_limited(void *data, bitset_t *bs)
1659 be_abi_irg_t *abi = data;
1660 bitset_clear_all(bs);
1661 bitset_set(bs, abi->isa->sp->index);
1664 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1666 be_abi_irg_t *abi = get_abi_from_ops(self);
1667 const arch_register_t *reg = abi->isa->sp;
1669 memset(req, 0, sizeof(req[0]));
1671 if(pos == BE_OUT_POS(0)) {
1672 req->cls = reg->reg_class;
1673 req->type = arch_register_req_type_limited;
1674 req->limited = be_abi_limited;
1675 req->limited_env = abi;
1678 else if(pos >= 0 && pos < get_irn_arity(irn)) {
1679 req->cls = reg->reg_class;
1680 req->type = arch_register_req_type_normal;
/* Register-setter callback for ABI-handled nodes.
 * NOTE(review): the body is elided in this listing; presumably a no-op,
 * since abi_get_irn_reg below always reports the fixed stack pointer —
 * confirm against the full source. */
1686 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1690 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1692 const be_abi_irg_t *abi = get_abi_from_ops(self);
1693 return abi->isa->sp;
1696 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1698 return arch_irn_class_normal;
1701 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1703 return arch_irn_flags_ignore;
/* Frame-entity query for ABI-handled nodes.
 * NOTE(review): the body is elided in this listing; presumably returns
 * NULL, as stack-fix Phis have no frame entity — confirm against the
 * full source. */
1706 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
/* Stack-bias setter for ABI-handled nodes.
 * NOTE(review): the body is elided in this listing; presumably a no-op —
 * confirm against the full source. */
1711 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* Node-ops vtable used for the Phi nodes inserted by the stack fixup. */
1715 static const arch_irn_ops_if_t abi_irn_ops = {
1716 	abi_get_irn_reg_req,
	/* NOTE(review): further callback entries are elided in this listing. */
1721 	abi_get_frame_entity,
1725 static const arch_irn_handler_t abi_irn_handler = {