4 * @author Sebastian Hack
19 #include "irgraph_t.h"
22 #include "iredges_t.h"
25 #include "irprintf_t.h"
33 #include "besched_t.h"
35 #define MAX(x, y) ((x) > (y) ? (x) : (y))
36 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/* Descriptor of a single ABI call argument or result (element of the call's param set). */
38 typedef struct _be_abi_call_arg_t {
41 unsigned on_stack : 1; /* 1 if the argument is transferred on the stack */
44 const arch_register_t *reg; /* register used when passed/returned in a register */
47 unsigned space_before; /* bytes of padding placed before the argument on the stack */
/* An ABI call object: flags, backend callbacks and (further below) the set of argument descriptors. */
51 struct _be_abi_call_t {
52 be_abi_call_flags_t flags; /* ABI flags for this call (omit fp, left-to-right, ...) */
53 const be_abi_callbacks_t *cb; /* backend-supplied ABI callbacks */
/* The stack frame is composed of three types: argument, "between" and local frame type. */
58 #define N_FRAME_TYPES 3
60 typedef struct _be_stack_frame_t {
65 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
/* A slot inside a stack frame; keeps a back pointer to the owning frame model. */
71 struct _be_stack_slot_t {
72 struct _be_stack_frame_t *frame; /* the frame this slot belongs to */
/* Per-irg ABI environment: everything the ABI lowering phase needs for one graph. */
76 struct _be_abi_irg_t {
78 be_stack_frame_t *frame; /**< The stack frame model. */
79 const be_irg_t *birg; /**< The back end IRG. */
80 const arch_isa_t *isa; /**< The isa. */
81 survive_dce_t *dce_survivor;
83 be_abi_call_t *call; /**< The ABI call information. */
84 type *method_type; /**< The type of the method of the IRG. */
86 ir_node *init_sp; /**< The node representing the stack pointer
87 at the start of the function. */
89 ir_node *reg_params; /**< The reg params node. */
90 pmap *regs; /**< A map of all callee-save and ignore regs to
91 their Projs to the RegParams node. */
93 pset *stack_phis; /**< The set of all Phi nodes inserted due to
94 stack pointer modifying nodes. */
96 int start_block_bias; /**< The stack bias at the end of the start block. */
98 void *cb; /**< ABI Callback self pointer. */
100 pmap *keep_map; /**< mapping blocks to keep nodes. */
101 pset *ignore_regs; /**< Additional registers which shall be ignored. */
103 arch_irn_handler_t irn_handler; /* irn handler embedded so get_abi_from_handler() can recover the env */
104 arch_irn_ops_t irn_ops; /* likewise for get_abi_from_ops() */
105 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
108 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
109 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
111 /* Forward, since we need it in be_abi_introduce(). */
112 static const arch_irn_ops_if_t abi_irn_ops;
113 static const arch_irn_handler_t abi_irn_handler;
115 /* Flag: if set, try to omit the frame pointer if called by the backend */
119 _ ____ ___ ____ _ _ _ _
120 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
121 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
122 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
123 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
125 These callbacks are used by the backend to set the parameters
126 for a specific call type.
130 * Set compare function: compares two ABI call object arguments.
/* Returns 0 iff both entries describe the same slot (same is_res and pos), per the set_cmp contract. */
132 static int cmp_call_arg(const void *a, const void *b, size_t n)
134 const be_abi_call_arg_t *p = a, *q = b;
135 return !(p->is_res == q->is_res && p->pos == q->pos);
139 * Get or set an ABI call object argument.
141 * @param call the abi call
142 * @param is_res true for call results, false for call arguments
143 * @param pos position of the argument
144 * @param do_insert true if the argument is set, false if it's retrieved
146 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
148 be_abi_call_arg_t arg;
/* Zero the template so cmp_call_arg() only sees the key fields we set. */
151 memset(&arg, 0, sizeof(arg));
/* Simple hash mixing result/argument kind with the position. */
155 hash = is_res * 128 + pos;
158 ? set_insert(call->params, &arg, sizeof(arg), hash)
159 : set_find(call->params, &arg, sizeof(arg), hash);
163 * Retrieve an ABI call object argument.
165 * @param call the ABI call object
166 * @param is_res true for call results, false for call arguments
167 * @param pos position of the argument
 * @return the argument descriptor, or NULL if it was never set (lookup only, no insertion)
169 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
171 return get_or_set_call_arg(call, is_res, pos, 0);
174 /* Set the flags for a call. Also records the backend ABI callbacks in the call object. */
175 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/* Mark call argument arg_pos as passed on the stack with the given alignment and padding. */
181 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
183 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
185 arg->alignment = alignment;
186 arg->space_before = space_before;
187 arg->space_after = space_after;
/* NOTE(review): this assert fires only after the fields were already stored; consider checking first. */
188 assert(alignment > 0 && "Alignment must be greater than 0");
/* Mark call argument arg_pos as passed in register reg. */
191 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
193 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Mark call result arg_pos as returned in register reg (is_res == 1). */
198 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
200 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
205 /* Get the flags of a ABI call object. */
206 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
212 * Constructor for a new ABI call object.
 *
 * Allocates the object, creates the argument set and initializes the
 * try_omit_fp flag from the global be_omit_fp setting.
214 * @return the new ABI call object
216 static be_abi_call_t *be_abi_call_new()
218 be_abi_call_t *call = xmalloc(sizeof(call[0]));
220 call->params = new_set(cmp_call_arg, 16);
223 call->flags.bits.try_omit_fp = be_omit_fp;
228 * Destructor for an ABI call object. Releases the argument set (and the object itself).
230 static void be_abi_call_free(be_abi_call_t *call)
232 del_set(call->params);
238 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
239 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
240 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
241 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
244 Handling of the stack frame. It is composed of three types:
245 1) The type of the arguments which are pushed on the stack.
246 2) The "between type" which consists of stuff the call of the
247 function pushes on the stack (like the return address and
248 the old base pointer for ia32).
249 3) The Firm frame type which consists of all local variables
/*
 * Compute the offset of a frame entity relative to the (biased) stack pointer:
 * the entity's offset inside its type, plus the sizes of all frame types laid
 * out below it, minus the frame's initial offset, corrected by the bias.
 */
253 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
255 type *t = get_entity_owner(ent);
256 int ofs = get_entity_offset_bytes(ent);
260 /* Find the type the entity is contained in. */
261 for(index = 0; index < N_FRAME_TYPES; ++index) {
262 if(frame->order[index] == t)
266 /* Add the size of all the types below the one of the entity to the entity's offset */
267 for(i = 0; i < index; ++i)
268 ofs += get_type_size_bytes(frame->order[i]);
270 /* correct the offset by the initial position of the frame pointer */
271 ofs -= frame->initial_offset;
273 /* correct the offset with the current bias. */
280 * Retrieve the entity with given offset from a frame type.
 *
 * Linear scan over the class members; returns the first member whose
 * byte offset equals @p offset (NULL if none matches — hidden here).
282 static entity *search_ent_with_offset(type *t, int offset)
286 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
287 entity *ent = get_class_member(t, i);
288 if(get_entity_offset_bytes(ent) == offset)
/*
 * Compute and cache the frame's initial offset: the offset of the entity at
 * position 0 of the base type (between type for downwards stacks, frame type
 * otherwise), measured with initial_offset temporarily reset to 0.
 */
295 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
297 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
298 entity *ent = search_ent_with_offset(base, 0);
/* Reset first so get_stack_entity_offset() does not subtract a stale value. */
299 frame->initial_offset = 0;
300 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
301 return frame->initial_offset;
/*
 * Initialize a stack frame model from the three layout types.
 * The order[] array is filled according to the stack direction: the between
 * type is always in the middle; args and locals swap ends depending on
 * which way the stack grows.
 */
304 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
306 frame->arg_type = args;
307 frame->between_type = between;
308 frame->frame_type = locals;
309 frame->initial_offset = 0;
310 frame->stack_dir = stack_dir;
311 frame->order[1] = between;
314 frame->order[0] = args;
315 frame->order[2] = locals;
319 frame->order[0] = locals;
320 frame->order[2] = args;
/* Debug helper: dump the frame layout (types, sizes and per-entity offsets) to a file. */
326 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
330 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
331 for(j = 0; j < N_FRAME_TYPES; ++j) {
332 type *t = frame->order[j];
334 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
335 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
336 entity *ent = get_class_member(t, i);
337 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
343 * If irn is a Sel node computes the address of an entity
344 * on the frame type return the entity, else NULL.
 *
 * Only Sels whose pointer operand is the graph's frame node qualify.
346 static INLINE entity *get_sel_ent(ir_node *irn)
348 if(is_Sel(irn) && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
349 return get_Sel_entity(irn);
356 * Walker: Replaces Loads, Stores and Sels of frame type entities
357 * by FrameLoad, FrameStore and FrameAddress.
359 static void lower_frame_sels_walker(ir_node *irn, void *data)
362 entity *ent = get_sel_ent(irn);
365 be_abi_irg_t *env = data;
366 ir_node *bl = get_nodes_block(irn);
367 ir_graph *irg = get_irn_irg(bl);
368 ir_node *frame = get_irg_frame(irg);
/* Build a backend FrameAddr in the stack pointer's register class for the selected entity. */
370 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
376 * Returns non-zero if the call argument at given position
377 * is transferred on the stack.
 *
 * An argument counts as on-stack when it has a descriptor and is not in a register.
379 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
381 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
382 return arg && !arg->in_reg;
392 Adjustment of the calls inside a graph.
397 * Transform a call node.
 *
 * Lowers a Firm Call to a backend be_Call: stores/pushes stack arguments,
 * collects register arguments, rewires result Projs, adds caller-save
 * Projs plus a Keep, and fixes the stack pointer around the call.
398 * @param env The ABI environment for the current irg.
399 * @param irn The call node.
400 * @param curr_sp The stack pointer node to use.
401 * @return The stack pointer after the call.
403 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
405 ir_graph *irg = env->birg->irg;
406 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
407 be_abi_call_t *call = be_abi_call_new();
408 ir_type *mt = get_Call_type(irn);
409 ir_node *call_ptr = get_Call_ptr(irn);
410 int n_params = get_method_n_params(mt);
411 ir_node *curr_mem = get_Call_mem(irn);
412 ir_node *bl = get_nodes_block(irn);
413 pset *results = pset_new_ptr(8);
414 pset *caller_save = pset_new_ptr(8);
416 int stack_dir = arch_isa_stack_dir(isa);
417 const arch_register_t *sp = arch_isa_sp(isa);
418 ir_mode *mach_mode = sp->reg_class->mode;
419 struct obstack *obst = &env->obst;
420 ir_node *no_mem = get_irg_no_mem(irg);
421 int no_alloc = call->flags.bits.frame_is_setup_on_call;
423 ir_node *res_proj = NULL;
424 int curr_res_proj = pn_Call_max;
431 const ir_edge_t *edge;
436 /* Let the isa fill out the abi description for that call node. */
437 arch_isa_get_call_abi(isa, mt, call);
439 /* Insert code to put the stack arguments on the stack. */
440 assert(get_Call_n_params(irn) == n_params);
441 for(i = 0; i < n_params; ++i) {
442 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
445 stack_size += arg->space_before;
446 stack_size = round_up2(stack_size, arg->alignment);
447 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
448 stack_size += arg->space_after;
/* remember the index of every stack argument on the obstack */
449 obstack_int_grow(obst, i);
453 pos = obstack_finish(obst);
455 /* Collect all arguments which are passed in registers. */
456 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
457 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
458 if(arg && arg->in_reg) {
459 obstack_int_grow(obst, i);
463 low_args = obstack_finish(obst);
465 /* If there are some parameters which shall be passed on the stack. */
468 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
471 * Reverse list of stack parameters if call arguments are from left to right.
472 * We must reverse them again if they are pushed (not stored) and the stack
473 * direction is downwards.
475 if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
476 for(i = 0; i < n_pos >> 1; ++i) {
477 int other = n_pos - i - 1;
485 * If the stack is decreasing and we do not want to store sequentially,
486 * or someone else allocated the call frame
487 * we allocate as much space on the stack all parameters need, by
488 * moving the stack pointer along the stack's direction.
490 if(stack_dir < 0 && !do_seq && !no_alloc) {
491 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
494 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
495 for(i = 0; i < n_pos; ++i) {
497 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
498 ir_node *param = get_Call_param(irn, p);
499 ir_node *addr = curr_sp;
501 type *param_type = get_method_param_type(mt, p);
502 int param_size = get_type_size_bytes(param_type) + arg->space_after;
505 * If we wanted to build the arguments sequentially,
506 * the stack pointer for the next must be incremented,
507 * and the memory value propagated.
511 addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
512 param_size + arg->space_before, be_stack_dir_expand);
515 curr_ofs += arg->space_before;
516 curr_ofs = round_up2(curr_ofs, arg->alignment);
518 /* Make the expression to compute the argument's offset. */
520 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
521 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
525 /* Insert a store for primitive arguments. */
526 if (is_atomic_type(param_type)) {
527 mem = new_r_Store(irg, bl, curr_mem, addr, param);
528 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
531 /* Make a mem copy for compound arguments. */
533 assert(mode_is_reference(get_irn_mode(param)));
534 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
535 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
538 curr_ofs += param_size;
543 obstack_ptr_grow(obst, mem);
546 in = (ir_node **) obstack_finish(obst);
548 /* We need the sync only, if we didn't build the stores sequentially. */
550 curr_mem = new_r_Sync(irg, bl, n_pos, in);
551 obstack_free(obst, in);
554 /* Collect caller save registers */
555 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
557 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
558 for(j = 0; j < cls->n_regs; ++j) {
559 const arch_register_t *reg = arch_register_for_index(cls, j);
560 if(arch_register_type_is(reg, caller_save))
561 pset_insert_ptr(caller_save, (void *) reg);
565 /* search the greatest result proj number */
567 /* TODO: what if the result is NOT used? Currently there is
568 * no way to detect this later, especially there is no way to
569 * see this in the proj numbers.
570 * While this is ok for the register allocator, it is bad for
571 * backends which need to change the be_Call further (x87 simulator
572 * for instance. However for this particular case the call_type is
575 foreach_out_edge(irn, edge) {
576 const ir_edge_t *res_edge;
/* NOTE(review): this inner 'irn' shadows the function parameter 'irn' (the Call). */
577 ir_node *irn = get_edge_src_irn(edge);
579 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
581 foreach_out_edge(irn, res_edge) {
583 be_abi_call_arg_t *arg;
584 ir_node *res = get_edge_src_irn(res_edge);
586 assert(is_Proj(res));
588 proj = get_Proj_proj(res);
589 arg = get_call_arg(call, 1, proj);
592 shift the proj number to the right, since we will drop the
593 unspeakable Proj_T from the Call. Therefore, all real argument
594 Proj numbers must be increased by pn_be_Call_first_res
596 proj += pn_be_Call_first_res;
597 set_Proj_proj(res, proj);
598 obstack_ptr_grow(obst, res);
600 if(proj > curr_res_proj)
601 curr_res_proj = proj;
/* the result lives in a register, so that register is not clobbered across the call */
603 pset_remove_ptr(caller_save, arg->reg);
604 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
/* NULL-terminate the result Proj array. */
611 obstack_ptr_grow(obst, NULL);
612 res_projs = obstack_finish(obst);
614 /* make the back end call node and set its register requirements. */
615 for(i = 0; i < n_low_args; ++i)
616 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
618 in = obstack_finish(obst);
/* A direct call to a SymConst can carry the callee as an immediate entity. */
620 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
621 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, curr_sp,
622 curr_res_proj + pset_count(caller_save), n_low_args, in,
624 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
628 low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem, curr_sp, call_ptr,
629 curr_res_proj + pset_count(caller_save), n_low_args, in,
634 Set the register class of the call address to the same as the stack pointer's.
635 That's probably buggy for some architectures.
637 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
639 /* Set the register classes and constraints of the Call parameters. */
640 for(i = 0; i < n_low_args; ++i) {
641 int index = low_args[i];
642 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
643 assert(arg->reg != NULL);
645 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
648 /* Set the register constraints of the results. */
649 for(i = 0; res_projs[i]; ++i) {
650 ir_node *irn = res_projs[i];
651 int proj = get_Proj_proj(irn);
653 /* Correct Proj number since it has been adjusted! (see above) */
654 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
657 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
659 obstack_free(obst, in);
660 exchange(irn, low_call);
662 /* redirect the result projs to the lowered call instead of the Proj_T */
663 for(i = 0; res_projs[i]; ++i)
664 set_Proj_pred(res_projs[i], low_call);
666 /* Make additional projs for the caller save registers
667 and the Keep node which keeps them alive. */
668 if(pset_count(caller_save) > 0) {
669 const arch_register_t *reg;
673 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
674 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
676 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
677 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
678 set_irn_link(proj, (void *) reg);
679 obstack_ptr_grow(obst, proj);
683 in = (ir_node **) obstack_finish(obst);
684 keep = be_new_Keep(NULL, irg, bl, n, in);
685 for(i = 0; i < n; ++i) {
686 const arch_register_t *reg = get_irn_link(in[i]);
687 be_node_set_reg_class(keep, i, reg->reg_class);
689 obstack_free(obst, in);
692 /* Clean up the stack. */
694 ir_node *mem_proj = NULL;
696 foreach_out_edge(low_call, edge) {
697 ir_node *irn = get_edge_src_irn(edge);
698 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
705 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
707 /* Clean up the stack frame if we allocated it */
709 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
712 be_abi_call_free(call);
713 obstack_free(obst, pos);
715 del_pset(caller_save);
722 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 *
 * Only stack Allocs are handled; heap Allocs pass through untouched.
 * Dynamic stack allocation forbids frame-pointer omission.
724 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
726 if (get_Alloc_where(alloc) == stack_alloc) {
727 ir_node *bl = get_nodes_block(alloc);
728 ir_graph *irg = get_irn_irg(bl);
729 ir_node *alloc_mem = NULL;
730 ir_node *alloc_res = NULL;
732 const ir_edge_t *edge;
/* Find the result/memory Projs of the Alloc. */
735 foreach_out_edge(alloc, edge) {
736 ir_node *irn = get_edge_src_irn(edge);
738 assert(is_Proj(irn));
739 switch(get_Proj_proj(irn)) {
751 /* Beware: currently Alloc nodes without a result might happen,
752 only escape analysis kills them and this phase runs only for object
753 oriented source. We kill the Alloc here. */
754 if (alloc_res == NULL) {
755 exchange(alloc_mem, get_Alloc_mem(alloc));
759 /* The stack pointer will be modified in an unknown manner.
760 We cannot omit it. */
761 env->call->flags.bits.try_omit_fp = 0;
762 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
/* On downwards-growing stacks the AddSP result is the allocated block's address. */
764 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
766 if(alloc_mem != NULL)
767 exchange(alloc_mem, new_r_NoMem(irg));
776 * Walker for dependent_on().
777 * This function searches a node tgt recursively from a given node
778 * but is restricted to the given block.
779 * @return 1 if tgt was reachable from curr, 0 if not.
781 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
/* already visited in this traversal round -> stop */
785 if(get_irn_visited(curr) >= visited_nr)
788 set_irn_visited(curr, visited_nr);
/* do not leave the block we are restricted to */
789 if(get_nodes_block(curr) != bl)
795 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
796 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
804 * Check if a node is somehow data dependent on another one.
805 * both nodes must be in the same basic block.
806 * @param n1 The first node.
807 * @param n2 The second node.
808 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
810 static int dependent_on(ir_node *n1, ir_node *n2)
812 ir_node *bl = get_nodes_block(n1);
813 ir_graph *irg = get_irn_irg(bl);
/* use a fresh visited number so the walk in check_dependence() starts clean */
814 long vis_nr = get_irg_visited(irg) + 1;
816 assert(bl == get_nodes_block(n2));
817 set_irg_visited(irg, vis_nr);
818 return check_dependence(n1, n2, bl, vis_nr);
/* qsort comparator ordering calls by data dependency (dependent call sorts first).
   NOTE(review): "dependecy" in the name is a typo kept for compatibility with callers. */
821 static int cmp_call_dependecy(const void *c1, const void *c2)
823 ir_node *n1 = *(ir_node **) c1;
824 ir_node *n2 = *(ir_node **) c2;
827 Classical qsort() comparison function behavior:
828 0 if both elements are equal
829 1 if second is "smaller" that first
830 -1 if first is "smaller" that second
832 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
836 * Walker: links all Call nodes to the Block they are contained.
 *
 * Builds a singly linked list per block through the nodes' link fields and
 * clears the irg_is_leaf flag as soon as any Call is seen.
838 static void link_calls_in_block_walker(ir_node *irn, void *data)
841 be_abi_irg_t *env = data;
842 ir_node *bl = get_nodes_block(irn);
843 void *save = get_irn_link(bl);
845 env->call->flags.bits.irg_is_leaf = 0;
/* prepend irn to the block's call list */
847 set_irn_link(irn, save);
848 set_irn_link(bl, irn);
854 * Process all Call nodes inside a basic block.
855 * Note that the link field of the block must contain a linked list of all
856 * Call nodes inside the Block. We first order this list according to data dependency
857 * and then connect the calls together.
859 static void process_calls_in_block(ir_node *bl, void *data)
861 be_abi_irg_t *env = data;
862 ir_node *curr_sp = env->init_sp;
/* collect the block's call list (built by link_calls_in_block_walker) into an array */
866 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
867 obstack_ptr_grow(&env->obst, irn);
869 /* If there were call nodes in the block. */
875 nodes = obstack_finish(&env->obst);
877 /* order the call nodes according to data dependency */
878 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* process from least to most dependent, threading the stack pointer through */
880 for(i = n - 1; i >= 0; --i) {
881 ir_node *irn = nodes[i];
883 switch(get_irn_opcode(irn)) {
885 curr_sp = adjust_call(env, irn, curr_sp);
888 curr_sp = adjust_alloc(env, irn, curr_sp);
895 obstack_free(&env->obst, nodes);
897 /* Keep the last stack state in the block by tying it to Keep node */
899 keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
900 pmap_insert(env->keep_map, bl, keep);
/* remember the block's final stack pointer in its link field */
903 set_irn_link(bl, curr_sp);
907 * Adjust all call nodes in the graph to the ABI conventions.
 *
 * First pass links Calls to their blocks (and detects leaf graphs),
 * second pass lowers the calls block by block.
909 static void process_calls(be_abi_irg_t *env)
911 ir_graph *irg = env->birg->irg;
913 env->call->flags.bits.irg_is_leaf = 1;
914 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
915 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
/* Walker: collect all Return nodes of the graph on the obstack passed as data. */
918 static void collect_return_walker(ir_node *irn, void *data)
920 if(get_irn_opcode(irn) == iro_Return) {
921 struct obstack *obst = data;
922 obstack_ptr_grow(obst, irn);
/*
 * Build the function prologue's stack/frame handling in the start block:
 * grows the stack by BE_STACK_FRAME_SIZE and, unless the frame pointer is
 * omitted, materializes the frame pointer as a pinned copy of the stack
 * pointer. Replaces the graph's frame node.
 */
927 static ir_node *setup_frame(be_abi_irg_t *env)
929 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
930 const arch_register_t *sp = isa->sp;
931 const arch_register_t *bp = isa->bp;
932 be_abi_call_flags_bits_t flags = env->call->flags.bits;
933 ir_graph *irg = env->birg->irg;
934 ir_node *bl = get_irg_start_block(irg);
935 ir_node *no_mem = get_irg_no_mem(irg);
936 ir_node *old_frame = get_irg_frame(irg);
937 ir_node *stack = pmap_get(env->regs, (void *) sp);
938 ir_node *frame = pmap_get(env->regs, (void *) bp);
940 int stack_nr = get_Proj_proj(stack);
942 if(flags.try_omit_fp) {
943 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
/* with a frame pointer: copy sp into bp and pin it to that register */
948 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
950 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
952 be_set_constr_single_reg(frame, -1, bp);
953 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
954 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
957 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
960 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
961 env->init_sp = stack;
962 set_irg_frame(irg, frame);
/* make all users of the old frame node use the new one */
963 edges_reroute(old_frame, frame, irg);
/*
 * Build the epilogue stack handling for one Return: shrink the stack (fp
 * omitted) or restore sp from the frame pointer, then collect the Return's
 * register operands (sp, fp, callee-save/ignore regs) on the obstack.
 */
968 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
970 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
971 const arch_register_t *sp = isa->sp;
972 const arch_register_t *bp = isa->bp;
973 ir_graph *irg = env->birg->irg;
974 ir_node *ret_mem = get_Return_mem(ret);
975 ir_node *frame = get_irg_frame(irg);
976 ir_node *bl = get_nodes_block(ret);
/* the block's final stack pointer was stored in its link field by process_calls_in_block() */
977 ir_node *stack = get_irn_link(bl);
981 if(env->call->flags.bits.try_omit_fp) {
982 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
/* restore the stack pointer from the frame pointer */
986 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
987 be_set_constr_single_reg(stack, -1, sp);
988 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
991 pmap_foreach(env->regs, ent) {
992 const arch_register_t *reg = ent->key;
993 ir_node *irn = ent->value;
996 obstack_ptr_grow(&env->obst, stack);
998 obstack_ptr_grow(&env->obst, frame);
999 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1000 obstack_ptr_grow(obst, irn);
/*
 * Build the class type describing the stack layout of the method's arguments.
 * Creates one entity per stack argument, laying them out in stack order
 * (direction depends on left_to_right flag and the ISA's stack direction)
 * with each argument's padding and alignment applied.
 */
1006 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
1008 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
1009 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
1010 int n = get_method_n_params(method_type);
/* iterate parameters forwards or backwards depending on layout direction */
1011 int curr = inc > 0 ? 0 : n - 1;
1018 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
1019 res = new_type_class(new_id_from_str(buf));
1021 for(i = 0; i < n; ++i, curr += inc) {
1022 type *param_type = get_method_param_type(method_type, curr);
1023 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
1026 snprintf(buf, sizeof(buf), "param_%d", i);
1027 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
1028 ofs += arg->space_before;
1029 ofs = round_up2(ofs, arg->alignment);
1030 set_entity_offset_bytes(arg->stack_ent, ofs);
1031 ofs += arg->space_after;
1032 ofs += get_type_size_bytes(param_type);
1036 set_type_size_bytes(res, ofs);
/*
 * Insert one Perm per register class after the RegParams node to delimit it.
 * The Perm's results replace the original register Projs in the regs map,
 * each constrained to its register.
 */
1040 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
1043 struct obstack obst;
1045 obstack_init(&obst);
1047 /* Create a Perm after the RegParams node to delimit it. */
1048 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1049 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
/* collect the non-ignore registers of this class that have a Proj */
1054 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
1055 const arch_register_t *reg = &cls->regs[j];
1056 ir_node *irn = pmap_get(regs, (void *) reg);
1058 if(irn && !arch_register_type_is(reg, ignore)) {
1060 obstack_ptr_grow(&obst, irn);
/* remember the register in the node's link field for the loop below */
1061 set_irn_link(irn, (void *) reg);
1065 obstack_ptr_grow(&obst, NULL);
1066 in = obstack_finish(&obst);
1068 perm = be_new_Perm(cls, irg, bl, n_regs, in);
1069 for(j = 0; j < n_regs; ++j) {
1070 ir_node *arg = in[j];
1071 arch_register_t *reg = get_irn_link(arg);
1072 pmap_insert(regs, reg, arg);
1073 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1076 obstack_free(&obst, in);
1079 obstack_free(&obst, NULL);
1083 const arch_register_t *reg;
/* qsort comparator for reg_node_map_t: order by register class first, then by register index. */
1087 static int cmp_regs(const void *a, const void *b)
1089 const reg_node_map_t *p = a;
1090 const reg_node_map_t *q = b;
1092 if(p->reg->reg_class == q->reg->reg_class)
1093 return p->reg->index - q->reg->index;
/* NOTE(review): compares the class pointers themselves; ordering is stable but arbitrary */
1095 return p->reg->reg_class - q->reg->reg_class;
/* Flatten a register->node pmap into an obstack-allocated array sorted by cmp_regs(). */
1098 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1101 int n = pmap_count(reg_map);
1103 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1105 pmap_foreach(reg_map, ent) {
1106 res[i].reg = ent->key;
1107 res[i].irn = ent->value;
1111 qsort(res, n, sizeof(res[0]), cmp_regs);
1116 * Creates a barrier.
 *
 * Builds a be_Barrier over all register nodes in @p regs plus the memory,
 * makes a Proj per register (pinned to that register, propagating ignore /
 * modify_sp flags) and replaces the map entries with these Projs. *mem is
 * replaced by the barrier's memory Proj.
1118 static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1120 ir_graph *irg = env->birg->irg;
1121 int n_regs = pmap_count(regs);
1127 rm = reg_map_to_arr(&env->obst, regs);
1129 for(n = 0; n < n_regs; ++n)
1130 obstack_ptr_grow(&env->obst, rm[n].irn);
/* the memory operand goes last */
1133 obstack_ptr_grow(&env->obst, *mem);
1137 in = (ir_node **) obstack_finish(&env->obst);
1138 irn = be_new_Barrier(irg, bl, n, in);
1139 obstack_free(&env->obst, in);
1141 for(n = 0; n < n_regs; ++n) {
1142 const arch_register_t *reg = rm[n].reg;
1144 int pos = BE_OUT_POS(n);
1147 proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
1148 be_node_set_reg_class(irn, n, reg->reg_class);
1150 be_set_constr_single_reg(irn, n, reg);
1151 be_set_constr_single_reg(irn, pos, reg);
1152 be_node_set_reg_class(irn, pos, reg->reg_class);
1153 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1155 /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
1156 if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
1157 flags |= arch_irn_flags_ignore;
1159 if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
1160 flags |= arch_irn_flags_modify_sp;
1162 be_node_set_flags(irn, pos, flags);
/* from now on the map yields the barrier Proj instead of the original node */
1164 pmap_insert(regs, (void *) reg, proj);
1168 *mem = new_r_Proj(irg, bl, irn, mode_M, n);
1171 obstack_free(&env->obst, rm);
1176 * Creates a be_Return for a Return node.
1178 * @param @env the abi environment
1179 * @param irn the Return node or NULL if there was none
1180 * @param bl the block where the be_Retun should be placed
1181 * @param mem the current memory
1182 * @param n_res number of return results
1184 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
1185 be_abi_call_t *call = env->call;
1186 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1188 pmap *reg_map = pmap_create();
1189 ir_node *keep = pmap_get(env->keep_map, bl);
1195 const arch_register_t **regs;
1199 get the valid stack node in this block.
1200 If we had a call in that block there is a Keep constructed by process_calls()
1201 which points to the last stack modification in that block. we'll use
1202 it then. Else we use the stack from the start block and let
1203 the ssa construction fix the usage.
1205 stack = be_abi_reg_map_get(env->regs, isa->sp);
1207 ir_node *bad = new_r_Bad(env->birg->irg);
1208 stack = get_irn_n(keep, 0);
/* detach the Keep; its only job was to carry the block's last stack node */
1209 set_nodes_block(keep, bad);
1210 set_irn_n(keep, 0, bad);
1211 // exchange(keep, new_r_Bad(env->birg->irg));
1214 /* Insert results for Return into the register map. */
1215 for(i = 0; i < n_res; ++i) {
1216 ir_node *res = get_Return_res(irn, i);
1217 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1218 assert(arg->in_reg && "return value must be passed in register");
1219 pmap_insert(reg_map, (void *) arg->reg, res);
1222 /* Add uses of the callee save registers. */
1223 pmap_foreach(env->regs, ent) {
1224 const arch_register_t *reg = ent->key;
1225 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1226 pmap_insert(reg_map, ent->key, ent->value);
1229 be_abi_reg_map_set(reg_map, isa->sp, stack);
1231 /* Make the Epilogue node and call the arch's epilogue maker. */
1232 create_barrier(env, bl, &mem, reg_map, 1);
1233 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1236 Maximum size of the in array for Return nodes is
1237 return args + callee save/ignore registers + memory + stack pointer
1239 in_max = pmap_count(reg_map) + n_res + 2;
1241 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1242 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1245 in[1] = be_abi_reg_map_get(reg_map, isa->sp);
1250 /* clear SP entry, since it has already been grown. */
1251 pmap_insert(reg_map, (void *) isa->sp, NULL);
1252 for(i = 0; i < n_res; ++i) {
1253 ir_node *res = get_Return_res(irn, i);
1254 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1256 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1257 regs[n++] = arg->reg;
1259 /* Clear the map entry to mark the register as processed. */
1260 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1263 /* grow the rest of the stuff. */
1264 pmap_foreach(reg_map, ent) {
1267 regs[n++] = ent->key;
1271 /* The in array for the new back end return is now ready. */
1272 ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
1274 /* Set the register classes of the return's parameter accordingly. */
1275 for(i = 0; i < n; ++i)
1277 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1279 /* Free the space of the Epilog's in array and the register <-> proj map. */
1280 obstack_free(&env->obst, in);
1281 pmap_destroy(reg_map);
/**
 * Modify the irg itself and the frame type.
 *
 * Visible steps in this chunk:
 * - lower frame Sels, set up the stack frame model and the register map
 * - create the RegParams node with Projs for all callee-save/ignore registers
 * - run the prologue callback, insert the initial IncSP and the barrier
 * - replace stack-passed arguments by StackParam/FrameAddr nodes
 * - rewrite all Return nodes hanging on the End block into be_Return nodes
 *
 * NOTE(review): several declarations (i, j, n, max_arg, args, rm, proj,
 * flags, nr, barrier) and a number of closing braces are not visible in
 * this chunk of the file; comments below describe only the visible code.
 */
static void modify_irg(be_abi_irg_t *env)
be_abi_call_t *call = env->call;
const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
const arch_register_t *sp = arch_isa_sp(isa);
ir_graph *irg = env->birg->irg;
ir_node *bl = get_irg_start_block(irg);
ir_node *end = get_irg_end_block(irg);
ir_node *arg_tuple = get_irg_args(irg);
ir_node *no_mem = get_irg_no_mem(irg);
ir_node *mem = get_irg_initial_mem(irg);
type *method_type = get_entity_type(get_irg_entity(irg));
pset *dont_save = pset_new_ptr(8);
int n_params = get_method_n_params(method_type);
const arch_register_t *fp_reg;
ir_node *frame_pointer;
ir_node *reg_params_bl;
const ir_edge_t *edge;
ir_type *arg_type, *bet_type;
bitset_t *used_proj_nr;
DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
/* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
env->regs = pmap_create();
/* Find the maximum proj number of the argument tuple proj */
foreach_out_edge(arg_tuple, edge) {
ir_node *irn = get_edge_src_irn(edge);
int nr = get_Proj_proj(irn);
max_arg = MAX(max_arg, nr);
/* NOTE(review): fixed bound of 1024 proj numbers — confirm it suffices. */
used_proj_nr = bitset_alloca(1024);
max_arg = MAX(max_arg + 1, n_params);
args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
memset(args, 0, max_arg * sizeof(args[0]));
/* Fill the argument vector: args[proj_nr] = Proj of Start for that argument. */
foreach_out_edge(arg_tuple, edge) {
ir_node *irn = get_edge_src_irn(edge);
int nr = get_Proj_proj(irn);
DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
/* Build the frame layout: argument type, between type and the irg's frame type. */
arg_type = compute_arg_type(env, call, method_type);
bet_type = call->cb->get_between_type(env->cb);
stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
/* Count the register params and add them to the number of Projs for the RegParams node */
for(i = 0; i < n_params; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
if(arg->in_reg && args[i]) {
assert(arg->reg != sp && "cannot use stack pointer as parameter register");
assert(i == get_Proj_proj(args[i]));
/* For now, associate the register with the old Proj from Start representing that argument. */
pmap_insert(env->regs, (void *) arg->reg, args[i]);
bitset_set(used_proj_nr, i);
DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
/* Collect all callee-save registers */
for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
for(j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = &cls->regs[j];
if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
pmap_insert(env->regs, (void *) reg, NULL);
/* The stack pointer and base pointer are always tracked as well. */
pmap_insert(env->regs, (void *) sp, NULL);
pmap_insert(env->regs, (void *) isa->bp, NULL);
reg_params_bl = get_irg_start_block(irg);
env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
/*
* make proj nodes for the callee save registers.
* memorize them, since Return nodes get those as inputs.
*
* Note, that if a register corresponds to an argument, the regs map contains
* the old Proj from start for that argument.
*/
rm = reg_map_to_arr(&env->obst, env->regs);
for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
arch_register_t *reg = (void *) rm[i].reg;
ir_node *arg_proj = rm[i].irn;
ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
int pos = BE_OUT_POS((int) nr);
bitset_set(used_proj_nr, nr);
proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
pmap_insert(env->regs, (void *) reg, proj);
be_set_constr_single_reg(env->reg_params, pos, reg);
arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
/*
* If the register is an ignore register,
* the Proj for that register shall also be ignored during register allocation.
*/
if(arch_register_type_is(reg, ignore))
flags |= arch_irn_flags_ignore;
/* NOTE(review): the guard of this statement (presumably reg == sp) is not visible here — confirm. */
flags |= arch_irn_flags_modify_sp;
be_node_set_flags(env->reg_params, pos, flags);
DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
obstack_free(&env->obst, rm);
/* Generate the Prologue */
fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
barrier = create_barrier(env, bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
/* The frame pointer Proj becomes the irg's frame; keep its register out of allocation. */
frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
set_irg_frame(irg, frame_pointer);
pset_insert_ptr(env->ignore_regs, fp_reg);
/* Now, introduce stack param nodes for all parameters passed on the stack */
for(i = 0; i < max_arg; ++i) {
ir_node *arg_proj = args[i];
ir_node *repl = NULL;
if(arg_proj != NULL) {
be_abi_call_arg_t *arg;
ir_type *param_type;
int nr = get_Proj_proj(arg_proj);
/* NOTE(review): MIN(nr, n_params) still allows nr == n_params, one past
the last valid parameter index (0..n_params-1) — confirm intent. */
nr = MIN(nr, n_params);
arg = get_call_arg(call, 0, nr);
param_type = get_method_param_type(method_type, nr);
/* Register argument: its replacement is the Proj recorded in env->regs. */
repl = pmap_get(env->regs, (void *) arg->reg);
else if(arg->on_stack) {
/* For atomic parameters which are actually used, we create a StackParam node. */
if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
ir_mode *mode = get_type_mode(param_type);
const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
/* The stack parameter is not primitive (it is a struct or array),
we thus will create a node representing the parameter's address
on the stack frame. */
repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
assert(repl != NULL);
edges_reroute(args[i], repl, irg);
/* All Return nodes hang on the End node, so look for them there. */
for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
ir_node *irn = get_Block_cfgpred(end, i);
if (is_Return(irn)) {
ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
the code is dead and will never be executed. */
del_pset(dont_save);
obstack_free(&env->obst, args);
1496 * Walker: puts all Alloc(stack_alloc) on a obstack
1498 static void collect_alloca_walker(ir_node *irn, void *data)
1500 be_abi_irg_t *env = data;
1501 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1502 obstack_ptr_grow(&env->obst, irn);
/**
 * Build the ABI environment for the given backend irg and rewrite the
 * graph for ABI conformance (call lowering, prologue/epilogue, stack
 * arguments). The caller owns the returned environment and releases it
 * with be_abi_free().
 *
 * NOTE(review): a few lines of this function are not visible in this
 * chunk — notably the assignment of env->birg (read further down), the
 * lowering/processing calls announced by the comments below, and the
 * final return of env. Confirm against the full file.
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
be_abi_irg_t *env = xmalloc(sizeof(env[0]));
ir_node *old_frame = get_irg_frame(birg->irg);
ir_graph *irg = birg->irg;
optimization_state_t state;
obstack_init(&env->obst);
env->isa = birg->main_env->arch_env->isa;
env->method_type = get_entity_type(get_irg_entity(irg));
env->call = be_abi_call_new();
/* Let the ISA fill in the calling convention for this method type. */
arch_isa_get_call_abi(env->isa, env->method_type, env->call);
env->ignore_regs = pset_new_ptr_default();
env->keep_map = pmap_create();
env->dce_survivor = new_survive_dce();
env->stack_phis = pset_new_ptr(16);
/* Beware: later we replace this node by the real one, ensure it is not CSE'd
to another Unknown or the stack pointer gets used */
save_optimization_state(&state);
env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
restore_optimization_state(&state);
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
/* Create the ABI callback self pointer. */
env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
/* Process the IRG */
/* We don't need the keep map anymore. */
pmap_destroy(env->keep_map);
/* reroute the stack origin of the calls to the true stack origin. */
edges_reroute(dummy, env->init_sp, irg);
edges_reroute(old_frame, get_irg_frame(irg), irg);
/* Make some important node pointers survive the dead node elimination. */
survive_dce_register_irn(env->dce_survivor, &env->init_sp);
pmap_foreach(env->regs, ent)
survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
/* Tell the ABI callbacks that graph setup is done. */
env->call->cb->done(env->cb);
1565 void be_abi_free(be_abi_irg_t *env)
1567 free_survive_dce(env->dce_survivor);
1568 del_pset(env->stack_phis);
1569 del_pset(env->ignore_regs);
1570 pmap_destroy(env->regs);
1571 obstack_free(&env->obst, NULL);
1572 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1576 void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
1578 arch_register_t *reg;
1580 for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
1581 if(reg->reg_class == cls)
1582 bitset_set(bs, reg->index);
1589 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1590 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1591 | _| | |> < ___) | || (_| | (__| <
1592 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/**
 * Walker environment for collect_stack_nodes_walker() below.
 * NOTE(review): the 'nodes' pset member the walker inserts into is not
 * visible in this chunk — confirm against the full file.
 */
struct fix_stack_walker_info {
const arch_env_t *aenv; /**< arch env used for the modify_sp query */
1602 * Walker. Collect all stack modifying nodes.
1604 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1606 struct fix_stack_walker_info *info = data;
1608 if(arch_irn_is(info->aenv, irn, modify_sp))
1609 pset_insert_ptr(info->nodes, irn);
1612 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1614 dom_front_info_t *df;
1615 pset *stack_nodes = pset_new_ptr(16);
1616 struct fix_stack_walker_info info;
1618 info.nodes = stack_nodes;
1619 info.aenv = env->birg->main_env->arch_env;
1621 /* We need dominance frontiers for fix up */
1622 df = be_compute_dominance_frontiers(env->birg->irg);
1623 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
1624 pset_insert_ptr(stack_nodes, env->init_sp);
1625 be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1626 del_pset(stack_nodes);
1628 /* Liveness could have changed due to Phi nodes. */
1629 be_liveness(env->birg->irg);
1631 /* free these dominance frontiers */
1632 be_free_dominance_frontiers(df);
1636 * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
1637 * into -1 or 1, respectively.
1638 * @param irn The node.
1639 * @return 1, if the direction of the IncSP was along, -1 if against.
1641 static int get_dir(ir_node *irn)
1643 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
/**
 * Walk the schedule of block @p bl and fix stack offsets.
 *
 * Visible behavior: for every IncSP, the symbolic BE_STACK_FRAME_SIZE
 * offset is replaced by the real frame type size; for every node with a
 * frame entity, the entity's true offset (including the running bias)
 * is set on the node.
 *
 * NOTE(review): the lines that accumulate the bias from ofs/dir, the use
 * of omit_fp, and the final return of the bias are not visible in this
 * chunk — confirm against the full file.
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
const arch_env_t *aenv = env->birg->main_env->arch_env;
int omit_fp = env->call->flags.bits.try_omit_fp;
sched_foreach(bl, irn) {
/*
If the node modifies the stack pointer by a constant offset,
record that in the bias.
*/
if(be_is_IncSP(irn)) {
int ofs = be_get_IncSP_offset(irn);
int dir = get_dir(irn);
/* Resolve the symbolic frame size to the real frame type size. */
if(ofs == BE_STACK_FRAME_SIZE) {
ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
/*
Else check, if the node relates to an entity on the stack frame.
If so, set the true offset (including the bias) for that node.
*/
entity *ent = arch_get_frame_entity(aenv, irn);
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(aenv, irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1690 * A helper struct for the bias walker.
1693 be_abi_irg_t *env; /**< The ABI irg environment. */
1694 int start_block_bias; /**< The bias at the end of the start block. */
1698 * Block-Walker: fix all stack offsets
1700 static void stack_bias_walker(ir_node *bl, void *data)
1702 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1703 struct bias_walk *bw = data;
1704 process_stack_bias(bw->env, bl, bw->start_block_bias);
1708 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1710 ir_graph *irg = env->birg->irg;
1711 struct bias_walk bw;
1713 stack_frame_compute_initial_offset(env->frame);
1714 // stack_frame_dump(stdout, env->frame);
1716 /* Determine the stack bias at the end of the start block. */
1717 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1719 /* fix the bias is all other blocks */
1721 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1724 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1726 assert(arch_register_type_is(reg, callee_save));
1727 assert(pmap_contains(abi->regs, (void *) reg));
1728 return pmap_get(abi->regs, (void *) reg);
1732 _____ _____ _ _ _ _ _ _
1733 |_ _| __ \| \ | | | | | | | | |
1734 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1735 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1736 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1737 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1739 for Phi nodes which are created due to stack modifying nodes
1740 such as IncSP, AddSP and SetSP.
1742 These Phis are always to be ignored by the reg alloc and are
1743 fixed on the SP register of the ISA.
1746 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1748 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1749 const void *res = NULL;
1751 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
1752 res = &abi->irn_ops;
1757 static void be_abi_limited(void *data, bitset_t *bs)
1759 be_abi_irg_t *abi = data;
1760 bitset_clear_all(bs);
1761 bitset_set(bs, abi->isa->sp->index);
/**
 * ABI irn-ops: register requirements for the stack Phis.
 *
 * The single output (BE_OUT_POS(0)) is limited to the stack pointer via
 * the be_abi_limited() callback; every input merely requires the SP's
 * register class.
 *
 * NOTE(review): the closing braces and the final return of @p req are
 * not visible in this chunk — confirm against the full file.
 */
static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
be_abi_irg_t *abi = get_abi_from_ops(self);
const arch_register_t *reg = abi->isa->sp;
memset(req, 0, sizeof(req[0]));
if(pos == BE_OUT_POS(0)) {
/* The result must be assigned the stack pointer register itself. */
req->cls = reg->reg_class;
req->type = arch_register_req_type_limited;
req->limited = be_abi_limited;
req->limited_env = abi;
else if(pos >= 0 && pos < get_irn_arity(irn)) {
/* Inputs only need to live in the SP's register class. */
req->cls = reg->reg_class;
req->type = arch_register_req_type_normal;
1786 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1790 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1792 const be_abi_irg_t *abi = get_abi_from_ops(self);
1793 return abi->isa->sp;
1796 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1798 return arch_irn_class_normal;
1801 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1803 return arch_irn_flags_ignore | arch_irn_flags_modify_sp;
1806 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1811 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
1815 static const arch_irn_ops_if_t abi_irn_ops = {
1816 abi_get_irn_reg_req,
1821 abi_get_frame_entity,
1825 static const arch_irn_handler_t abi_irn_handler = {