4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
32 #include "besched_t.h"
/* NOTE(review): classic min/max macros -- both evaluate each argument
 * twice, so MAX(i++, j) is unsafe.  Use only with side-effect-free
 * arguments. */
34 #define MAX(x, y) ((x) > (y) ? (x) : (y))
35 #define MIN(x, y) ((x) < (y) ? (x) : (y))
37 typedef struct _be_abi_call_arg_t {
40 unsigned on_stack : 1;
43 const arch_register_t *reg;
46 unsigned space_before;
50 struct _be_abi_call_t {
51 be_abi_call_flags_t flags;
52 const be_abi_callbacks_t *cb;
57 #define N_FRAME_TYPES 3
59 typedef struct _be_stack_frame_t {
64 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
70 struct _be_stack_slot_t {
71 struct _be_stack_frame_t *frame;
75 struct _be_abi_irg_t {
77 firm_dbg_module_t *dbg; /**< The debugging module. */
78 be_stack_frame_t *frame; /**< The stack frame model. */
79 const be_irg_t *birg; /**< The back end IRG. */
80 const arch_isa_t *isa; /**< The isa. */
81 survive_dce_t *dce_survivor;
83 be_abi_call_t *call; /**< The ABI call information. */
84 type *method_type; /**< The type of the method of the IRG. */
86 ir_node *init_sp; /**< The node representing the stack pointer
87 at the start of the function. */
89 ir_node *reg_params; /**< The reg params node. */
90 pmap *regs; /**< A map of all callee-save and ignore regs to
91 their Projs to the RegParams node. */
93 pset *stack_phis; /**< The set of all Phi nodes inserted due to
94 stack pointer modifying nodes. */
96 int start_block_bias; /**< The stack bias at the end of the start block. */
98 void *cb; /**< ABI Callback self pointer. */
100 arch_irn_handler_t irn_handler;
101 arch_irn_ops_t irn_ops;
104 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
105 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
107 /* Forward, since we need it in be_abi_introduce(). */
108 static const arch_irn_ops_if_t abi_irn_ops;
109 static const arch_irn_handler_t abi_irn_handler;
112 _ ____ ___ ____ _ _ _ _
113 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
114 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
115 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
116 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
118 These callbacks are used by the backend to set the parameters
119 for a specific call type.
122 static int cmp_call_arg(const void *a, const void *b, size_t n)
124 const be_abi_call_arg_t *p = a, *q = b;
125 return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Retrieve (or, if do_insert is non-zero, create) the argument descriptor
 * for the argument/result at the given position.
 * NOTE(review): the hash is_res * 100 + pos assumes pos < 100 for
 * collision-freedom -- TODO confirm no method exceeds 100 parameters.
 */
128 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
130 be_abi_call_arg_t arg;
/* zero the key template so the set's compare/copy sees a clean entry */
133 memset(&arg, 0, sizeof(arg));
137 hash = is_res * 100 + pos;
/* either insert a fresh zeroed entry or merely look one up */
140 ? set_insert(call->params, &arg, sizeof(arg), hash)
141 : set_find(call->params, &arg, sizeof(arg), hash);
144 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
146 return get_or_set_call_arg(call, is_res, pos, 0);
149 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/**
 * Record that the argument at arg_pos is passed on the stack, with the
 * given alignment and padding before/after it.
 * NOTE(review): the alignment assert runs only after the fields have
 * already been stored -- it should precede the stores; confirm and move.
 */
155 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
157 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
159 arg->alignment = alignment;
160 arg->space_before = space_before;
161 arg->space_after = space_after;
162 assert(alignment > 0 && "Alignment must be greater than 0");
165 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
167 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
172 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
174 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
179 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/**
 * Allocate a new ABI call description with an empty parameter set.
 * Ownership transfers to the caller; release with be_abi_call_free().
 * (xmalloc presumably aborts on OOM, so no NULL check here -- confirm.)
 */
184 be_abi_call_t *be_abi_call_new(void)
186 be_abi_call_t *call = xmalloc(sizeof(call[0]));
188 call->params = new_set(cmp_call_arg, 16);
/**
 * Free an ABI call description created by be_abi_call_new().
 * Releases the parameter set; the call struct itself is presumably freed
 * in the elided remainder of this function -- verify against the
 * original source.
 */
193 void be_abi_call_free(be_abi_call_t *call)
195 del_set(call->params);
201 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
202 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
203 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
204 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
207 Handling of the stack frame. It is composed of three types:
208 1) The type of the arguments which are pushed on the stack.
209 2) The "between type" which consists of stuff the call of the
210 function pushes on the stack (like the return address and
211 the old base pointer for ia32).
212 3) The Firm frame type which consists of all local variables
/**
 * Compute the offset of a stack-frame entity relative to the frame
 * pointer, taking the frame layout order and a bias into account.
 * NOTE(review): if the entity's owner type is not one of the three frame
 * types, index ends at N_FRAME_TYPES and the size loop sums all types --
 * presumably callers only pass frame entities; TODO confirm.
 */
216 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
218 type *t = get_entity_owner(ent);
219 int ofs = get_entity_offset_bytes(ent);
223 /* Find the type the entity is contained in. */
224 for(index = 0; index < N_FRAME_TYPES; ++index) {
225 if(frame->order[index] == t)
229 /* Add the size of all the types below the one of the entity to the entity's offset */
230 for(i = 0; i < index; ++i)
231 ofs += get_type_size_bytes(frame->order[i]);
233 /* correct the offset by the initial position of the frame pointer */
234 ofs -= frame->initial_offset;
236 /* correct the offset with the current bias. */
/**
 * Linear search for the class member of t located at the given byte
 * offset.  Presumably returns NULL when no member matches (the return
 * path is elided here) -- confirm before relying on it.
 */
242 static entity *search_ent_with_offset(type *t, int offset)
246 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
247 entity *ent = get_class_member(t, i);
248 if(get_entity_offset_bytes(ent) == offset)
/**
 * Determine the initial frame-pointer offset: the position of the
 * entity at offset 0 in the base type (between type for downward
 * growing stacks, frame type otherwise).
 * NOTE(review): if no entity sits at offset 0, ent is NULL and
 * get_stack_entity_offset() would dereference it -- TODO confirm the
 * base type always has such an entity.
 */
255 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
257 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
258 entity *ent = search_ent_with_offset(base, 0);
/* reset first so the recursive offset computation starts unbiased */
259 frame->initial_offset = 0;
260 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
261 return frame->initial_offset;
/**
 * Initialize the stack frame model from the three component types.
 * The between type always occupies the middle slot; args and locals
 * swap ends depending on the stack growth direction (the guarding
 * if/else lines are elided in this view -- verify against the original).
 */
264 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
266 frame->arg_type = args;
267 frame->between_type = between;
268 frame->frame_type = locals;
269 frame->initial_offset = 0;
270 frame->stack_dir = stack_dir;
271 frame->order[1] = between;
274 frame->order[0] = args;
275 frame->order[2] = locals;
279 frame->order[0] = locals;
280 frame->order[2] = args;
286 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
290 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
291 for(j = 0; j < N_FRAME_TYPES; ++j) {
292 type *t = frame->order[j];
294 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
295 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
296 entity *ent = get_class_member(t, i);
297 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
303 * If irn is a Sel node computing the address of an entity
304 * on the frame type return the entity, else NULL.
/**
 * If irn is a Sel node addressing an entity on the frame of its own
 * graph, return that entity; otherwise (presumably, via the elided
 * fall-through) return NULL.
 */
306 static INLINE entity *get_sel_ent(ir_node *irn)
308 if(get_irn_opcode(irn) == iro_Sel
309 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
311 return get_Sel_entity(irn);
318 * Walker: Replaces Loads, Stores and Sels of frame type entities
319 * by FrameLoad, FrameStore and FrameAddress.
321 static void lower_frame_sels_walker(ir_node *irn, void *data)
324 entity *ent = get_sel_ent(irn);
327 be_abi_irg_t *env = data;
328 ir_node *bl = get_nodes_block(irn);
329 ir_graph *irg = get_irn_irg(bl);
330 ir_node *frame = get_irg_frame(irg);
332 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
/**
 * @return non-zero if the call argument at pos was recorded and is not
 * passed in a register (i.e. it lives on the stack).
 */
339 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
341 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
342 return arg && !arg->in_reg;
352 Adjustment of the calls inside a graph.
357 * Transform a call node.
358 * @param env The ABI environment for the current irg.
359 * @param irn The call node.
360 * @param curr_sp The stack pointer node to use.
361 * @return The stack pointer after the call.
363 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
365 ir_graph *irg = env->birg->irg;
366 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
367 be_abi_call_t *call = be_abi_call_new();
368 ir_type *mt = get_Call_type(irn);
369 ir_node *call_ptr = get_Call_ptr(irn);
370 int n_params = get_method_n_params(mt);
371 ir_node *curr_mem = get_Call_mem(irn);
372 ir_node *bl = get_nodes_block(irn);
373 pset *results = pset_new_ptr(8);
374 pset *caller_save = pset_new_ptr(8);
376 int stack_dir = arch_isa_stack_dir(isa);
377 const arch_register_t *sp = arch_isa_sp(isa);
378 ir_mode *mach_mode = sp->reg_class->mode;
379 struct obstack *obst = &env->obst;
380 ir_node *no_mem = get_irg_no_mem(irg);
381 int no_alloc = call->flags.bits.frame_is_setup_on_call;
383 ir_node *res_proj = NULL;
384 int curr_res_proj = pn_Call_max;
391 const ir_edge_t *edge;
396 /* Let the isa fill out the abi description for that call node. */
397 arch_isa_get_call_abi(isa, mt, call);
399 /* Insert code to put the stack arguments on the stack. */
400 assert(get_Call_n_params(irn) == n_params);
401 for(i = 0; i < n_params; ++i) {
402 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
405 stack_size += arg->space_before;
406 stack_size = round_up2(stack_size, arg->alignment);
407 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
408 stack_size += arg->space_after;
409 obstack_int_grow(obst, i);
413 pos = obstack_finish(obst);
415 /* Collect all arguments which are passed in registers. */
416 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
417 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
418 if(arg && arg->in_reg) {
419 obstack_int_grow(obst, i);
423 low_args = obstack_finish(obst);
425 /* If there are some parameters which shall be passed on the stack. */
428 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
430 /* Reverse list of stack parameters if call arguments are from left to right */
431 if(call->flags.bits.left_to_right) {
432 for(i = 0; i < n_pos / 2; ++i) {
433 int other = n_pos - i - 1;
441 * If the stack is decreasing and we do not want to store sequentially,
442 * or someone else allocated the call frame
443 * we allocate as much space on the stack all parameters need, by
444 * moving the stack pointer along the stack's direction.
446 if(stack_dir < 0 && !do_seq && !no_alloc) {
447 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
450 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
451 for(i = 0; i < n_pos; ++i) {
453 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
454 ir_node *param = get_Call_param(irn, p);
455 ir_node *addr = curr_sp;
457 type *param_type = get_method_param_type(mt, p);
458 int param_size = get_type_size_bytes(param_type) + arg->space_after;
460 curr_ofs += arg->space_before;
461 curr_ofs = round_up2(curr_ofs, arg->alignment);
463 /* Make the expression to compute the argument's offset. */
465 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
466 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
469 /* Insert a store for primitive arguments. */
470 if(is_atomic_type(param_type)) {
471 mem = new_r_Store(irg, bl, curr_mem, addr, param);
472 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
475 /* Make a mem copy for compound arguments. */
477 assert(mode_is_reference(get_irn_mode(param)));
478 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
479 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
482 obstack_ptr_grow(obst, mem);
484 curr_ofs += param_size;
487 * If we wanted to build the arguments sequentially,
488 * the stack pointer for the next must be incremented,
489 * and the memory value propagated.
493 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
498 in = (ir_node **) obstack_finish(obst);
500 /* We need the sync only, if we didn't build the stores sequentially. */
502 curr_mem = new_r_Sync(irg, bl, n_pos, in);
503 obstack_free(obst, in);
506 /* Collect caller save registers */
507 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
509 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
510 for(j = 0; j < cls->n_regs; ++j) {
511 const arch_register_t *reg = arch_register_for_index(cls, j);
512 if(arch_register_type_is(reg, caller_save))
513 pset_insert_ptr(caller_save, (void *) reg);
517 /* search the greatest result proj number */
518 foreach_out_edge(irn, edge) {
519 const ir_edge_t *res_edge;
520 ir_node *irn = get_edge_src_irn(edge);
522 if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
524 foreach_out_edge(irn, res_edge) {
526 be_abi_call_arg_t *arg;
527 ir_node *res = get_edge_src_irn(res_edge);
529 assert(is_Proj(res));
531 proj = get_Proj_proj(res);
532 arg = get_call_arg(call, 1, proj);
535 shift the proj number to the right, since we will drop the
536 unspeakable Proj_T from the Call. Therefore, all real argument
537 Proj numbers must be increased by pn_Call_max
540 set_Proj_proj(res, proj);
541 obstack_ptr_grow(obst, res);
543 if(proj > curr_res_proj)
544 curr_res_proj = proj;
546 pset_remove_ptr(caller_save, arg->reg);
547 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
554 obstack_ptr_grow(obst, NULL);
555 res_projs = obstack_finish(obst);
557 /* make the back end call node and set its register requirements. */
558 for(i = 0; i < n_low_args; ++i)
559 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
561 in = obstack_finish(obst);
563 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
564 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, curr_sp, curr_res_proj + pset_count(caller_save), n_low_args, in);
565 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
569 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, call_ptr, curr_res_proj + pset_count(caller_save), n_low_args, in);
571 set_irn_dbg_info(low_call, get_irn_dbg_info(irn));
575 Set the register class of the call address to the same as the stack pointer's.
576 That's probably buggy for some architectures.
578 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
580 /* Set the register classes and constraints of the Call parameters. */
581 for(i = 0; i < n_low_args; ++i) {
582 int index = low_args[i];
583 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
584 assert(arg->reg != NULL);
586 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
589 /* Set the register constraints of the results. */
590 for(i = 0; res_projs[i]; ++i) {
591 ir_node *irn = res_projs[i];
592 int proj = get_Proj_proj(irn);
594 /* Correct Proj number since it has been adjusted! (see above) */
595 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
598 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
600 obstack_free(obst, in);
601 exchange(irn, low_call);
603 /* redirect the result projs to the lowered call instead of the Proj_T */
604 for(i = 0; res_projs[i]; ++i)
605 set_Proj_pred(res_projs[i], low_call);
607 /* Make additional projs for the caller save registers
608 and the Keep node which keeps them alive. */
609 if(pset_count(caller_save) > 0) {
610 const arch_register_t *reg;
614 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
615 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
617 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
618 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
619 set_irn_link(proj, (void *) reg);
620 obstack_ptr_grow(obst, proj);
624 in = (ir_node **) obstack_finish(obst);
625 keep = be_new_Keep(NULL, irg, bl, n, in);
626 for(i = 0; i < n; ++i) {
627 const arch_register_t *reg = get_irn_link(in[i]);
628 be_node_set_reg_class(keep, i, reg->reg_class);
630 obstack_free(obst, in);
633 /* Clean up the stack. */
635 ir_node *mem_proj = NULL;
637 foreach_out_edge(low_call, edge) {
638 ir_node *irn = get_edge_src_irn(edge);
639 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
646 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
648 /* Clean up the stack frame if we allocated it */
650 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_against);
653 be_abi_call_free(call);
654 obstack_free(obst, pos);
656 del_pset(caller_save);
663 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
665 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
667 if(get_Alloc_where(alloc) == stack_alloc) {
668 ir_node *bl = get_nodes_block(alloc);
669 ir_graph *irg = get_irn_irg(bl);
670 ir_node *alloc_mem = NULL;
671 ir_node *alloc_res = NULL;
673 const ir_edge_t *edge;
676 env->call->flags.bits.try_omit_fp = 0;
678 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
680 foreach_out_edge(alloc, edge) {
681 ir_node *irn = get_edge_src_irn(edge);
683 assert(is_Proj(irn));
684 switch(get_Proj_proj(irn)) {
696 assert(alloc_res != NULL);
697 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
699 if(alloc_mem != NULL)
700 exchange(alloc_mem, new_r_NoMem(irg));
709 * Walker for dependent_on().
710 * This function searches a node tgt recursively from a given node
711 * but is restricted to the given block.
712 * @return 1 if tgt was reachable from curr, 0 if not.
/**
 * Depth-first search restricted to block bl, using the visited counter
 * to avoid revisiting nodes.
 * @return 1 if tgt is reachable from curr (the comparison against tgt
 * itself is in an elided line), 0 otherwise.
 */
714 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
/* already seen in this traversal -> prune */
718 if(get_irn_visited(curr) >= visited_nr)
721 set_irn_visited(curr, visited_nr);
/* do not leave the block we are restricted to */
722 if(get_nodes_block(curr) != bl)
728 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
729 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
737 * Check if a node is somehow data dependent on another one.
738 * both nodes must be in the same basic block.
739 * @param n1 The first node.
740 * @param n2 The second node.
741 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
/**
 * Check whether n1 is (transitively) data dependent on n2.  Both nodes
 * must live in the same basic block.
 * NOTE(review): vis_nr is declared long but check_dependence() takes
 * unsigned long -- harmless for realistic counter values, but the types
 * should agree.
 */
743 static int dependent_on(ir_node *n1, ir_node *n2)
745 ir_node *bl = get_nodes_block(n1);
746 ir_graph *irg = get_irn_irg(bl);
/* bump the graph's visited counter to get a fresh traversal epoch */
747 long vis_nr = get_irg_visited(irg) + 1;
749 assert(bl == get_nodes_block(n2));
750 set_irg_visited(irg, vis_nr);
751 return check_dependence(n1, n2, bl, vis_nr);
/**
 * qsort() comparison ordering call nodes by data dependency: a node
 * that depends on another sorts before it.
 * NOTE(review): the identifier misspells "dependency"; renaming would
 * touch the qsort() call site, so it is kept as-is here.
 */
754 static int cmp_call_dependecy(const void *c1, const void *c2)
756 ir_node *n1 = *(ir_node **) c1;
757 ir_node *n2 = *(ir_node **) c2;
760 Classical qsort() comparison function behavior:
761 0 if both elements are equal
762 1 if second is "smaller" than first
763 -1 if first is "smaller" than second
765 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
768 static void link_calls_in_block_walker(ir_node *irn, void *data)
771 be_abi_irg_t *env = data;
772 ir_node *bl = get_nodes_block(irn);
773 void *save = get_irn_link(bl);
775 env->call->flags.bits.irg_is_leaf = 0;
777 set_irn_link(irn, save);
778 set_irn_link(bl, irn);
783 * Process all call nodes inside a basic block.
784 * Note that the link field of the block must contain a linked list of all
785 * Call nodes inside the block. We first order this list according to data dependency
786 and then connect the calls together.
788 static void process_calls_in_block(ir_node *bl, void *data)
790 be_abi_irg_t *env = data;
791 ir_node *curr_sp = env->init_sp;
795 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
796 obstack_ptr_grow(&env->obst, irn);
798 /* If there were call nodes in the block. */
803 nodes = obstack_finish(&env->obst);
805 /* order the call nodes according to data dependency */
806 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
808 for(i = n - 1; i >= 0; --i) {
809 ir_node *irn = nodes[i];
811 switch(get_irn_opcode(irn)) {
813 curr_sp = adjust_call(env, irn, curr_sp);
816 curr_sp = adjust_alloc(env, irn, curr_sp);
823 obstack_free(&env->obst, nodes);
825 /* Keep the last stack state in the block by tying it to Keep node */
827 be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
830 set_irn_link(bl, curr_sp);
834 * Adjust all call nodes in the graph to the ABI conventions.
/**
 * Adjust all Call nodes in the graph to the ABI conventions:
 * first collect the calls per block (which also clears the leaf flag
 * when any call is found), then lower them block by block.
 */
836 static void process_calls(be_abi_irg_t *env)
838 ir_graph *irg = env->birg->irg;
/* optimistically assume a leaf; link_calls_in_block_walker resets this */
840 env->call->flags.bits.irg_is_leaf = 1;
841 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
842 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
845 static void collect_return_walker(ir_node *irn, void *data)
847 if(get_irn_opcode(irn) == iro_Return) {
848 struct obstack *obst = data;
849 obstack_ptr_grow(obst, irn);
853 static ir_node *setup_frame(be_abi_irg_t *env)
855 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
856 const arch_register_t *sp = isa->sp;
857 const arch_register_t *bp = isa->bp;
858 be_abi_call_flags_bits_t flags = env->call->flags.bits;
859 ir_graph *irg = env->birg->irg;
860 ir_node *bl = get_irg_start_block(irg);
861 ir_node *no_mem = get_irg_no_mem(irg);
862 ir_node *old_frame = get_irg_frame(irg);
863 ir_node *stack = pmap_get(env->regs, (void *) sp);
864 ir_node *frame = pmap_get(env->regs, (void *) bp);
866 int stack_nr = get_Proj_proj(stack);
868 if(flags.try_omit_fp) {
869 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
874 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
876 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
878 be_set_constr_single_reg(frame, -1, bp);
879 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
880 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
883 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_along);
886 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
887 env->init_sp = stack;
888 set_irg_frame(irg, frame);
889 edges_reroute(old_frame, frame, irg);
894 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
896 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
897 const arch_register_t *sp = isa->sp;
898 const arch_register_t *bp = isa->bp;
899 ir_graph *irg = env->birg->irg;
900 ir_node *ret_mem = get_Return_mem(ret);
901 ir_node *frame = get_irg_frame(irg);
902 ir_node *bl = get_nodes_block(ret);
903 ir_node *stack = get_irn_link(bl);
907 if(env->call->flags.bits.try_omit_fp) {
908 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
912 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
913 be_set_constr_single_reg(stack, -1, sp);
914 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
917 pmap_foreach(env->regs, ent) {
918 const arch_register_t *reg = ent->key;
919 ir_node *irn = ent->value;
922 obstack_ptr_grow(&env->obst, stack);
924 obstack_ptr_grow(&env->obst, frame);
925 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
926 obstack_ptr_grow(obst, irn);
930 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
932 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
933 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
934 int n = get_method_n_params(method_type);
935 int curr = inc > 0 ? 0 : n - 1;
942 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
943 res = new_type_class(new_id_from_str(buf));
945 for(i = 0; i < n; ++i, curr += inc) {
946 type *param_type = get_method_param_type(method_type, curr);
947 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
950 snprintf(buf, sizeof(buf), "param_%d", i);
951 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
952 ofs += arg->space_before;
953 ofs = round_up2(ofs, arg->alignment);
954 set_entity_offset_bytes(arg->stack_ent, ofs);
955 ofs += arg->space_after;
956 ofs += get_type_size_bytes(param_type);
960 set_type_size_bytes(res, ofs);
964 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
971 /* Create a Perm after the RegParams node to delimit it. */
972 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
973 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
978 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
979 const arch_register_t *reg = &cls->regs[j];
980 ir_node *irn = pmap_get(regs, (void *) reg);
982 if(irn && !arch_register_type_is(reg, ignore)) {
984 obstack_ptr_grow(&obst, irn);
985 set_irn_link(irn, (void *) reg);
989 obstack_ptr_grow(&obst, NULL);
990 in = obstack_finish(&obst);
992 perm = be_new_Perm(cls, irg, bl, n_regs, in);
993 for(j = 0; j < n_regs; ++j) {
994 ir_node *arg = in[j];
995 arch_register_t *reg = get_irn_link(arg);
996 pmap_insert(regs, reg, arg);
997 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1000 obstack_free(&obst, in);
1003 obstack_free(&obst, NULL);
1007 const arch_register_t *reg;
1011 static int cmp_regs(const void *a, const void *b)
1013 const reg_node_map_t *p = a;
1014 const reg_node_map_t *q = b;
1016 if(p->reg->reg_class == q->reg->reg_class)
1017 return p->reg->index - q->reg->index;
1019 return p->reg->reg_class - q->reg->reg_class;
1022 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1025 int n = pmap_count(reg_map);
1027 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1029 pmap_foreach(reg_map, ent) {
1030 res[i].reg = ent->key;
1031 res[i].irn = ent->value;
1035 qsort(res, n, sizeof(res[0]), cmp_regs);
1039 static void create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1041 ir_graph *irg = env->birg->irg;
1043 int n_regs = pmap_count(regs);
1048 rm = reg_map_to_arr(&env->obst, regs);
1050 for(n = 0; n < n_regs; ++n)
1051 obstack_ptr_grow(&env->obst, rm[n].irn);
1054 obstack_ptr_grow(&env->obst, *mem);
1058 in = (ir_node **) obstack_finish(&env->obst);
1059 irn = be_new_Barrier(env->birg->irg, bl, n, in);
1060 obstack_free(&env->obst, in);
1062 for(n = 0; n < n_regs; ++n) {
1063 int pos = BE_OUT_POS(n);
1065 const arch_register_t *reg = rm[n].reg;
1067 proj = new_r_Proj(env->birg->irg, bl, irn, get_irn_mode(rm[n].irn), n);
1068 be_node_set_reg_class(irn, n, reg->reg_class);
1070 be_set_constr_single_reg(irn, n, reg);
1071 be_set_constr_single_reg(irn, pos, reg);
1072 be_node_set_reg_class(irn, pos, reg->reg_class);
1073 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1074 if(arch_register_type_is(reg, ignore))
1075 be_node_set_flags(irn, pos, arch_irn_flags_ignore);
1077 pmap_insert(regs, (void *) reg, proj);
1081 *mem = new_r_Proj(env->birg->irg, bl, irn, mode_M, n);
1084 obstack_free(&env->obst, rm);
1088 * Modify the irg itself and the frame type.
1090 static void modify_irg(be_abi_irg_t *env)
1092 firm_dbg_module_t *dbg = env->dbg;
1093 be_abi_call_t *call = env->call;
1094 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1095 const arch_register_t *sp = arch_isa_sp(isa);
1096 ir_graph *irg = env->birg->irg;
1097 ir_node *bl = get_irg_start_block(irg);
1098 ir_node *end = get_irg_end_block(irg);
1099 ir_node *arg_tuple = get_irg_args(irg);
1100 ir_node *no_mem = get_irg_no_mem(irg);
1101 ir_node *mem = get_irg_initial_mem(irg);
1102 type *method_type = get_entity_type(get_irg_entity(irg));
1103 pset *dont_save = pset_new_ptr(8);
1104 pmap *reg_proj_map = pmap_create();
1105 int n_params = get_method_n_params(method_type);
1112 const arch_register_t *fp_reg;
1113 ir_node *frame_pointer;
1114 ir_node *reg_params_bl;
1116 const ir_edge_t *edge;
1117 ir_type *arg_type, *bet_type;
1120 bitset_t *used_proj_nr;
1122 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1124 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1125 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
1127 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1128 env->regs = pmap_create();
1130 /* Find the maximum proj number of the argument tuple proj */
1131 foreach_out_edge(arg_tuple, edge) {
1132 ir_node *irn = get_edge_src_irn(edge);
1133 int nr = get_Proj_proj(irn);
1134 max_arg = MAX(max_arg, nr);
1136 max_arg = MAX(max_arg + 1, n_params);
1137 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
1138 memset(args, 0, max_arg * sizeof(args[0]));
1139 used_proj_nr = bitset_alloca(1024);
1141 /* Fill the argument vector */
1142 foreach_out_edge(arg_tuple, edge) {
1143 ir_node *irn = get_edge_src_irn(edge);
1144 int nr = get_Proj_proj(irn);
1146 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1149 arg_type = compute_arg_type(env, call, method_type);
1150 bet_type = call->cb->get_between_type(env->cb);
1151 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1153 /* Count the register params and add them to the number of Projs for the RegParams node */
1154 for(i = 0; i < n_params; ++i) {
1155 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1156 if(arg->in_reg && args[i]) {
1157 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1158 assert(i == get_Proj_proj(args[i]));
1160 /* For now, associate the register with the old Proj from Start representing that argument. */
1161 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1162 bitset_set(used_proj_nr, i);
1163 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1167 /* Collect all callee-save registers */
1168 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1169 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1170 for(j = 0; j < cls->n_regs; ++j) {
1171 const arch_register_t *reg = &cls->regs[j];
1172 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1173 pmap_insert(env->regs, (void *) reg, NULL);
1177 pmap_insert(env->regs, (void *) sp, NULL);
1178 pmap_insert(env->regs, (void *) isa->bp, NULL);
1179 reg_params_bl = get_irg_start_block(irg);
1180 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1183 * make proj nodes for the callee save registers.
1184 * memorize them, since Return nodes get those as inputs.
1186 * Note, that if a register corresponds to an argument, the regs map contains
1187 * the old Proj from start for that argument.
1190 rm = reg_map_to_arr(&env->obst, env->regs);
1191 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1192 arch_register_t *reg = (void *) rm[i].reg;
1193 ir_node *arg_proj = rm[i].irn;
1195 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1197 int pos = BE_OUT_POS((int) nr);
1200 bitset_set(used_proj_nr, nr);
1201 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1202 pmap_insert(env->regs, (void *) reg, proj);
1203 be_set_constr_single_reg(env->reg_params, pos, reg);
1204 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1207 * If the register is an ignore register,
1208 * The Proj for that register shall also be ignored during register allocation.
1210 if(arch_register_type_is(reg, ignore))
1211 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1213 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1215 obstack_free(&env->obst, rm);
1217 /* Generate the Prologue */
1218 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1219 create_barrier(env, bl, &mem, env->regs, 0);
1221 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1222 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
1223 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1224 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1225 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1226 set_irg_frame(irg, frame_pointer);
1228 /* Now, introduce stack param nodes for all parameters passed on the stack */
1229 for(i = 0; i < max_arg; ++i) {
1230 ir_node *arg_proj = args[i];
1231 ir_node *repl = NULL;
1233 if(arg_proj != NULL) {
1234 be_abi_call_arg_t *arg;
1235 ir_type *param_type;
1236 int nr = get_Proj_proj(arg_proj);
1238 nr = MIN(nr, n_params);
1239 arg = get_call_arg(call, 0, nr);
1240 param_type = get_method_param_type(method_type, nr);
1243 repl = pmap_get(env->regs, (void *) arg->reg);
1246 else if(arg->on_stack) {
1247 /* For atomic parameters which are actually used, we create a StackParam node. */
1248 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1249 ir_mode *mode = get_type_mode(param_type);
1250 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1251 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1254 /* The stack parameter is not primitive (it is a struct or array),
1255 we thus will create a node representing the parameter's address
1258 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1262 assert(repl != NULL);
1263 edges_reroute(args[i], repl, irg);
1267 /* All Return nodes hang on the End node, so look for them there. */
1268 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1269 ir_node *irn = get_irn_n(end, i);
1271 if(get_irn_opcode(irn) == iro_Return) {
1272 ir_node *bl = get_nodes_block(irn);
1273 int n_res = get_Return_n_ress(irn);
1274 pmap *reg_map = pmap_create();
1275 ir_node *mem = get_Return_mem(irn);
1280 const arch_register_t **regs;
1282 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1284 /* Insert results for Return into the register map. */
1285 for(i = 0; i < n_res; ++i) {
1286 ir_node *res = get_Return_res(irn, i);
1287 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1288 assert(arg->in_reg && "return value must be passed in register");
1289 pmap_insert(reg_map, (void *) arg->reg, res);
1292 /* Add uses of the callee save registers. */
1293 pmap_foreach(env->regs, ent) {
1294 const arch_register_t *reg = ent->key;
1295 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1296 pmap_insert(reg_map, ent->key, ent->value);
1299 /* Make the Epilogue node and call the arch's epilogue maker. */
1300 create_barrier(env, bl, &mem, reg_map, 1);
1301 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1304 Maximum size of the in array for Return nodes is
1305 return args + callee save/ignore registers + memory + stack pointer
1307 in_max = pmap_count(reg_map) + get_Return_n_ress(irn) + 2;
1309 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1310 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1313 in[1] = be_abi_reg_map_get(reg_map, sp);
1318 /* clear SP entry, since it has already been grown. */
1319 pmap_insert(reg_map, (void *) sp, NULL);
1320 for(i = 0; i < n_res; ++i) {
1321 ir_node *res = get_Return_res(irn, i);
1322 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1324 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1325 regs[n++] = arg->reg;
1327 /* Clear the map entry to mark the register as processed. */
1328 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1331 /* grow the rest of the stuff. */
1332 pmap_foreach(reg_map, ent) {
1335 regs[n++] = ent->key;
1339 /* The in array for the new back end return is now ready. */
1340 ret = be_new_Return(irg, bl, n, in);
1342 /* Set the register classes of the return's parameter accordingly. */
1343 for(i = 0; i < n; ++i)
1345 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1347 /* Free the space of the Epilog's in array and the register <-> proj map. */
1348 obstack_free(&env->obst, in);
1350 pmap_destroy(reg_map);
1354 del_pset(dont_save);
1355 obstack_free(&env->obst, args);
* Walker: puts all Alloc(stack_alloc) on an obstack
1361 static void collect_alloca_walker(ir_node *irn, void *data)
1363 be_abi_irg_t *env = data;
1364 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1365 obstack_ptr_grow(&env->obst, irn);
/**
 * Set up the ABI infrastructure for a back end IRG: query the calling
 * convention from the ISA, install the ABI irn handler/ops and introduce
 * the ABI-specific nodes into the graph.
 *
 * @param birg  The back end IRG to process.
 * @return      A freshly allocated ABI environment; the caller owns it
 *              and must release it with be_abi_free().
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
be_abi_irg_t *env = xmalloc(sizeof(env[0]));
ir_node *old_frame = get_irg_frame(birg->irg);
ir_graph *irg = birg->irg;
env->isa = birg->main_env->arch_env->isa;
env->method_type = get_entity_type(get_irg_entity(irg));
env->call = be_abi_call_new();
/* Let the ISA fill in the calling convention for this method type. */
arch_isa_get_call_abi(env->isa, env->method_type, env->call);
env->dce_survivor = new_survive_dce();
env->dbg = firm_dbg_register("firm.be.abi");
env->stack_phis = pset_new_ptr(16);
/* Use an Unknown node as placeholder for the stack pointer; its uses are
   rerouted to the real initial stack pointer below. */
env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
/* Give the ABI callbacks a chance to set up their private state. */
env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
obstack_init(&env->obst);
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
/* Process the IRG */
/* reroute the stack origin of the calls to the true stack origin. */
edges_reroute(dummy, env->init_sp, irg);
edges_reroute(old_frame, get_irg_frame(irg), irg);
/* Make some important node pointers survive the dead node elimination. */
survive_dce_register_irn(env->dce_survivor, &env->init_sp);
pmap_foreach(env->regs, ent)
survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
/* Tell the callbacks that the graph transformation is done. */
env->call->cb->done(env->cb);
/**
 * Release all resources held by an ABI environment and unregister its
 * irn handler from the arch environment.
 *
 * @param env  The ABI environment created by be_abi_introduce().
 */
void be_abi_free(be_abi_irg_t *env)
free_survive_dce(env->dce_survivor);
del_pset(env->stack_phis);
pmap_destroy(env->regs);
obstack_free(&env->obst, NULL);
arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1431 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1432 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1433 | _| | |> < ___) | || (_| | (__| <
1434 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/**
 * Walker callback: collect all stack pointer modifying nodes
 * (AddSP, IncSP, SetSP) into the pset passed as walker data.
 */
static void collect_stack_nodes_walker(ir_node *irn, void *data)
if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
pset_insert_ptr(s, irn);
/**
 * Put the stack pointer modifying nodes into SSA form: collect all
 * AddSP/IncSP/SetSP nodes (plus the initial stack pointer), construct
 * Phi nodes for them using dominance frontiers and record those Phis
 * in env->stack_phis so the ABI irn handler can recognize them.
 *
 * @param env  The ABI environment.
 */
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
dom_front_info_t *df;
/* We need dominance frontiers for fix up */
df = be_compute_dominance_frontiers(env->birg->irg);
stack_nodes = pset_new_ptr(16);
pset_insert_ptr(stack_nodes, env->init_sp);
irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
del_pset(stack_nodes);
/* Liveness could have changed due to Phi nodes. */
be_liveness(env->birg->irg);
/* free these dominance frontiers */
be_free_dominance_frontiers(df);
1467 * Translates a direction of an IncSP node (either be_stack_dir_against, or ...along)
1468 * into -1 or 1, respectively.
1469 * @param irn The node.
1470 * @return 1, if the direction of the IncSP was along, -1 if against.
1472 static int get_dir(ir_node *irn)
1474 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
/**
 * Walk over the schedule of one block, tracking the stack pointer bias
 * caused by IncSP nodes and assigning the real frame offset (including
 * the current bias) to every node which references a stack frame entity.
 *
 * @param env   The ABI environment.
 * @param bl    The block whose schedule is processed.
 * @param bias  The stack bias at the entry of the block.
 * @return      The bias at the end of the block — used as the start bias
 *              of the non-start blocks in be_abi_fix_stack_bias().
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
const arch_env_t *aenv = env->birg->main_env->arch_env;
int start_bias = bias;
int omit_fp = env->call->flags.bits.try_omit_fp;
sched_foreach(bl, irn) {
If the node modifies the stack pointer by a constant offset,
record that in the bias.
if(be_is_IncSP(irn)) {
int ofs = be_get_IncSP_offset(irn);
int dir = get_dir(irn);
/* BE_STACK_FRAME_SIZE is a placeholder: substitute the real frame size. */
if(ofs == BE_STACK_FRAME_SIZE) {
ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
Else check, if the node relates to an entity on the stack frame.
If so, set the true offset (including the bias) for that
entity *ent = arch_get_frame_entity(aenv, irn);
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(aenv, irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1522 * A helper struct for the bias walker.
1525 be_abi_irg_t *env; /**< The ABI irg environment. */
1526 int start_block_bias; /**< The bias at the end of the start block. */
1529 static void stack_bias_walker(ir_node *bl, void *data)
1531 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1532 struct bias_walk *bw = data;
1533 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Fix the stack bias for all nodes in the IRG: compute the initial
 * frame entity offsets, process the start block with bias 0 and then
 * propagate the resulting bias into all other blocks via a block walk.
 *
 * @param env  The ABI environment.
 */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
ir_graph *irg = env->birg->irg;
struct bias_walk bw;
stack_frame_compute_initial_offset(env->frame);
// stack_frame_dump(stdout, env->frame);
/* Determine the stack bias at the end of the start block. */
bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
/* fix the bias in all other blocks */
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
/**
 * Retrieve the node representing the value of a callee-save register at
 * the start of the function (its Proj of the RegParams node).
 *
 * @param abi  The ABI environment.
 * @param reg  The register; must be callee-save and present in abi->regs.
 * @return     The Proj associated with @p reg.
 */
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
assert(arch_register_type_is(reg, callee_save));
assert(pmap_contains(abi->regs, (void *) reg));
return pmap_get(abi->regs, (void *) reg);
1561 _____ _____ _ _ _ _ _ _
1562 |_ _| __ \| \ | | | | | | | | |
1563 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1564 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1565 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1566 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1568 for Phi nodes which are created due to stack modifying nodes
1569 such as IncSP, AddSP and SetSP.
1571 These Phis are always to be ignored by the reg alloc and are
1572 fixed on the SP register of the ISA.
/**
 * irn handler hook: Phi nodes which were inserted for stack pointer
 * fixup (recorded in abi->stack_phis) are served by the ABI's own
 * irn_ops; for all other nodes the result stays NULL so that other
 * handlers take over.
 */
static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
const be_abi_irg_t *abi = get_abi_from_handler(handler);
const void *res = NULL;
if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
res = &abi->irn_ops;
1586 static void be_abi_limited(void *data, bitset_t *bs)
1588 be_abi_irg_t *abi = data;
1589 bitset_clear_all(bs);
1590 bitset_set(bs, abi->isa->sp->index);
/**
 * Register requirement hook for the stack pointer Phis: the single
 * output is limited to the ISA's stack pointer register (enforced via
 * be_abi_limited); the inputs merely require the SP's register class.
 */
static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
be_abi_irg_t *abi = get_abi_from_ops(self);
const arch_register_t *reg = abi->isa->sp;
memset(req, 0, sizeof(req[0]));
if(pos == BE_OUT_POS(0)) {
req->cls = reg->reg_class;
req->type = arch_register_req_type_limited;
req->limited = be_abi_limited;
req->limited_env = abi;
else if(pos >= 0 && pos < get_irn_arity(irn)) {
req->cls = reg->reg_class;
req->type = arch_register_req_type_normal;
/* NOTE(review): register-assignment hook. The nodes handled by these ops
   are fixed to the stack pointer (cf. abi_get_irn_reg), so this is
   presumably a no-op — TODO confirm. */
static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1619 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1621 const be_abi_irg_t *abi = get_abi_from_ops(self);
1622 return abi->isa->sp;
1625 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1627 return arch_irn_class_normal;
1630 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1632 return arch_irn_flags_ignore;
/* Frame-entity hook; the stack pointer Phis carry no frame entity
   (presumably returns NULL — TODO confirm). */
static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
/* Stack-bias hook; presumably a no-op for the stack pointer Phis —
   TODO confirm. */
static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* The irn_ops vtable installed for stack pointer Phi nodes. */
static const arch_irn_ops_if_t abi_irn_ops = {
abi_get_irn_reg_req,
abi_get_frame_entity,
1654 static const arch_irn_handler_t abi_irn_handler = {