4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
32 #include "besched_t.h"
/* NOTE(review): classic function-like min/max macros — both arguments are
 * evaluated twice, so never pass expressions with side effects, e.g. MAX(i++, j). */
34 #define MAX(x, y) ((x) > (y) ? (x) : (y))
35 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/* Describes where one call parameter or result lives: on the stack or in a register. */
37 typedef struct _be_abi_call_arg_t {
/* non-zero if this argument is passed on the stack */
40 unsigned on_stack : 1;
/* the register holding the value, when passed/returned in a register */
43 const arch_register_t *reg;
/* Per-call ABI description: flags, backend callbacks and the set of argument records. */
48 struct _be_abi_call_t {
49 be_abi_call_flags_t flags;
50 const be_abi_callbacks_t *cb;
/* Number of sub-types a stack frame is composed of: arg, between and frame type. */
55 #define N_FRAME_TYPES 3
/* Model of the stack frame layout, built from the three frame sub-types. */
57 typedef struct _be_stack_frame_t {
62 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
/* A slot within a stack frame; points back to the owning frame model. */
68 struct _be_stack_slot_t {
69 struct _be_stack_frame_t *frame;
/* Per-irg ABI lowering environment: carries the stack frame model, the register
 * maps and the backend hooks used while rewriting Calls, Allocs and Returns. */
73 struct _be_abi_irg_t {
75 firm_dbg_module_t *dbg; /**< The debugging module. */
76 be_stack_frame_t *frame; /**< The stack frame model. */
77 const be_irg_t *birg; /**< The back end IRG. */
78 const arch_isa_t *isa; /**< The isa. */
79 survive_dce_t *dce_survivor;
81 be_abi_call_t *call; /**< The ABI call information. */
82 type *method_type; /**< The type of the method of the IRG. */
84 ir_node *init_sp; /**< The node representing the stack pointer
85 at the start of the function. */
87 ir_node *reg_params; /**< The reg params node. */
88 pmap *regs; /**< A map of all callee-save and ignore regs to
89 their Projs to the RegParams node. */
91 pset *stack_phis; /**< The set of all Phi nodes inserted due to
92 stack pointer modifying nodes. */
94 int start_block_bias; /**< The stack bias at the end of the start block. */
96 void *cb; /**< ABI Callback self pointer. */
98 arch_irn_handler_t irn_handler;
99 arch_irn_ops_t irn_ops;
/* Recover the enclosing be_abi_irg_t from a pointer to one of its embedded members
 * (container_of idiom over the irn_handler / irn_ops fields above). */
102 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
103 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
105 /* Forward declarations, since we need them in be_abi_introduce(). */
106 static const arch_irn_ops_if_t abi_irn_ops;
107 static const arch_irn_handler_t abi_irn_handler;
110 _ ____ ___ ____ _ _ _ _
111 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
112 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
113 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
114 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
116 These callbacks are used by the backend to set the parameters
117 for a specific call type.
/* set compare callback for call-argument records: two entries are considered
 * equal iff they agree on both is_res and pos (returns 0 on equality, per the
 * firm set_* convention; the size parameter n is unused). */
120 static int cmp_call_arg(const void *a, const void *b, size_t n)
122 const be_abi_call_arg_t *p = a, *q = b;
123 return !(p->is_res == q->is_res && p->pos == q->pos);
/* Look up (do_insert == 0) or create (do_insert != 0) the argument record for
 * the given position; is_res selects between parameter and result records.
 * The hash is a simple linear combination of is_res and pos. */
126 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
128 be_abi_call_arg_t arg;
131 memset(&arg, 0, sizeof(arg));
135 hash = is_res * 100 + pos;
138 ? set_insert(call->params, &arg, sizeof(arg), hash)
139 : set_find(call->params, &arg, sizeof(arg), hash);
/* Lookup-only convenience wrapper; never inserts, so it may return NULL. */
142 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
144 return get_or_set_call_arg(call, is_res, pos, 0);
/* Record the call flags and the backend callback table for this call description. */
147 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/* Mark parameter arg_pos as passed on the stack with the given byte alignment. */
153 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment)
155 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
157 arg->alignment = alignment;
/* NOTE(review): this assert fires only after the alignment was already stored;
 * checking before the assignment would fail earlier with the same message. */
158 assert(alignment > 0 && "Alignment must be greater than 0");
/* Mark parameter arg_pos as passed in the given register. */
161 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
163 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Mark result arg_pos as returned in the given register. */
168 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
170 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/* Accessor for the flags previously stored with be_abi_call_set_flags(). */
175 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/* Allocate a fresh call description with an empty argument set.
 * Ownership passes to the caller; release with be_abi_call_free(). */
180 be_abi_call_t *be_abi_call_new(void)
182 be_abi_call_t *call = xmalloc(sizeof(call[0]));
184 call->params = new_set(cmp_call_arg, 16);
/* Free a call description created by be_abi_call_new(), including its argument set. */
189 void be_abi_call_free(be_abi_call_t *call)
191 del_set(call->params);
197 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
198 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
199 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
200 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
203 Handling of the stack frame. It is composed of three types:
204 1) The type of the arguments which are pushed on the stack.
205 2) The "between type" which consists of stuff the call of the
206 function pushes on the stack (like the return address and
207 the old base pointer for ia32).
208 3) The Firm frame type which consists of all local variables
/* Compute the offset of entity ent relative to the stack pointer: its offset
 * inside its owning frame sub-type, plus the sizes of all sub-types laid out
 * below it, corrected by the frame's initial offset and the given bias. */
212 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
214 type *t = get_entity_owner(ent);
215 int ofs = get_entity_offset_bytes(ent);
219 /* Find the type the entity is contained in. */
220 for(index = 0; index < N_FRAME_TYPES; ++index) {
221 if(frame->order[index] == t)
225 /* Add the size of all the types below the one of the entity to the entity's offset */
226 for(i = 0; i < index; ++i)
227 ofs += get_type_size_bytes(frame->order[i]);
229 /* correct the offset by the initial position of the frame pointer */
230 ofs -= frame->initial_offset;
232 /* correct the offset with the current bias. */
/* Linear search for the class member of t placed at the given byte offset.
 * NOTE(review): the fall-through result when no member matches is not visible
 * here — presumably NULL; callers should be prepared for that. */
238 static entity *search_ent_with_offset(type *t, int offset)
242 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
243 entity *ent = get_class_member(t, i);
244 if(get_entity_offset_bytes(ent) == offset)
/* Compute and cache the frame's initial offset: the stack-relative position of
 * the entity at offset 0 in the base type (between type for downward-growing
 * stacks, frame type otherwise).  initial_offset is reset to 0 first so the
 * nested get_stack_entity_offset() call runs without a prior correction. */
251 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
253 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
254 entity *ent = search_ent_with_offset(base, 0);
255 frame->initial_offset = 0;
256 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
257 return frame->initial_offset;
/* Initialize a frame model from its three sub-types.  The between type always
 * sits in the middle; the relative order of arg and local types depends on the
 * stack growth direction (both orderings appear below). */
260 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
262 frame->arg_type = args;
263 frame->between_type = between;
264 frame->frame_type = locals;
265 frame->initial_offset = 0;
266 frame->stack_dir = stack_dir;
267 frame->order[1] = between;
/* one stack direction: arguments below, locals above the between type */
270 frame->order[0] = args;
271 frame->order[2] = locals;
/* the opposite direction: locals below, arguments above */
275 frame->order[0] = locals;
276 frame->order[2] = args;
/* Debug helper: dump the frame layout — each sub-type with its size and every
 * member entity with both its local and its stack-global offset. */
282 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
286 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
287 for(j = 0; j < N_FRAME_TYPES; ++j) {
288 type *t = frame->order[j];
290 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
291 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
292 entity *ent = get_class_member(t, i);
293 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
299 * If irn is a Sel node computing the address of an entity
300 * on the frame type return the entity, else NULL.
302 static INLINE entity *get_sel_ent(ir_node *irn)
/* only Sels whose pointer operand is this graph's frame node qualify */
304 if(get_irn_opcode(irn) == iro_Sel
305 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
307 return get_Sel_entity(irn);
314 * Walker: Replaces Loads, Stores and Sels of frame type entities
315 * by FrameLoad, FrameStore and FrameAdress.
317 static void lower_frame_sels_walker(ir_node *irn, void *data)
/* ent is non-NULL only for Sels addressing a frame entity (see get_sel_ent) */
320 entity *ent = get_sel_ent(irn);
323 be_abi_irg_t *env = data;
324 ir_node *bl = get_nodes_block(irn);
325 ir_graph *irg = get_irn_irg(bl);
326 ir_node *frame = get_irg_frame(irg);
/* build the backend FrameAddr node in the stack pointer's register class */
328 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
/* Returns non-zero iff parameter pos is passed on the stack (i.e. a record
 * exists for it and it is not a register argument). */
335 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
337 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
338 return arg && !arg->in_reg;
348 Adjustment of the calls inside a graph.
353 * Transform a call node.
354 * @param env The ABI environment for the current irg.
355 * @param irn The call node.
356 * @param curr_sp The stack pointer node to use.
357 * @return The stack pointer after the call.
/* Lower one Call node to the backend calling convention: store stack arguments,
 * wire register arguments, renumber result Projs, add caller-save Projs/Keep
 * and clean the stack up afterwards.  Returns the stack pointer after the call. */
359 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
361 ir_graph *irg = env->birg->irg;
362 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
363 be_abi_call_t *call = be_abi_call_new();
364 ir_type *mt = get_Call_type(irn);
365 ir_node *call_ptr = get_Call_ptr(irn);
366 int n_params = get_method_n_params(mt);
367 ir_node *curr_mem = get_Call_mem(irn);
368 ir_node *bl = get_nodes_block(irn);
/* NOTE(review): `results` is allocated here but neither used nor freed in the
 * visible lines — possibly dead or handled in lines not shown; verify. */
369 pset *results = pset_new_ptr(8);
370 pset *caller_save = pset_new_ptr(8);
372 int stack_dir = arch_isa_stack_dir(isa);
373 const arch_register_t *sp = arch_isa_sp(isa);
374 ir_mode *mach_mode = sp->reg_class->mode;
375 struct obstack *obst = &env->obst;
376 ir_node *no_mem = get_irg_no_mem(irg);
378 ir_node *res_proj = NULL;
/* result Projs are renumbered to start above pn_Call_max (see comment below) */
379 int curr_res_proj = pn_Call_max;
386 const ir_edge_t *edge;
391 /* Let the isa fill out the abi description for that call node. */
392 arch_isa_get_call_abi(isa, mt, call);
394 /* Insert code to put the stack arguments on the stack. */
395 assert(get_Call_n_params(irn) == n_params);
396 for(i = 0; i < n_params; ++i) {
397 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
400 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
401 obstack_int_grow(obst, i)

;
405 pos = obstack_finish(obst);
407 /* Collect all arguments which are passed in registers. */
408 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
409 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
410 if(arg && arg->in_reg) {
411 obstack_int_grow(obst, i);
415 low_args = obstack_finish(obst);
417 /* If there are some parameters which shall be passed on the stack. */
420 int do_seq = call->flags.bits.store_args_sequential;
422 /* Reverse list of stack parameters if call arguments are from left to right */
423 if(call->flags.bits.left_to_right) {
424 for(i = 0; i < n_pos / 2; ++i) {
425 int other = n_pos - i - 1;
433 * If the stack is decreasing and we do not want to store sequentially,
434 * we allocate as much space on the stack all parameters need, by
435 * moving the stack pointer along the stack's direction.
437 if(stack_dir < 0 && !do_seq) {
438 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
441 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
442 for(i = 0; i < n_pos; ++i) {
444 ir_node *param = get_Call_param(irn, p);
445 ir_node *addr = curr_sp;
447 type *param_type = get_method_param_type(mt, p);
448 int param_size = get_type_size_bytes(param_type);
450 /* Make the expression to compute the argument's offset. */
452 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
453 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
456 /* Insert a store for primitive arguments. */
457 if(is_atomic_type(param_type)) {
458 mem = new_r_Store(irg, bl, curr_mem, addr, param);
459 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
462 /* Make a memcopy for compound arguments. */
464 assert(mode_is_reference(get_irn_mode(param)));
465 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
466 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
469 obstack_ptr_grow(obst, mem);
471 curr_ofs += param_size;
474 * If we wanted to build the arguments sequentially,
475 * the stack pointer for the next must be incremented,
476 * and the memory value propagated.
480 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
485 in = (ir_node **) obstack_finish(obst);
487 /* We need the sync only, if we didn't build the stores sequentially. */
489 curr_mem = new_r_Sync(irg, bl, n_pos, in);
490 obstack_free(obst, in);
493 /* Collect caller save registers */
494 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
496 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
497 for(j = 0; j < cls->n_regs; ++j) {
498 const arch_register_t *reg = arch_register_for_index(cls, j);
499 if(arch_register_type_is(reg, caller_save))
500 pset_insert_ptr(caller_save, (void *) reg);
504 /* search the greatest result proj number */
505 foreach_out_edge(irn, edge) {
506 const ir_edge_t *res_edge;
/* inner irn shadows the Call parameter irn: this is the mode_T result Proj */
507 ir_node *irn = get_edge_src_irn(edge);
509 if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
511 foreach_out_edge(irn, res_edge) {
513 be_abi_call_arg_t *arg;
514 ir_node *res = get_edge_src_irn(res_edge);
516 assert(is_Proj(res));
518 proj = get_Proj_proj(res);
519 arg = get_call_arg(call, 1, proj);
522 shift the proj number to the right, since we will drop the
523 unspeakable Proj_T from the Call. Therefore, all real argument
524 Proj numbers must be increased by pn_Call_max
527 set_Proj_proj(res, proj);
528 obstack_ptr_grow(obst, res);
530 if(proj > curr_res_proj)
531 curr_res_proj = proj;
/* a result bound to a register is produced by the call itself, so that
 * register is not clobbered and must leave the caller-save set */
533 pset_remove_ptr(caller_save, arg->reg);
534 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
541 obstack_ptr_grow(obst, NULL);
542 res_projs = obstack_finish(obst);
544 /* make the back end call node and set its register requirements. */
545 for(i = 0; i < n_low_args; ++i)
546 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
548 in = obstack_finish(obst);
/* direct call through a SymConst: fold the target into the Call as an entity */
550 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
551 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, curr_sp, curr_res_proj + pset_count(caller_save), n_low_args, in);
552 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
556 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, call_ptr, curr_res_proj + pset_count(caller_save), n_low_args, in);
560 Set the register class of the call address to the same as the stack pointer's.
561 That's probably buggy for some architectures.
563 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
565 /* Set the register classes and constraints of the Call parameters. */
566 for(i = 0; i < n_low_args; ++i) {
567 int index = low_args[i];
568 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
569 assert(arg->reg != NULL);
571 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
574 /* Set the register constraints of the results. */
575 for(i = 0; res_projs[i]; ++i) {
576 ir_node *irn = res_projs[i];
577 int proj = get_Proj_proj(irn);
579 /* Correct Proj number since it has been adjusted! (see above) */
580 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
583 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
585 obstack_free(obst, in);
586 exchange(irn, low_call);
588 /* redirect the result projs to the lowered call instead of the Proj_T */
589 for(i = 0; res_projs[i]; ++i)
590 set_Proj_pred(res_projs[i], low_call);
592 /* Make additional projs for the caller save registers
593 and the Keep node which keeps them alive. */
594 if(pset_count(caller_save) > 0) {
595 const arch_register_t *reg;
599 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
600 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
602 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
603 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
604 set_irn_link(proj, (void *) reg);
605 obstack_ptr_grow(obst, proj);
609 in = (ir_node **) obstack_finish(obst);
610 keep = be_new_Keep(NULL, irg, bl, n, in);
611 for(i = 0; i < n; ++i) {
612 const arch_register_t *reg = get_irn_link(in[i]);
613 be_node_set_reg_class(keep, i, reg->reg_class);
615 obstack_free(obst, in);
618 /* Clean up the stack. */
620 ir_node *mem_proj = NULL;
/* reuse the call's memory Proj if one already exists */
622 foreach_out_edge(low_call, edge) {
623 ir_node *irn = get_edge_src_irn(edge);
624 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
631 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
633 /* Make a Proj for the stack pointer. */
634 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_against);
637 be_abi_call_free(call);
638 obstack_free(obst, pos);
640 del_pset(caller_save);
647 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
649 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
/* only stack Allocs are lowered; heap Allocs pass through untouched */
651 if(get_Alloc_where(alloc) == stack_alloc) {
652 ir_node *bl = get_nodes_block(alloc);
653 ir_graph *irg = get_irn_irg(bl);
654 ir_node *alloc_mem = NULL;
655 ir_node *alloc_res = NULL;
657 const ir_edge_t *edge;
/* a dynamic stack allocation forces the function to keep a frame pointer */
660 env->call->flags.bits.try_omit_fp = 0;
662 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
/* pick up the Alloc's memory and result Projs for rewiring */
664 foreach_out_edge(alloc, edge) {
665 ir_node *irn = get_edge_src_irn(edge);
667 assert(is_Proj(irn));
668 switch(get_Proj_proj(irn)) {
680 assert(alloc_res != NULL);
/* the allocated address depends on the stack growth direction */
681 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
683 if(alloc_mem != NULL)
684 exchange(alloc_mem, new_r_NoMem(irg));
693 * Walker for dependent_on().
694 * This function searches a node tgt recursively from a given node
695 * but is restricted to the given block.
696 * @return 1 if tgt was reachable from curr, 0 if not.
/* Depth-first search from curr towards tgt, restricted to block bl and pruned
 * with the irg visited counter (visited_nr marks nodes seen in this query). */
698 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
702 if(get_irn_visited(curr) >= visited_nr)
705 set_irn_visited(curr, visited_nr);
/* stop at the block boundary: dependencies are only tracked inside bl */
706 if(get_nodes_block(curr) != bl)
712 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
713 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
721 * Check if a node is somehow data dependent on another one.
722 * both nodes must be in the same basic block.
723 * @param n1 The first node.
724 * @param n2 The second node.
725 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
727 static int dependent_on(ir_node *n1, ir_node *n2)
729 ir_node *bl = get_nodes_block(n1);
730 ir_graph *irg = get_irn_irg(bl);
/* bump the graph's visited counter so this query sees a fresh marking */
731 long vis_nr = get_irg_visited(irg) + 1;
733 assert(bl == get_nodes_block(n2));
734 set_irg_visited(irg, vis_nr);
735 return check_dependence(n1, n2, bl, vis_nr);
/* qsort comparator ordering call nodes by data dependency (a node that the
 * other depends on sorts later).  NOTE(review): the name misspells
 * "dependency" — kept as-is since it is referenced by qsort callers. */
738 static int cmp_call_dependecy(const void *c1, const void *c2)
740 ir_node *n1 = *(ir_node **) c1;
741 ir_node *n2 = *(ir_node **) c2;
744 Classical qsort() comparison function behavior:
745 0 if both elements are equal
746 1 if second is "smaller" than first
747 -1 if first is "smaller" than second
749 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
/* Walker: thread interesting nodes onto a per-block linked list via the link
 * fields (block's link points at the list head, each node's link at the next),
 * and clear the leaf-function flag since the graph contains calls. */
752 static void link_calls_in_block_walker(ir_node *irn, void *data)
755 be_abi_irg_t *env = data;
756 ir_node *bl = get_nodes_block(irn);
757 void *save = get_irn_link(bl);
759 env->call->flags.bits.irg_is_leaf = 0;
/* prepend irn to the block's list */
761 set_irn_link(irn, save);
762 set_irn_link(bl, irn);
767 * Process all call nodes inside a basic block.
768 * Note that the link field of the block must contain a linked list of all
769 * Call nodes inside the block. We first order this list according to data dependency
770 and then connect the calls together.
/* Process the linked list of Call/Alloc nodes collected in this block's link
 * field: order them by data dependency, lower each one while threading the
 * stack pointer through, then keep the final stack state alive and store it
 * in the block's link for the epilogue code. */
772 static void process_calls_in_block(ir_node *bl, void *data)
774 be_abi_irg_t *env = data;
775 ir_node *curr_sp = env->init_sp;
/* gather the block's list into an array on the obstack */
779 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
780 obstack_ptr_grow(&env->obst, irn);
782 /* If there were call nodes in the block. */
787 nodes = obstack_finish(&env->obst);
789 /* order the call nodes according to data dependency */
790 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* iterate from the end: the sort puts dependent nodes first */
792 for(i = n - 1; i >= 0; --i) {
793 ir_node *irn = nodes[i];
795 switch(get_irn_opcode(irn)) {
797 curr_sp = adjust_call(env, irn, curr_sp);
800 curr_sp = adjust_alloc(env, irn, curr_sp);
807 obstack_free(&env->obst, nodes);
809 /* Keep the last stack state in the block by tying it to Keep node */
811 be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
814 set_irn_link(bl, curr_sp);
818 * Adjust all call nodes in the graph to the ABI conventions.
820 static void process_calls(be_abi_irg_t *env)
822 ir_graph *irg = env->birg->irg;
/* assume a leaf function; the walker clears this when it finds a call */
824 env->call->flags.bits.irg_is_leaf = 1;
825 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
826 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
/* Walker: collect every Return node of the graph onto the given obstack. */
829 static void collect_return_walker(ir_node *irn, void *data)
831 if(get_irn_opcode(irn) == iro_Return) {
832 struct obstack *obst = data;
833 obstack_ptr_grow(obst, irn);
/* Build the function prologue's stack/frame plumbing in the start block:
 * grow the stack by the (yet symbolic) frame size and, unless the frame
 * pointer is omitted, materialize a frame pointer copy pinned to bp.
 * The new frame node replaces the graph's old frame node. */
837 static ir_node *setup_frame(be_abi_irg_t *env)
839 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
840 const arch_register_t *sp = isa->sp;
841 const arch_register_t *bp = isa->bp;
842 be_abi_call_flags_bits_t flags = env->call->flags.bits;
843 ir_graph *irg = env->birg->irg;
844 ir_node *bl = get_irg_start_block(irg);
845 ir_node *no_mem = get_irg_no_mem(irg);
846 ir_node *old_frame = get_irg_frame(irg);
/* the RegParams Projs for sp/bp were registered in env->regs earlier */
847 ir_node *stack = pmap_get(env->regs, (void *) sp);
848 ir_node *frame = pmap_get(env->regs, (void *) bp);
850 int stack_nr = get_Proj_proj(stack);
852 if(flags.try_omit_fp) {
853 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
/* with a frame pointer: copy the stack pointer, pin the copy to bp and
 * mark it unspillable/ignored by the register allocator */
858 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
860 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
862 be_set_constr_single_reg(frame, -1, bp);
863 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
864 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
867 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_along);
/* NOTE(review): -(stack_nr + 1) presumably encodes an output position of the
 * RegParams node — confirm against be_node_set_flags' position convention */
870 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
871 env->init_sp = stack;
872 set_irg_frame(irg, frame);
873 edges_reroute(old_frame, frame, irg);
/* Build the epilogue stack handling before a Return: shrink the stack (or
 * restore it from the frame pointer when one is used) and collect the values
 * the Return must consume (stack pointer, frame pointer, callee-save/ignore
 * registers) on the given obstack. */
878 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
880 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
881 const arch_register_t *sp = isa->sp;
882 const arch_register_t *bp = isa->bp;
883 ir_graph *irg = env->birg->irg;
884 ir_node *ret_mem = get_Return_mem(ret);
885 ir_node *frame = get_irg_frame(irg);
886 ir_node *bl = get_nodes_block(ret);
/* the block's link holds the stack state left by process_calls_in_block() */
887 ir_node *stack = get_irn_link(bl);
891 if(env->call->flags.bits.try_omit_fp) {
892 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
/* with a frame pointer: reset sp from the frame pointer value */
896 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
897 be_set_constr_single_reg(stack, -1, sp);
898 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
901 pmap_foreach(env->regs, ent) {
902 const arch_register_t *reg = ent->key;
903 ir_node *irn = ent->value;
/* NOTE(review): two branches grow &env->obst while the last grows the obst
 * parameter — verify callers always pass obst == &env->obst, else this mixes
 * two different obstacks */
906 obstack_ptr_grow(&env->obst, stack);
908 obstack_ptr_grow(&env->obst, frame);
909 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
910 obstack_ptr_grow(obst, irn);
/* Build the class type describing the stack-argument area of the method:
 * one entity per parameter, laid out in stack order (direction derives from
 * the calling convention and the stack growth direction), each aligned per
 * its argument record.  The resulting type's size is the total area size. */
914 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
916 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
917 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
918 int n = get_method_n_params(method_type);
/* walk the parameters forwards or backwards depending on layout direction */
919 int curr = inc > 0 ? 0 : n - 1;
926 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
927 res = new_type_class(new_id_from_str(buf));
929 for(i = 0; i < n; ++i, curr += inc) {
930 type *param_type = get_method_param_type(method_type, curr);
931 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
934 snprintf(buf, sizeof(buf), "param_%d", i);
/* remember the entity in the arg record so parameter accesses can find it */
935 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
936 ofs = round_up2(ofs, arg->alignment);
937 set_entity_offset_bytes(arg->stack_ent, ofs);
938 ofs += get_type_size_bytes(param_type);
942 set_type_size_bytes(res, ofs);
/* For each register class, insert a Perm over all non-ignore registers that
 * have a node in regs, pin each Perm output to its register, and update the
 * regs map so it now points at the Perm outputs instead of the originals. */
946 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
953 /* Create a Perm after the RegParams node to delimit it. */
954 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
955 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
960 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
961 const arch_register_t *reg = &cls->regs[j];
962 ir_node *irn = pmap_get(regs, (void *) reg);
964 if(irn && !arch_register_type_is(reg, ignore)) {
/* stash the register in the node's link so it can be recovered below */
966 obstack_ptr_grow(&obst, irn);
967 set_irn_link(irn, (void *) reg);
971 obstack_ptr_grow(&obst, NULL);
972 in = obstack_finish(&obst);
974 perm = be_new_Perm(cls, irg, bl, n_regs, in);
975 for(j = 0; j < n_regs; ++j) {
976 ir_node *arg = in[j];
977 arch_register_t *reg = get_irn_link(arg);
/* redirect the map entry to the Perm's output for this register */
978 pmap_insert(regs, reg, arg);
979 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
982 obstack_free(&obst, in);
985 obstack_free(&obst, NULL);
/* (register, node) pair used to turn the regs pmap into a sortable array */
989 const arch_register_t *reg;
/* qsort comparator: order by register class first, then register index.
 * NOTE(review): the class comparison subtracts two pointers — this yields a
 * stable but arbitrary class order; confirm that is intended. */
993 static int cmp_regs(const void *a, const void *b)
995 const reg_node_map_t *p = a;
996 const reg_node_map_t *q = b;
998 if(p->reg->reg_class == q->reg->reg_class)
999 return p->reg->index - q->reg->index;
1001 return p->reg->reg_class - q->reg->reg_class;
/* Flatten the (register -> node) pmap into an obstack-allocated array sorted
 * with cmp_regs, so iteration order is deterministic.  Caller frees via the
 * obstack. */
1004 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1007 int n = pmap_count(reg_map);
1009 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1011 pmap_foreach(reg_map, ent) {
1012 res[i].reg = ent->key;
1013 res[i].irn = ent->value;
1017 qsort(res, n, sizeof(res[0]), cmp_regs);
/* Insert a Barrier node over all register values in regs plus the memory
 * value, constrain every input/output to its register, and replace the map
 * entries and *mem with the corresponding Barrier Projs. */
1021 static void create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1023 ir_graph *irg = env->birg->irg;
1025 int n_regs = pmap_count(regs);
/* deterministic register order via the sorted array */
1030 rm = reg_map_to_arr(&env->obst, regs);
1032 for(i = 0, n = 0; i < n_regs; ++i, ++n)
1033 obstack_ptr_grow(&env->obst, rm[i].irn);
/* the memory value goes in as the last operand */
1036 obstack_ptr_grow(&env->obst, *mem);
1040 in = (ir_node **) obstack_finish(&env->obst);
1041 irn = be_new_Barrier(env->birg->irg, bl, n, in);
1042 obstack_free(&env->obst, in);
1044 for(n = 0; n < n_regs; ++n) {
1045 int pos = BE_OUT_POS(n);
1047 const arch_register_t *reg = rm[n].reg;
/* NOTE(review): rm[i] here uses the leftover i from the loop above (== n_regs
 * after it finished) inside a loop over n — looks like it should be rm[n];
 * verify against upstream before changing. */
1049 proj = new_r_Proj(env->birg->irg, bl, irn, get_irn_mode(rm[i].irn), n);
1050 be_node_set_reg_class(irn, n, reg->reg_class);
1052 be_set_constr_single_reg(irn, n, reg);
1053 be_set_constr_single_reg(irn, pos, reg);
1054 be_node_set_reg_class(irn, pos, reg->reg_class);
1055 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1056 if(arch_register_type_is(reg, ignore))
1057 be_node_set_flags(irn, pos, arch_irn_flags_ignore);
/* from now on the map refers to the Barrier's output for this register */
1059 pmap_insert(regs, (void *) reg, proj);
/* fresh memory Proj behind the barrier */
1063 *mem = new_r_Proj(env->birg->irg, bl, irn, mode_M, n);
1066 obstack_free(&env->obst, rm);
1070 * Modify the irg itself and the frame type.
1072 static void modify_irg(be_abi_irg_t *env)
1074 firm_dbg_module_t *dbg = env->dbg;
1075 be_abi_call_t *call = env->call;
1076 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1077 const arch_register_t *sp = arch_isa_sp(isa);
1078 ir_graph *irg = env->birg->irg;
1079 ir_node *bl = get_irg_start_block(irg);
1080 ir_node *end = get_irg_end_block(irg);
1081 ir_node *arg_tuple = get_irg_args(irg);
1082 ir_node *no_mem = get_irg_no_mem(irg);
1083 ir_node *mem = get_irg_initial_mem(irg);
1084 type *method_type = get_entity_type(get_irg_entity(irg));
1085 pset *dont_save = pset_new_ptr(8);
1086 pmap *reg_proj_map = pmap_create();
1087 int n_params = get_method_n_params(method_type);
1094 const arch_register_t *fp_reg;
1095 ir_node *frame_pointer;
1096 ir_node *reg_params_bl;
1098 const ir_edge_t *edge;
1099 ir_type *arg_type, *bet_type;
1102 bitset_t *used_proj_nr;
1104 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1106 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1107 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
1109 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1110 env->regs = pmap_create();
1112 /* Find the maximum proj number of the argument tuple proj */
1113 foreach_out_edge(arg_tuple, edge) {
1114 ir_node *irn = get_edge_src_irn(edge);
1115 int nr = get_Proj_proj(irn);
1116 max_arg = MAX(max_arg, nr);
1118 max_arg = MAX(max_arg + 1, n_params);
1119 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
1120 memset(args, 0, max_arg * sizeof(args[0]));
1121 used_proj_nr = bitset_alloca(1024);
1123 /* Fill the argument vector */
1124 foreach_out_edge(arg_tuple, edge) {
1125 ir_node *irn = get_edge_src_irn(edge);
1126 int nr = get_Proj_proj(irn);
1128 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1131 arg_type = compute_arg_type(env, call, method_type);
1132 bet_type = call->cb->get_between_type(env->cb);
1133 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1135 /* Count the register params and add them to the number of Projs for the RegParams node */
1136 for(i = 0; i < n_params; ++i) {
1137 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1138 if(arg->in_reg && args[i]) {
1139 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1140 assert(i == get_Proj_proj(args[i]));
1142 /* For now, associate the register with the old Proj from Start representing that argument. */
1143 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1144 bitset_set(used_proj_nr, i);
1145 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1149 /* Collect all callee-save registers */
1150 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1151 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1152 for(j = 0; j < cls->n_regs; ++j) {
1153 const arch_register_t *reg = &cls->regs[j];
1154 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1155 pmap_insert(env->regs, (void *) reg, NULL);
1159 pmap_insert(env->regs, (void *) sp, NULL);
1160 pmap_insert(env->regs, (void *) isa->bp, NULL);
1161 reg_params_bl = get_irg_start_block(irg);
1162 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1165 * make proj nodes for the callee save registers.
1166 * memorize them, since Return nodes get those as inputs.
1168 * Note, that if a register corresponds to an argument, the regs map contains
1169 * the old Proj from start for that argument.
1172 rm = reg_map_to_arr(&env->obst, env->regs);
1173 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1174 arch_register_t *reg = (void *) rm[i].reg;
1175 ir_node *arg_proj = rm[i].irn;
1177 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1179 int pos = BE_OUT_POS((int) nr);
1182 bitset_set(used_proj_nr, nr);
1183 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1184 pmap_insert(env->regs, (void *) reg, proj);
1185 be_set_constr_single_reg(env->reg_params, pos, reg);
1186 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1189 * If the register is an ignore register,
1190 * The Proj for that register shall also be ignored during register allocation.
1192 if(arch_register_type_is(reg, ignore))
1193 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1195 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1197 obstack_free(&env->obst, rm);
1199 /* Generate the Prologue */
1200 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1201 create_barrier(env, bl, &mem, env->regs, 0);
1203 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1204 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
1205 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1206 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1207 frame_pointer = be_abi_reg_map_get(env->regs, sp);
1208 set_irg_frame(irg, frame_pointer);
1210 /* Now, introduce stack param nodes for all parameters passed on the stack */
1211 for(i = 0; i < max_arg; ++i) {
1212 ir_node *arg_proj = args[i];
1213 ir_node *repl = NULL;
1215 if(arg_proj != NULL) {
1216 be_abi_call_arg_t *arg;
1217 ir_type *param_type;
1218 int nr = get_Proj_proj(arg_proj);
1220 nr = MIN(nr, n_params);
1221 arg = get_call_arg(call, 0, nr);
1222 param_type = get_method_param_type(method_type, nr);
1225 repl = pmap_get(env->regs, (void *) arg->reg);
1228 else if(arg->on_stack) {
1229 /* For atomic parameters which are actually used, we create a StackParam node. */
1230 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1231 ir_mode *mode = get_type_mode(param_type);
1232 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1233 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1236 /* The stack parameter is not primitive (it is a struct or array),
1237 we thus will create a node representing the parameter's address
1240 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1244 assert(repl != NULL);
1245 edges_reroute(args[i], repl, irg);
1249 /* All Return nodes hang on the End node, so look for them there. */
1250 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1251 ir_node *irn = get_irn_n(end, i);
1253 if(get_irn_opcode(irn) == iro_Return) {
1254 ir_node *bl = get_nodes_block(irn);
1255 int n_res = get_Return_n_ress(irn);
1256 pmap *reg_map = pmap_create();
1257 ir_node *mem = get_Return_mem(irn);
1262 const arch_register_t **regs;
1264 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1266 /* Insert results for Return into the register map. */
1267 for(i = 0; i < n_res; ++i) {
1268 ir_node *res = get_Return_res(irn, i);
1269 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1270 assert(arg->in_reg && "return value must be passed in register");
1271 pmap_insert(reg_map, (void *) arg->reg, res);
1274 /* Add uses of the callee save registers. */
1275 pmap_foreach(env->regs, ent) {
1276 const arch_register_t *reg = ent->key;
1277 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1278 pmap_insert(reg_map, ent->key, ent->value);
1281 /* Make the Epilogue node and call the arch's epilogue maker. */
1282 create_barrier(env, bl, &mem, reg_map, 1);
1283 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1286 Maximum size of the in array for Return nodes is
1287 return args + callee save/ignore registers + memory + stack pointer
1289 in_max = pmap_count(reg_map) + get_Return_n_ress(irn) + 2;
1291 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1292 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1295 in[1] = be_abi_reg_map_get(reg_map, sp);
1300 /* clear SP entry, since it has already been grown. */
1301 pmap_insert(reg_map, (void *) sp, NULL);
1302 for(i = 0; i < n_res; ++i) {
1303 ir_node *res = get_Return_res(irn, i);
1304 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1306 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1307 regs[n++] = arg->reg;
1309 /* Clear the map entry to mark the register as processed. */
1310 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1313 /* grow the rest of the stuff. */
1314 pmap_foreach(reg_map, ent) {
1317 regs[n++] = ent->key;
1321 /* The in array for the new back end return is now ready. */
1322 ret = be_new_Return(irg, bl, n, in);
1324 /* Set the register classes of the return's parameter accordingly. */
1325 for(i = 0; i < n; ++i)
1327 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1329 /* Free the space of the Epilog's in array and the register <-> proj map. */
1330 obstack_free(&env->obst, in);
1332 pmap_destroy(reg_map);
1336 obstack_free(&env->obst, args);
1340 * Walker: puts all Alloc(stack_alloc) on a obstack
1342 static void collect_alloca_walker(ir_node *irn, void *data)
1344 be_abi_irg_t *env = data;
1345 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1346 obstack_ptr_grow(&env->obst, irn);
1349 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1351 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1352 ir_node *old_frame = get_irg_frame(birg->irg);
1353 ir_graph *irg = birg->irg;
1358 env->isa = birg->main_env->arch_env->isa;
1359 env->method_type = get_entity_type(get_irg_entity(irg));
1360 env->call = be_abi_call_new();
1361 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1363 env->dce_survivor = new_survive_dce();
1365 env->dbg = firm_dbg_register("firm.be.abi");
1366 env->stack_phis = pset_new_ptr(16);
1367 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1369 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1371 obstack_init(&env->obst);
1373 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1374 env->irn_ops.impl = &abi_irn_ops;
1376 /* Lower all call nodes in the IRG. */
1379 /* Process the IRG */
1382 /* reroute the stack origin of the calls to the true stack origin. */
1383 edges_reroute(dummy, env->init_sp, irg);
1384 edges_reroute(old_frame, get_irg_frame(irg), irg);
1386 /* Make some important node pointers survive the dead node elimination. */
1387 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1388 pmap_foreach(env->regs, ent)
1389 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1391 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
1393 env->call->cb->done(env->cb);
1398 void be_abi_free(be_abi_irg_t *env)
1400 free_survive_dce(env->dce_survivor);
1401 del_pset(env->stack_phis);
1402 pmap_destroy(env->regs);
1403 obstack_free(&env->obst, NULL);
1404 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1412 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1413 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1414 | _| | |> < ___) | || (_| | (__| <
1415 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
1419 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1423 if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
1424 pset_insert_ptr(s, irn);
1427 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1429 dom_front_info_t *df;
1432 /* We need dominance frontiers for fix up */
1433 df = be_compute_dominance_frontiers(env->birg->irg);
1434 stack_nodes = pset_new_ptr(16);
1435 pset_insert_ptr(stack_nodes, env->init_sp);
1436 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
1437 be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1438 del_pset(stack_nodes);
1440 /* Liveness could have changed due to Phi nodes. */
1441 be_liveness(env->birg->irg);
1443 /* free these dominance frontiers */
1444 be_free_dominance_frontiers(df);
1448 * Translates a direction of an IncSP node (either be_stack_dir_against, or ...along)
1449 * into -1 or 1, respectively.
1450 * @param irn The node.
1451 * @return 1, if the direction of the IncSP was along, -1 if against.
1453 static int get_dir(ir_node *irn)
1455 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
1458 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1460 const arch_env_t *aenv = env->birg->main_env->arch_env;
1462 int start_bias = bias;
1463 int omit_fp = env->call->flags.bits.try_omit_fp;
1465 sched_foreach(bl, irn) {
1468 If the node modifies the stack pointer by a constant offset,
1469 record that in the bias.
1471 if(be_is_IncSP(irn)) {
1472 int ofs = be_get_IncSP_offset(irn);
1473 int dir = get_dir(irn);
1475 if(ofs == BE_STACK_FRAME_SIZE) {
1476 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1477 be_set_IncSP_offset(irn, ofs);
1485 Else check, if the node relates to an entity on the stack frame.
1486 If so, set the true offset (including the bias) for that
1490 entity *ent = arch_get_frame_entity(aenv, irn);
1492 int offset = get_stack_entity_offset(env->frame, ent, bias);
1493 arch_set_frame_offset(aenv, irn, offset);
1494 DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1503 * A helper struct for the bias walker.
1506 be_abi_irg_t *env; /**< The ABI irg environment. */
1507 int start_block_bias; /**< The bias at the end of the start block. */
1510 static void stack_bias_walker(ir_node *bl, void *data)
1512 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1513 struct bias_walk *bw = data;
1514 process_stack_bias(bw->env, bl, bw->start_block_bias);
1518 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1520 ir_graph *irg = env->birg->irg;
1521 struct bias_walk bw;
1523 stack_frame_compute_initial_offset(env->frame);
1524 // stack_frame_dump(stdout, env->frame);
1526 /* Determine the stack bias at the and of the start block. */
1527 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1529 /* fix the bias is all other blocks */
1531 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1534 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1536 assert(arch_register_type_is(reg, callee_save));
1537 assert(pmap_contains(abi->regs, (void *) reg));
1538 return pmap_get(abi->regs, (void *) reg);
1542 _____ _____ _ _ _ _ _ _
1543 |_ _| __ \| \ | | | | | | | | |
1544 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1545 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1546 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1547 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1549 for Phi nodes which are created due to stack modifying nodes
1550 such as IncSP, AddSP and SetSP.
1552 These Phis are always to be ignored by the reg alloc and are
1553 fixed on the SP register of the ISA.
1556 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1558 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1559 const void *res = NULL;
1561 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
1562 res = &abi->irn_ops;
1567 static void be_abi_limited(void *data, bitset_t *bs)
1569 be_abi_irg_t *abi = data;
1570 bitset_clear_all(bs);
1571 bitset_set(bs, abi->isa->sp->index);
1574 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1576 be_abi_irg_t *abi = get_abi_from_ops(self);
1577 const arch_register_t *reg = abi->isa->sp;
1579 memset(req, 0, sizeof(req[0]));
1581 if(pos == BE_OUT_POS(0)) {
1582 req->cls = reg->reg_class;
1583 req->type = arch_register_req_type_limited;
1584 req->limited = be_abi_limited;
1585 req->limited_env = abi;
1588 else if(pos >= 0 && pos < get_irn_arity(irn)) {
1589 req->cls = reg->reg_class;
1590 req->type = arch_register_req_type_normal;
1596 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1600 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1602 const be_abi_irg_t *abi = get_abi_from_ops(self);
1603 return abi->isa->sp;
1606 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1608 return arch_irn_class_normal;
1611 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1613 return arch_irn_flags_ignore;
1616 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1621 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
1625 static const arch_irn_ops_if_t abi_irn_ops = {
1626 abi_get_irn_reg_req,
1631 abi_get_frame_entity,
1635 static const arch_irn_handler_t abi_irn_handler = {