4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
32 #include "besched_t.h"
/* Simple comparison macros.
   NOTE(review): both arguments are evaluated twice; do not pass
   expressions with side effects (e.g. MAX(i++, j)). */
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
/**
 * Descriptor for a single call parameter or result: how it is passed
 * (stack and/or register) plus its stack layout information.
 * (Further fields exist beyond the ones visible here.)
 */
typedef struct _be_abi_call_arg_t {
	unsigned on_stack : 1;       /**< Set if the argument is transmitted on the stack. */

	const arch_register_t *reg;  /**< The register used when passed in a register. */

	unsigned space_before;       /**< Padding in bytes placed before the argument on the stack. */
/** Description of one ABI call: behavior flags plus backend callbacks. */
struct _be_abi_call_t {
	be_abi_call_flags_t flags;     /**< Flags describing call/frame conventions. */
	const be_abi_callbacks_t *cb;  /**< Backend-provided ABI callbacks. */
/** Number of component types a stack frame is composed of (arg, between, frame). */
#define N_FRAME_TYPES 3

/** Model of the stack frame, built from N_FRAME_TYPES stacked types. */
typedef struct _be_stack_frame_t {

	type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
/** A slot inside a stack frame; refers back to its owning frame model. */
struct _be_stack_slot_t {
	struct _be_stack_frame_t *frame; /**< The frame this slot belongs to. */
/** Per-irg state of the ABI lowering phase. */
struct _be_abi_irg_t {

	firm_dbg_module_t *dbg;          /**< The debugging module. */
	be_stack_frame_t *frame;         /**< The stack frame model. */
	const be_irg_t *birg;            /**< The back end IRG. */
	const arch_isa_t *isa;           /**< The isa. */
	survive_dce_t *dce_survivor;     /* presumably keeps nodes alive across dead node
	                                    elimination — TODO confirm */

	be_abi_call_t *call;             /**< The ABI call information. */
	type *method_type;               /**< The type of the method of the IRG. */

	ir_node *init_sp;                /**< The node representing the stack pointer
	                                      at the start of the function. */

	ir_node *reg_params;             /**< The reg params node. */
	pmap *regs;                      /**< A map of all callee-save and ignore regs to
	                                      their Projs to the RegParams node. */

	pset *stack_phis;                /**< The set of all Phi nodes inserted due to
	                                      stack pointer modifying nodes. */

	int start_block_bias;            /**< The stack bias at the end of the start block. */

	void *cb;                        /**< ABI Callback self pointer. */

	arch_irn_handler_t irn_handler;  /**< Embedded handler; recovered via get_abi_from_handler(). */
	arch_irn_ops_t irn_ops;          /**< Embedded irn ops; recovered via get_abi_from_ops(). */
/* Recover the enclosing be_abi_irg_t from a pointer to one of its embedded members. */
#define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
#define get_abi_from_ops(ptr)     firm_container_of(ptr, be_abi_irg_t, irn_ops)

/* Forward declarations, since we need them in be_abi_introduce(). */
static const arch_irn_ops_if_t abi_irn_ops;
static const arch_irn_handler_t abi_irn_handler;
112 _ ____ ___ ____ _ _ _ _
113 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
114 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
115 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
116 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
118 These callbacks are used by the backend to set the parameters
119 for a specific call type.
122 static int cmp_call_arg(const void *a, const void *b, size_t n)
124 const be_abi_call_arg_t *p = a, *q = b;
125 return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Retrieve the argument descriptor of a call for (is_res, pos).
 * @param call      The call description.
 * @param is_res    1 for a result, 0 for a parameter.
 * @param pos       The position of the parameter/result.
 * @param do_insert If non-zero, insert a zero-initialized entry when none
 *                  exists; otherwise do a pure lookup (may yield NULL).
 */
static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
	be_abi_call_arg_t arg;

	memset(&arg, 0, sizeof(arg));

	/* Hash mixes the result flag with the position. */
	hash = is_res * 100 + pos;

		? set_insert(call->params, &arg, sizeof(arg), hash)
		: set_find(call->params, &arg, sizeof(arg), hash);
/** Pure lookup of an argument descriptor (no insertion); may return NULL. */
static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
	return get_or_set_call_arg(call, is_res, pos, 0);
/** Set the flags and the backend callbacks of a call description. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/**
 * Record that parameter arg_pos is passed on the stack.
 * @param alignment    Stack alignment of the argument (must be > 0).
 * @param space_before Padding in bytes before the argument.
 * @param space_after  Padding in bytes after the argument.
 */
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);

	arg->alignment = alignment;
	arg->space_before = space_before;
	arg->space_after = space_after;
	/* NOTE(review): the assert runs after alignment is already stored —
	   consider validating before the assignments. */
	assert(alignment > 0 && "Alignment must be greater than 0");
/** Record that parameter arg_pos is passed in the given register. */
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/** Record that result arg_pos is returned in the given register. */
void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/** Return the flags of the given ABI call description. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/** Allocate a new ABI call description with an empty parameter set. */
be_abi_call_t *be_abi_call_new(void)
	be_abi_call_t *call = xmalloc(sizeof(call[0]));

	call->params = new_set(cmp_call_arg, 16);
/** Free the parameter set of an ABI call description.
    NOTE(review): the xmalloc'd call structure itself is not freed in the
    visible code — verify free(call) follows. */
void be_abi_call_free(be_abi_call_t *call)
	del_set(call->params);
201 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
202 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
203 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
204 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
207 Handling of the stack frame. It is composed of three types:
208 1) The type of the arguments which are pushed on the stack.
209 2) The "between type" which consists of stuff the call of the
210 function pushes on the stack (like the return address and
211 the old base pointer for ia32).
212 3) The Firm frame type which consists of all local variables
/**
 * Compute the offset of a frame entity relative to the frame pointer.
 * The entity's offset inside its own type is corrected by the sizes of all
 * frame component types below it, the frame's initial offset, and the bias.
 * @param frame The stack frame model.
 * @param ent   The entity whose offset is wanted.
 * @param bias  Current stack bias to apply.
 */
static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
	type *t = get_entity_owner(ent);
	int ofs = get_entity_offset_bytes(ent);

	/* Find the type the entity is contained in. */
	for(index = 0; index < N_FRAME_TYPES; ++index) {
		if(frame->order[index] == t)

	/* Add the size of all the types below the one of the entity to the entity's offset */
	for(i = 0; i < index; ++i)
		ofs += get_type_size_bytes(frame->order[i]);

	/* correct the offset by the initial position of the frame pointer */
	ofs -= frame->initial_offset;

	/* correct the offset with the current bias. */
/** Find the member entity of class type t placed at the given byte offset, if any. */
static entity *search_ent_with_offset(type *t, int offset)
	for(i = 0, n = get_class_n_members(t); i < n; ++i) {
		entity *ent = get_class_member(t, i);
		if(get_entity_offset_bytes(ent) == offset)
/**
 * Compute and store the initial offset of the frame model. The entity at
 * offset 0 of the base type (between type for downwards growing stacks,
 * else the frame type) anchors the computation.
 */
static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
	type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
	entity *ent = search_ent_with_offset(base, 0);

	/* Reset first so get_stack_entity_offset() yields an uncorrected offset. */
	frame->initial_offset = 0;
	frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
	return frame->initial_offset;
/**
 * Initialize a stack frame model from its three component types.
 * The between type always sits in the middle slot; which of the argument
 * and local types comes first depends on the stack growth direction.
 */
static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
	frame->arg_type = args;
	frame->between_type = between;
	frame->frame_type = locals;
	frame->initial_offset = 0;
	frame->stack_dir = stack_dir;
	frame->order[1] = between;

		frame->order[0] = args;
		frame->order[2] = locals;

		frame->order[0] = locals;
		frame->order[2] = args;
/** Dump the frame layout (component types, sizes, member offsets) to file. */
static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
	ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
	for(j = 0; j < N_FRAME_TYPES; ++j) {
		type *t = frame->order[j];

		ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
		for(i = 0, n = get_class_n_members(t); i < n; ++i) {
			entity *ent = get_class_member(t, i);
			ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
303 * If irn is a Sel node computing the address of an entity
304 * on the frame type return the entity, else NULL.
306 static INLINE entity *get_sel_ent(ir_node *irn)
308 if(get_irn_opcode(irn) == iro_Sel
309 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
311 return get_Sel_entity(irn);
/**
 * Walker: replace Sels of frame-type entities by backend FrameAddr nodes.
 * (The surrounding comment also mentions FrameLoad/FrameStore; only the
 * FrameAddr creation is visible here.)
 * @param irn  The node currently visited.
 * @param data The be_abi_irg_t environment.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
	entity *ent = get_sel_ent(irn);

		be_abi_irg_t *env = data;
		ir_node *bl = get_nodes_block(irn);
		ir_graph *irg = get_irn_irg(bl);
		ir_node *frame = get_irg_frame(irg);

		nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
339 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
341 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
342 return arg && !arg->in_reg;
352 Adjustment of the calls inside a graph.
/**
 * Transform a Call node into the backend's lowered call form: push stack
 * arguments, constrain register arguments/results, renumber result Projs,
 * add caller-save Projs plus a Keep, and clean up the stack afterwards.
 * @param env The ABI environment for the current irg.
 * @param irn The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
	ir_graph *irg = env->birg->irg;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	be_abi_call_t *call = be_abi_call_new();
	ir_type *mt = get_Call_type(irn);
	ir_node *call_ptr = get_Call_ptr(irn);
	int n_params = get_method_n_params(mt);
	ir_node *curr_mem = get_Call_mem(irn);
	ir_node *bl = get_nodes_block(irn);
	/* NOTE(review): 'results' is never referenced in the visible code —
	   verify it is used and freed. */
	pset *results = pset_new_ptr(8);
	pset *caller_save = pset_new_ptr(8);

	int stack_dir = arch_isa_stack_dir(isa);
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_mode *mach_mode = sp->reg_class->mode;
	struct obstack *obst = &env->obst;
	ir_node *no_mem = get_irg_no_mem(irg);
	int no_alloc = call->flags.bits.frame_is_setup_on_call;

	ir_node *res_proj = NULL;
	/* Result Projs are shifted past pn_Call_max (see below). */
	int curr_res_proj = pn_Call_max;

	const ir_edge_t *edge;

	/* Let the isa fill out the abi description for that call node. */
	arch_isa_get_call_abi(isa, mt, call);

	/* Insert code to put the stack arguments on the stack. */
	assert(get_Call_n_params(irn) == n_params);
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);

			/* Account padding, alignment and the parameter size. */
			stack_size += arg->space_before;
			stack_size = round_up2(stack_size, arg->alignment);
			stack_size += get_type_size_bytes(get_method_param_type(mt, i));
			stack_size += arg->space_after;
			obstack_int_grow(obst, i);

	pos = obstack_finish(obst);

	/* Collect all arguments which are passed in registers. */
	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg && arg->in_reg) {
			obstack_int_grow(obst, i);

	low_args = obstack_finish(obst);

	/* If there are some parameters which shall be passed on the stack. */

		int do_seq = call->flags.bits.store_args_sequential && !no_alloc;

		/* Reverse list of stack parameters if call arguments are from left to right */
		if(call->flags.bits.left_to_right) {
			for(i = 0; i < n_pos / 2; ++i) {
				int other = n_pos - i - 1;

		/*
		 * If the stack is decreasing and we do not want to store sequentially,
		 * or someone else allocated the call frame
		 * we allocate as much space on the stack all parameters need, by
		 * moving the stack pointer along the stack's direction.
		 */
		if(stack_dir < 0 && !do_seq && !no_alloc) {
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);

		assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
		for(i = 0; i < n_pos; ++i) {

			be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
			ir_node *param = get_Call_param(irn, p);
			ir_node *addr = curr_sp;

			type *param_type = get_method_param_type(mt, p);
			int param_size = get_type_size_bytes(param_type) + arg->space_after;

			curr_ofs += arg->space_before;
			curr_ofs = round_up2(curr_ofs, arg->alignment);

			/* Make the expression to compute the argument's offset. */

				addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
				addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);

			/* Insert a store for primitive arguments. */
			if(is_atomic_type(param_type)) {
				mem = new_r_Store(irg, bl, curr_mem, addr, param);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);

			/* Make a mem copy for compound arguments. */

				assert(mode_is_reference(get_irn_mode(param)));
				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);

			obstack_ptr_grow(obst, mem);

			curr_ofs += param_size;

			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next must be incremented,
			 * and the memory value propagated.
			 */

				curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);

		in = (ir_node **) obstack_finish(obst);

		/* We need the sync only, if we didn't build the stores sequentially. */

			curr_mem = new_r_Sync(irg, bl, n_pos, in);
		obstack_free(obst, in);

	/* Collect caller save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {

		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if(arch_register_type_is(reg, caller_save))
				pset_insert_ptr(caller_save, (void *) reg);

	/* search the greatest result proj number */
	foreach_out_edge(irn, edge) {
		const ir_edge_t *res_edge;
		/* Shadows the outer 'irn' (the Call) deliberately for this scope. */
		ir_node *irn = get_edge_src_irn(edge);

		if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {

			foreach_out_edge(irn, res_edge) {

				be_abi_call_arg_t *arg;
				ir_node *res = get_edge_src_irn(res_edge);

				assert(is_Proj(res));

				proj = get_Proj_proj(res);
				arg = get_call_arg(call, 1, proj);

				/*
				 * shift the proj number to the right, since we will drop the
				 * unspeakable Proj_T from the Call. Therefore, all real argument
				 * Proj numbers must be increased by pn_Call_max
				 */
				set_Proj_proj(res, proj);
				obstack_ptr_grow(obst, res);

				if(proj > curr_res_proj)
					curr_res_proj = proj;

					pset_remove_ptr(caller_save, arg->reg);
				//pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))

	/* NULL-terminate the result Proj array. */
	obstack_ptr_grow(obst, NULL);
	res_projs = obstack_finish(obst);

	/* make the back end call node and set its register requirements. */
	for(i = 0; i < n_low_args; ++i)
		obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));

	in = obstack_finish(obst);

	if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
		/* Call target is a known symbol: fold it into the Call as an entity. */
		low_call = be_new_Call(irg, bl, curr_mem, curr_sp, curr_sp, curr_res_proj + pset_count(caller_save), n_low_args, in);
		be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));

		low_call = be_new_Call(irg, bl, curr_mem, curr_sp, call_ptr, curr_res_proj + pset_count(caller_save), n_low_args, in);

	/*
	 * Set the register class of the call address to the same as the stack pointer's.
	 * That is probably buggy for some architectures.
	 */
	be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);

	/* Set the register classes and constraints of the Call parameters. */
	for(i = 0; i < n_low_args; ++i) {
		int index = low_args[i];
		be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
		assert(arg->reg != NULL);

		be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);

	/* Set the register constraints of the results. */
	for(i = 0; res_projs[i]; ++i) {
		ir_node *irn = res_projs[i];
		int proj = get_Proj_proj(irn);

		/* Correct Proj number since it has been adjusted! (see above) */
		const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);

		be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);

	obstack_free(obst, in);
	exchange(irn, low_call);

	/* redirect the result projs to the lowered call instead of the Proj_T */
	for(i = 0; res_projs[i]; ++i)
		set_Proj_pred(res_projs[i], low_call);

	/* Make additional projs for the caller save registers
	   and the Keep node which keeps them alive. */
	if(pset_count(caller_save) > 0) {
		const arch_register_t *reg;

		for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
			ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);

			/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
			be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
			set_irn_link(proj, (void *) reg);
			obstack_ptr_grow(obst, proj);

		in = (ir_node **) obstack_finish(obst);
		keep = be_new_Keep(NULL, irg, bl, n, in);
		for(i = 0; i < n; ++i) {
			const arch_register_t *reg = get_irn_link(in[i]);
			be_node_set_reg_class(keep, i, reg->reg_class);

		obstack_free(obst, in);

	/* Clean up the stack. */

		ir_node *mem_proj = NULL;

		foreach_out_edge(low_call, edge) {
			ir_node *irn = get_edge_src_irn(edge);
			if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {

			mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);

		/* Clean up the stack frame if we allocated it */

			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_against);

	be_abi_call_free(call);
	obstack_free(obst, pos);

	del_pset(caller_save);
/**
 * The alloca is transformed into a back end alloca node and connected to
 * the stack nodes.
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
	if(get_Alloc_where(alloc) == stack_alloc) {
		ir_node *bl = get_nodes_block(alloc);
		ir_graph *irg = get_irn_irg(bl);
		ir_node *alloc_mem = NULL;
		ir_node *alloc_res = NULL;

		const ir_edge_t *edge;

		/* A dynamic stack allocation forces the function to keep a frame pointer. */
		env->call->flags.bits.try_omit_fp = 0;

		new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));

		/* Pick up the memory and result Projs of the original Alloc. */
		foreach_out_edge(alloc, edge) {
			ir_node *irn = get_edge_src_irn(edge);

			assert(is_Proj(irn));
			switch(get_Proj_proj(irn)) {

		assert(alloc_res != NULL);
		exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);

		if(alloc_mem != NULL)
			exchange(alloc_mem, new_r_NoMem(irg));
/**
 * Walker for dependent_on().
 * This function searches a node tgt recursively from a given node
 * but is restricted to the given block.
 * @return 1 if tgt was reachable from curr, 0 if not.
 */
static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
	/* Already visited in this traversal: stop here. */
	if(get_irn_visited(curr) >= visited_nr)

	set_irn_visited(curr, visited_nr);
	/* The search never leaves the given block. */
	if(get_nodes_block(curr) != bl)

	/* Recurse into all operands. */
	for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
		if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
735 * Check if a node is somehow data dependent on another one.
736 * both nodes must be in the same basic block.
737 * @param n1 The first node.
738 * @param n2 The second node.
739 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
741 static int dependent_on(ir_node *n1, ir_node *n2)
743 ir_node *bl = get_nodes_block(n1);
744 ir_graph *irg = get_irn_irg(bl);
745 long vis_nr = get_irg_visited(irg) + 1;
747 assert(bl == get_nodes_block(n2));
748 set_irg_visited(irg, vis_nr);
749 return check_dependence(n1, n2, bl, vis_nr);
752 static int cmp_call_dependecy(const void *c1, const void *c2)
754 ir_node *n1 = *(ir_node **) c1;
755 ir_node *n2 = *(ir_node **) c2;
758 Classical qsort() comparison function behavior:
759 0 if both elements are equal
760 1 if second is "smaller" that first
761 -1 if first is "smaller" that second
763 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
/**
 * Walker: thread Call nodes into a per-block linked list hanging off the
 * block's link field, and clear the graph's leaf-function flag.
 */
static void link_calls_in_block_walker(ir_node *irn, void *data)
	be_abi_irg_t *env = data;
	ir_node *bl = get_nodes_block(irn);
	void *save = get_irn_link(bl);

	/* A graph containing a Call is not a leaf function. */
	env->call->flags.bits.irg_is_leaf = 0;

	/* Prepend irn to the block's list of calls. */
	set_irn_link(irn, save);
	set_irn_link(bl, irn);
/**
 * Process all call nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the block. We first order this list according to data
 * dependency and then connect the calls together.
 */
static void process_calls_in_block(ir_node *bl, void *data)
	be_abi_irg_t *env = data;
	ir_node *curr_sp = env->init_sp;

	/* Collect the block's calls from the list built by the walker. */
	for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
		obstack_ptr_grow(&env->obst, irn);

	/* If there were call nodes in the block. */

		nodes = obstack_finish(&env->obst);

		/* order the call nodes according to data dependency */
		qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);

		/* Process back-to-front, threading the stack pointer through. */
		for(i = n - 1; i >= 0; --i) {
			ir_node *irn = nodes[i];

			switch(get_irn_opcode(irn)) {

				curr_sp = adjust_call(env, irn, curr_sp);

				curr_sp = adjust_alloc(env, irn, curr_sp);

		obstack_free(&env->obst, nodes);

		/* Keep the last stack state in the block by tying it to Keep node */

			be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);

	/* Record the block's final stack pointer in its link field. */
	set_irn_link(bl, curr_sp);
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;

	/* Optimistically assume a leaf graph; the walker clears this
	   on the first Call it encounters. */
	env->call->flags.bits.irg_is_leaf = 1;
	irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
	irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
843 static void collect_return_walker(ir_node *irn, void *data)
845 if(get_irn_opcode(irn) == iro_Return) {
846 struct obstack *obst = data;
847 obstack_ptr_grow(obst, irn);
/**
 * Set up the stack frame in the start block: with frame-pointer omission
 * only a frame-reserving IncSP is emitted; otherwise the stack pointer is
 * copied into a pinned, unspillable frame pointer register first. Finally
 * the irg's frame node is replaced by the new frame pointer.
 */
static ir_node *setup_frame(be_abi_irg_t *env)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	be_abi_call_flags_bits_t flags = env->call->flags.bits;
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *old_frame = get_irg_frame(irg);
	ir_node *stack = pmap_get(env->regs, (void *) sp);
	ir_node *frame = pmap_get(env->regs, (void *) bp);

	int stack_nr = get_Proj_proj(stack);

	if(flags.try_omit_fp) {
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);

		/* Materialize the frame pointer as a copy of the stack pointer. */
		frame = be_new_Copy(bp->reg_class, irg, bl, stack);

		/* The frame pointer must survive the whole function; never spill it. */
		be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);

		be_set_constr_single_reg(frame, -1, bp);
		be_node_set_flags(frame, -1, arch_irn_flags_ignore);
		arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);

		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_along);

	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
	env->init_sp = stack;
	set_irg_frame(irg, frame);
	/* Let all users of the old frame use the new frame pointer. */
	edges_reroute(old_frame, frame, irg);
/**
 * Tear down the stack frame before a Return: either release the frame via
 * IncSP (frame pointer omitted) or restore the stack pointer from the frame
 * pointer via SetSP; then collect the Return's register operands.
 * @param reg_map Map of registers for this Return.
 * @param obst    Obstack receiving the callee-save/ignore operand nodes.
 */
static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	ir_graph *irg = env->birg->irg;
	ir_node *ret_mem = get_Return_mem(ret);
	ir_node *frame = get_irg_frame(irg);
	ir_node *bl = get_nodes_block(ret);
	/* The block's final stack pointer was stored in its link field. */
	ir_node *stack = get_irn_link(bl);

	if(env->call->flags.bits.try_omit_fp) {
		stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);

		stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
		be_set_constr_single_reg(stack, -1, sp);
		be_node_set_flags(stack, -1, arch_irn_flags_ignore);

	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		ir_node *irn = ent->value;

			/* NOTE(review): stack/frame are grown on env->obst while the other
			   registers go to the 'obst' parameter — verify this is intended. */
			obstack_ptr_grow(&env->obst, stack);

			obstack_ptr_grow(&env->obst, frame);
		else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
			obstack_ptr_grow(obst, irn);
/**
 * Build a class type describing the stack layout of the parameters:
 * one entity per stack parameter, with offsets honoring padding and
 * alignment. Walk order follows the stack direction combined with the
 * left-to-right flag.
 */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
	int dir = env->call->flags.bits.left_to_right ? 1 : -1;
	int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
	int n = get_method_n_params(method_type);
	/* Start from whichever end matches the walk direction. */
	int curr = inc > 0 ? 0 : n - 1;

	snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
	res = new_type_class(new_id_from_str(buf));

	for(i = 0; i < n; ++i, curr += inc) {
		type *param_type = get_method_param_type(method_type, curr);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);

			snprintf(buf, sizeof(buf), "param_%d", i);
			arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
			ofs += arg->space_before;
			ofs = round_up2(ofs, arg->alignment);
			set_entity_offset_bytes(arg->stack_ent, ofs);
			/* NOTE(review): space_after is added before the parameter size here,
			   while adjust_call adds it after the size — verify the ordering. */
			ofs += arg->space_after;
			ofs += get_type_size_bytes(param_type);

	set_type_size_bytes(res, ofs);
/**
 * For every register class, create a Perm after the RegParams node over all
 * mapped, non-ignore registers of that class, constraining each Perm output
 * to its register and updating the regs map.
 */
static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
	/* Create a Perm after the RegParams node to delimit it. */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);

		/* Collect all mapped, non-ignore registers of this class. */
		for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			ir_node *irn = pmap_get(regs, (void *) reg);

			if(irn && !arch_register_type_is(reg, ignore)) {

				obstack_ptr_grow(&obst, irn);
				/* Remember the register in the node's link field. */
				set_irn_link(irn, (void *) reg);

		obstack_ptr_grow(&obst, NULL);
		in = obstack_finish(&obst);

		perm = be_new_Perm(cls, irg, bl, n_regs, in);
		for(j = 0; j < n_regs; ++j) {
			ir_node *arg = in[j];
			arch_register_t *reg = get_irn_link(arg);

			/* NOTE(review): the map is updated with the Perm operand in[j];
			   a Proj of the Perm would be expected here — verify. */
			pmap_insert(regs, reg, arg);
			be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);

		obstack_free(&obst, in);

	obstack_free(&obst, NULL);
1005 const arch_register_t *reg;
1009 static int cmp_regs(const void *a, const void *b)
1011 const reg_node_map_t *p = a;
1012 const reg_node_map_t *q = b;
1014 if(p->reg->reg_class == q->reg->reg_class)
1015 return p->reg->index - q->reg->index;
1017 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flatten a register->node pmap into an obstack-allocated array, sorted by
 * register class and index (see cmp_regs).
 */
static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
	int n = pmap_count(reg_map);

	reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));

	pmap_foreach(reg_map, ent) {
		res[i].reg = ent->key;
		res[i].irn = ent->value;

	qsort(res, n, sizeof(res[0]), cmp_regs);
/**
 * Create a Barrier node taking all mapped registers (plus the memory) as
 * operands and producing them again as Projs. Each Proj is pinned to its
 * register, the regs map is updated to the Projs, and *mem is replaced by
 * the barrier's memory Proj.
 * @param in_req If non-zero, also place the single-register requirement
 *               on the barrier's inputs.
 */
static void create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
	ir_graph *irg = env->birg->irg;

	int n_regs = pmap_count(regs);

	rm = reg_map_to_arr(&env->obst, regs);

	for(n = 0; n < n_regs; ++n)
		obstack_ptr_grow(&env->obst, rm[n].irn);

		/* The memory value is the barrier's last operand. */
		obstack_ptr_grow(&env->obst, *mem);

	in = (ir_node **) obstack_finish(&env->obst);
	irn = be_new_Barrier(env->birg->irg, bl, n, in);
	obstack_free(&env->obst, in);

	for(n = 0; n < n_regs; ++n) {
		int pos = BE_OUT_POS(n);

		const arch_register_t *reg = rm[n].reg;

		proj = new_r_Proj(env->birg->irg, bl, irn, get_irn_mode(rm[n].irn), n);
		be_node_set_reg_class(irn, n, reg->reg_class);

			be_set_constr_single_reg(irn, n, reg);
		be_set_constr_single_reg(irn, pos, reg);
		be_node_set_reg_class(irn, pos, reg->reg_class);
		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);

		if(arch_register_type_is(reg, ignore))
			be_node_set_flags(irn, pos, arch_irn_flags_ignore);

		/* From now on the register is represented by the barrier's Proj. */
		pmap_insert(regs, (void *) reg, proj);

	*mem = new_r_Proj(env->birg->irg, bl, irn, mode_M, n);

	obstack_free(&env->obst, rm);
1086 * Modify the irg itself and the frame type.
1088 static void modify_irg(be_abi_irg_t *env)
1090 firm_dbg_module_t *dbg = env->dbg;
1091 be_abi_call_t *call = env->call;
1092 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1093 const arch_register_t *sp = arch_isa_sp(isa);
1094 ir_graph *irg = env->birg->irg;
1095 ir_node *bl = get_irg_start_block(irg);
1096 ir_node *end = get_irg_end_block(irg);
1097 ir_node *arg_tuple = get_irg_args(irg);
1098 ir_node *no_mem = get_irg_no_mem(irg);
1099 ir_node *mem = get_irg_initial_mem(irg);
1100 type *method_type = get_entity_type(get_irg_entity(irg));
1101 pset *dont_save = pset_new_ptr(8);
1102 pmap *reg_proj_map = pmap_create();
1103 int n_params = get_method_n_params(method_type);
1110 const arch_register_t *fp_reg;
1111 ir_node *frame_pointer;
1112 ir_node *reg_params_bl;
1114 const ir_edge_t *edge;
1115 ir_type *arg_type, *bet_type;
1118 bitset_t *used_proj_nr;
1120 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1122 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1123 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
1125 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1126 env->regs = pmap_create();
1128 /* Find the maximum proj number of the argument tuple proj */
1129 foreach_out_edge(arg_tuple, edge) {
1130 ir_node *irn = get_edge_src_irn(edge);
1131 int nr = get_Proj_proj(irn);
1132 max_arg = MAX(max_arg, nr);
1134 max_arg = MAX(max_arg + 1, n_params);
1135 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
1136 memset(args, 0, max_arg * sizeof(args[0]));
1137 used_proj_nr = bitset_alloca(1024);
1139 /* Fill the argument vector */
1140 foreach_out_edge(arg_tuple, edge) {
1141 ir_node *irn = get_edge_src_irn(edge);
1142 int nr = get_Proj_proj(irn);
1144 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1147 arg_type = compute_arg_type(env, call, method_type);
1148 bet_type = call->cb->get_between_type(env->cb);
1149 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1151 /* Count the register params and add them to the number of Projs for the RegParams node */
1152 for(i = 0; i < n_params; ++i) {
1153 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1154 if(arg->in_reg && args[i]) {
1155 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1156 assert(i == get_Proj_proj(args[i]));
1158 /* For now, associate the register with the old Proj from Start representing that argument. */
1159 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1160 bitset_set(used_proj_nr, i);
1161 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1165 /* Collect all callee-save registers */
1166 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1167 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1168 for(j = 0; j < cls->n_regs; ++j) {
1169 const arch_register_t *reg = &cls->regs[j];
1170 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1171 pmap_insert(env->regs, (void *) reg, NULL);
1175 pmap_insert(env->regs, (void *) sp, NULL);
1176 pmap_insert(env->regs, (void *) isa->bp, NULL);
1177 reg_params_bl = get_irg_start_block(irg);
1178 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1181 * make proj nodes for the callee save registers.
1182 * memorize them, since Return nodes get those as inputs.
1184 * Note, that if a register corresponds to an argument, the regs map contains
1185 * the old Proj from start for that argument.
1188 rm = reg_map_to_arr(&env->obst, env->regs);
1189 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1190 arch_register_t *reg = (void *) rm[i].reg;
1191 ir_node *arg_proj = rm[i].irn;
1193 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1195 int pos = BE_OUT_POS((int) nr);
1198 bitset_set(used_proj_nr, nr);
1199 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1200 pmap_insert(env->regs, (void *) reg, proj);
1201 be_set_constr_single_reg(env->reg_params, pos, reg);
1202 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1205 * If the register is an ignore register,
1206 * The Proj for that register shall also be ignored during register allocation.
1208 if(arch_register_type_is(reg, ignore))
1209 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1211 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1213 obstack_free(&env->obst, rm);
1215 /* Generate the Prologue */
1216 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1217 create_barrier(env, bl, &mem, env->regs, 0);
1219 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1220 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
1221 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1222 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1223 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1224 set_irg_frame(irg, frame_pointer);
1226 /* Now, introduce stack param nodes for all parameters passed on the stack */
1227 for(i = 0; i < max_arg; ++i) {
1228 ir_node *arg_proj = args[i];
1229 ir_node *repl = NULL;
1231 if(arg_proj != NULL) {
1232 be_abi_call_arg_t *arg;
1233 ir_type *param_type;
1234 int nr = get_Proj_proj(arg_proj);
1236 nr = MIN(nr, n_params);
1237 arg = get_call_arg(call, 0, nr);
1238 param_type = get_method_param_type(method_type, nr);
1241 repl = pmap_get(env->regs, (void *) arg->reg);
1244 else if(arg->on_stack) {
1245 /* For atomic parameters which are actually used, we create a StackParam node. */
1246 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1247 ir_mode *mode = get_type_mode(param_type);
1248 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1249 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1252 /* The stack parameter is not primitive (it is a struct or array),
1253 we thus will create a node representing the parameter's address
1256 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1260 assert(repl != NULL);
1261 edges_reroute(args[i], repl, irg);
1265 /* All Return nodes hang on the End node, so look for them there. */
1266 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1267 ir_node *irn = get_irn_n(end, i);
1269 if(get_irn_opcode(irn) == iro_Return) {
1270 ir_node *bl = get_nodes_block(irn);
1271 int n_res = get_Return_n_ress(irn);
1272 pmap *reg_map = pmap_create();
1273 ir_node *mem = get_Return_mem(irn);
1278 const arch_register_t **regs;
1280 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1282 /* Insert results for Return into the register map. */
1283 for(i = 0; i < n_res; ++i) {
1284 ir_node *res = get_Return_res(irn, i);
1285 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1286 assert(arg->in_reg && "return value must be passed in register");
1287 pmap_insert(reg_map, (void *) arg->reg, res);
1290 /* Add uses of the callee save registers. */
1291 pmap_foreach(env->regs, ent) {
1292 const arch_register_t *reg = ent->key;
1293 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1294 pmap_insert(reg_map, ent->key, ent->value);
1297 /* Make the Epilogue node and call the arch's epilogue maker. */
1298 create_barrier(env, bl, &mem, reg_map, 1);
1299 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1302 Maximum size of the in array for Return nodes is
1303 return args + callee save/ignore registers + memory + stack pointer
1305 in_max = pmap_count(reg_map) + get_Return_n_ress(irn) + 2;
1307 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1308 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1311 in[1] = be_abi_reg_map_get(reg_map, sp);
1316 /* clear SP entry, since it has already been grown. */
1317 pmap_insert(reg_map, (void *) sp, NULL);
1318 for(i = 0; i < n_res; ++i) {
1319 ir_node *res = get_Return_res(irn, i);
1320 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1322 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1323 regs[n++] = arg->reg;
1325 /* Clear the map entry to mark the register as processed. */
1326 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1329 /* grow the rest of the stuff. */
1330 pmap_foreach(reg_map, ent) {
1333 regs[n++] = ent->key;
1337 /* The in array for the new back end return is now ready. */
1338 ret = be_new_Return(irg, bl, n, in);
1340 /* Set the register classes of the return's parameter accordingly. */
1341 for(i = 0; i < n; ++i)
1343 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1345 /* Free the space of the Epilog's in array and the register <-> proj map. */
1346 obstack_free(&env->obst, in);
1348 pmap_destroy(reg_map);
1352 del_pset(dont_save);
1353 obstack_free(&env->obst, args);
1357 * Walker: puts all Alloc(stack_alloc) on a obstack
1359 static void collect_alloca_walker(ir_node *irn, void *data)
1361 be_abi_irg_t *env = data;
1362 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1363 obstack_ptr_grow(&env->obst, irn);
/**
 * Install the ABI infrastructure into a backend graph: query the calling
 * convention for the graph's method type, set up the initial stack pointer
 * placeholder, and register the ABI's own node handler.
 * @param birg The backend IRG to process.
 * @return A freshly allocated ABI environment for this graph.
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	be_abi_irg_t *env = xmalloc(sizeof(env[0]));
	ir_node *old_frame = get_irg_frame(birg->irg);
	ir_graph *irg = birg->irg;
	env->isa = birg->main_env->arch_env->isa;
	env->method_type = get_entity_type(get_irg_entity(irg));
	env->call = be_abi_call_new();
	/* Let the ISA fill in the calling convention for this method type. */
	arch_isa_get_call_abi(env->isa, env->method_type, env->call);
	env->dce_survivor = new_survive_dce();
	env->dbg = firm_dbg_register("firm.be.abi");
	env->stack_phis = pset_new_ptr(16);
	/* A dummy Unknown stands in for the stack pointer until the real
	 * init_sp node exists; all its users are rerouted below. */
	env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
	/* The ABI callback's self pointer (passed back on every cb call). */
	env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
	obstack_init(&env->obst);
	/* Install our own irn handler/ops (used for the stack Phis). */
	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
	env->irn_ops.impl = &abi_irn_ops;
	/* Lower all call nodes in the IRG. */
	/* Process the IRG */
	/* reroute the stack origin of the calls to the true stack origin. */
	edges_reroute(dummy, env->init_sp, irg);
	edges_reroute(old_frame, get_irg_frame(irg), irg);
	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	pmap_foreach(env->regs, ent)
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
	arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
	/* Call the ABI callback's done hook. */
	env->call->cb->done(env->cb);
/**
 * Free all resources held by an ABI environment.
 * @param env The environment created by be_abi_introduce().
 */
void be_abi_free(be_abi_irg_t *env)
	free_survive_dce(env->dce_survivor);
	del_pset(env->stack_phis);
	pmap_destroy(env->regs);
	obstack_free(&env->obst, NULL);
	/* Remove the irn handler pushed in be_abi_introduce(). */
	arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1429 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1430 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1431 | _| | |> < ___) | || (_| | (__| <
1432 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/**
 * Walker: collect all stack pointer modifying backend nodes
 * (AddSP, IncSP, SetSP) into the pset passed as walker environment.
 */
static void collect_stack_nodes_walker(ir_node *irn, void *data)
	if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
		pset_insert_ptr(s, irn);
/**
 * Put the stack pointer values of the graph into SSA form: collect all
 * stack modifying nodes, run SSA construction over them and record the
 * inserted Phis in env->stack_phis (they get special register handling).
 */
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
	dom_front_info_t *df;

	/* We need dominance frontiers for fix up */
	df = be_compute_dominance_frontiers(env->birg->irg);
	stack_nodes = pset_new_ptr(16);
	/* The initial stack pointer counts as a definition as well. */
	pset_insert_ptr(stack_nodes, env->init_sp);
	irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
	be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
	del_pset(stack_nodes);

	/* Liveness could have changed due to Phi nodes. */
	be_liveness(env->birg->irg);

	/* free these dominance frontiers */
	be_free_dominance_frontiers(df);
1465 * Translates a direction of an IncSP node (either be_stack_dir_against, or ...along)
1466 * into -1 or 1, respectively.
1467 * @param irn The node.
1468 * @return 1, if the direction of the IncSP was along, -1 if against.
1470 static int get_dir(ir_node *irn)
1472 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
/**
 * Walk the schedule of one block and track the stack pointer bias.
 * IncSP nodes carrying the symbolic BE_STACK_FRAME_SIZE offset are patched
 * to the real frame type size; nodes with a frame entity get their final
 * offset (entity offset plus current bias) set on them.
 * @param env  The ABI environment.
 * @param bl   The block to process.
 * @param bias The stack bias at the entry of the block.
 * @return The stack bias at the end of the block (used by
 *         be_abi_fix_stack_bias() as the bias for all successor blocks).
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
	const arch_env_t *aenv = env->birg->main_env->arch_env;
	int start_bias = bias;
	int omit_fp = env->call->flags.bits.try_omit_fp;

	sched_foreach(bl, irn) {
		/*
		If the node modifies the stack pointer by a constant offset,
		record that in the bias.
		*/
		if(be_is_IncSP(irn)) {
			int ofs = be_get_IncSP_offset(irn);
			int dir = get_dir(irn);

			/* Replace the symbolic frame size by the real frame type size. */
			if(ofs == BE_STACK_FRAME_SIZE) {
				ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
				be_set_IncSP_offset(irn, ofs);

		/*
		Else check, if the node relates to an entity on the stack frame.
		If so, set the true offset (including the bias) for that
		*/
			entity *ent = arch_get_frame_entity(aenv, irn);
			int offset = get_stack_entity_offset(env->frame, ent, bias);
			arch_set_frame_offset(aenv, irn, offset);
			DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
/**
 * A helper struct for the bias walker: carries the environment and the
 * start block's exit bias to every other block of the graph.
 */
	be_abi_irg_t *env; /**< The ABI irg environment. */
	int start_block_bias; /**< The bias at the end of the start block. */
1527 static void stack_bias_walker(ir_node *bl, void *data)
1529 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1530 struct bias_walk *bw = data;
1531 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Fix the stack bias for the whole graph: compute the initial frame
 * offsets, process the start block, then bias-correct every other block.
 */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;
	struct bias_walk bw;

	stack_frame_compute_initial_offset(env->frame);
	// stack_frame_dump(stdout, env->frame);

	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);

	/* fix the bias in all other blocks */
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
/**
 * Get the Proj (of the RegParams node) representing the entry value of a
 * callee-save register; see the env->regs map in be_abi_irg_t.
 * @param abi The ABI environment.
 * @param reg The callee-save register to look up.
 * @return The node carrying that register's value at function entry.
 */
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
	assert(arch_register_type_is(reg, callee_save));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);
1559 _____ _____ _ _ _ _ _ _
1560 |_ _| __ \| \ | | | | | | | | |
1561 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1562 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1563 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1564 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1566 for Phi nodes which are created due to stack modifying nodes
1567 such as IncSP, AddSP and SetSP.
1569 These Phis are always to be ignored by the reg alloc and are
1570 fixed on the SP register of the ISA.
/**
 * irn handler callback: hand out our irn ops for Phi nodes that were
 * inserted for stack pointer construction (recorded in abi->stack_phis);
 * all other nodes yield NULL (not handled here).
 */
static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
	const be_abi_irg_t *abi = get_abi_from_handler(handler);
	const void *res = NULL;

	if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
		res = &abi->irn_ops;
1584 static void be_abi_limited(void *data, bitset_t *bs)
1586 be_abi_irg_t *abi = data;
1587 bitset_clear_all(bs);
1588 bitset_set(bs, abi->isa->sp->index);
/**
 * Build the register requirement for an ABI handled node (a stack Phi):
 * the first output is limited (via be_abi_limited) to the stack pointer
 * register; the inputs only require the stack pointer's register class.
 */
static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
	be_abi_irg_t *abi = get_abi_from_ops(self);
	const arch_register_t *reg = abi->isa->sp;

	memset(req, 0, sizeof(req[0]));

	if(pos == BE_OUT_POS(0)) {
		req->cls = reg->reg_class;
		req->type = arch_register_req_type_limited;
		req->limited = be_abi_limited;
		req->limited_env = abi;
	else if(pos >= 0 && pos < get_irn_arity(irn)) {
		req->cls = reg->reg_class;
		req->type = arch_register_req_type_normal;
1613 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1617 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1619 const be_abi_irg_t *abi = get_abi_from_ops(self);
1620 return abi->isa->sp;
1623 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1625 return arch_irn_class_normal;
1628 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1630 return arch_irn_flags_ignore;
1633 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1638 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/** The irn ops vtable for ABI managed nodes (the stack Phis). */
static const arch_irn_ops_if_t abi_irn_ops = {
	abi_get_irn_reg_req,
	abi_get_frame_entity,
1652 static const arch_irn_handler_t abi_irn_handler = {