4 * @author Sebastian Hack
18 #include "irgraph_t.h"
21 #include "iredges_t.h"
24 #include "irprintf_t.h"
32 #include "besched_t.h"
/* NOTE: classic double-evaluation macros -- both arguments are evaluated
   twice, so never pass expressions with side effects (e.g. MAX(i++, j)). */
34 #define MAX(x, y) ((x) > (y) ? (x) : (y))
35 #define MIN(x, y) ((x) < (y) ? (x) : (y))
37 typedef struct _be_abi_call_arg_t {
40 unsigned on_stack : 1;
43 const arch_register_t *reg;
46 unsigned space_before;
50 struct _be_abi_call_t {
51 be_abi_call_flags_t flags;
52 const be_abi_callbacks_t *cb;
57 #define N_FRAME_TYPES 3
59 typedef struct _be_stack_frame_t {
64 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
70 struct _be_stack_slot_t {
71 struct _be_stack_frame_t *frame;
75 struct _be_abi_irg_t {
77 firm_dbg_module_t *dbg; /**< The debugging module. */
78 be_stack_frame_t *frame; /**< The stack frame model. */
79 const be_irg_t *birg; /**< The back end IRG. */
80 const arch_isa_t *isa; /**< The isa. */
81 survive_dce_t *dce_survivor;
83 be_abi_call_t *call; /**< The ABI call information. */
84 type *method_type; /**< The type of the method of the IRG. */
86 ir_node *init_sp; /**< The node representing the stack pointer
87 at the start of the function. */
89 ir_node *reg_params; /**< The reg params node. */
90 pmap *regs; /**< A map of all callee-save and ignore regs to
91 their Projs to the RegParams node. */
93 pset *stack_phis; /**< The set of all Phi nodes inserted due to
94 stack pointer modifying nodes. */
96 int start_block_bias; /**< The stack bias at the end of the start block. */
98 void *cb; /**< ABI Callback self pointer. */
100 arch_irn_handler_t irn_handler;
101 arch_irn_ops_t irn_ops;
104 #define get_abi_from_handler(ptr) firm_container_of(ptr, be_abi_irg_t, irn_handler)
105 #define get_abi_from_ops(ptr) firm_container_of(ptr, be_abi_irg_t, irn_ops)
107 /* Forward declarations, since we need them in be_abi_introduce(). */
108 static const arch_irn_ops_if_t abi_irn_ops;
109 static const arch_irn_handler_t abi_irn_handler;
112 _ ____ ___ ____ _ _ _ _
113 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
114 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
115 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
116 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
118 These callbacks are used by the backend to set the parameters
119 for a specific call type.
/**
 * set() comparison callback for be_abi_call_arg_t records.
 * Two records are equal iff both the is_res flag and the position match.
 * Returns 0 on equality, non-zero otherwise (libfirm set convention);
 * the size parameter n is unused.
 */
122 static int cmp_call_arg(const void *a, const void *b, size_t n)
124 const be_abi_call_arg_t *p = a, *q = b;
125 return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Fetch the argument/result record for (is_res, pos) from the call's
 * parameter set.
 * With do_insert != 0, a zero-initialized record is inserted when none
 * exists yet; with do_insert == 0 a plain lookup is performed (which may
 * yield NULL).
 * The hash is_res * 100 + pos is only a bucket hint for the set; actual
 * equality is decided by cmp_call_arg().
 */
128 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
130 be_abi_call_arg_t arg;
133 memset(&arg, 0, sizeof(arg));
137 hash = is_res * 100 + pos;
140 ? set_insert(call->params, &arg, sizeof(arg), hash)
141 : set_find(call->params, &arg, sizeof(arg), hash);
/** Read-only lookup of an argument/result record; NULL if never set. */
144 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
146 return get_or_set_call_arg(call, is_res, pos, 0);
149 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/**
 * Record the stack-passing properties of parameter arg_pos:
 * its alignment (must be > 0) and the padding bytes reserved before and
 * after the value on the stack.
 */
155 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
157 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
159 arg->alignment = alignment;
160 arg->space_before = space_before;
161 arg->space_after = space_after;
/* NOTE(review): this precondition is asserted only after the fields were
   already stored; asserting at function entry would fail faster. */
162 assert(alignment > 0 && "Alignment must be greater than 0");
/* Declare that parameter arg_pos is passed in register reg.
   (Presumably sets arg->in_reg and arg->reg -- the assignments are in the
   elided tail; confirm against the full source.) */
165 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
167 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Declare that result arg_pos is returned in register reg (is_res == 1).
   (Presumably sets arg->in_reg and arg->reg -- the assignments are in the
   elided tail; confirm against the full source.) */
172 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
174 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
179 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/** Allocate a fresh ABI call descriptor with an empty parameter set. */
184 be_abi_call_t *be_abi_call_new(void)
186 be_abi_call_t *call = xmalloc(sizeof(call[0]));
188 call->params = new_set(cmp_call_arg, 16);
/** Destroy a call descriptor created by be_abi_call_new(). */
193 void be_abi_call_free(be_abi_call_t *call)
195 del_set(call->params);
201 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
202 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
203 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
204 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
207 Handling of the stack frame. It is composed of three types:
208 1) The type of the arguments which are pushed on the stack.
209 2) The "between type" which consists of stuff the call of the
210 function pushes on the stack (like the return address and
211 the old base pointer for ia32).
212 3) The Firm frame type which consists of all local variables
/**
 * Compute the byte offset of a stack-frame entity relative to the frame
 * pointer's reference point.
 * @param frame  The stack frame model (arg / between / local types).
 * @param ent    The entity whose offset is wanted.
 * @param bias   The current stack bias to correct with.
 * The entity's offset inside its own type is shifted by the sizes of all
 * frame types ordered below that type, then corrected by the frame
 * pointer's initial offset (and by the bias in the elided tail).
 */
216 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
218 type *t = get_entity_owner(ent);
219 int ofs = get_entity_offset_bytes(ent);
223 /* Find the type the entity is contained in. */
224 for(index = 0; index < N_FRAME_TYPES; ++index) {
225 if(frame->order[index] == t)
229 /* Add the size of all the types below the one of the entity to the entity's offset */
230 for(i = 0; i < index; ++i)
231 ofs += get_type_size_bytes(frame->order[i]);
233 /* correct the offset by the initial position of the frame pointer */
234 ofs -= frame->initial_offset;
236 /* correct the offset with the current bias. */
/* Return the first class member of t located at the given byte offset.
   NOTE(review): assumed to return NULL when no member matches -- the final
   return statement is not visible here; confirm. */
242 static entity *search_ent_with_offset(type *t, int offset)
246 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
247 entity *ent = get_class_member(t, i);
248 if(get_entity_offset_bytes(ent) == offset)
/**
 * Determine the frame pointer's initial offset inside the frame model.
 * For a downward-growing stack (stack_dir < 0) the between type anchors
 * the computation, otherwise the local frame type does.
 * initial_offset is zeroed first so get_stack_entity_offset() yields an
 * uncorrected offset.
 * NOTE(review): search_ent_with_offset() may return NULL (e.g. for an
 * empty base type), which would crash inside get_stack_entity_offset();
 * confirm callers guarantee a member at offset 0.
 */
255 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
257 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
258 entity *ent = search_ent_with_offset(base, 0);
259 frame->initial_offset = 0;
260 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
261 return frame->initial_offset;
/**
 * Initialize the stack frame model from its three component types.
 * order[] lists the types from stack bottom to top: the between type is
 * always in the middle, while args and locals swap places depending on
 * the stack direction (the branch condition is in an elided line --
 * presumably stack_dir; confirm against the full source).
 */
264 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
266 frame->arg_type = args;
267 frame->between_type = between;
268 frame->frame_type = locals;
269 frame->initial_offset = 0;
270 frame->stack_dir = stack_dir;
271 frame->order[1] = between;
274 frame->order[0] = args;
275 frame->order[2] = locals;
279 frame->order[0] = locals;
280 frame->order[2] = args;
286 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
290 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
291 for(j = 0; j < N_FRAME_TYPES; ++j) {
292 type *t = frame->order[j];
294 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
295 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
296 entity *ent = get_class_member(t, i);
297 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
303 * If irn is a Sel node computing the address of an entity
304 * on the frame type return the entity, else NULL.
306 static INLINE entity *get_sel_ent(ir_node *irn)
/* Only Sels whose pointer operand is the graph's frame node qualify;
   other nodes presumably fall through to an elided NULL return. */
308 if(get_irn_opcode(irn) == iro_Sel
309 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
311 return get_Sel_entity(irn);
318 * Walker: Replaces Loads, Stores and Sels of frame type entities
319 * by FrameLoad, FrameStore and FrameAdress.
321 static void lower_frame_sels_walker(ir_node *irn, void *data)
/* data is the ABI environment (passed by irg_walk_graph in modify_irg). */
324 entity *ent = get_sel_ent(irn);
327 be_abi_irg_t *env = data;
328 ir_node *bl = get_nodes_block(irn);
329 ir_graph *irg = get_irn_irg(bl);
330 ir_node *frame = get_irg_frame(irg);
/* Replace the frame Sel by a backend FrameAddr computing the same address.
   NOTE(review): only the FrameAddr path is visible in this excerpt; the
   Load/Store lowering mentioned in the header comment is elided. */
332 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
/**
 * Check if an argument of a call is passed on the stack.
 * True iff a record exists for the position and it is not marked as
 * register-passed.
 */
339 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
341 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
342 return arg && !arg->in_reg;
352 Adjustment of the calls inside a graph.
357 * Transform a call node.
358 * @param env The ABI environment for the current irg.
359 * @param irn The call node.
360 * @param curr_sp The stack pointer node to use.
361 * @return The stack pointer after the call.
363 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
365 ir_graph *irg = env->birg->irg;
366 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
367 be_abi_call_t *call = be_abi_call_new();
368 ir_type *mt = get_Call_type(irn);
369 ir_node *call_ptr = get_Call_ptr(irn);
370 int n_params = get_method_n_params(mt);
371 ir_node *curr_mem = get_Call_mem(irn);
372 ir_node *bl = get_nodes_block(irn);
373 pset *results = pset_new_ptr(8);
374 pset *caller_save = pset_new_ptr(8);
376 int stack_dir = arch_isa_stack_dir(isa);
377 const arch_register_t *sp = arch_isa_sp(isa);
378 ir_mode *mach_mode = sp->reg_class->mode;
379 struct obstack *obst = &env->obst;
380 ir_node *no_mem = get_irg_no_mem(irg);
381 int no_alloc = call->flags.bits.frame_is_setup_on_call;
383 ir_node *res_proj = NULL;
384 int curr_res_proj = pn_Call_max;
391 const ir_edge_t *edge;
396 /* Let the isa fill out the abi description for that call node. */
397 arch_isa_get_call_abi(isa, mt, call);
399 /* Insert code to put the stack arguments on the stack. */
400 assert(get_Call_n_params(irn) == n_params);
401 for(i = 0; i < n_params; ++i) {
402 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
405 stack_size += arg->space_before;
406 stack_size = round_up2(stack_size, arg->alignment);
407 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
408 stack_size += arg->space_after;
409 obstack_int_grow(obst, i);
413 pos = obstack_finish(obst);
415 /* Collect all arguments which are passed in registers. */
416 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
417 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
418 if(arg && arg->in_reg) {
419 obstack_int_grow(obst, i);
423 low_args = obstack_finish(obst);
425 /* If there are some parameters which shall be passed on the stack. */
428 int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
430 /* Reverse list of stack parameters if call arguments are from left to right */
431 if(call->flags.bits.left_to_right) {
432 for(i = 0; i < n_pos / 2; ++i) {
433 int other = n_pos - i - 1;
441 * If the stack is decreasing and we do not want to store sequentially,
442 * or someone else allocated the call frame
443 * we allocate as much space on the stack all parameters need, by
444 * moving the stack pointer along the stack's direction.
446 if(stack_dir < 0 && !do_seq && !no_alloc) {
447 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
450 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
451 for(i = 0; i < n_pos; ++i) {
453 be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
454 ir_node *param = get_Call_param(irn, p);
455 ir_node *addr = curr_sp;
457 type *param_type = get_method_param_type(mt, p);
458 int param_size = get_type_size_bytes(param_type) + arg->space_after;
460 curr_ofs += arg->space_before;
461 curr_ofs = round_up2(curr_ofs, arg->alignment);
463 /* Make the expression to compute the argument's offset. */
465 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
466 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
469 /* Insert a store for primitive arguments. */
470 if(is_atomic_type(param_type)) {
471 mem = new_r_Store(irg, bl, curr_mem, addr, param);
472 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
475 /* Make a mem copy for compound arguments. */
477 assert(mode_is_reference(get_irn_mode(param)));
478 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
479 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
482 obstack_ptr_grow(obst, mem);
484 curr_ofs += param_size;
487 * If we wanted to build the arguments sequentially,
488 * the stack pointer for the next must be incremented,
489 * and the memory value propagated.
493 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_expand);
498 in = (ir_node **) obstack_finish(obst);
500 /* We need the sync only, if we didn't build the stores sequentially. */
502 curr_mem = new_r_Sync(irg, bl, n_pos, in);
503 obstack_free(obst, in);
506 /* Collect caller save registers */
507 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
509 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
510 for(j = 0; j < cls->n_regs; ++j) {
511 const arch_register_t *reg = arch_register_for_index(cls, j);
512 if(arch_register_type_is(reg, caller_save))
513 pset_insert_ptr(caller_save, (void *) reg);
517 /* search the greatest result proj number */
518 foreach_out_edge(irn, edge) {
519 const ir_edge_t *res_edge;
520 ir_node *irn = get_edge_src_irn(edge);
522 if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
524 foreach_out_edge(irn, res_edge) {
526 be_abi_call_arg_t *arg;
527 ir_node *res = get_edge_src_irn(res_edge);
529 assert(is_Proj(res));
531 proj = get_Proj_proj(res);
532 arg = get_call_arg(call, 1, proj);
535 shift the proj number to the right, since we will drop the
536 unspeakable Proj_T from the Call. Therefore, all real argument
537 Proj numbers must be increased by pn_Call_max
540 set_Proj_proj(res, proj);
541 obstack_ptr_grow(obst, res);
543 if(proj > curr_res_proj)
544 curr_res_proj = proj;
546 pset_remove_ptr(caller_save, arg->reg);
547 //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
554 obstack_ptr_grow(obst, NULL);
555 res_projs = obstack_finish(obst);
557 /* make the back end call node and set its register requirements. */
558 for(i = 0; i < n_low_args; ++i)
559 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
561 in = obstack_finish(obst);
563 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
564 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, curr_sp, curr_res_proj + pset_count(caller_save), n_low_args, in);
565 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
569 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, call_ptr, curr_res_proj + pset_count(caller_save), n_low_args, in);
571 set_irn_dbg_info(low_call, get_irn_dbg_info(irn));
575 Set the register class of the call address to the same as the stack pointer's.
576 That' probably buggy for some architectures.
578 be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
580 /* Set the register classes and constraints of the Call parameters. */
581 for(i = 0; i < n_low_args; ++i) {
582 int index = low_args[i];
583 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
584 assert(arg->reg != NULL);
586 be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
589 /* Set the register constraints of the results. */
590 for(i = 0; res_projs[i]; ++i) {
591 ir_node *irn = res_projs[i];
592 int proj = get_Proj_proj(irn);
594 /* Correct Proj number since it has been adjusted! (see above) */
595 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
598 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
600 obstack_free(obst, in);
601 exchange(irn, low_call);
603 /* redirect the result projs to the lowered call instead of the Proj_T */
604 for(i = 0; res_projs[i]; ++i)
605 set_Proj_pred(res_projs[i], low_call);
607 /* Make additional projs for the caller save registers
608 and the Keep node which keeps them alive. */
609 if(pset_count(caller_save) > 0) {
610 const arch_register_t *reg;
614 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
615 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
617 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
618 be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
619 set_irn_link(proj, (void *) reg);
620 obstack_ptr_grow(obst, proj);
624 in = (ir_node **) obstack_finish(obst);
625 keep = be_new_Keep(NULL, irg, bl, n, in);
626 for(i = 0; i < n; ++i) {
627 const arch_register_t *reg = get_irn_link(in[i]);
628 be_node_set_reg_class(keep, i, reg->reg_class);
630 obstack_free(obst, in);
633 /* Clean up the stack. */
635 ir_node *mem_proj = NULL;
637 foreach_out_edge(low_call, edge) {
638 ir_node *irn = get_edge_src_irn(edge);
639 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
646 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
648 /* Clean up the stack frame if we allocated it */
650 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
653 be_abi_call_free(call);
654 obstack_free(obst, pos);
656 del_pset(caller_save);
663 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
665 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
667 if (get_Alloc_where(alloc) == stack_alloc) {
668 ir_node *bl = get_nodes_block(alloc);
669 ir_graph *irg = get_irn_irg(bl);
670 ir_node *alloc_mem = NULL;
671 ir_node *alloc_res = NULL;
673 const ir_edge_t *edge;
676 env->call->flags.bits.try_omit_fp = 0;
678 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
680 foreach_out_edge(alloc, edge) {
681 ir_node *irn = get_edge_src_irn(edge);
683 assert(is_Proj(irn));
684 switch(get_Proj_proj(irn)) {
696 /* TODO: Beware: currently Alloc nodes without a result might happen,
697 only escape analysis kills them and this phase runs only for object
698 oriented source. So this must be fixed. */
699 assert(alloc_res != NULL);
700 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
702 if(alloc_mem != NULL)
703 exchange(alloc_mem, new_r_NoMem(irg));
712 * Walker for dependent_on().
713 * This function searches a node tgt recursively from a given node
714 * but is restricted to the given block.
715 * @return 1 if tgt was reachable from curr, 0 if not.
717 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
/* Already visited in this search? Then prune this branch. */
721 if(get_irn_visited(curr) >= visited_nr)
724 set_irn_visited(curr, visited_nr);
/* The search is restricted to block bl; leaving it stops the descent. */
725 if(get_nodes_block(curr) != bl)
/* Recurse over all operands; success propagates upwards. */
731 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
732 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
740 * Check if a node is somehow data dependent on another one.
741 * both nodes must be in the same basic block.
742 * @param n1 The first node.
743 * @param n2 The second node.
744 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
746 static int dependent_on(ir_node *n1, ir_node *n2)
748 ir_node *bl = get_nodes_block(n1);
749 ir_graph *irg = get_irn_irg(bl);
/* Fresh visited counter for this traversal.
   NOTE(review): declared long here but check_dependence() takes an
   unsigned long -- harmless for positive counters, but inconsistent. */
750 long vis_nr = get_irg_visited(irg) + 1;
752 assert(bl == get_nodes_block(n2));
753 set_irg_visited(irg, vis_nr);
754 return check_dependence(n1, n2, bl, vis_nr);
/* NOTE(review): function name misspells "dependency"; a rename must also
   update the qsort() call site in process_calls_in_block(). */
757 static int cmp_call_dependecy(const void *c1, const void *c2)
759 ir_node *n1 = *(ir_node **) c1;
760 ir_node *n2 = *(ir_node **) c2;
763 Classical qsort() comparison function behavior:
764 0 if both elements are equal
765 1 if second is "smaller" than first
766 -1 if first is "smaller" than second
/* A node depending on the other sorts before it; otherwise order is
   arbitrary but deterministic (always 1 for unrelated distinct nodes). */
768 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
/**
 * Walker: prepend each Call node to a singly linked list threaded through
 * the link fields, anchored at the node's basic block. Also clears the
 * irg_is_leaf flag, since the graph evidently contains a call.
 */
771 static void link_calls_in_block_walker(ir_node *irn, void *data)
774 be_abi_irg_t *env = data;
775 ir_node *bl = get_nodes_block(irn);
776 void *save = get_irn_link(bl);
778 env->call->flags.bits.irg_is_leaf = 0;
/* List insertion: irn points at the previous head, block points at irn. */
780 set_irn_link(irn, save);
781 set_irn_link(bl, irn);
786 * Process all call nodes inside a basic block.
787 * Note that the link field of the block must contain a linked list of all
788 * Call nodes inside the block. We first order this list according to data dependency
789 * and that connect the calls together.
791 static void process_calls_in_block(ir_node *bl, void *data)
793 be_abi_irg_t *env = data;
794 ir_node *curr_sp = env->init_sp;
798 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
799 obstack_ptr_grow(&env->obst, irn);
801 /* If there were call nodes in the block. */
806 nodes = obstack_finish(&env->obst);
808 /* order the call nodes according to data dependency */
809 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
811 for(i = n - 1; i >= 0; --i) {
812 ir_node *irn = nodes[i];
814 switch(get_irn_opcode(irn)) {
816 curr_sp = adjust_call(env, irn, curr_sp);
819 curr_sp = adjust_alloc(env, irn, curr_sp);
826 obstack_free(&env->obst, nodes);
828 /* Keep the last stack state in the block by tying it to Keep node */
830 be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
833 set_irn_link(bl, curr_sp);
837 * Adjust all call nodes in the graph to the ABI conventions.
839 static void process_calls(be_abi_irg_t *env)
841 ir_graph *irg = env->birg->irg;
/* Assume a leaf graph; link_calls_in_block_walker clears this on the
   first Call it sees. */
843 env->call->flags.bits.irg_is_leaf = 1;
/* Pass 1: collect per-block Call lists; pass 2: lower them per block. */
844 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
845 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
/** Walker: collect all Return nodes on the obstack passed as data. */
848 static void collect_return_walker(ir_node *irn, void *data)
850 if(get_irn_opcode(irn) == iro_Return) {
851 struct obstack *obst = data;
852 obstack_ptr_grow(obst, irn);
856 static ir_node *setup_frame(be_abi_irg_t *env)
858 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
859 const arch_register_t *sp = isa->sp;
860 const arch_register_t *bp = isa->bp;
861 be_abi_call_flags_bits_t flags = env->call->flags.bits;
862 ir_graph *irg = env->birg->irg;
863 ir_node *bl = get_irg_start_block(irg);
864 ir_node *no_mem = get_irg_no_mem(irg);
865 ir_node *old_frame = get_irg_frame(irg);
866 ir_node *stack = pmap_get(env->regs, (void *) sp);
867 ir_node *frame = pmap_get(env->regs, (void *) bp);
869 int stack_nr = get_Proj_proj(stack);
871 if(flags.try_omit_fp) {
872 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
877 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
879 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
881 be_set_constr_single_reg(frame, -1, bp);
882 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
883 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
886 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
889 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
890 env->init_sp = stack;
891 set_irg_frame(irg, frame);
892 edges_reroute(old_frame, frame, irg);
897 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
899 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
900 const arch_register_t *sp = isa->sp;
901 const arch_register_t *bp = isa->bp;
902 ir_graph *irg = env->birg->irg;
903 ir_node *ret_mem = get_Return_mem(ret);
904 ir_node *frame = get_irg_frame(irg);
905 ir_node *bl = get_nodes_block(ret);
906 ir_node *stack = get_irn_link(bl);
910 if(env->call->flags.bits.try_omit_fp) {
911 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
915 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
916 be_set_constr_single_reg(stack, -1, sp);
917 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
920 pmap_foreach(env->regs, ent) {
921 const arch_register_t *reg = ent->key;
922 ir_node *irn = ent->value;
925 obstack_ptr_grow(&env->obst, stack);
927 obstack_ptr_grow(&env->obst, frame);
928 else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
929 obstack_ptr_grow(obst, irn);
/**
 * Build a class type modelling the stack argument area of method_type.
 * One entity per parameter is created (also cached in arg->stack_ent),
 * laid out with the space_before / alignment / space_after recorded in
 * the call description. Iteration direction depends on stack direction
 * and argument order so that offsets grow along the stack layout.
 */
933 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
935 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
936 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
937 int n = get_method_n_params(method_type);
938 int curr = inc > 0 ? 0 : n - 1;
/* Name the type after the function to keep dumps readable. */
945 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
946 res = new_type_class(new_id_from_str(buf));
948 for(i = 0; i < n; ++i, curr += inc) {
949 type *param_type = get_method_param_type(method_type, curr);
950 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
953 snprintf(buf, sizeof(buf), "param_%d", i);
954 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
955 ofs += arg->space_before;
956 ofs = round_up2(ofs, arg->alignment);
957 set_entity_offset_bytes(arg->stack_ent, ofs);
/* NOTE(review): space_after is added before the parameter's own size;
   the running total is the same, but the gap notionally lands in front
   of the value rather than behind it. */
958 ofs += arg->space_after;
959 ofs += get_type_size_bytes(param_type);
963 set_type_size_bytes(res, ofs);
967 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
974 /* Create a Perm after the RegParams node to delimit it. */
975 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
976 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
981 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
982 const arch_register_t *reg = &cls->regs[j];
983 ir_node *irn = pmap_get(regs, (void *) reg);
985 if(irn && !arch_register_type_is(reg, ignore)) {
987 obstack_ptr_grow(&obst, irn);
988 set_irn_link(irn, (void *) reg);
992 obstack_ptr_grow(&obst, NULL);
993 in = obstack_finish(&obst);
995 perm = be_new_Perm(cls, irg, bl, n_regs, in);
996 for(j = 0; j < n_regs; ++j) {
997 ir_node *arg = in[j];
998 arch_register_t *reg = get_irn_link(arg);
999 pmap_insert(regs, reg, arg);
1000 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
1003 obstack_free(&obst, in);
1006 obstack_free(&obst, NULL);
1010 const arch_register_t *reg;
/**
 * qsort() comparison for reg_node_map_t: order by register class first,
 * then by register index within the class.
 * NOTE(review): the class ordering subtracts two pointers and truncates
 * the ptrdiff_t to int -- deterministic within one run, but relies on
 * allocator address order and may overflow on some platforms.
 */
1014 static int cmp_regs(const void *a, const void *b)
1016 const reg_node_map_t *p = a;
1017 const reg_node_map_t *q = b;
1019 if(p->reg->reg_class == q->reg->reg_class)
1020 return p->reg->index - q->reg->index;
1022 return p->reg->reg_class - q->reg->reg_class;
/**
 * Flatten a register -> node pmap into an obstack-allocated array sorted
 * by cmp_regs() (class, then index), giving a deterministic iteration
 * order. The caller owns the obstack storage.
 */
1025 static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
1028 int n = pmap_count(reg_map);
1030 reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
1032 pmap_foreach(reg_map, ent) {
1033 res[i].reg = ent->key;
1034 res[i].irn = ent->value;
1038 qsort(res, n, sizeof(res[0]), cmp_regs);
1042 static void create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
1044 ir_graph *irg = env->birg->irg;
1046 int n_regs = pmap_count(regs);
1051 rm = reg_map_to_arr(&env->obst, regs);
1053 for(n = 0; n < n_regs; ++n)
1054 obstack_ptr_grow(&env->obst, rm[n].irn);
1057 obstack_ptr_grow(&env->obst, *mem);
1061 in = (ir_node **) obstack_finish(&env->obst);
1062 irn = be_new_Barrier(env->birg->irg, bl, n, in);
1063 obstack_free(&env->obst, in);
1065 for(n = 0; n < n_regs; ++n) {
1066 int pos = BE_OUT_POS(n);
1068 const arch_register_t *reg = rm[n].reg;
1070 proj = new_r_Proj(env->birg->irg, bl, irn, get_irn_mode(rm[n].irn), n);
1071 be_node_set_reg_class(irn, n, reg->reg_class);
1073 be_set_constr_single_reg(irn, n, reg);
1074 be_set_constr_single_reg(irn, pos, reg);
1075 be_node_set_reg_class(irn, pos, reg->reg_class);
1076 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1077 if(arch_register_type_is(reg, ignore))
1078 be_node_set_flags(irn, pos, arch_irn_flags_ignore);
1080 pmap_insert(regs, (void *) reg, proj);
1084 *mem = new_r_Proj(env->birg->irg, bl, irn, mode_M, n);
1087 obstack_free(&env->obst, rm);
1091 * Modify the irg itself and the frame type.
1093 static void modify_irg(be_abi_irg_t *env)
1095 firm_dbg_module_t *dbg = env->dbg;
1096 be_abi_call_t *call = env->call;
1097 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1098 const arch_register_t *sp = arch_isa_sp(isa);
1099 ir_graph *irg = env->birg->irg;
1100 ir_node *bl = get_irg_start_block(irg);
1101 ir_node *end = get_irg_end_block(irg);
1102 ir_node *arg_tuple = get_irg_args(irg);
1103 ir_node *no_mem = get_irg_no_mem(irg);
1104 ir_node *mem = get_irg_initial_mem(irg);
1105 type *method_type = get_entity_type(get_irg_entity(irg));
1106 pset *dont_save = pset_new_ptr(8);
1107 pmap *reg_proj_map = pmap_create();
1108 int n_params = get_method_n_params(method_type);
1115 const arch_register_t *fp_reg;
1116 ir_node *frame_pointer;
1117 ir_node *reg_params_bl;
1119 const ir_edge_t *edge;
1120 ir_type *arg_type, *bet_type;
1123 bitset_t *used_proj_nr;
1125 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1127 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1128 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
1130 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1131 env->regs = pmap_create();
1133 /* Find the maximum proj number of the argument tuple proj */
1134 foreach_out_edge(arg_tuple, edge) {
1135 ir_node *irn = get_edge_src_irn(edge);
1136 int nr = get_Proj_proj(irn);
1137 max_arg = MAX(max_arg, nr);
1139 max_arg = MAX(max_arg + 1, n_params);
1140 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
1141 memset(args, 0, max_arg * sizeof(args[0]));
1142 used_proj_nr = bitset_alloca(1024);
1144 /* Fill the argument vector */
1145 foreach_out_edge(arg_tuple, edge) {
1146 ir_node *irn = get_edge_src_irn(edge);
1147 int nr = get_Proj_proj(irn);
1149 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1152 arg_type = compute_arg_type(env, call, method_type);
1153 bet_type = call->cb->get_between_type(env->cb);
1154 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1156 /* Count the register params and add them to the number of Projs for the RegParams node */
1157 for(i = 0; i < n_params; ++i) {
1158 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1159 if(arg->in_reg && args[i]) {
1160 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1161 assert(i == get_Proj_proj(args[i]));
1163 /* For now, associate the register with the old Proj from Start representing that argument. */
1164 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1165 bitset_set(used_proj_nr, i);
1166 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1170 /* Collect all callee-save registers */
1171 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1172 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1173 for(j = 0; j < cls->n_regs; ++j) {
1174 const arch_register_t *reg = &cls->regs[j];
1175 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1176 pmap_insert(env->regs, (void *) reg, NULL);
1180 pmap_insert(env->regs, (void *) sp, NULL);
1181 pmap_insert(env->regs, (void *) isa->bp, NULL);
1182 reg_params_bl = get_irg_start_block(irg);
1183 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1186 * make proj nodes for the callee save registers.
1187 * memorize them, since Return nodes get those as inputs.
1189 * Note, that if a register corresponds to an argument, the regs map contains
1190 * the old Proj from start for that argument.
1193 rm = reg_map_to_arr(&env->obst, env->regs);
1194 for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
1195 arch_register_t *reg = (void *) rm[i].reg;
1196 ir_node *arg_proj = rm[i].irn;
1198 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1200 int pos = BE_OUT_POS((int) nr);
1203 bitset_set(used_proj_nr, nr);
1204 proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1205 pmap_insert(env->regs, (void *) reg, proj);
1206 be_set_constr_single_reg(env->reg_params, pos, reg);
1207 arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
1210 * If the register is an ignore register,
1211 * The Proj for that register shall also be ignored during register allocation.
1213 if(arch_register_type_is(reg, ignore))
1214 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1216 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1218 obstack_free(&env->obst, rm);
1220 /* Generate the Prologue */
1221 fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
1222 create_barrier(env, bl, &mem, env->regs, 0);
1224 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1225 env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
1226 arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
1227 be_abi_reg_map_set(env->regs, sp, env->init_sp);
1228 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1229 set_irg_frame(irg, frame_pointer);
1231 /* Now, introduce stack param nodes for all parameters passed on the stack */
1232 for(i = 0; i < max_arg; ++i) {
1233 ir_node *arg_proj = args[i];
1234 ir_node *repl = NULL;
1236 if(arg_proj != NULL) {
1237 be_abi_call_arg_t *arg;
1238 ir_type *param_type;
1239 int nr = get_Proj_proj(arg_proj);
1241 nr = MIN(nr, n_params);
1242 arg = get_call_arg(call, 0, nr);
1243 param_type = get_method_param_type(method_type, nr);
1246 repl = pmap_get(env->regs, (void *) arg->reg);
1249 else if(arg->on_stack) {
1250 /* For atomic parameters which are actually used, we create a StackParam node. */
1251 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1252 ir_mode *mode = get_type_mode(param_type);
1253 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1254 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1257 /* The stack parameter is not primitive (it is a struct or array),
1258 we thus will create a node representing the parameter's address
1261 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1265 assert(repl != NULL);
1266 edges_reroute(args[i], repl, irg);
1270 /* All Return nodes hang on the End node, so look for them there. */
1271 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1272 ir_node *irn = get_irn_n(end, i);
1274 if(get_irn_opcode(irn) == iro_Return) {
1275 ir_node *bl = get_nodes_block(irn);
1276 int n_res = get_Return_n_ress(irn);
1277 pmap *reg_map = pmap_create();
1278 ir_node *mem = get_Return_mem(irn);
1283 const arch_register_t **regs;
1285 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1287 /* Insert results for Return into the register map. */
1288 for(i = 0; i < n_res; ++i) {
1289 ir_node *res = get_Return_res(irn, i);
1290 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1291 assert(arg->in_reg && "return value must be passed in register");
1292 pmap_insert(reg_map, (void *) arg->reg, res);
1295 /* Add uses of the callee save registers. */
1296 pmap_foreach(env->regs, ent) {
1297 const arch_register_t *reg = ent->key;
1298 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1299 pmap_insert(reg_map, ent->key, ent->value);
1302 /* Make the Epilogue node and call the arch's epilogue maker. */
1303 create_barrier(env, bl, &mem, reg_map, 1);
1304 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1307 Maximum size of the in array for Return nodes is
1308 return args + callee save/ignore registers + memory + stack pointer
1310 in_max = pmap_count(reg_map) + get_Return_n_ress(irn) + 2;
1312 in = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
1313 regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
1316 in[1] = be_abi_reg_map_get(reg_map, sp);
1321 /* clear SP entry, since it has already been grown. */
1322 pmap_insert(reg_map, (void *) sp, NULL);
1323 for(i = 0; i < n_res; ++i) {
1324 ir_node *res = get_Return_res(irn, i);
1325 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1327 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1328 regs[n++] = arg->reg;
1330 /* Clear the map entry to mark the register as processed. */
1331 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1334 /* grow the rest of the stuff. */
1335 pmap_foreach(reg_map, ent) {
1338 regs[n++] = ent->key;
1342 /* The in array for the new back end return is now ready. */
1343 ret = be_new_Return(get_irn_dbg_info(irn), irg, bl, n, in);
1345 /* Set the register classes of the return's parameter accordingly. */
1346 for(i = 0; i < n; ++i)
1348 be_node_set_reg_class(ret, i, regs[i]->reg_class);
1350 /* Free the space of the Epilog's in array and the register <-> proj map. */
1351 obstack_free(&env->obst, in);
1353 pmap_destroy(reg_map);
1357 del_pset(dont_save);
1358 obstack_free(&env->obst, args);
1362 * Walker: puts all Alloc(stack_alloc) on a obstack
1364 static void collect_alloca_walker(ir_node *irn, void *data)
1366 be_abi_irg_t *env = data;
1367 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1368 obstack_ptr_grow(&env->obst, irn);
1371 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
/* Set up the per-graph ABI environment: query the calling convention,
   lower calls and install the stack pointer / register machinery.
   NOTE(review): several lines of this function are not visible in this
   excerpt (further field initialization, the call lowering and the IRG
   processing walks); comments below only cover what is shown. */
1373 be_abi_irg_t *env = xmalloc(sizeof(env[0]));
1374 ir_node *old_frame = get_irg_frame(birg->irg);
1375 ir_graph *irg = birg->irg;
/* Ask the ISA for the ABI of this method's type. */
1380 env->isa = birg->main_env->arch_env->isa;
1381 env->method_type = get_entity_type(get_irg_entity(irg));
1382 env->call = be_abi_call_new();
1383 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
/* Containers keeping important nodes alive over dead node elimination. */
1385 env->dce_survivor = new_survive_dce();
1387 env->stack_phis = pset_new_ptr(16);
/* An Unknown node serves as placeholder for the stack pointer; its
   users are rerouted to the real init_sp below. */
1388 env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
1389 FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
/* Let the backend's ABI callbacks create their private state. */
1391 env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
1393 obstack_init(&env->obst);
/* Install the irn handler/ops serving the stack pointer Phi nodes. */
1395 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1396 env->irn_ops.impl = &abi_irn_ops;
1398 /* Lower all call nodes in the IRG. */
1401 /* Process the IRG */
1404 /* reroute the stack origin of the calls to the true stack origin. */
1405 edges_reroute(dummy, env->init_sp, irg);
1406 edges_reroute(old_frame, get_irg_frame(irg), irg);
1408 /* Make some important node pointers survive the dead node elimination. */
1409 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1410 pmap_foreach(env->regs, ent)
1411 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1413 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
/* The ABI callbacks are done with the per-graph setup. */
1415 env->call->cb->done(env->cb);
1420 void be_abi_free(be_abi_irg_t *env)
/* Release all resources acquired by be_abi_introduce().
   NOTE(review): the final free of env itself is not visible in this
   excerpt — confirm against the full file. */
1422 free_survive_dce(env->dce_survivor);
1423 del_pset(env->stack_phis);
1424 pmap_destroy(env->regs);
1425 obstack_free(&env->obst, NULL);
/* Remove the irn handler installed by be_abi_introduce(). */
1426 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1434 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1435 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1436 | _| | |> < ___) | || (_| | (__| <
1437 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
1441 static void collect_stack_nodes_walker(ir_node *irn, void *data)
1445 if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
1446 pset_insert_ptr(s, irn);
1449 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1451 dom_front_info_t *df;
1454 /* We need dominance frontiers for fix up */
1455 df = be_compute_dominance_frontiers(env->birg->irg);
1456 stack_nodes = pset_new_ptr(16);
1457 pset_insert_ptr(stack_nodes, env->init_sp);
1458 irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
1459 be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
1460 del_pset(stack_nodes);
1462 /* Liveness could have changed due to Phi nodes. */
1463 be_liveness(env->birg->irg);
1465 /* free these dominance frontiers */
1466 be_free_dominance_frontiers(df);
1470 * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
1471 * into -1 or 1, respectively.
1472 * @param irn The node.
1473 * @return 1, if the direction of the IncSP was along, -1 if against.
1475 static int get_dir(ir_node *irn)
1477 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
/**
 * Walk over the schedule of a block, track the stack pointer bias and
 * assign the real frame offsets to all nodes touching frame entities.
 * NOTE(review): parts of this function (loop braces, bias update and
 * the final return) are not visible in this excerpt.
 */
1480 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1482 const arch_env_t *aenv = env->birg->main_env->arch_env;
1484 int start_bias = bias;
1485 int omit_fp = env->call->flags.bits.try_omit_fp;
1487 sched_foreach(bl, irn) {
1490 If the node modifies the stack pointer by a constant offset,
1491 record that in the bias.
1493 if(be_is_IncSP(irn)) {
1494 int ofs = be_get_IncSP_offset(irn);
1495 int dir = get_dir(irn);
/* The symbolic frame size marker is resolved here, once the real
   size of the frame type is known. */
1497 if(ofs == BE_STACK_FRAME_SIZE) {
1498 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1499 be_set_IncSP_offset(irn, ofs);
1507 Else check, if the node relates to an entity on the stack frame.
1508 If so, set the true offset (including the bias) for that
1512 entity *ent = arch_get_frame_entity(aenv, irn);
1514 int offset = get_stack_entity_offset(env->frame, ent, bias);
1515 arch_set_frame_offset(aenv, irn, offset);
1516 DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1525 * A helper struct for the bias walker.
1528 be_abi_irg_t *env; /**< The ABI irg environment. */
1529 int start_block_bias; /**< The bias at the end of the start block. */
1532 static void stack_bias_walker(ir_node *bl, void *data)
1534 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1535 struct bias_walk *bw = data;
1536 process_stack_bias(bw->env, bl, bw->start_block_bias);
1540 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1542 ir_graph *irg = env->birg->irg;
1543 struct bias_walk bw;
1545 stack_frame_compute_initial_offset(env->frame);
1546 // stack_frame_dump(stdout, env->frame);
1548 /* Determine the stack bias at the and of the start block. */
1549 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1551 /* fix the bias is all other blocks */
1553 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1556 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1558 assert(arch_register_type_is(reg, callee_save));
1559 assert(pmap_contains(abi->regs, (void *) reg));
1560 return pmap_get(abi->regs, (void *) reg);
1564 _____ _____ _ _ _ _ _ _
1565 |_ _| __ \| \ | | | | | | | | |
1566 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1567 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1568 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1569 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1571 for Phi nodes which are created due to stack modifying nodes
1572 such as IncSP, AddSP and SetSP.
1574 These Phis are always to be ignored by the reg alloc and are
1575 fixed on the SP register of the ISA.
1578 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1580 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1581 const void *res = NULL;
1583 if(is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn))
1584 res = &abi->irn_ops;
1589 static void be_abi_limited(void *data, bitset_t *bs)
1591 be_abi_irg_t *abi = data;
1592 bitset_clear_all(bs);
1593 bitset_set(bs, abi->isa->sp->index);
1596 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1598 be_abi_irg_t *abi = get_abi_from_ops(self);
1599 const arch_register_t *reg = abi->isa->sp;
1601 memset(req, 0, sizeof(req[0]));
1603 if(pos == BE_OUT_POS(0)) {
1604 req->cls = reg->reg_class;
1605 req->type = arch_register_req_type_limited;
1606 req->limited = be_abi_limited;
1607 req->limited_env = abi;
1610 else if(pos >= 0 && pos < get_irn_arity(irn)) {
1611 req->cls = reg->reg_class;
1612 req->type = arch_register_req_type_normal;
1618 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1622 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1624 const be_abi_irg_t *abi = get_abi_from_ops(self);
1625 return abi->isa->sp;
1628 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1630 return arch_irn_class_normal;
1633 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1635 return arch_irn_flags_ignore;
1638 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1643 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* The irn ops interface for the ABI's stack pointer Phi nodes.
   NOTE(review): the remaining members of this initializer are not
   visible in this excerpt — confirm order against arch_irn_ops_if_t. */
1647 static const arch_irn_ops_if_t abi_irn_ops = {
1648 abi_get_irn_reg_req,
1653 abi_get_frame_entity,
1657 static const arch_irn_handler_t abi_irn_handler = {