4 * @author Sebastian Hack
14 #include "survive_dce.h"
16 #include "irgraph_t.h"
19 #include "iredges_t.h"
22 #include "irprintf_t.h"
28 #include "besched_t.h"
/* Simple min/max helpers.
 * WARNING: function-like macros evaluate their arguments twice; never
 * pass expressions with side effects (e.g. MAX(i++, j)). */
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
33 typedef struct _be_abi_call_arg_t {
38 const arch_register_t *reg;
42 struct _be_abi_call_t {
43 be_abi_call_flags_t flags;
48 #define N_FRAME_TYPES 3
50 typedef struct _be_stack_frame_t {
55 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
61 struct _be_stack_slot_t {
62 struct _be_stack_frame_t *frame;
66 struct _be_abi_irg_t {
68 firm_dbg_module_t *dbg; /**< The debugging module. */
69 be_stack_frame_t *frame;
71 const arch_isa_t *isa; /**< The isa. */
72 survive_dce_t *dce_survivor;
77 ir_node *init_sp; /**< The node representing the stack pointer
78 at the start of the function. */
85 arch_irn_handler_t irn_handler;
86 arch_irn_ops_t irn_ops;
89 #define abi_offset_of(type,member) ((char *) &(((type *) 0)->member) - (char *) 0)
90 #define abi_get_relative(ptr, member) ((void *) ((char *) (ptr) - abi_offset_of(be_abi_irg_t, member)))
91 #define get_abi_from_handler(ptr) abi_get_relative(ptr, irn_handler)
92 #define get_abi_from_ops(ptr) abi_get_relative(ptr, irn_ops)
94 /* Forward, since be need it in be_abi_introduce(). */
95 static const arch_irn_ops_if_t abi_irn_ops;
96 static const arch_irn_handler_t abi_irn_handler;
99 _ ____ ___ ____ _ _ _ _
100 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
101 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
102 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
103 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
105 These callbacks are used by the backend to set the parameters
106 for a specific call type.
109 static int cmp_call_arg(const void *a, const void *b, size_t n)
111 const be_abi_call_arg_t *p = a, *q = b;
112 return !(p->is_res == q->is_res && p->pos == q->pos);
115 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
117 be_abi_call_arg_t arg;
123 hash = is_res * 100 + pos;
126 ? set_insert(call->params, &arg, sizeof(arg), hash)
127 : set_find(call->params, &arg, sizeof(arg), hash);
130 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
132 return get_or_set_call_arg(call, is_res, pos, 0);
135 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, ir_type *between_type)
138 call->between_type = between_type;
141 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
143 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
146 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
148 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
153 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
155 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
160 be_abi_call_t *be_abi_call_new(void)
162 be_abi_call_t *call = malloc(sizeof(call[0]));
164 call->params = new_set(cmp_call_arg, 16);
168 void be_abi_call_free(be_abi_call_t *call)
170 del_set(call->params);
176 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
177 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
178 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
179 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
182 Handling of the stack frame. It is composed of three types:
183 1) The type of the arguments which are pushed on the stack.
184 2) The "between type" which consists of stuff the call of the
185 function pushes on the stack (like the return address and
186 the old base pointer for ia32).
187 3) The Firm frame type which consists of all local variables
191 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
193 type *t = get_entity_owner(ent);
194 int ofs = get_entity_offset_bytes(ent);
198 /* Find the type the entity is contained in. */
199 for(index = 0; index < N_FRAME_TYPES; ++index) {
200 if(frame->order[index] == t)
204 /* Add the size of all the types below the one of the entity to the entity's offset */
205 for(i = 0; i < index; ++i)
206 ofs += get_type_size_bytes(frame->order[i]);
208 /* correct the offset by the initial position of the frame pointer */
209 ofs -= frame->initial_offset;
211 /* correct the offset with the current bias. */
217 static entity *search_ent_with_offset(type *t, int offset)
221 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
222 entity *ent = get_class_member(t, i);
223 if(get_entity_offset_bytes(ent) == offset)
230 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
232 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
233 entity *ent = search_ent_with_offset(base, 0);
234 frame->initial_offset = 0;
235 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
236 return frame->initial_offset;
239 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
241 frame->arg_type = args;
242 frame->between_type = between;
243 frame->frame_type = locals;
244 frame->initial_offset = 0;
245 frame->stack_dir = stack_dir;
246 frame->order[1] = between;
249 frame->order[0] = args;
250 frame->order[2] = locals;
254 frame->order[0] = locals;
255 frame->order[2] = args;
261 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
265 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
266 for(j = 0; j < N_FRAME_TYPES; ++j) {
267 type *t = frame->order[j];
269 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
270 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
271 entity *ent = get_class_member(t, i);
272 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
278 * If irn is a Sel node computing the address of an entity
279 * on the frame type return the entity, else NULL.
281 static INLINE entity *get_sel_ent(ir_node *irn)
283 if(get_irn_opcode(irn) == iro_Sel
284 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
286 return get_Sel_entity(irn);
293 * Walker: Replaces Loads, Stores and Sels of frame type entities
294 * by FrameLoad, FrameStore and FrameAdress.
296 static void lower_frame_sels_walker(ir_node *irn, void *data)
298 const arch_register_class_t *cls;
299 be_abi_irg_t *env = data;
300 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
301 ir_graph *irg = get_irn_irg(irn);
302 ir_node *frame = get_irg_frame(irg);
304 opcode opc = get_irn_opcode(irn);
306 if(opc == iro_Load) {
307 ir_node *bl = get_nodes_block(irn);
308 ir_node *sel = get_Load_ptr(irn);
309 entity *ent = get_sel_ent(sel);
310 cls = arch_isa_get_reg_class_for_mode(isa, get_Load_mode(irn));
312 nw = be_new_FrameLoad(isa->sp->reg_class, cls, irg, bl, get_Load_mem(irn), frame, ent);
315 else if(opc == iro_Store) {
316 ir_node *bl = get_nodes_block(irn);
317 ir_node *val = get_Store_value(irn);
318 ir_node *sel = get_Store_ptr(irn);
319 entity *ent = get_sel_ent(sel);
320 cls = arch_isa_get_reg_class_for_mode(isa, get_irn_mode(val));
322 nw = be_new_FrameStore(isa->sp->reg_class, cls, irg, bl, get_Store_mem(irn), frame, val, ent);
326 entity *ent = get_sel_ent(irn);
328 ir_node *bl = get_nodes_block(irn);
329 nw = be_new_FrameAddr(isa->sp->reg_class, irg, bl, frame, ent);
337 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
339 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
340 return arg && !arg->in_reg;
350 Adjustment of the calls inside a graph.
355 * Transform a call node.
356 * @param env The ABI environment for the current irg.
357 * @param irn The call node.
358 * @param curr_sp The stack pointer node to use.
359 * @return The stack pointer after the call.
361 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
363 ir_graph *irg = env->birg->irg;
364 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
365 be_abi_call_t *call = be_abi_call_new();
366 ir_type *mt = get_Call_type(irn);
367 int n_params = get_method_n_params(mt);
368 ir_node *curr_mem = get_Call_mem(irn);
369 ir_node *bl = get_nodes_block(irn);
370 pset *results = pset_new_ptr(8);
371 pset *caller_save = pset_new_ptr(8);
373 int stack_dir = arch_isa_stack_dir(isa);
374 const arch_register_t *sp = arch_isa_sp(isa);
375 ir_mode *mach_mode = sp->reg_class->mode;
376 struct obstack *obst = &env->obst;
377 ir_node *no_mem = get_irg_no_mem(irg);
379 ir_node *res_proj = NULL;
380 int curr_res_proj = pn_Call_max;
387 const ir_edge_t *edge;
392 /* Let the isa fill out the abi description for that call node. */
393 arch_isa_get_call_abi(isa, mt, call);
395 /* Insert code to put the stack arguments on the stack. */
396 assert(get_Call_n_params(irn) == n_params);
397 for(i = 0; i < n_params; ++i) {
398 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
401 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
402 obstack_int_grow(obst, i);
406 pos = obstack_finish(obst);
408 /* Collect all arguments which are passed in registers. */
409 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
410 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
411 if(arg && arg->in_reg) {
412 obstack_int_grow(obst, i);
416 low_args = obstack_finish(obst);
418 /* If there are some parameters which shall be passed on the stack. */
421 int do_seq = call->flags.bits.store_args_sequential;
423 /* Reverse list of stack parameters if call arguments are from left to right */
424 if(call->flags.bits.left_to_right) {
425 for(i = 0; i < n_pos / 2; ++i) {
426 int other = n_pos - i - 1;
434 * If the stack is decreasing and we do not want to store sequentially,
435 * we allocate as much space on the stack all parameters need, by
436 * moving the stack pointer along the stack's direction.
438 if(stack_dir < 0 && !do_seq) {
439 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
442 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
443 for(i = 0; i < n_pos; ++i) {
445 ir_node *param = get_Call_param(irn, p);
446 ir_node *addr = curr_sp;
448 type *param_type = get_method_param_type(mt, p);
449 int param_size = get_type_size_bytes(param_type);
451 /* Make the expression to compute the argument's offset. */
453 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
454 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
457 /* Insert a store for primitive arguments. */
458 if(is_atomic_type(param_type)) {
459 mem = new_r_Store(irg, bl, curr_mem, addr, param);
460 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
463 /* Make a memcopy for compound arguments. */
465 assert(mode_is_reference(get_irn_mode(param)));
466 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
467 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
470 obstack_ptr_grow(obst, mem);
472 curr_ofs += param_size;
475 * If we wanted to build the arguments sequentially,
476 * the stack pointer for the next must be incremented,
477 * and the memory value propagated.
481 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
486 in = (ir_node **) obstack_finish(obst);
488 /* We need the sync only, if we didn't build the stores sequentially. */
490 curr_mem = new_r_Sync(irg, bl, n_pos, in);
491 obstack_free(obst, in);
494 /* Collect caller save registers */
495 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
497 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
498 for(j = 0; j < cls->n_regs; ++j) {
499 const arch_register_t *reg = arch_register_for_index(cls, j);
500 if(arch_register_type_is(reg, caller_save))
501 pset_insert_ptr(caller_save, (void *) reg);
505 /* search the greatest result proj number */
506 foreach_out_edge(irn, edge) {
507 const ir_edge_t *res_edge;
508 ir_node *irn = get_edge_src_irn(edge);
510 if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
512 foreach_out_edge(irn, res_edge) {
514 be_abi_call_arg_t *arg;
515 ir_node *res = get_edge_src_irn(res_edge);
517 assert(is_Proj(res));
519 proj = get_Proj_proj(res);
520 arg = get_call_arg(call, 1, proj);
523 shift the proj number to the right, since we will drop the
524 unspeakable Proj_T from the Call. Therefore, all real argument
525 Proj numbers must be increased by pn_Call_max
528 set_Proj_proj(res, proj);
529 obstack_ptr_grow(obst, res);
531 if(proj > curr_res_proj)
532 curr_res_proj = proj;
534 pset_remove_ptr(caller_save, arg->reg);
539 obstack_ptr_grow(obst, NULL);
540 res_projs = obstack_finish(obst);
542 /* make the back end call node and set its register requirements. */
543 for(i = 0; i < n_low_args; ++i)
544 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
546 in = obstack_finish(obst);
547 // if(env->call->flags.bits.call_has_imm && get_irn_opcode());
548 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
549 obstack_free(obst, in);
550 exchange(irn, low_call);
552 /* redirect the result projs to the lowered call instead of the Proj_T */
553 for(i = 0; res_projs[i]; ++i)
554 set_Proj_pred(res_projs[i], low_call);
556 /* Make additional projs for the caller save registers
557 and the Keep node which keeps them alive. */
558 if(pset_count(caller_save) > 0) {
559 const arch_register_t *reg;
563 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
564 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj++);
566 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
567 set_irn_link(proj, (void *) reg);
568 obstack_ptr_grow(obst, proj);
571 in = (ir_node **) obstack_finish(obst);
572 keep = be_new_Keep(NULL, irg, bl, n, in);
573 for(i = 0; i < n; ++i) {
574 const arch_register_t *reg = get_irn_link(in[i]);
575 be_node_set_reg_class(keep, i, reg->reg_class);
577 obstack_free(obst, in);
580 /* Clean up the stack. */
582 ir_node *mem_proj = NULL;
584 foreach_out_edge(low_call, edge) {
585 ir_node *irn = get_edge_src_irn(edge);
586 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
593 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
595 /* Make a Proj for the stack pointer. */
596 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_against);
599 be_abi_call_free(call);
600 obstack_free(obst, pos);
602 del_pset(caller_save);
609 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
611 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
613 if(get_Alloc_where(alloc) == stack_alloc) {
616 ir_node *bl = get_nodes_block(alloc);
617 ir_graph *irg = get_irn_irg(bl);
619 new_alloc = be_new_Alloca(env->isa->sp, irg, bl, get_Alloc_mem(alloc), curr_sp, get_Alloc_size(alloc));
620 exchange(alloc, new_alloc);
621 curr_sp = new_r_Proj(irg, bl, new_alloc, mode_P, pn_Alloc_res);
628 * Walker for dependent_on().
629 * This function searches a node tgt recursively from a given node
630 * but is restricted to the given block.
631 * @return 1 if tgt was reachable from curr, 0 if not.
633 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
637 if(get_irn_visited(curr) >= visited_nr)
640 set_irn_visited(curr, visited_nr);
641 if(get_nodes_block(curr) != bl)
647 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
648 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
656 * Check if a node is somehow data dependent on another one.
657 * both nodes must be in the same basic block.
658 * @param n1 The first node.
659 * @param n2 The second node.
660 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
662 static int dependent_on(ir_node *n1, ir_node *n2)
664 ir_node *bl = get_nodes_block(n1);
665 ir_graph *irg = get_irn_irg(bl);
666 long vis_nr = get_irg_visited(irg) + 1;
668 assert(bl == get_nodes_block(n2));
669 set_irg_visited(irg, vis_nr);
670 return check_dependence(n1, n2, bl, vis_nr);
673 static int cmp_call_dependecy(const void *c1, const void *c2)
675 ir_node *n1 = *(ir_node **) c1;
676 ir_node *n2 = *(ir_node **) c2;
679 Classical qsort() comparison function behavior:
680 0 if both elements are equal
681 1 if second is "smaller" that first
682 -1 if first is "smaller" that second
684 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
687 static void link_calls_in_block_walker(ir_node *irn, void *data)
690 ir_node *bl = get_nodes_block(irn);
691 void *save = get_irn_link(bl);
693 set_irn_link(irn, save);
694 set_irn_link(bl, irn);
699 * Process all call nodes inside a basic block.
700 * Note that the link field of the block must contain a linked list of all
701 * Call nodes inside the block. We first order this list according to data dependency
702 * and that connect the calls together.
704 static void process_calls_in_block(ir_node *bl, void *data)
706 be_abi_irg_t *env = data;
707 ir_node *curr_sp = env->init_sp;
711 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
712 obstack_ptr_grow(&env->obst, irn);
714 /* If there were call nodes in the block. */
719 nodes = obstack_finish(&env->obst);
721 /* order the call nodes according to data dependency */
722 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
724 for(i = n - 1; i >= 0; --i) {
725 ir_node *irn = nodes[i];
727 switch(get_irn_opcode(irn)) {
729 curr_sp = adjust_call(env, irn, curr_sp);
732 curr_sp = adjust_alloc(env, irn, curr_sp);
739 obstack_free(&env->obst, nodes);
741 /* Keep the last stack state in the block by tying it to Keep node */
743 be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
746 set_irn_link(bl, curr_sp);
750 * Adjust all call nodes in the graph to the ABI conventions.
752 static void process_calls(be_abi_irg_t *env)
754 ir_graph *irg = env->birg->irg;
756 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, NULL);
757 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
762 * Walker to implement alloca-style allocations.
763 * They are implemented using an add to the stack pointer
764 * and a copy instruction.
766 static void implement_stack_alloc(be_abi_irg_t *env, ir_node *irn)
768 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
769 ir_node *bl = get_nodes_block(irn);
770 ir_node *res = env->init_sp;
773 assert(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc);
775 size = get_Alloc_size(irn);
776 if(isa->stack_dir > 0)
777 res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
779 res = be_new_AddSP(isa->sp, env->birg->irg, bl, res, size);
781 if(isa->stack_dir < 0)
782 res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
787 static void collect_return_walker(ir_node *irn, void *data)
789 if(get_irn_opcode(irn) == iro_Return) {
790 struct obstack *obst = data;
791 obstack_ptr_grow(obst, irn);
795 static ir_node *setup_frame(be_abi_irg_t *env)
797 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
798 const arch_register_t *sp = isa->sp;
799 const arch_register_t *bp = isa->bp;
800 be_abi_call_flags_bits_t flags = env->call->flags.bits;
801 ir_graph *irg = env->birg->irg;
802 ir_node *bl = get_irg_start_block(irg);
803 ir_node *no_mem = get_irg_no_mem(irg);
804 ir_node *old_frame = get_irg_frame(irg);
805 ir_node *stack = pmap_get(env->regs, (void *) sp);
806 ir_node *frame = pmap_get(env->regs, (void *) bp);
808 int stack_nr = get_Proj_proj(stack);
810 if(flags.try_omit_fp) {
811 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
816 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
818 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
820 be_set_constr_single_reg(frame, -1, bp);
821 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
822 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
825 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_along);
828 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
829 env->init_sp = stack;
830 set_irg_frame(irg, frame);
831 edges_reroute(old_frame, frame, irg);
836 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, struct obstack *obst)
838 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
839 const arch_register_t *sp = isa->sp;
840 const arch_register_t *bp = isa->bp;
841 ir_graph *irg = env->birg->irg;
842 ir_node *ret_mem = get_Return_mem(ret);
843 ir_node *frame = get_irg_frame(irg);
844 ir_node *bl = get_nodes_block(ret);
845 ir_node *stack = get_irn_link(bl);
849 if(env->call->flags.bits.try_omit_fp) {
850 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
854 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
855 be_set_constr_single_reg(stack, -1, sp);
856 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
859 pmap_foreach(env->regs, ent) {
860 const arch_register_t *reg = ent->key;
861 ir_node *irn = ent->value;
868 obstack_ptr_grow(obst, irn);
872 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
874 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
875 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
876 int n = get_method_n_params(method_type);
877 int curr = inc > 0 ? 0 : n - 1;
884 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
885 res = new_type_class(new_id_from_str(buf));
887 for(i = 0; i < n; ++i, curr += inc) {
888 type *param_type = get_method_param_type(method_type, curr);
889 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
892 snprintf(buf, sizeof(buf), "param_%d", i);
893 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
894 set_entity_offset_bytes(arg->stack_ent, ofs);
895 ofs += get_type_size_bytes(param_type);
899 set_type_size_bytes(res, ofs);
904 * Modify the irg itself and the frame type.
906 static void modify_irg(be_abi_irg_t *env)
908 firm_dbg_module_t *dbg = env->dbg;
909 be_abi_call_t *call = be_abi_call_new();
910 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
911 const arch_register_t *sp = arch_isa_sp(isa);
912 ir_graph *irg = env->birg->irg;
913 ir_node *bl = get_irg_start_block(irg);
914 ir_node *end = get_irg_end_block(irg);
915 ir_node *arg_tuple = get_irg_args(irg);
916 ir_node *no_mem = get_irg_no_mem(irg);
917 type *method_type = get_entity_type(get_irg_entity(irg));
918 int n_params = get_method_n_params(method_type);
920 int reg_params_nr = 0;
925 ir_node *frame_pointer;
926 ir_node *reg_params_bl;
927 ir_node **args, **args_repl;
928 const ir_edge_t *edge;
933 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
935 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
936 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
938 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
939 env->regs = pmap_create();
941 /* Find the maximum proj number of the argument tuple proj */
942 foreach_out_edge(arg_tuple, edge) {
943 ir_node *irn = get_edge_src_irn(edge);
944 int nr = get_Proj_proj(irn);
945 max_arg = MAX(max_arg, nr);
948 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
949 args_repl = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
950 memset(args, 0, max_arg * sizeof(args[0]));
951 memset(args_repl, 0, max_arg * sizeof(args[0]));
953 /* Fill the argument vector */
954 foreach_out_edge(arg_tuple, edge) {
955 ir_node *irn = get_edge_src_irn(edge);
956 int nr = get_Proj_proj(irn);
958 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
961 /* Get the ABI constraints from the ISA */
962 arch_isa_get_call_abi(isa, method_type, call);
964 arg_type = compute_arg_type(env, call, method_type);
965 stack_frame_init(env->frame, arg_type, call->between_type, get_irg_frame_type(irg), isa->stack_dir);
967 /* Count the register params and add them to the number of Projs for the RegParams node */
968 for(i = 0; i < n_params; ++i) {
969 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
971 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
972 pmap_insert(env->regs, (void *) arg->reg, NULL);
973 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
977 /* Collect all callee-save registers */
978 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
979 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
980 for(j = 0; j < cls->n_regs; ++j) {
981 const arch_register_t *reg = &cls->regs[j];
982 if(arch_register_type_is(reg, callee_save))
983 pmap_insert(env->regs, (void *) reg, NULL);
987 pmap_insert(env->regs, (void *) sp, NULL);
988 pmap_insert(env->regs, (void *) isa->bp, NULL);
990 reg_params_bl = get_irg_start_block(irg);
991 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
994 * make proj nodes for the callee save registers.
995 * memorize them, since Return nodes get those as inputs.
997 for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs)) {
998 arch_register_t *reg = ent->key;
999 int pos = -(reg_params_nr + 1);
1000 ent->value = new_r_Proj(irg, reg_params_bl, env->reg_params, reg->reg_class->mode, reg_params_nr);
1001 be_set_constr_single_reg(env->reg_params, pos, reg);
1002 arch_set_irn_register(env->birg->main_env->arch_env, ent->value, reg);
1005 * If the register is an ignore register,
1006 * The Proj for that register shall also be ignored during register allocation.
1008 if(arch_register_type_is(reg, ignore))
1009 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1013 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", reg_params_nr - 1, reg->name));
1016 /* Insert the code to set up the stack frame */
1017 frame_pointer = setup_frame(env);
1019 /* Now, introduce stack param nodes for all parameters passed on the stack */
1020 for(i = 0; i < max_arg; ++i) {
1021 ir_node *arg_proj = args[i];
1022 if(arg_proj != NULL) {
1023 be_abi_call_arg_t *arg;
1024 ir_type *param_type;
1025 int nr = get_Proj_proj(arg_proj);
1027 nr = MIN(nr, n_params);
1028 arg = get_call_arg(call, 0, nr);
1029 param_type = get_method_param_type(method_type, nr);
1032 args_repl[i] = new_r_Proj(irg, reg_params_bl, env->reg_params, get_irn_mode(arg_proj), reg_params_nr);
1033 be_set_constr_single_reg(env->reg_params, -(reg_params_nr + 1), arg->reg);
1037 /* when the (stack) parameter is primitive, we insert a StackParam
1038 node representing the load of that parameter */
1041 /* For atomic parameters which are actually used, we create a StackParam node. */
1042 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1043 ir_mode *mode = get_type_mode(param_type);
1044 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1045 args_repl[i] = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1048 /* The stack parameter is not primitive (it is a struct or array),
1049 we thus will create a node representing the parameter's address
1052 args_repl[i] = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1058 /* reroute the edges from the original argument projs to the RegParam ones. */
1059 for(i = 0; i < max_arg; ++i) {
1060 if(args[i] != NULL) {
1061 assert(args_repl[i] != NULL);
1062 edges_reroute(args[i], args_repl[i], irg);
1066 /* All Return nodes hang on the End node, so look for them there. */
1067 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1068 ir_node *irn = get_irn_n(end, i);
1070 if(get_irn_opcode(irn) == iro_Return) {
1071 ir_node *bl = get_nodes_block(irn);
1072 int n_res = get_Return_n_ress(irn);
1073 pmap *reg_map = pmap_create_ex(n_res);
1078 /* collect all arguments of the return */
1079 obstack_ptr_grow(&env->obst, get_Return_mem(irn));
1080 for(i = 0; i < n_res; ++i) {
1081 ir_node *res = get_Return_res(irn, i);
1082 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1084 assert(arg->in_reg && "return value must be passed in register");
1085 pmap_insert(reg_map, res, (void *) arg->reg);
1086 obstack_ptr_grow(&env->obst, res);
1089 /* generate the clean up code and add additional parameters to the return. */
1090 clearup_frame(env, irn, &env->obst);
1092 /* The in array for the new back end return is now ready. */
1093 n = obstack_object_size(&env->obst) / sizeof(in[0]);
1094 in = obstack_finish(&env->obst);
1095 ret = be_new_Return(irg, bl, n, in);
1097 /* Set the constraints for some arguments of the return. */
1098 for(i = 0; i < n; i++) {
1099 const arch_register_t *reg = pmap_get(reg_map, in[i]);
1101 be_set_constr_single_reg(ret, i, reg);
1104 obstack_free(&env->obst, in);
1105 pmap_destroy(reg_map);
1109 obstack_free(&env->obst, args);
1110 be_abi_call_free(call);
1114 * Walker: puts all Alloc(stack_alloc) on a obstack
1116 static void collect_alloca_walker(ir_node *irn, void *data)
1118 be_abi_irg_t *env = data;
1119 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1120 obstack_ptr_grow(&env->obst, irn);
1123 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
1125 be_abi_irg_t *env = malloc(sizeof(env[0]));
1130 env->isa = birg->main_env->arch_env->isa;
1131 env->method_type = get_entity_type(get_irg_entity(birg->irg));
1132 env->call = be_abi_call_new();
1133 arch_isa_get_call_abi(env->isa, env->method_type, env->call);
1135 env->dce_survivor = new_survive_dce();
1137 env->dbg = firm_dbg_register("firm.be.abi");
1138 env->stack_phis = pset_new_ptr(16);
1139 env->init_sp = dummy = new_r_Unknown(birg->irg, env->isa->sp->reg_class->mode);
1141 obstack_init(&env->obst);
1143 memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
1144 env->irn_ops.impl = &abi_irn_ops;
1146 /* Lower all call nodes in the IRG. */
1149 /* Process the IRG */
1152 /* reroute the stack origin of the calls to the true stack origin. */
1153 exchange(dummy, env->init_sp);
1155 /* Make some important node pointers survive the dead node elimination. */
1156 survive_dce_register_irn(env->dce_survivor, &env->init_sp);
1157 for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs))
1158 survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
1160 arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
1165 static void collect_stack_nodes(ir_node *irn, void *data)
1169 switch(be_get_irn_opcode(irn)) {
1173 pset_insert_ptr(s, irn);
1179 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
1181 dom_front_info_t *df;
1184 /* We need dominance frontiers for fix up */
1185 df = be_compute_dominance_frontiers(env->birg->irg);
1187 stack_ops = pset_new_ptr_default();
1188 pset_insert_ptr(stack_ops, env->init_sp);
1189 irg_walk_graph(env->birg->irg, collect_stack_nodes, NULL, stack_ops);
1190 be_ssa_constr_set_phis(df, stack_ops, env->stack_phis);
1191 del_pset(stack_ops);
1193 /* free these dominance frontiers */
1194 be_free_dominance_frontiers(df);
1198 * Translates a direction of an IncSP node (either be_stack_dir_against, or ...along)
1199 * into -1 or 1, respectively.
1200 * @param irn The node.
1201 * @return 1, if the direction of the IncSP was along, -1 if against.
1203 static int get_dir(ir_node *irn)
1205 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
1208 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
1210 const arch_env_t *aenv = env->birg->main_env->arch_env;
1212 int start_bias = bias;
1213 int omit_fp = env->call->flags.bits.try_omit_fp;
1215 sched_foreach(bl, irn) {
1218 If the node modifies the stack pointer by a constant offset,
1219 record that in the bias.
1221 if(be_is_IncSP(irn)) {
1222 int ofs = be_get_IncSP_offset(irn);
1223 int dir = get_dir(irn);
1225 if(ofs == BE_STACK_FRAME_SIZE) {
1226 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
1227 be_set_IncSP_offset(irn, ofs);
1235 Else check, if the node relates to an entity on the stack frame.
1236 If so, set the true offset (including the bias) for that
1240 entity *ent = arch_get_frame_entity(aenv, irn);
1242 int offset = get_stack_entity_offset(env->frame, ent, bias);
1243 arch_set_frame_offset(aenv, irn, offset);
1244 DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
1253 * A helper struct for the bias walker.
1256 be_abi_irg_t *env; /**< The ABI irg environment. */
1257 int start_block_bias; /**< The bias at the end of the start block. */
1260 static void stack_bias_walker(ir_node *bl, void *data)
1262 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1263 struct bias_walk *bw = data;
1264 process_stack_bias(bw->env, bl, bw->start_block_bias);
1268 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1270 ir_graph *irg = env->birg->irg;
1271 struct bias_walk bw;
1273 stack_frame_compute_initial_offset(env->frame);
1274 stack_frame_dump(stdout, env->frame);
1276 /* Determine the stack bias at the and of the start block. */
1277 bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1279 /* fix the bias is all other blocks */
1281 irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1284 void be_abi_free(be_abi_irg_t *env)
1286 free_survive_dce(env->dce_survivor);
1287 del_pset(env->stack_phis);
1288 pmap_destroy(env->regs);
1289 obstack_free(&env->obst, NULL);
1290 arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1294 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1296 assert(arch_register_type_is(reg, callee_save));
1297 assert(pmap_contains(abi->regs, (void *) reg));
1298 return pmap_get(abi->regs, (void *) reg);
1302 _____ _____ _ _ _ _ _ _
1303 |_ _| __ \| \ | | | | | | | | |
1304 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1305 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1306 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1307 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1309 for Phi nodes which are created due to stack modifying nodes
1310 such as IncSP, AddSP and SetSP.
1312 These Phis are always to be ignored by the reg alloc and are
1313 fixed on the SP register of the ISA.
1316 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1318 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1319 return is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn) != NULL ? &abi->irn_ops : NULL;
1322 static void be_abi_limited(void *data, bitset_t *bs)
1324 be_abi_irg_t *abi = data;
1325 bitset_clear_all(bs);
1326 bitset_set(bs, abi->isa->sp->index);
1329 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1331 be_abi_irg_t *abi = get_abi_from_ops(self);
1332 const arch_register_t *reg = abi->isa->sp;
1334 req->cls = reg->reg_class;
1335 req->type = arch_register_req_type_limited;
1336 req->limited = be_abi_limited;
1337 req->limited_env = abi;
1341 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1345 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1347 const be_abi_irg_t *abi = get_abi_from_ops(self);
1348 return abi->isa->sp;
1351 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1353 return arch_irn_class_normal;
1356 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1358 return arch_irn_flags_ignore;
1361 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1366 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
1370 static const arch_irn_ops_if_t abi_irn_ops = {
1371 abi_get_irn_reg_req,
1376 abi_get_frame_entity,
1380 static const arch_irn_handler_t abi_irn_handler = {