4 * @author Sebastian Hack
16 #include "irgraph_t.h"
19 #include "iredges_t.h"
22 #include "irprintf_t.h"
29 #include "besched_t.h"
31 #define MAX(x, y) ((x) > (y) ? (x) : (y))
32 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/**
 * Descriptor for a single call argument (or result) in the ABI.
 * Stored in the call's param set, keyed by (is_res, pos) — see cmp_call_arg().
 */
34 typedef struct _be_abi_call_arg_t {
37 unsigned on_stack : 1;   /**< 1 if this argument is passed on the stack. */
40 const arch_register_t *reg;   /**< The register used when passed in a register. */
/**
 * ABI description for one call (or for the current function's own signature).
 * Filled in by the ISA via arch_isa_get_call_abi().
 */
44 struct _be_abi_call_t {
45 be_abi_call_flags_t flags;        /**< ABI flags (calling convention details). */
46 const be_abi_callbacks_t *cb;     /**< Backend callbacks for prologue/epilogue etc. */
51 #define N_FRAME_TYPES 3
/**
 * Model of the stack frame, composed of N_FRAME_TYPES (= 3) partial types:
 * the argument type, the "between" type, and the local frame type.
 */
53 typedef struct _be_stack_frame_t {
58 type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
/** A slot inside a stack frame; back-references its owning frame model. */
64 struct _be_stack_slot_t {
65 struct _be_stack_frame_t *frame;   /**< The frame this slot belongs to. */
/**
 * Per-irg ABI environment. Created by be_abi_introduce() and threaded
 * through all the walkers and transformation helpers in this file.
 */
69 struct _be_abi_irg_t {
71 firm_dbg_module_t *dbg; /**< The debugging module. */
72 be_stack_frame_t *frame; /**< The stack frame model. */
73 const be_irg_t *birg; /**< The back end IRG. */
74 const arch_isa_t *isa; /**< The isa. */
75 survive_dce_t *dce_survivor; /**< Keeps selected node pointers alive across dead node elimination. */
77 be_abi_call_t *call; /**< The ABI call information. */
78 type *method_type; /**< The type of the method of the IRG. */
80 ir_node *init_sp; /**< The node representing the stack pointer
81 at the start of the function. */
83 ir_node *reg_params; /**< The reg params node. */
84 pmap *regs; /**< A map of all callee-save and ignore regs to
85 their Projs to the RegParams node. */
87 pset *stack_phis; /**< The set of all Phi nodes inserted due to
88 stack pointer modifying nodes. */
90 int start_block_bias; /**< The stack bias at the end of the start block. */
92 void *cb; /**< ABI Callback self pointer. */
94 arch_irn_handler_t irn_handler; /**< Embedded irn handler; recovered via get_abi_from_handler(). */
95 arch_irn_ops_t irn_ops; /**< Embedded irn ops; recovered via get_abi_from_ops(). */
/* container_of-style helpers: given a pointer to an embedded member of
 * be_abi_irg_t (irn_handler or irn_ops), recover the enclosing struct.
 * NOTE(review): computing offsets via a null pointer is the classic
 * offsetof() trick; offsetof() from <stddef.h> would be the standard form. */
98 #define abi_offset_of(type,member) ((char *) &(((type *) 0)->member) - (char *) 0)
99 #define abi_get_relative(ptr, member) ((void *) ((char *) (ptr) - abi_offset_of(be_abi_irg_t, member)))
100 #define get_abi_from_handler(ptr) abi_get_relative(ptr, irn_handler)
101 #define get_abi_from_ops(ptr) abi_get_relative(ptr, irn_ops)
103 /* Forward, since we need it in be_abi_introduce(). */
104 static const arch_irn_ops_if_t abi_irn_ops;
105 static const arch_irn_handler_t abi_irn_handler;
108 _ ____ ___ ____ _ _ _ _
109 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
110 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
111 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
112 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
114 These callbacks are used by the backend to set the parameters
115 for a specific call type.
/**
 * set comparison function for be_abi_call_arg_t entries.
 * Two entries are considered equal iff both the is_res flag and the
 * argument position match. Returns 0 on equality (set convention).
 */
118 static int cmp_call_arg(const void *a, const void *b, size_t n)
120 const be_abi_call_arg_t *p = a, *q = b;
121 return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Look up (and optionally insert) the argument descriptor for (is_res, pos)
 * in the call's param set.
 * @param do_insert  If non-zero, a zero-initialized entry is inserted when
 *                   none exists; otherwise NULL is returned on a miss.
 */
124 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
126 be_abi_call_arg_t arg;
129 memset(&arg, 0, sizeof(arg));
/* Hash mixes the result flag and the position. Assumes positions stay
 * well below collisions mattering — the set also compares via cmp_call_arg. */
133 hash = is_res * 100 + pos;
136 ? set_insert(call->params, &arg, sizeof(arg), hash)
137 : set_find(call->params, &arg, sizeof(arg), hash);
/** Read-only lookup of an argument descriptor; returns NULL if absent. */
140 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
142 return get_or_set_call_arg(call, is_res, pos, 0);
/** Set the ABI flags and backend callbacks for a call description. */
145 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
/** Mark the argument at arg_pos as passed on the stack. */
151 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
153 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/** Mark the argument at arg_pos as passed in register reg. */
157 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
159 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/** Mark the result at arg_pos as returned in register reg (is_res = 1). */
164 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
166 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/** Return the ABI flags recorded in a call description. */
171 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
/**
 * Allocate a fresh, empty call description.
 * Ownership transfers to the caller; release with be_abi_call_free().
 * NOTE(review): malloc result is used unchecked — confirm an OOM policy exists.
 */
176 be_abi_call_t *be_abi_call_new(void)
178 be_abi_call_t *call = malloc(sizeof(call[0]));
180 call->params = new_set(cmp_call_arg, 16);
/** Free a call description created by be_abi_call_new(). */
185 void be_abi_call_free(be_abi_call_t *call)
187 del_set(call->params);
193 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
194 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
195 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
196 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
199 Handling of the stack frame. It is composed of three types:
200 1) The type of the arguments which are pushed on the stack.
201 2) The "between type" which consists of stuff the call of the
202 function pushes on the stack (like the return address and
203 the old base pointer for ia32).
204 3) The Firm frame type which consists of all local variables
/**
 * Compute an entity's offset relative to the frame pointer position.
 * The offset is the entity's offset inside its own partial type, plus the
 * sizes of all partial frame types that lie below it, corrected by the
 * frame's initial offset and the given bias.
 */
208 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
210 type *t = get_entity_owner(ent);
211 int ofs = get_entity_offset_bytes(ent);
215 /* Find the type the entity is contained in. */
216 for(index = 0; index < N_FRAME_TYPES; ++index) {
217 if(frame->order[index] == t)
221 /* Add the size of all the types below the one of the entity to the entity's offset */
222 for(i = 0; i < index; ++i)
223 ofs += get_type_size_bytes(frame->order[i]);
225 /* correct the offset by the initial position of the frame pointer */
226 ofs -= frame->initial_offset;
228 /* correct the offset with the current bias. */
/**
 * Linear search for the member entity of class type t that sits at the
 * given byte offset. Presumably returns NULL when no member matches —
 * TODO confirm against the elided tail of this function.
 */
234 static entity *search_ent_with_offset(type *t, int offset)
238 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
239 entity *ent = get_class_member(t, i);
240 if(get_entity_offset_bytes(ent) == offset)
/**
 * Compute and cache the frame's initial offset: the position (relative to
 * the frame model) of the entity at offset 0 in the base type. For a
 * downwards-growing stack the between type is the base, else the frame type.
 * initial_offset is zeroed first so get_stack_entity_offset() measures
 * against an uncorrected frame.
 */
247 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
249 type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
250 entity *ent = search_ent_with_offset(base, 0);
251 frame->initial_offset = 0;
252 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
253 return frame->initial_offset;
/**
 * Initialize a stack frame model from its three partial types.
 * The between type always sits in the middle; the relative order of the
 * argument type and the locals type depends on the stack direction.
 */
256 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
258 frame->arg_type = args;
259 frame->between_type = between;
260 frame->frame_type = locals;
261 frame->initial_offset = 0;
262 frame->stack_dir = stack_dir;
263 frame->order[1] = between;
266 frame->order[0] = args;
267 frame->order[2] = locals;
271 frame->order[0] = locals;
272 frame->order[2] = args;
/** Debug helper: dump the frame model, its partial types and every member
 *  entity with both its local and frame-global offset. */
278 static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
282 ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
283 for(j = 0; j < N_FRAME_TYPES; ++j) {
284 type *t = frame->order[j];
286 ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
287 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
288 entity *ent = get_class_member(t, i);
289 ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
295 * If irn is a Sel node computing the address of an entity
296 * on the frame type return the entity, else NULL.
/* If irn is a Sel whose pointer is the graph's frame, it addresses a frame
 * entity; return that entity. Presumably returns NULL otherwise — the elided
 * tail is not visible here. */
298 static INLINE entity *get_sel_ent(ir_node *irn)
300 if(get_irn_opcode(irn) == iro_Sel
301 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
303 return get_Sel_entity(irn);
310 * Walker: Replaces Loads, Stores and Sels of frame type entities
311 * by FrameLoad, FrameStore and FrameAddress.
/**
 * Walker: replace Sels of frame entities by backend FrameAddr nodes.
 * @param data  The be_abi_irg_t environment.
 */
313 static void lower_frame_sels_walker(ir_node *irn, void *data)
316 entity *ent = get_sel_ent(irn);
319 be_abi_irg_t *env = data;
320 ir_node *bl = get_nodes_block(irn);
321 ir_graph *irg = get_irn_irg(bl);
322 ir_node *frame = get_irg_frame(irg);
/* The FrameAddr lives in the stack pointer's register class. */
324 nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
/** An argument counts as on-stack iff a descriptor exists for it and it is
 *  not marked as passed in a register. */
331 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
333 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
334 return arg && !arg->in_reg;
344 Adjustment of the calls inside a graph.
349 * Transform a call node.
350 * @param env The ABI environment for the current irg.
351 * @param irn The call node.
352 * @param curr_sp The stack pointer node to use.
353 * @return The stack pointer after the call.
/**
 * Transform a call node.
 * @param env The ABI environment for the current irg.
 * @param irn The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 *
 * Replaces a Firm Call by a backend be_Call: stack arguments get explicit
 * Stores/CopyBs relative to the stack pointer, register arguments become
 * constrained inputs, result Projs are renumbered, caller-save registers
 * get Projs kept alive by a Keep node, and the stack is cleaned up after
 * the call via an IncSP.
 */
355 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
357 ir_graph *irg = env->birg->irg;
358 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
359 be_abi_call_t *call = be_abi_call_new();
360 ir_type *mt = get_Call_type(irn);
361 ir_node *call_ptr = get_Call_ptr(irn);
362 int n_params = get_method_n_params(mt);
363 ir_node *curr_mem = get_Call_mem(irn);
364 ir_node *bl = get_nodes_block(irn);
365 pset *results = pset_new_ptr(8);
366 pset *caller_save = pset_new_ptr(8);
368 int stack_dir = arch_isa_stack_dir(isa);
369 const arch_register_t *sp = arch_isa_sp(isa);
370 ir_mode *mach_mode = sp->reg_class->mode;
371 struct obstack *obst = &env->obst;
372 ir_node *no_mem = get_irg_no_mem(irg);
374 ir_node *res_proj = NULL;
/* Result Proj numbers are shifted past pn_Call_max (see below). */
375 int curr_res_proj = pn_Call_max;
382 const ir_edge_t *edge;
387 /* Let the isa fill out the abi description for that call node. */
388 arch_isa_get_call_abi(isa, mt, call);
390 /* Insert code to put the stack arguments on the stack. */
391 assert(get_Call_n_params(irn) == n_params);
392 for(i = 0; i < n_params; ++i) {
393 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
396 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
397 obstack_int_grow(obst, i);
401 pos = obstack_finish(obst);
403 /* Collect all arguments which are passed in registers. */
404 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
405 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
406 if(arg && arg->in_reg) {
407 obstack_int_grow(obst, i);
411 low_args = obstack_finish(obst);
413 /* If there are some parameters which shall be passed on the stack. */
416 int do_seq = call->flags.bits.store_args_sequential;
418 /* Reverse list of stack parameters if call arguments are from left to right */
419 if(call->flags.bits.left_to_right) {
420 for(i = 0; i < n_pos / 2; ++i) {
421 int other = n_pos - i - 1;
429 * If the stack is decreasing and we do not want to store sequentially,
430 * we allocate as much space on the stack as all parameters need at once,
431 * by moving the stack pointer along the stack's direction.
433 if(stack_dir < 0 && !do_seq) {
434 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
437 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
438 for(i = 0; i < n_pos; ++i) {
440 ir_node *param = get_Call_param(irn, p);
441 ir_node *addr = curr_sp;
443 type *param_type = get_method_param_type(mt, p);
444 int param_size = get_type_size_bytes(param_type);
446 /* Make the expression to compute the argument's offset. */
448 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
449 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
452 /* Insert a store for primitive arguments. */
453 if(is_atomic_type(param_type)) {
454 mem = new_r_Store(irg, bl, curr_mem, addr, param);
455 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
458 /* Make a memcopy for compound arguments. */
460 assert(mode_is_reference(get_irn_mode(param)));
461 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
462 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
465 obstack_ptr_grow(obst, mem);
467 curr_ofs += param_size;
470 * If we wanted to build the arguments sequentially,
471 * the stack pointer for the next argument must be incremented,
472 * and the memory value propagated.
476 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
481 in = (ir_node **) obstack_finish(obst);
483 /* We need the sync only, if we didn't build the stores sequentially. */
485 curr_mem = new_r_Sync(irg, bl, n_pos, in);
486 obstack_free(obst, in);
489 /* Collect caller save registers */
490 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
492 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
493 for(j = 0; j < cls->n_regs; ++j) {
494 const arch_register_t *reg = arch_register_for_index(cls, j);
495 if(arch_register_type_is(reg, caller_save))
496 pset_insert_ptr(caller_save, (void *) reg);
500 /* search the greatest result proj number */
501 foreach_out_edge(irn, edge) {
502 const ir_edge_t *res_edge;
503 ir_node *irn = get_edge_src_irn(edge);
505 if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
507 foreach_out_edge(irn, res_edge) {
509 be_abi_call_arg_t *arg;
510 ir_node *res = get_edge_src_irn(res_edge);
512 assert(is_Proj(res));
514 proj = get_Proj_proj(res);
515 arg = get_call_arg(call, 1, proj);
518 shift the proj number to the right, since we will drop the
519 unspeakable Proj_T from the Call. Therefore, all real argument
520 Proj numbers must be increased by pn_Call_max
523 set_Proj_proj(res, proj);
524 obstack_ptr_grow(obst, res);
526 if(proj > curr_res_proj)
527 curr_res_proj = proj;
/* A result delivered in a caller-save register keeps its value live
 * across the call, so remove that register from the clobber set. */
529 pset_remove_ptr(caller_save, arg->reg);
534 obstack_ptr_grow(obst, NULL);
535 res_projs = obstack_finish(obst);
537 /* make the back end call node and set its register requirements. */
538 for(i = 0; i < n_low_args; ++i)
539 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
541 in = obstack_finish(obst);
/* Calls to a SymConst address can be emitted as immediate calls. */
542 if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
543 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, curr_sp, curr_res_proj, n_low_args, in);
544 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
548 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, call_ptr, curr_res_proj, n_low_args, in);
550 /* Set the register classes and constraints of the Call parameters. */
551 for(i = 0; i < n_low_args; ++i) {
552 int index = low_args[i];
553 be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
554 assert(arg->reg != NULL);
555 be_set_constr_single_reg(low_call, index, arg->reg);
558 /* Set the register constraints of the results. */
559 for(i = 0; res_projs[i]; ++i) {
560 ir_node *irn = res_projs[i];
561 int proj = get_Proj_proj(irn);
563 /* Correct Proj number since it has been adjusted! (see above) */
564 const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
567 be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
569 obstack_free(obst, in);
570 exchange(irn, low_call);
572 /* redirect the result projs to the lowered call instead of the Proj_T */
573 for(i = 0; res_projs[i]; ++i)
574 set_Proj_pred(res_projs[i], low_call);
576 /* Make additional projs for the caller save registers
577 and the Keep node which keeps them alive. */
578 if(pset_count(caller_save) > 0) {
579 const arch_register_t *reg;
583 for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
584 ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj++);
586 /* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
587 set_irn_link(proj, (void *) reg);
588 obstack_ptr_grow(obst, proj);
591 in = (ir_node **) obstack_finish(obst);
592 keep = be_new_Keep(NULL, irg, bl, n, in);
593 for(i = 0; i < n; ++i) {
594 const arch_register_t *reg = get_irn_link(in[i]);
595 be_node_set_reg_class(keep, i, reg->reg_class);
597 obstack_free(obst, in);
600 /* Clean up the stack. */
602 ir_node *mem_proj = NULL;
604 foreach_out_edge(low_call, edge) {
605 ir_node *irn = get_edge_src_irn(edge);
606 if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
613 mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
615 /* Make a Proj for the stack pointer. */
616 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_against);
619 be_abi_call_free(call);
620 obstack_free(obst, pos);
622 del_pset(caller_save);
629 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
/**
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 * Only stack Allocs are touched; a dynamic alloca forces the frame pointer
 * (try_omit_fp is cleared). The Alloc's result and memory Projs are rewired
 * to the new AddSP / NoMem respectively.
 */
631 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
633 if(get_Alloc_where(alloc) == stack_alloc) {
634 ir_node *bl = get_nodes_block(alloc);
635 ir_graph *irg = get_irn_irg(bl);
636 ir_node *alloc_mem = NULL;
637 ir_node *alloc_res = NULL;
639 const ir_edge_t *edge;
/* Dynamic stack allocation rules out frame pointer omission. */
642 env->call->flags.bits.try_omit_fp = 0;
644 new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
646 foreach_out_edge(alloc, edge) {
647 ir_node *irn = get_edge_src_irn(edge);
649 assert(is_Proj(irn));
650 switch(get_Proj_proj(irn)) {
662 assert(alloc_res != NULL);
/* On a downwards stack the new SP is the block's address; otherwise the old SP is. */
663 exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
665 if(alloc_mem != NULL)
666 exchange(alloc_mem, new_r_NoMem(irg));
675 * Walker for dependent_on().
676 * This function searches a node tgt recursively from a given node
677 * but is restricted to the given block.
678 * @return 1 if tgt was reachable from curr, 0 if not.
/**
 * Walker for dependent_on().
 * This function searches a node tgt recursively from a given node
 * but is restricted to the given block.
 * Uses the irg visited number to cut off already-seen nodes.
 * @return 1 if tgt was reachable from curr, 0 if not.
 */
680 static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
684 if(get_irn_visited(curr) >= visited_nr)
687 set_irn_visited(curr, visited_nr);
688 if(get_nodes_block(curr) != bl)
694 for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
695 if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
703 * Check if a node is somehow data dependent on another one.
704 * both nodes must be in the same basic block.
705 * @param n1 The first node.
706 * @param n2 The second node.
707 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
709 static int dependent_on(ir_node *n1, ir_node *n2)
711 ir_node *bl = get_nodes_block(n1);
712 ir_graph *irg = get_irn_irg(bl);
/* Bump the visited counter so the recursive walk sees a fresh marking. */
713 long vis_nr = get_irg_visited(irg) + 1;
715 assert(bl == get_nodes_block(n2));
716 set_irg_visited(irg, vis_nr);
717 return check_dependence(n1, n2, bl, vis_nr);
/* qsort comparator ordering calls by data dependency (dependent calls sort
 * earlier). NOTE(review): name has a typo ("dependecy"); kept since callers
 * outside this excerpt may reference it. */
720 static int cmp_call_dependecy(const void *c1, const void *c2)
722 ir_node *n1 = *(ir_node **) c1;
723 ir_node *n2 = *(ir_node **) c2;
726 Classical qsort() comparison function behavior:
727 0 if both elements are equal
728 1 if second is "smaller" than first
729 -1 if first is "smaller" than second
731 return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
/** Walker: prepend irn to a singly linked list threaded through the link
 *  fields, anchored at the node's block. Consumed by process_calls_in_block(). */
734 static void link_calls_in_block_walker(ir_node *irn, void *data)
737 ir_node *bl = get_nodes_block(irn);
738 void *save = get_irn_link(bl);
740 set_irn_link(irn, save);
741 set_irn_link(bl, irn);
746 * Process all call nodes inside a basic block.
747 * Note that the link field of the block must contain a linked list of all
748 * Call nodes inside the block. We first order this list according to data dependency
749 and then connect the calls together.
/**
 * Process all call nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * Call nodes inside the block. We first order this list according to data dependency
 * and then connect the calls together.
 */
751 static void process_calls_in_block(ir_node *bl, void *data)
753 be_abi_irg_t *env = data;
754 ir_node *curr_sp = env->init_sp;
/* Gather the linked list built by link_calls_in_block_walker(). */
758 for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
759 obstack_ptr_grow(&env->obst, irn);
761 /* If there were call nodes in the block. */
766 nodes = obstack_finish(&env->obst);
768 /* order the call nodes according to data dependency */
769 qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
/* Walk in reverse: the comparator sorts dependents first. */
771 for(i = n - 1; i >= 0; --i) {
772 ir_node *irn = nodes[i];
774 switch(get_irn_opcode(irn)) {
776 curr_sp = adjust_call(env, irn, curr_sp);
779 curr_sp = adjust_alloc(env, irn, curr_sp);
786 obstack_free(&env->obst, nodes);
788 /* Keep the last stack state in the block by tying it to Keep node */
790 be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
/* Record the final stack pointer of the block in its link field. */
793 set_irn_link(bl, curr_sp);
797 * Adjust all call nodes in the graph to the ABI conventions.
/**
 * Adjust all call nodes in the graph to the ABI conventions.
 * First pass links calls into per-block lists, second pass processes them.
 */
799 static void process_calls(be_abi_irg_t *env)
801 ir_graph *irg = env->birg->irg;
803 irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, NULL);
804 irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
/** Walker: collect all Return nodes on the obstack passed as data. */
807 static void collect_return_walker(ir_node *irn, void *data)
809 if(get_irn_opcode(irn) == iro_Return) {
810 struct obstack *obst = data;
811 obstack_ptr_grow(obst, irn);
/**
 * Build the function's stack frame setup in the start block.
 * With frame pointer omission only an IncSP is emitted; otherwise the frame
 * pointer is produced as a pinned Copy of the stack pointer. The irg frame
 * is rerouted to the new frame node and env->init_sp is updated.
 */
815 static ir_node *setup_frame(be_abi_irg_t *env)
817 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
818 const arch_register_t *sp = isa->sp;
819 const arch_register_t *bp = isa->bp;
820 be_abi_call_flags_bits_t flags = env->call->flags.bits;
821 ir_graph *irg = env->birg->irg;
822 ir_node *bl = get_irg_start_block(irg);
823 ir_node *no_mem = get_irg_no_mem(irg);
824 ir_node *old_frame = get_irg_frame(irg);
825 ir_node *stack = pmap_get(env->regs, (void *) sp);
826 ir_node *frame = pmap_get(env->regs, (void *) bp);
828 int stack_nr = get_Proj_proj(stack);
830 if(flags.try_omit_fp) {
831 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
/* No FP omission: materialize the frame pointer as a copy of the SP,
 * pin it to bp and exclude it from spilling / allocation. */
836 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
838 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
840 be_set_constr_single_reg(frame, -1, bp);
841 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
842 arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
845 stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_along);
848 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
849 env->init_sp = stack;
850 set_irg_frame(irg, frame);
851 edges_reroute(old_frame, frame, irg);
/**
 * Tear down the stack frame before a Return.
 * With FP omission an IncSP restores the SP; otherwise the SP is reset from
 * the frame pointer via SetSP. Register nodes needed by the Return are
 * collected on the given obstack.
 */
856 static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
858 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
859 const arch_register_t *sp = isa->sp;
860 const arch_register_t *bp = isa->bp;
861 ir_graph *irg = env->birg->irg;
862 ir_node *ret_mem = get_Return_mem(ret);
863 ir_node *frame = get_irg_frame(irg);
864 ir_node *bl = get_nodes_block(ret);
/* The block's final stack state was recorded in the link field
 * by process_calls_in_block(). */
865 ir_node *stack = get_irn_link(bl);
869 if(env->call->flags.bits.try_omit_fp) {
870 stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
874 stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
875 be_set_constr_single_reg(stack, -1, sp);
876 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
879 pmap_foreach(env->regs, ent) {
880 const arch_register_t *reg = ent->key;
881 ir_node *irn = ent->value;
888 obstack_ptr_grow(obst, irn);
/**
 * Build the class type describing all stack arguments of the method.
 * Each on-stack parameter gets an entity "param_<i>" at its running byte
 * offset; traversal direction depends on stack direction x argument order.
 */
892 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
894 int dir = env->call->flags.bits.left_to_right ? 1 : -1;
895 int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
896 int n = get_method_n_params(method_type);
/* Start at whichever end matches the traversal direction. */
897 int curr = inc > 0 ? 0 : n - 1;
904 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
905 res = new_type_class(new_id_from_str(buf));
907 for(i = 0; i < n; ++i, curr += inc) {
908 type *param_type = get_method_param_type(method_type, curr);
909 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
912 snprintf(buf, sizeof(buf), "param_%d", i);
913 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
914 set_entity_offset_bytes(arg->stack_ent, ofs);
915 ofs += get_type_size_bytes(param_type);
919 set_type_size_bytes(res, ofs);
/**
 * Insert one Perm per register class after the RegParams node to delimit it.
 * Each non-ignore register with a live Proj participates; the regs map is
 * updated to point at the Perm's constrained outputs.
 * NOTE(review): entries in regs appear to be updated to the Perm inputs
 * (`arg`), not to new Projs of the Perm — confirm against the elided lines.
 */
923 static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
930 /* Create a Perm after the RegParams node to delimit it. */
931 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
932 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
937 for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
938 const arch_register_t *reg = &cls->regs[j];
939 ir_node *irn = pmap_get(regs, (void *) reg);
941 if(irn && !arch_register_type_is(reg, ignore)) {
943 obstack_ptr_grow(&obst, irn);
/* Remember the register in the link field for the second loop. */
944 set_irn_link(irn, (void *) reg);
948 obstack_ptr_grow(&obst, NULL);
949 in = obstack_finish(&obst);
951 perm = be_new_Perm(cls, irg, bl, n_regs, in);
952 for(j = 0; j < n_regs; ++j) {
953 ir_node *arg = in[j];
954 arch_register_t *reg = get_irn_link(arg);
955 pmap_insert(regs, reg, arg);
956 be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
959 obstack_free(&obst, in);
962 obstack_free(&obst, NULL);
/**
 * Build a barrier node (prologue/epilogue) taking all register values in
 * regs (plus the memory, if given) as inputs, and replace each map entry
 * with the corresponding Proj of the barrier. Ignore registers keep their
 * ignore flag and fixed register assignment on the new Projs.
 * @param mem  Optional in/out memory; NULL if no memory flows through —
 *             the elided lines presumably guard the *mem dereference.
 */
965 static void create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs,
966 ir_node *(*barrier_node_constr)(ir_graph *irg, ir_node *bl, int n, ir_node *in[]))
972 for(ent = pmap_first(regs), n = 0; ent; ent = pmap_next(regs), ++n) {
973 obstack_ptr_grow(&env->obst, ent->value);
977 obstack_ptr_grow(&env->obst, *mem);
981 in = (ir_node **) obstack_finish(&env->obst);
982 irn = barrier_node_constr(env->birg->irg, bl, n, in);
983 obstack_free(&env->obst, in);
/* Second iteration visits the map in the same order as the first,
 * so position n matches input n. */
985 for(ent = pmap_first(regs), n = 0; ent; ent = pmap_next(regs), ++n) {
986 int pos = BE_OUT_POS(n);
987 const arch_register_t *reg = ent->key;
989 be_set_constr_single_reg(irn, pos, reg);
990 ent->value = new_r_Proj(env->birg->irg, bl, irn, get_irn_mode(ent->value), n);
991 if(arch_register_type_is(reg, ignore)) {
992 be_node_set_flags(irn, pos, arch_irn_flags_ignore);
993 arch_set_irn_register(env->birg->main_env->arch_env, ent->value, reg);
999 * Modify the irg itself and the frame type.
/**
 * Modify the irg itself and the frame type.
 * Lowers frame Sels, builds the argument/between/frame stack model, creates
 * the RegParams node with Projs for arguments and callee-save registers,
 * generates the prologue barrier, replaces stack parameters by StackParam /
 * FrameAddr nodes, and rewrites every Return into a backend be_Return.
 */
1001 static void modify_irg(be_abi_irg_t *env)
1003 firm_dbg_module_t *dbg = env->dbg;
1004 be_abi_call_t *call = env->call;
1005 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
1006 const arch_register_t *sp = arch_isa_sp(isa);
1007 ir_graph *irg = env->birg->irg;
1008 ir_node *bl = get_irg_start_block(irg);
1009 ir_node *end = get_irg_end_block(irg);
1010 ir_node *arg_tuple = get_irg_args(irg);
1011 ir_node *no_mem = get_irg_no_mem(irg);
1012 type *method_type = get_entity_type(get_irg_entity(irg));
1013 pset *dont_save = pset_new_ptr(8);
1014 pmap *reg_proj_map = pmap_create();
1015 int n_params = get_method_n_params(method_type);
1021 const arch_register_t *fp_reg;
1022 ir_node *frame_pointer;
1023 ir_node *reg_params_bl;
1025 const ir_edge_t *edge;
1026 ir_type *arg_type, *bet_type;
1029 bitset_t *used_proj_nr;
1031 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1033 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
1034 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
1036 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
1037 env->regs = pmap_create();
1039 /* Find the maximum proj number of the argument tuple proj */
1040 foreach_out_edge(arg_tuple, edge) {
1041 ir_node *irn = get_edge_src_irn(edge);
1042 int nr = get_Proj_proj(irn);
1043 max_arg = MAX(max_arg, nr);
/* NOTE(review): args is sized max_arg entries but projs range 0..max_arg;
 * unless an elided line bumps max_arg by one, args[max_arg] is out of
 * bounds — TODO confirm against the full source. */
1046 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
1047 memset(args, 0, max_arg * sizeof(args[0]));
1048 used_proj_nr = bitset_alloca(1024);
1050 /* Fill the argument vector */
1051 foreach_out_edge(arg_tuple, edge) {
1052 ir_node *irn = get_edge_src_irn(edge);
1053 int nr = get_Proj_proj(irn);
1055 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1058 arg_type = compute_arg_type(env, call, method_type);
1059 bet_type = call->cb->get_between_type(env->cb);
1060 stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir);
1062 /* Count the register params and add them to the number of Projs for the RegParams node */
1063 for(i = 0; i < n_params; ++i) {
1064 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
1066 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1067 assert(i == get_Proj_proj(args[i]));
1069 /* For now, associate the register with the old Proj from Start representing that argument. */
1070 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1071 bitset_set(used_proj_nr, i);
1072 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1076 /* Collect all callee-save registers */
1077 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
1078 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
1079 for(j = 0; j < cls->n_regs; ++j) {
1080 const arch_register_t *reg = &cls->regs[j];
1081 if(arch_register_type_is(reg, callee_save))
1082 pmap_insert(env->regs, (void *) reg, NULL);
/* SP and BP always get a RegParams Proj, regardless of save class. */
1086 pmap_insert(env->regs, (void *) sp, NULL);
1087 pmap_insert(env->regs, (void *) isa->bp, NULL);
1088 reg_params_bl = get_irg_start_block(irg);
1089 env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
1092 * make proj nodes for the callee save registers.
1093 * memorize them, since Return nodes get those as inputs.
1095 * Note, that if a register corresponds to an argument, the regs map contains
1096 * the old Proj from start for that argument.
1098 pmap_foreach(env->regs, ent) {
1099 arch_register_t *reg = ent->key;
1100 ir_node *arg_proj = ent->value;
1101 ir_mode *mode = arg_proj ? get_irn_mode(arg_proj) : reg->reg_class->mode;
1102 long nr = arg_proj ? get_Proj_proj(arg_proj) : (long) bitset_next_clear(used_proj_nr, 0);
1103 int pos = BE_OUT_POS((int) nr);
1106 bitset_set(used_proj_nr, nr);
1107 ent->value = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
1108 be_set_constr_single_reg(env->reg_params, pos, reg);
1109 arch_set_irn_register(env->birg->main_env->arch_env, ent->value, reg);
1112 * If the register is an ignore register,
1113 * The Proj for that register shall also be ignored during register allocation.
1115 if(arch_register_type_is(reg, ignore))
1116 be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);
1118 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1121 /* Generate the Prologue */
1122 fp_reg = call->cb->prologue(env->cb, env->regs);
/* NOTE(review): despite the "Prologue" comment the barrier constructor
 * passed here is be_new_Epilogue — confirm whether be_new_Prologue was
 * intended or the constructors are interchangeable for barriers. */
1123 create_barrier(env, bl, NULL, env->regs, be_new_Epilogue);
1124 frame_pointer = pmap_get(env->regs, (void *) fp_reg);
1125 env->init_sp = pmap_get(env->regs, (void *) sp);
1126 set_irg_frame(irg, frame_pointer);
1128 /* Now, introduce stack param nodes for all parameters passed on the stack */
1129 for(i = 0; i < max_arg; ++i) {
1130 ir_node *arg_proj = args[i];
1131 ir_node *repl = NULL;
1133 if(arg_proj != NULL) {
1134 be_abi_call_arg_t *arg;
1135 ir_type *param_type;
1136 int nr = get_Proj_proj(arg_proj);
/* NOTE(review): clamping to n_params allows nr == n_params, one past the
 * last valid parameter index — looks like n_params - 1 was meant; TODO
 * confirm against the full source. */
1138 nr = MIN(nr, n_params);
1139 arg = get_call_arg(call, 0, nr);
1140 param_type = get_method_param_type(method_type, nr);
1143 repl = pmap_get(env->regs, (void *) arg->reg);
1146 else if(arg->on_stack) {
1147 /* For atomic parameters which are actually used, we create a StackParam node. */
1148 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1149 ir_mode *mode = get_type_mode(param_type);
1150 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
1151 repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
1154 /* The stack parameter is not primitive (it is a struct or array),
1155 we thus will create a node representing the parameter's address
1158 repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
1162 assert(repl != NULL);
1163 exchange(args[i], repl);
1167 /* All Return nodes hang on the End node, so look for them there. */
1168 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
1169 ir_node *irn = get_irn_n(end, i);
1171 if(get_irn_opcode(irn) == iro_Return) {
1172 ir_node *bl = get_nodes_block(irn);
1173 int n_res = get_Return_n_ress(irn);
1174 pmap *reg_map = pmap_create();
1175 ir_node *mem = get_Return_mem(irn);
1180 /* Add the stack pointer as an argument to the epilogue. */
1181 pmap_insert(reg_map, (void *) sp, pmap_get(env->regs, (void *) sp));
1183 /* Insert results for Return into the register map. */
1184 for(i = 0; i < n_res; ++i) {
1185 ir_node *res = get_Return_res(irn, i);
1186 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1187 assert(arg->in_reg && "return value must be passed in register");
1188 pmap_insert(reg_map, (void *) arg->reg, res);
1191 /* Add uses of the callee save registers. */
1192 pmap_foreach(env->regs, ent) {
1193 const arch_register_t *reg = ent->key;
1194 if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
1195 pmap_insert(reg_map, ent->key, ent->value);
1198 /* Make the Epilogue node and call the arch's epilogue maker. */
1199 create_barrier(env, bl, &mem, reg_map, be_new_Epilogue);
1200 call->cb->epilogue(env->cb, bl, &mem, reg_map);
1202 obstack_ptr_grow(&env->obst, mem);
1203 obstack_ptr_grow(&env->obst, pmap_get(reg_map, (void *) sp));
1205 /* clear SP entry, since it has already been grown. */
1206 pmap_insert(reg_map, (void *) sp, NULL);
1207 for(i = 0; i < n_res; ++i) {
1208 ir_node *res = get_Return_res(irn, i);
1209 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
1211 obstack_ptr_grow(&env->obst, pmap_get(reg_map, (void *) arg->reg));
1213 /* Clear the map entry to mark the register as processed. */
1214 pmap_insert(reg_map, (void *) arg->reg, NULL);
1217 /* grow the rest of the stuff. */
1218 pmap_foreach(reg_map, ent) {
1220 obstack_ptr_grow(&env->obst, ent->value);
1223 /* The in array for the new back end return is now ready. */
1224 n = obstack_object_size(&env->obst) / sizeof(in[0]);
1225 in = obstack_finish(&env->obst);
1226 ret = be_new_Return(irg, bl, n, in);
1228 /* Free the space of the Epilog's in array and the register <-> proj map. */
1229 obstack_free(&env->obst, in);
1231 pmap_destroy(reg_map);
1235 obstack_free(&env->obst, args);
1239 * Walker: puts all Alloc(stack_alloc) on a obstack
1241 static void collect_alloca_walker(ir_node *irn, void *data)
1243 be_abi_irg_t *env = data;
1244 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
1245 obstack_ptr_grow(&env->obst, irn);
/**
 * Sets up the ABI for a back end IRG: queries the ISA for the calling
 * convention of the graph's method type, initializes the per-graph ABI
 * state and installs the ABI irn handler.
 *
 * @param birg  The back end IRG.
 * @return      A freshly allocated ABI environment for this graph.
 *
 * NOTE(review): several lines of this function (local declarations such
 * as 'dummy' and 'ent', the call lowering / IRG processing steps and the
 * final 'return env;') are not visible in this excerpt.
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
be_abi_irg_t *env = xmalloc(sizeof(env[0]));
ir_node *old_frame = get_irg_frame(birg->irg);
ir_graph *irg = birg->irg;
/* Let the ISA fill in the ABI call information for this method type. */
env->isa = birg->main_env->arch_env->isa;
env->method_type = get_entity_type(get_irg_entity(irg));
env->call = be_abi_call_new();
arch_isa_get_call_abi(env->isa, env->method_type, env->call);
env->dce_survivor = new_survive_dce();
env->dbg = firm_dbg_register("firm.be.abi");
env->stack_phis = pset_new_ptr(16);
/* Use an Unknown as a placeholder stack pointer; its edges are rerouted
 * to the real initial stack pointer further below. */
env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
/* Give the ABI callbacks a chance to set up their self pointer. */
env->cb = env->call->cb->init(env->call, env->isa, irg);
obstack_init(&env->obst);
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
/* Process the IRG */
/* reroute the stack origin of the calls to the true stack origin. */
edges_reroute(dummy, env->init_sp, irg);
edges_reroute(old_frame, get_irg_frame(irg), irg);
/* Make some important node pointers survive the dead node elimination. */
survive_dce_register_irn(env->dce_survivor, &env->init_sp);
pmap_foreach(env->regs, ent)
survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
/* Tell the ABI callbacks that setup is finished. */
env->call->cb->done(env->cb);
/**
 * Tears down the per-graph ABI state built by be_abi_introduce():
 * frees the survive-DCE info, the stack Phi set, the register map and
 * the obstack, and pops the ABI irn handler again.
 *
 * NOTE(review): the final release of 'env' itself is outside this
 * excerpt.
 */
void be_abi_free(be_abi_irg_t *env)
free_survive_dce(env->dce_survivor);
del_pset(env->stack_phis);
pmap_destroy(env->regs);
obstack_free(&env->obst, NULL);
/* Undo the handler pushed in be_abi_introduce(). */
arch_env_pop_irn_handler(env->birg->main_env->arch_env);
1311 | ___(_)_ __ / ___|| |_ __ _ ___| | __
1312 | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
1313 | _| | |> < ___) | || (_| | (__| <
1314 |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
/**
 * Walker: collects all nodes modifying the stack pointer
 * (AddSP, IncSP, SetSP) into a pset.
 *
 * NOTE(review): the binding of 's' to the walker data (presumably
 * 'pset *s = data;') is not visible in this excerpt — verify against
 * the full source.
 */
static void collect_stack_nodes_walker(ir_node *irn, void *data)
if(be_is_AddSP(irn) || be_is_IncSP(irn) || be_is_SetSP(irn))
pset_insert_ptr(s, irn);
/**
 * Puts all stack pointer modifying nodes into SSA form: collects them,
 * then lets the SSA constructor insert the necessary Phis. The inserted
 * Phis are recorded in env->stack_phis so the ABI irn handler can fix
 * them on the stack pointer register.
 */
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
dom_front_info_t *df;
/* We need dominance frontiers for fix up */
df = be_compute_dominance_frontiers(env->birg->irg);
stack_nodes = pset_new_ptr(16);
/* The initial stack pointer counts as a stack pointer definition, too. */
pset_insert_ptr(stack_nodes, env->init_sp);
irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
del_pset(stack_nodes);
/* free these dominance frontiers */
be_free_dominance_frontiers(df);
1344 * Translates a direction of an IncSP node (either be_stack_dir_against, or ...along)
1345 * into -1 or 1, respectively.
1346 * @param irn The node.
1347 * @return 1, if the direction of the IncSP was along, -1 if against.
1349 static int get_dir(ir_node *irn)
1351 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
/**
 * Walks over the schedule of a block, tracking the current stack
 * pointer bias and assigning frame offsets to nodes that reference
 * entities on the stack frame.
 *
 * @param env   The ABI environment.
 * @param bl    The block to process.
 * @param bias  The stack bias at the entry of the block.
 *
 * NOTE(review): the bias-update arithmetic for IncSP nodes, parts of
 * the control flow and the final return statement are not visible in
 * this excerpt; presumably the bias at the end of the block is
 * returned — verify against the full source.
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
const arch_env_t *aenv = env->birg->main_env->arch_env;
int start_bias = bias;
int omit_fp = env->call->flags.bits.try_omit_fp;
sched_foreach(bl, irn) {
/*
If the node modifies the stack pointer by a constant offset,
record that in the bias.
*/
if(be_is_IncSP(irn)) {
int ofs = be_get_IncSP_offset(irn);
int dir = get_dir(irn);
/* BE_STACK_FRAME_SIZE is a placeholder; resolve it to the real
 * frame size now that the frame type is laid out. */
if(ofs == BE_STACK_FRAME_SIZE) {
ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
/*
Else check, if the node relates to an entity on the stack frame.
If so, set the true offset (including the bias) for that
*/
entity *ent = arch_get_frame_entity(aenv, irn);
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(aenv, irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
* A helper struct for the bias walker (see stack_bias_walker()).
be_abi_irg_t *env; /**< The ABI irg environment. */
int start_block_bias; /**< The bias at the end of the start block. */
1406 static void stack_bias_walker(ir_node *bl, void *data)
1408 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1409 struct bias_walk *bw = data;
1410 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Computes the stack frame layout offsets and then fixes the stack
 * bias and frame entity offsets in all blocks of the graph.
 *
 * NOTE(review): the assignment 'bw.env = env;' is not visible in this
 * excerpt but is required before the block walk below — verify against
 * the full source.
 */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
ir_graph *irg = env->birg->irg;
struct bias_walk bw;
stack_frame_compute_initial_offset(env->frame);
// stack_frame_dump(stdout, env->frame);
/* Determine the stack bias at the end of the start block. */
bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
/* fix the bias in all other blocks */
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1430 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1432 assert(arch_register_type_is(reg, callee_save));
1433 assert(pmap_contains(abi->regs, (void *) reg));
1434 return pmap_get(abi->regs, (void *) reg);
1438 _____ _____ _ _ _ _ _ _
1439 |_ _| __ \| \ | | | | | | | | |
1440 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1441 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1442 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1443 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1445 for Phi nodes which are created due to stack modifying nodes
1446 such as IncSP, AddSP and SetSP.
1448 These Phis are always to be ignored by the reg alloc and are
1449 fixed on the SP register of the ISA.
1452 static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1454 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1455 return is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn) != NULL ? &abi->irn_ops : NULL;
1458 static void be_abi_limited(void *data, bitset_t *bs)
1460 be_abi_irg_t *abi = data;
1461 bitset_clear_all(bs);
1462 bitset_set(bs, abi->isa->sp->index);
/**
 * Register requirement for ABI-handled nodes (the stack Phis): a
 * limited requirement allowing only the stack pointer register (the
 * limitation is enforced by be_abi_limited()).
 *
 * NOTE(review): the concluding 'return req;' is not visible in this
 * excerpt — verify against the full source.
 */
static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
be_abi_irg_t *abi = get_abi_from_ops(self);
const arch_register_t *reg = abi->isa->sp;
req->cls = reg->reg_class;
req->type = arch_register_req_type_limited;
req->limited = be_abi_limited;
req->limited_env = abi;
/* Register-assignment hook of the ABI irn ops; the body is not visible
 * in this excerpt (presumably empty, since abi_get_irn_reg() always
 * reports the stack pointer — verify against the full source). */
static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1481 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1483 const be_abi_irg_t *abi = get_abi_from_ops(self);
1484 return abi->isa->sp;
/* ABI-handled nodes are classified as ordinary nodes. */
static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
return arch_irn_class_normal;
/* The stack Phis are to be ignored by the register allocator (see the
 * comment block above), so report the ignore flag for them. */
static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
return arch_irn_flags_ignore;
/* Frame-entity hook of the ABI irn ops; the body is not visible in
 * this excerpt — verify against the full source. */
static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
/* Stack-bias hook of the ABI irn ops; the body is not visible in this
 * excerpt — verify against the full source. */
static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* The irn ops vtable installed for ABI-introduced stack Phis.
 * NOTE(review): only part of the initializer is visible in this
 * excerpt; the remaining entries and the closing brace are outside. */
static const arch_irn_ops_if_t abi_irn_ops = {
abi_get_irn_reg_req,
abi_get_frame_entity,
1516 static const arch_irn_handler_t abi_irn_handler = {