#include <stddef.h>

#include "survive_dce.h"
#include "irgraph_t.h"
#include "iredges_t.h"
#include "irprintf_t.h"
#include "besched_t.h"
/* Local min/max helpers.  Classic conditional macros: arguments are
   evaluated twice, so only pass side-effect free expressions. */
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
/* Descriptor of one call parameter or result: where it lives (register or
   stack slot).  NOTE(review): further fields (is_res, pos, in_reg, stack_ent)
   are referenced elsewhere in this file but not visible in this chunk. */
typedef struct _be_abi_call_arg_t {
	const arch_register_t *reg;  /**< the register assigned to this argument, if passed in a register */
/* Description of one call's ABI, filled in by the ISA via
   arch_isa_get_call_abi() and queried by the lowering code below. */
struct _be_abi_call_t {
	be_abi_call_flags_t flags;  /**< call handling flags, see be_abi_call_set_flags() */
/* A stack frame is composed of three stacked types: argument type,
   between type and the Firm frame type (see the banner comment below). */
#define N_FRAME_TYPES 3

/* Layout bookkeeping for one function's stack frame. */
typedef struct _be_stack_frame_t {
	type *order[N_FRAME_TYPES]; /**< arg, between and frame types ordered. */
/* One slot (entity) inside a stack frame. */
struct _be_stack_slot_t {
	struct _be_stack_frame_t *frame;  /* the frame this slot belongs to */
/* Per-irg ABI environment carried through all lowering phases in this file. */
struct _be_abi_irg_t {
	firm_dbg_module_t *dbg; /**< The debugging module. */
	be_stack_frame_t *frame;  /**< The stack frame layout of this irg. */
	const arch_isa_t *isa; /**< The isa. */
	survive_dce_t *dce_survivor;  /**< Keeps chosen nodes alive across dead node elimination. */
	ir_node *init_sp; /**< The node representing the stack pointer
	                       at the start of the function. */
	arch_irn_handler_t irn_handler;  /**< Handler routing stack Phis to irn_ops below. */
	arch_irn_ops_t irn_ops;  /**< Ops pinning stack Phis to the stack pointer register. */
/*
 * Container-of style helpers: given a pointer to a member embedded in a
 * be_abi_irg_t, recover a pointer to the enclosing environment.
 * Fix: use the standard offsetof() from <stddef.h> instead of the
 * hand-rolled null-pointer arithmetic, which is undefined behavior in C.
 */
#define abi_offset_of(type,member) offsetof(type, member)
#define abi_get_relative(ptr, member) ((void *) ((char *) (ptr) - abi_offset_of(be_abi_irg_t, member)))
#define get_abi_from_handler(ptr) abi_get_relative(ptr, irn_handler)
#define get_abi_from_ops(ptr) abi_get_relative(ptr, irn_ops)
/* Forward declarations, since we need them in be_abi_introduce(). */
95 static const arch_irn_ops_if_t abi_irn_ops;
96 static const arch_irn_handler_t abi_irn_handler;
99 _ ____ ___ ____ _ _ _ _
100 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
101 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
102 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
103 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
105 These callbacks are used by the backend to set the parameters
106 for a specific call type.
109 static int cmp_call_arg(const void *a, const void *b, size_t n)
111 const be_abi_call_arg_t *p = a, *q = b;
112 return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Look up (or insert) the argument descriptor for (is_res, pos) in the
 * call's parameter set.
 * @param do_insert If non-zero, insert a fresh descriptor when none exists;
 *                  otherwise only search.
 * NOTE(review): the initialization of the search key 'arg' (presumably
 * setting is_res and pos) is elided in this chunk — confirm.
 */
static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
	be_abi_call_arg_t arg;
	/* cheap hash combining direction and position */
	hash = is_res * 100 + pos;
		? set_insert(call->params, &arg, sizeof(arg), hash)
		: set_find(call->params, &arg, sizeof(arg), hash);
130 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
132 return get_or_set_call_arg(call, is_res, pos, 0);
135 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, ir_type *between_type)
138 call->between_type = between_type;
141 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
143 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
146 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
148 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
153 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
155 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
160 be_abi_call_t *be_abi_call_new(void)
162 be_abi_call_t *call = malloc(sizeof(call[0]));
164 call->params = new_set(cmp_call_arg, 16);
168 void be_abi_call_free(be_abi_call_t *call)
170 del_set(call->params);
176 | ___| __ __ _ _ __ ___ ___ | | | | __ _ _ __ __| | (_)_ __ __ _
177 | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
178 | _|| | | (_| | | | | | | __/ | _ | (_| | | | | (_| | | | | | | (_| |
179 |_| |_| \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
182 Handling of the stack frame. It is composed of three types:
183 1) The type of the arguments which are pushed on the stack.
184 2) The "between type" which consists of stuff the call of the
185 function pushes on the stack (like the return address and
186 the old base pointer for ia32).
187 3) The Firm frame type which consists of all local variables
/**
 * Compute the offset of a stack frame entity relative to the frame origin.
 * @param frame The stack frame layout.
 * @param ent   The entity; must be owned by one of the three frame types.
 * @param bias  The current stack bias to fold into the result.
 * NOTE(review): the index/i declarations, the loop-exit and the final
 * return (applying bias) are elided in this chunk.
 */
static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
	type *t = get_entity_owner(ent);
	int ofs = get_entity_offset_bytes(ent);

	/* Find the type the entity is contained in. */
	for(index = 0; index < N_FRAME_TYPES; ++index) {
		if(frame->order[index] == t)

	/* Add the size of all the types below the one of the entity to the entity's offset */
	for(i = 0; i < index; ++i)
		ofs += get_type_size_bytes(frame->order[i]);

	/* correct the offset by the initial position of the frame pointer */
	ofs -= frame->initial_offset;

	/* correct the offset with the current bias. */
217 static entity *search_ent_with_offset(type *t, int offset)
221 for(i = 0, n = get_class_n_members(t); i < n; ++i) {
222 entity *ent = get_class_member(t, i);
223 if(get_entity_offset_bytes(ent) == offset)
/**
 * Determine the initial offset of the frame pointer: the offset of the
 * entity at byte 0 of the base type (between type for downward growing
 * stacks, frame type otherwise), computed with a zeroed initial offset.
 * NOTE(review): search_ent_with_offset() may yield NULL if no member sits
 * at offset 0 — confirm the base type always has one.
 */
static int stack_frame_compute_initial_offset(be_stack_frame_t *frame)
	type *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
	entity *ent = search_ent_with_offset(base, 0);
	/* reset first: get_stack_entity_offset() subtracts initial_offset */
	frame->initial_offset = 0;
	frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
	return frame->initial_offset;
239 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
241 frame->arg_type = args;
242 frame->between_type = between;
243 frame->frame_type = locals;
244 frame->initial_offset = 0;
245 frame->stack_dir = stack_dir;
246 frame->order[1] = between;
249 frame->order[0] = args;
250 frame->order[2] = locals;
254 frame->order[0] = locals;
255 frame->order[2] = args;
/**
 * Dump a stack frame layout (initial offset, the three types with their
 * sizes, and every member entity with local and global offsets) to file.
 * NOTE(review): loop variable declarations and closing braces are elided.
 */
static void stack_frame_dump(FILE *file, be_stack_frame_t *frame)
	ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
	for(j = 0; j < N_FRAME_TYPES; ++j) {
		type *t = frame->order[j];

		ir_fprintf(file, "type %d: %Fm size: %d\n", j, t, get_type_size_bytes(t));
		for(i = 0, n = get_class_n_members(t); i < n; ++i) {
			entity *ent = get_class_member(t, i);
			ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
278 * If irn is a Sel node computing the address of an entity
279 * on the frame type return the entity, else NULL.
281 static INLINE entity *get_sel_ent(ir_node *irn)
283 if(get_irn_opcode(irn) == iro_Sel
284 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
286 return get_Sel_entity(irn);
/**
 * Walker: Replaces Loads, Stores and Sels of frame type entities
 * by FrameLoad, FrameStore and FrameAdress.
 * NOTE(review): the guards checking that the Sel entity was actually found
 * (ent != NULL), the declaration of 'nw' and the exchange() of the old node
 * for the new one are elided in this chunk.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
	const arch_register_class_t *cls;
	be_abi_irg_t *env = data;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	ir_graph *irg = get_irn_irg(irn);
	ir_node *frame = get_irg_frame(irg);
	opcode opc = get_irn_opcode(irn);

	if(opc == iro_Load) {
		/* Load through a frame Sel -> back end FrameLoad */
		ir_node *bl = get_nodes_block(irn);
		ir_node *sel = get_Load_ptr(irn);
		entity *ent = get_sel_ent(sel);
		cls = arch_isa_get_reg_class_for_mode(isa, get_Load_mode(irn));
		nw = be_new_FrameLoad(isa->sp->reg_class, cls, irg, bl, get_Load_mem(irn), frame, ent);

	else if(opc == iro_Store) {
		/* Store through a frame Sel -> back end FrameStore */
		ir_node *bl = get_nodes_block(irn);
		ir_node *val = get_Store_value(irn);
		ir_node *sel = get_Store_ptr(irn);
		entity *ent = get_sel_ent(sel);
		cls = arch_isa_get_reg_class_for_mode(isa, get_irn_mode(val));
		nw = be_new_FrameStore(isa->sp->reg_class, cls, irg, bl, get_Store_mem(irn), frame, val, ent);

		/* bare frame Sel -> back end FrameAddr */
		entity *ent = get_sel_ent(irn);
		ir_node *bl = get_nodes_block(irn);
		nw = be_new_FrameAddr(isa->sp->reg_class, irg, bl, frame, ent);
337 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
339 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
340 return arg && !arg->in_reg;
/**
 * Transform a call node.
 * @param env The ABI environment for the current irg.
 * @param irn The call node.
 * NOTE(review): numerous declarations (i, j, n, stack_size, curr_ofs, p,
 * n_pos, pos, low_args, n_low_args, in, mem, low_call, keep, sp_proj, ...)
 * and several guards/closing braces are elided in this chunk; the comments
 * below describe only the visible code.
 */
static void adjust_call(be_abi_irg_t *env, ir_node *irn)
	ir_graph *irg = env->birg->irg;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	be_abi_call_t *call = be_abi_call_new();
	ir_type *mt = get_Call_type(irn);
	int n_params = get_method_n_params(mt);
	ir_node *curr_sp = env->init_sp;
	ir_node *curr_mem = get_Call_mem(irn);
	ir_node *bl = get_nodes_block(irn);
	pset *results = pset_new_ptr(8);
	pset *caller_save = pset_new_ptr(8);
	int stack_dir = arch_isa_stack_dir(isa);
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_mode *mach_mode = sp->reg_class->mode;
	struct obstack *obst = &env->obst;
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *res_proj = NULL;
	int curr_res_proj = -1;
	const ir_edge_t *edge;

	/* Let the isa fill out the abi description for that call node. */
	arch_isa_get_call_abi(isa, mt, call);

	/* Insert code to put the stack arguments on the stack. */
	assert(get_Call_n_params(irn) == n_params);
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		/* remember the positions of stack parameters and sum up their sizes */
		stack_size += get_type_size_bytes(get_method_param_type(mt, i));
		obstack_int_grow(obst, i);
	pos = obstack_finish(obst);

	/* Collect all arguments which are passed in registers. */
	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg && arg->in_reg) {
			obstack_int_grow(obst, i);
	low_args = obstack_finish(obst);

	/* If there are some parameters which shall be passed on the stack. */
		int do_seq = call->flags.bits.store_args_sequential;

		/* Reverse list of stack parameters if call arguments are from left to right */
		if(call->flags.bits.left_to_right) {
			for(i = 0; i < n_pos / 2; ++i) {
				int other = n_pos - i - 1;

		/*
		 * If the stack is decreasing and we do not want to store sequentially,
		 * we allocate as much space on the stack as all parameters need, by
		 * moving the stack pointer along the stack's direction.
		 */
		if(stack_dir < 0 && !do_seq) {
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);

		assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
		for(i = 0; i < n_pos; ++i) {
			ir_node *param = get_Call_param(irn, p);
			ir_node *addr = curr_sp;
			type *param_type = get_method_param_type(mt, p);
			int param_size = get_type_size_bytes(param_type);

			/* Make the expression to compute the argument's offset. */
				addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
				addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);

			/* Insert a store for primitive arguments. */
			if(is_atomic_type(param_type)) {
				mem = new_r_Store(irg, bl, curr_mem, addr, param);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);

			/* Make a memcopy for compound arguments. */
				assert(mode_is_reference(get_irn_mode(param)));
				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);

			obstack_ptr_grow(obst, mem);

			curr_ofs += param_size;

			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next must be incremented,
			 * and the memory value propagated.
			 */
				curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);

		in = (ir_node **) obstack_finish(obst);

		/* We need the sync only, if we didn't build the stores sequentially. */
			curr_mem = new_r_Sync(irg, bl, n_pos, in);
		obstack_free(obst, in);

	/* Collect caller save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if(arch_register_type_is(reg, caller_save))
				pset_insert_ptr(caller_save, (void *) reg);

	/* search the greatest result proj number */
	foreach_out_edge(irn, edge) {
		const ir_edge_t *res_edge;
		ir_node *irn = get_edge_src_irn(edge);

		if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
			/* walk the Projs hanging off the result tuple */
			foreach_out_edge(irn, res_edge) {
				be_abi_call_arg_t *arg;
				ir_node *res = get_edge_src_irn(res_edge);

				assert(is_Proj(res));
				proj = get_Proj_proj(res);
				arg = get_call_arg(call, 1, proj);
				if(proj > curr_res_proj)
					curr_res_proj = proj;
				/* a result returned in a register is not clobbered for this caller */
					pset_remove_ptr(caller_save, arg->reg);

	/* make the back end call node and set its register requirements. */
	for(i = 0; i < n_low_args; ++i)
		obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));

	in = obstack_finish(obst);
	low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
	obstack_free(obst, in);
	exchange(irn, low_call);

	/* Make additional projs for the caller save registers
	   and the Keep node which keeps them alive. */
	if(pset_count(caller_save) > 0) {
		const arch_register_t *reg;

		res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);

		for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
			ir_node *proj = new_r_Proj(irg, bl, res_proj, reg->reg_class->mode, curr_res_proj++);

			/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
			set_irn_link(proj, (void *) reg);
			obstack_ptr_grow(obst, proj);

		in = (ir_node **) obstack_finish(obst);
		keep = be_new_Keep(NULL, irg, bl, n, in);
		for(i = 0; i < n; ++i) {
			const arch_register_t *reg = get_irn_link(in[i]);
			be_node_set_reg_class(keep, i, reg->reg_class);
		obstack_free(obst, in);

	/* Clean up the stack. */
		ir_node *last_inc_sp;

		/* Get the result ProjT */
		res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);

		/* Make a Proj for the stack pointer. */
		sp_proj = new_r_Proj(irg, bl, res_proj, sp->reg_class->mode, curr_res_proj++);
		last_inc_sp = be_new_IncSP(sp, irg, bl, sp_proj, no_mem, stack_size, be_stack_dir_against);

	be_abi_call_free(call);
	obstack_free(obst, pos);
	/* NOTE(review): 'results' is created above but no del_pset(results) is
	   visible in this chunk — possible pset leak, confirm. */
	del_pset(caller_save);
572 static void adjust_call_walker(ir_node *irn, void *data)
574 if(get_irn_opcode(irn) == iro_Call)
575 adjust_call(data, irn);
/**
 * Walker to implement alloca-style allocations.
 * They are implemented using an add to the stack pointer
 * and a copy instruction.
 * NOTE(review): the declaration of 'size' and the final rerouting/exchange
 * of the Alloc's users are elided in this chunk.
 */
static void implement_stack_alloc(be_abi_irg_t *env, ir_node *irn)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	ir_node *bl = get_nodes_block(irn);
	ir_node *res = env->init_sp;

	assert(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc);

	size = get_Alloc_size(irn);
	/* upward growing stack: the old sp is the address of the new block */
	if(isa->stack_dir > 0)
		res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);

	res = be_new_AddSP(isa->sp, env->birg->irg, bl, res, size);

	/* downward growing stack: the new sp is the address of the new block */
	if(isa->stack_dir < 0)
		res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
603 static void collect_return_walker(ir_node *irn, void *data)
605 if(get_irn_opcode(irn) == iro_Return) {
606 struct obstack *obst = data;
607 obstack_ptr_grow(obst, irn);
/**
 * Emit the prologue code that sets up the stack frame in the start block:
 * either a plain IncSP (frame pointer omitted) or a Copy establishing the
 * frame pointer followed by an IncSP.  Returns the frame pointer node.
 * NOTE(review): the else-branch bracketing and the final return statement
 * are elided in this chunk.
 */
static ir_node *setup_frame(be_abi_irg_t *env)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	be_abi_call_flags_bits_t flags = env->call->flags.bits;
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *old_frame = get_irg_frame(irg);
	ir_node *stack = pmap_get(env->regs, (void *) sp);
	ir_node *frame = pmap_get(env->regs, (void *) bp);
	int stack_nr = get_Proj_proj(stack);

	if(flags.try_omit_fp) {
		/* no frame pointer: just reserve the frame on the stack */
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);

		/* materialize the frame pointer as a copy of the incoming sp,
		   pinned to bp and invisible to spilling/allocation */
		frame = be_new_Copy(bp->reg_class, irg, bl, stack);
		be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
		be_set_constr_single_reg(frame, -1, bp);
		be_node_set_flags(frame, -1, arch_irn_flags_ignore);
		arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
		stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_along);

	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
	env->init_sp = stack;
	set_irg_frame(irg, frame);
	/* make all users of the old frame use the new frame pointer */
	edges_reroute(old_frame, frame, irg);
/**
 * Emit the epilogue code before a Return: tear down the stack frame
 * (IncSP back, or SetSP from the frame pointer) and append the saved
 * callee-save register Projs to the Return's inputs via obst.
 * NOTE(review): the else-branch bracketing, the pmap entry declaration and
 * the filtering inside the pmap loop are elided in this chunk.
 */
static void clearup_frame(be_abi_irg_t *env, ir_node *ret, struct obstack *obst)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	ir_graph *irg = env->birg->irg;
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *frame = get_irg_frame(irg);
	ir_node *stack = env->init_sp;
	ir_node *bl = get_nodes_block(ret);

	if(env->call->flags.bits.try_omit_fp) {
		/* no frame pointer: release the frame by moving sp back */
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);

		/* restore sp from the frame pointer */
		stack = be_new_SetSP(sp, irg, bl, stack, frame, get_Return_mem(ret));
		be_set_constr_single_reg(stack, -1, sp);
		be_node_set_flags(stack, -1, arch_irn_flags_ignore);

	/* hand every recorded register Proj to the Return being built */
	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		ir_node *irn = ent->value;

		obstack_ptr_grow(obst, irn);
/**
 * Build the class type describing the stack argument area of the current
 * function: one entity per stack-passed parameter, laid out in the order
 * implied by the calling convention (stack direction times argument order).
 * NOTE(review): declarations of buf/res/ofs/i, the stack-argument guard in
 * the loop and the final return of res are elided in this chunk.
 */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
	int dir = env->call->flags.bits.left_to_right ? 1 : -1;
	int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
	int n = get_method_n_params(method_type);
	int curr = inc > 0 ? 0 : n - 1;

	/* name the type after the function it belongs to */
	snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
	res = new_type_class(new_id_from_str(buf));

	for(i = 0; i < n; ++i, curr += inc) {
		type *param_type = get_method_param_type(method_type, curr);
		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);

		snprintf(buf, sizeof(buf), "param_%d", i);
		arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
		set_entity_offset_bytes(arg->stack_ent, ofs);
		ofs += get_type_size_bytes(param_type);

	set_type_size_bytes(res, ofs);
719 static type *get_bp_type(const arch_register_t *bp)
721 static type *bp_type = NULL;
723 bp_type = new_type_primitive(new_id_from_str("bp_type"), bp->reg_class->mode);
724 set_type_size_bytes(bp_type, get_mode_size_bytes(bp->reg_class->mode));
/**
 * Modify the irg itself and the frame type.
 * Lowers frame Sels, builds the RegParams node for all incoming registers,
 * replaces argument Projs by register Projs or StackParam/FrameAddr nodes,
 * and rewrites all Return nodes into back end Returns with the epilogue.
 * NOTE(review): many declarations (i, j, n, max_arg, arg_type, param_type,
 * ent, in, ret, ...), several guards and closing braces are elided in this
 * chunk; in particular the Return loop below appears to reuse i and n —
 * presumably shadowed by elided inner declarations. Confirm.
 */
static void modify_irg(be_abi_irg_t *env)
	firm_dbg_module_t *dbg = env->dbg;
	be_abi_call_t *call = be_abi_call_new();
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *end = get_irg_end_block(irg);
	ir_node *arg_tuple = get_irg_args(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	type *method_type = get_entity_type(get_irg_entity(irg));
	int n_params = get_method_n_params(method_type);
	int reg_params_nr = 0;

	ir_node *frame_pointer;
	ir_node *reg_params_bl;
	ir_node **args, **args_repl;
	const ir_edge_t *edge;

	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));

	/* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
	irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);

	env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
	env->regs = pmap_create();

	/* Find the maximum proj number of the argument tuple proj */
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		int nr = get_Proj_proj(irn);
		max_arg = MAX(max_arg, nr);

	/* NOTE(review): max_arg is used as an element count below, so it is
	   presumably incremented past the maximum index on an elided line. */
	args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
	args_repl = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
	memset(args, 0, max_arg * sizeof(args[0]));
	memset(args_repl, 0, max_arg * sizeof(args[0]));

	/* Fill the argument vector */
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		int nr = get_Proj_proj(irn);

		DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));

	/* Get the ABI constraints from the ISA */
	arch_isa_get_call_abi(isa, method_type, call);

	arg_type = compute_arg_type(env, call, method_type);
	stack_frame_init(env->frame, arg_type, call->between_type, get_irg_frame_type(irg), isa->stack_dir);

	/* Count the register params and add them to the number of Projs for the RegParams node */
	for(i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);

		assert(arg->reg != sp && "cannot use stack pointer as parameter register");
		pmap_insert(env->regs, (void *) arg->reg, NULL);
		DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));

	/* Collect all callee-save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			if(arch_register_type_is(reg, callee_save))
				pmap_insert(env->regs, (void *) reg, NULL);

	/* the stack pointer and base pointer always get a Proj, too */
	pmap_insert(env->regs, (void *) sp, NULL);
	pmap_insert(env->regs, (void *) isa->bp, NULL);

	reg_params_bl = get_irg_start_block(irg);
	env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));

	/*
	 * make proj nodes for the callee save registers.
	 * memorize them, since Return nodes get those as inputs.
	 */
	for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs)) {
		arch_register_t *reg = ent->key;
		int pos = -(reg_params_nr + 1);

		ent->value = new_r_Proj(irg, reg_params_bl, env->reg_params, reg->reg_class->mode, reg_params_nr);
		be_set_constr_single_reg(env->reg_params, pos, reg);
		arch_set_irn_register(env->birg->main_env->arch_env, ent->value, reg);

		/*
		 * If the register is an ignore register,
		 * The Proj for that register shall also be ignored during register allocation.
		 */
		if(arch_register_type_is(reg, ignore))
			be_node_set_flags(env->reg_params, pos, arch_irn_flags_ignore);

		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", reg_params_nr - 1, reg->name));

	/* Insert the code to set up the stack frame */
	frame_pointer = setup_frame(env);

	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for(i = 0; i < max_arg; ++i) {
		ir_node *arg_proj = args[i];

		if(arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			int nr = get_Proj_proj(arg_proj);

			nr = MIN(nr, n_params);
			arg = get_call_arg(call, 0, nr);
			param_type = get_method_param_type(method_type, nr);

			/* register argument: yet another Proj on the RegParams node */
				args_repl[i] = new_r_Proj(irg, reg_params_bl, env->reg_params, get_irn_mode(arg_proj), reg_params_nr);
				be_set_constr_single_reg(env->reg_params, -(reg_params_nr + 1), arg->reg);

			/* when the (stack) parameter is primitive, we insert a StackParam
			   node representing the load of that parameter */

				/* For atomic parameters which are actually used, we create a StackParam node. */
				if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
					ir_mode *mode = get_type_mode(param_type);
					const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
					args_repl[i] = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);

				/* The stack parameter is not primitive (it is a struct or array),
				   we thus will create a node representing the parameter's address
				 */
					args_repl[i] = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);

	/* reroute the edges from the original argument projs to the RegParam ones. */
	for(i = 0; i < max_arg; ++i) {
		if(args[i] != NULL) {
			assert(args_repl[i] != NULL);
			edges_reroute(args[i], args_repl[i], irg);

	/* All Return nodes hang on the End node, so look for them there. */
	for(i = 0, n = get_irn_arity(end); i < n; ++i) {
		ir_node *irn = get_irn_n(end, i);

		if(get_irn_opcode(irn) == iro_Return) {
			ir_node *bl = get_nodes_block(irn);
			int n_res = get_Return_n_ress(irn);
			pmap *reg_map = pmap_create_ex(n_res);

			/* collect all arguments of the return */
			obstack_ptr_grow(&env->obst, get_Return_mem(irn));
			for(i = 0; i < n_res; ++i) {
				ir_node *res = get_Return_res(irn, i);
				be_abi_call_arg_t *arg = get_call_arg(call, 1, i);

				assert(arg->in_reg && "return value must be passed in register");
				pmap_insert(reg_map, res, (void *) arg->reg);
				obstack_ptr_grow(&env->obst, res);

			/* generate the clean up code and add additional parameters to the return. */
			clearup_frame(env, irn, &env->obst);

			/* The in array for the new back end return is now ready. */
			n = obstack_object_size(&env->obst) / sizeof(in[0]);
			in = obstack_finish(&env->obst);
			ret = be_new_Return(irg, bl, n, in);

			/* Set the constraints for some arguments of the return. */
			for(i = 0; i < n; i++) {
				const arch_register_t *reg = pmap_get(reg_map, in[i]);

					be_set_constr_single_reg(ret, i, reg);

			obstack_free(&env->obst, in);
			pmap_destroy(reg_map);

	obstack_free(&env->obst, args);
	be_abi_call_free(call);
940 * Walker: puts all Alloc(stack_alloc) on a obstack
942 static void collect_alloca_walker(ir_node *irn, void *data)
944 be_abi_irg_t *env = data;
945 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
946 obstack_ptr_grow(&env->obst, irn);
/**
 * Create the ABI environment for an irg, record nodes that must survive
 * dead node elimination, lower alloca-style Allocs and all Call nodes, and
 * install the ABI's irn handler on the arch environment.
 * NOTE(review): several statements are elided in this chunk — in
 * particular the assignment of env->birg (used immediately below) and the
 * final return of env. Confirm against the full file.
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	be_abi_irg_t *env = malloc(sizeof(env[0]));
	ir_node **stack_allocs;

	env->isa = birg->main_env->arch_env->isa;
	env->method_type = get_entity_type(get_irg_entity(birg->irg));
	env->call = be_abi_call_new();
	arch_isa_get_call_abi(env->isa, env->method_type, env->call);

	env->dce_survivor = new_survive_dce();
	env->dbg = firm_dbg_register("firm.be.abi");
	env->stack_phis = pset_new_ptr_default();
	obstack_init(&env->obst);

	/* clone the generic handler and point its ops at our vtable */
	memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
	env->irn_ops.impl = &abi_irn_ops;

	/* search for stack allocation nodes and record them */
	irg_walk_graph(env->birg->irg, collect_alloca_walker, NULL, env);
	obstack_ptr_grow(&env->obst, NULL);
	stack_allocs = obstack_finish(&env->obst);

	/* If there are stack allocations in the irg, we need a frame pointer */
	if(stack_allocs[0] != NULL)
		env->call->flags.bits.try_omit_fp = 0;

	/* Make some important node pointers survive the dead node elimination. */
	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
	for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs))
		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);

	/* Fix the alloca-style allocations */
	for(i = 0; stack_allocs[i] != NULL; ++i)
		implement_stack_alloc(env, stack_allocs[i]);

	/* Lower all call nodes in the IRG. */
	irg_walk_graph(env->birg->irg, NULL, adjust_call_walker, env);

	arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
/**
 * Walker: collects stack pointer modifying back end nodes into the pset
 * passed as data.
 * NOTE(review): the declaration of 's' and the case labels of the switch
 * (presumably IncSP/AddSP/SetSP) are elided in this chunk.
 */
static void collect_stack_nodes(ir_node *irn, void *data)
	switch(be_get_irn_opcode(irn)) {
			pset_insert_ptr(s, irn);
/**
 * Turn the stack pointer modifications into SSA form: collect all stack
 * modifying nodes plus the initial sp, and let the SSA constructor insert
 * the necessary Phis (recorded in env->stack_phis).
 * NOTE(review): the declaration of 'stack_ops' is elided in this chunk.
 */
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
	dom_front_info_t *df;

	/* We need dominance frontiers for fix up */
	df = be_compute_dominance_frontiers(env->birg->irg);

	stack_ops = pset_new_ptr_default();
	pset_insert_ptr(stack_ops, env->init_sp);
	irg_walk_graph(env->birg->irg, collect_stack_nodes, NULL, stack_ops);
	be_ssa_constr_set_phis(df, stack_ops, env->stack_phis);
	del_pset(stack_ops);

	/* free these dominance frontiers */
	be_free_dominance_frontiers(df);
1032 * Translates a direction of an IncSP node (either be_stack_dir_against, or ...along)
1033 * into -1 or 1, respectively.
1034 * @param irn The node.
1035 * @return 1, if the direction of the IncSP was along, -1 if against.
1037 static int get_dir(ir_node *irn)
1039 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
/**
 * Walk the schedule of a block, tracking the running stack pointer bias and
 * writing the true (biased) frame offset into every node that addresses a
 * frame entity.
 * @return The bias at the end of the block (via an elided return).
 * NOTE(review): the declaration of 'irn', the bias-update arithmetic, the
 * else-branch bracketing and the return statement are elided in this chunk.
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
	const arch_env_t *aenv = env->birg->main_env->arch_env;
	int start_bias = bias;
	int omit_fp = env->call->flags.bits.try_omit_fp;

	sched_foreach(bl, irn) {
		/*
		   If the node modifies the stack pointer by a constant offset,
		   record that in the bias.
		 */
		if(be_is_IncSP(irn)) {
			int ofs = be_get_IncSP_offset(irn);
			int dir = get_dir(irn);

			/* BE_STACK_FRAME_SIZE is a placeholder resolved here to the
			   real size of the frame type */
			if(ofs == BE_STACK_FRAME_SIZE) {
				ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
				be_set_IncSP_offset(irn, ofs);

		/*
		   Else check, if the node relates to an entity on the stack frame.
		   If so, set the true offset (including the bias) for that
		 */
			entity *ent = arch_get_frame_entity(aenv, irn);
				int offset = get_stack_entity_offset(env->frame, ent, bias);
				arch_set_frame_offset(aenv, irn, offset);
				DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
/**
 * A helper struct for the bias walker.
 * NOTE(review): the 'struct bias_walk {' opener and closing brace are
 * elided in this chunk.
 */
	be_abi_irg_t *env; /**< The ABI irg environment. */
	int start_block_bias; /**< The bias at the end of the start block. */
1094 static void stack_bias_walker(ir_node *bl, void *data)
1096 if(bl != get_irg_start_block(get_irn_irg(bl))) {
1097 struct bias_walk *bw = data;
1098 process_stack_bias(bw->env, bl, bw->start_block_bias);
/**
 * Compute the initial frame offset, then fix the stack bias (and thereby
 * the frame entity offsets) in the start block and all other blocks.
 * NOTE(review): the assignment bw.env = env is presumably on an elided
 * line before the block walk — confirm.  The stack_frame_dump() call to
 * stdout looks like leftover debugging output.
 */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;
	struct bias_walk bw;

	stack_frame_compute_initial_offset(env->frame);
	stack_frame_dump(stdout, env->frame);

	/* Determine the stack bias at the end of the start block. */
	bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);

	/* fix the bias in all other blocks */
	irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
1118 void be_abi_free(be_abi_irg_t *env)
1120 free_survive_dce(env->dce_survivor);
1121 del_pset(env->stack_phis);
1122 pmap_destroy(env->regs);
1123 obstack_free(&env->obst, NULL);
1127 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1129 assert(arch_register_type_is(reg, callee_save));
1130 assert(pmap_contains(abi->regs, (void *) reg));
1131 return pmap_get(abi->regs, (void *) reg);
1135 _____ _____ _ _ _ _ _ _
1136 |_ _| __ \| \ | | | | | | | | |
1137 | | | |__) | \| | | |__| | __ _ _ __ __| | | ___ _ __
1138 | | | _ /| . ` | | __ |/ _` | '_ \ / _` | |/ _ \ '__|
1139 _| |_| | \ \| |\ | | | | | (_| | | | | (_| | | __/ |
1140 |_____|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1142 for Phi nodes which are created due to stack modifying nodes
1143 such as IncSP, AddSP and SetSP.
1145 These Phis are always to be ignored by the reg alloc and are
1146 fixed on the SP register of the ISA.
1149 static const arch_irn_ops_t *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1151 const be_abi_irg_t *abi = get_abi_from_handler(handler);
1152 return is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn) != NULL ? &abi->irn_ops : NULL;
1155 static void be_abi_limited(bitset_t *bs, void *data)
1157 be_abi_irg_t *abi = data;
1158 bitset_clear_all(bs);
1159 bitset_set(bs, abi->isa->sp->index);
1162 static const arch_register_req_t *abi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1164 be_abi_irg_t *abi = get_abi_from_ops(self);
1165 const arch_register_t *reg = abi->isa->sp;
1167 req->cls = reg->reg_class;
1168 req->type = arch_register_req_type_limited;
1169 req->limited = be_abi_limited;
1170 req->limited_env = abi;
1174 static void abi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1178 static const arch_register_t *abi_get_irn_reg(const void *self, const ir_node *irn)
1180 const be_abi_irg_t *abi = get_abi_from_ops(self);
1181 return abi->isa->sp;
1184 static arch_irn_class_t abi_classify(const void *_self, const ir_node *irn)
1186 return arch_irn_class_normal;
1189 static arch_irn_flags_t abi_get_flags(const void *_self, const ir_node *irn)
1191 return arch_irn_flags_ignore;
1194 static entity *abi_get_frame_entity(const void *_self, const ir_node *irn)
1199 static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
/* vtable wiring the callbacks above into the arch irn-ops interface.
   NOTE(review): several initializer entries (set_irn_reg, get_irn_reg,
   classify, get_flags, set_stack_bias, ...) are elided in this chunk. */
static const arch_irn_ops_if_t abi_irn_ops = {
	abi_get_irn_reg_req,
	abi_get_frame_entity,
1213 static const arch_irn_handler_t abi_irn_handler = {