15 #include "irgraph_t.h"
18 #include "iredges_t.h"
26 #include "besched_t.h"
/* Simple max/min helpers.
 * NOTE(review): both macros evaluate each argument twice — do not pass
 * expressions with side effects (e.g. i++). */
28 #define MAX(x, y) ((x) > (y) ? (x) : (y))
29 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/* Describes how one call argument or result is passed: either in the
 * register 'reg' or on the stack (further fields are declared on lines
 * not visible here). */
31 typedef struct _be_abi_call_arg_t {
36 	const arch_register_t *reg;

/* Per-call ABI description: flags plus a set of be_abi_call_arg_t
 * records keyed by (is_res, pos). */
40 struct _be_abi_call_t {
41 	be_abi_call_flags_t flags;

/* Layout of a stack frame: argument, between and local (frame) types,
 * kept in stack order in 'order'. */
46 typedef struct _be_stack_frame_t {
51 	type *order[3];         /**< arg, between and frame types ordered. */

/* A single slot inside a stack frame; refers back to its frame. */
57 struct _be_stack_slot_t {
58 	struct _be_stack_frame_t *frame;

/* Per-irg ABI lowering environment. */
62 struct _be_abi_irg_t {
68 	ir_node *init_sp;      /**< The node representing the stack pointer
69 					     at the start of the function. */
73 	pset *stack_ops;       /**< Contains all nodes modifying the stack pointer. */
79 	unsigned dedicated_fp : 1;      /* frame pointer must live in the dedicated bp register */
80 	unsigned left_to_right : 1;     /* call arguments are pushed left to right */
81 	unsigned save_old_fp : 1;       /* old frame pointer is saved in the prologue */
83 	ir_node *store_bp_mem;          /* memory Proj of the Store that saves the old bp */
84 	be_stack_frame_t *frame;        /* layout of this function's stack frame */
86 	firm_dbg_module_t *dbg;          /**< The debugging module. */
/* set compare callback: two call-arg records are equal (return 0) iff
 * they describe the same position on the same side (param vs. result). */
89 static int cmp_call_arg(const void *a, const void *b, size_t n)
91 	const be_abi_call_arg_t *p = a, *q = b;
92 	return !(p->is_res == q->is_res && p->pos == q->pos);
/* Look up the arg record for (is_res, pos) in the call's param set;
 * if do_insert is non-zero, insert a fresh record instead of just
 * searching.  The hash mixes side and position. */
95 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
97 	be_abi_call_arg_t arg;
103 	hash = is_res * 100 + pos;
106 		? set_insert(call->params, &arg, sizeof(arg), hash)
107 		: set_find(call->params, &arg, sizeof(arg), hash);
/* Read-only lookup convenience wrapper: find the arg record for
 * (is_res, pos) without inserting; may return NULL. */
110 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
112 	return get_or_set_call_arg(call, is_res, pos, 0);
/* Record the ABI flags and the "between" type (laid out between the
 * argument area and the locals) on the call description. */
115 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, ir_type *between_type)
118 	call->between_type = between_type;
/* Mark parameter arg_pos as being passed on the stack
 * (inserts its arg record; is_res = 0). */
121 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
123 	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Mark parameter arg_pos as being passed in register reg
 * (inserts its arg record; is_res = 0). */
126 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
128 	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Mark result arg_pos as being returned in register reg
 * (inserts its arg record; is_res = 1). */
133 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
135 	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/* Allocate and initialize a fresh call description.
 * NOTE(review): the malloc result is used without a NULL check —
 * verify the project's OOM policy (abort-on-OOM wrapper?). */
140 be_abi_call_t *be_abi_call_new(void)
142 	be_abi_call_t *call = malloc(sizeof(call[0]));
143 	call->flags         = BE_ABI_NONE;
144 	call->params        = new_set(cmp_call_arg, 16);
/* Release a call description created by be_abi_call_new()
 * (frees the param set; the struct itself is freed on a line
 * not visible here — confirm). */
148 void be_abi_call_free(be_abi_call_t *call)
150 	del_set(call->params);
/* Compute the offset of frame entity 'ent' relative to the stack
 * pointer: entity offset within its type, plus the sizes of all frame
 * sub-types laid out below it, corrected by the frame's initial offset
 * and the current bias. */
154 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
156 	type *t = get_entity_type(ent);
157 	int ofs = get_entity_offset_bytes(ent);
161 	/* Find the type the entity is contained in. */
162 	for(index = 0; index < 3; ++index) {
163 		if(frame->order[index] == t)
167 	/* Add the size of all the types below the one of the entity to the entity's offset */
168 	for(i = 0; i < index; ++i)
169 		ofs += get_type_size_bytes(frame->order[i]);
171 	/* correct the offset by the initial position of the frame pointer */
172 	ofs -= frame->initial_offset;
174 	/* correct the offset with the current bias. */
/* Compute and cache the frame's initial offset as the zero-bias offset
 * of the anchor entity 'ent'.  initial_offset is reset to 0 first so
 * the query below is not corrected by a stale value. */
180 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame, entity *ent)
182 	frame->initial_offset = 0;
183 	frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
184 	return frame->initial_offset;
/* Initialize a stack frame layout: record the three component types and
 * order them by stack direction — the between type always sits in the
 * middle; args and locals swap ends depending on stack_dir. */
187 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
189 	frame->arg_type       = args;
190 	frame->between_type   = between;
191 	frame->frame_type     = locals;
192 	frame->initial_offset = 0;
193 	frame->stack_dir      = stack_dir;
194 	frame->order[1]       = between;
	/* one stack direction: args below, locals above */
197 		frame->order[0] = args;
198 		frame->order[2] = locals;
	/* the opposite direction: locals below, args above */
202 		frame->order[0] = locals;
203 		frame->order[2] = args;
210  * If irn is a Sel node computing the address of an entity
211  * on the frame type return the entity, else NULL.
213 static INLINE entity *get_sel_ent(ir_node *irn)
215 	if(get_irn_opcode(irn) == iro_Sel
216 			&& get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
218 		return get_Sel_entity(irn);
225  * Walker: Replaces Loads, Stores and Sels of frame type entities
226  * by FrameLoad, FrameStore and FrameAddress.
228 static void lower_frame_sels_walker(ir_node *irn, void *data)
230 	const arch_register_class_t *cls;
231 	be_abi_irg_t *env = data;
232 	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
233 	ir_graph *irg = get_irn_irg(irn);
234 	ir_node *frame = get_irg_frame(irg);
236 	opcode opc = get_irn_opcode(irn);
	/* Load through a frame Sel -> backend FrameLoad */
238 	if(opc == iro_Load) {
239 		ir_node *bl  = get_nodes_block(irn);
240 		ir_node *sel = get_Load_ptr(irn);
241 		entity *ent  = get_sel_ent(sel);
242 			cls = arch_isa_get_reg_class_for_mode(isa, get_Load_mode(irn));
244 			nw  = be_new_FrameLoad(isa->sp->reg_class, cls, irg, bl, get_Load_mem(irn), frame, ent);
	/* Store through a frame Sel -> backend FrameStore */
247 	else if(opc == iro_Store) {
248 		ir_node *bl  = get_nodes_block(irn);
249 		ir_node *val = get_Store_value(irn);
250 		ir_node *sel = get_Store_ptr(irn);
251 		entity *ent  = get_sel_ent(sel);
252 			cls = arch_isa_get_reg_class_for_mode(isa, get_irn_mode(val));
254 			nw  = be_new_FrameStore(isa->sp->reg_class, cls, irg, bl, get_Store_mem(irn), frame, val, ent);
	/* bare frame Sel -> backend FrameAddr */
258 		entity *ent = get_sel_ent(irn);
260 			ir_node *bl = get_nodes_block(irn);
261 			nw = be_new_FrameAddr(isa->sp->reg_class, irg, bl, frame, ent);
/* True iff parameter 'pos' of the call has an arg record and is not
 * passed in a register, i.e. it goes onto the stack. */
269 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
271 	be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
272 	return arg && !arg->in_reg;
/* Lower one Call node to the backend calling convention: query the ISA
 * for the ABI, materialize stack stores / IncSPs for stack-passed
 * parameters, build a be_Call with register-passed arguments, create
 * Projs and a Keep for caller-save registers, and clean up the stack
 * afterwards.  All IncSPs are recorded in env->stack_ops for later
 * SSA fixup. */
275 static void adjust_call(be_abi_irg_t *env, ir_node *irn)
277 	ir_graph *irg             = env->birg->irg;
278 	const arch_isa_t *isa     = env->birg->main_env->arch_env->isa;
279 	be_abi_call_t *call       = be_abi_call_new();
280 	ir_type *mt               = get_Call_type(irn);
281 	int n_params              = get_method_n_params(mt);
282 	ir_node *curr_sp          = get_irg_frame(irg);
283 	ir_node *curr_mem         = get_Call_mem(irn);
284 	ir_node *bl               = get_nodes_block(irn);
285 	pset *results             = pset_new_ptr(8);
286 	pset *caller_save         = pset_new_ptr(8);
288 	int stack_dir             = arch_isa_stack_dir(isa);
289 	const arch_register_t *sp = arch_isa_sp(isa);
290 	ir_mode *mach_mode        = sp->reg_class->mode;
291 	struct obstack *obst      = &env->obst;
292 	ir_node *no_mem           = get_irg_no_mem(irg);
294 	ir_node *res_proj = NULL;
295 	int curr_res_proj = -1;
302 	const ir_edge_t *edge;
307 	/* Let the isa fill out the abi description for that call node. */
308 	arch_isa_get_call_abi(isa, mt, call);
310 	// assert(get_method_variadicity(mt) == variadicity_non_variadic);
312 	/* Insert code to put the stack arguments on the stack. */
314 	assert(get_Call_n_params(irn) == n_params);
315 	for(i = 0; i < n_params; ++i) {
316 		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		/* stack-passed parameter: remember its index and grow total size */
319 			stack_size += get_type_size_bytes(get_method_param_type(mt, i));
320 			obstack_int_grow(obst, i);
324 	pos = obstack_finish(obst);
326 	/* Collect all arguments which are passed in registers. */
327 	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
328 		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
329 		if(arg && arg->in_reg) {
330 			obstack_int_grow(obst, i);
334 	low_args = obstack_finish(obst);
336 	/* If there are some parameters which shall be passed on the stack. */
339 		int do_seq = (call->flags & BE_ABI_USE_PUSH);
341 		/* Reverse list of stack parameters if call arguments are from left to right */
342 		if(call->flags & BE_ABI_LEFT_TO_RIGHT) {
343 			for(i = 0; i < n_pos / 2; ++i) {
344 				int other  = n_pos - i - 1;
352 		 * If the stack is decreasing and we do not want to store sequentially,
353 		 * we allocate as much space on the stack all parameters need, by
354 		 * moving the stack pointer along the stack's direction.
356 		if(stack_dir < 0 && !do_seq) {
357 			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
358 			pset_insert_ptr(env->stack_ops, curr_sp);
361 		assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
362 		for(i = 0; i < n_pos; ++i) {
364 			ir_node *param      = get_Call_param(irn, p);
365 			ir_node *addr       = curr_sp;
367 			type *param_type    = get_method_param_type(mt, p);
368 			int param_size      = get_type_size_bytes(param_type);
370 			/* Make the expression to compute the argument's offset. */
372 				addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
373 				addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
376 			/* Insert a store for primitive arguments. */
377 			if(is_atomic_type(param_type)) {
378 				mem = new_r_Store(irg, bl, curr_mem, addr, param);
379 				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
382 			/* Make a memcopy for compound arguments. */
384 				assert(mode_is_reference(get_irn_mode(param)));
385 				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
386 				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
389 			obstack_ptr_grow(obst, mem);
391 			curr_ofs += param_size;
394 			 * If we wanted to build the arguments sequentially,
395 			 * the stack pointer for the next must be incremented,
396 			 * and the memory value propagated.
400 				curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
404 				 * only put the first IncSP to the stack fixup set since the other
405 				 * ones are correctly connected to other nodes and do not need
409 					pset_insert_ptr(env->stack_ops, curr_sp);
413 		in = (ir_node **) obstack_finish(obst);
415 		/* We need the sync only, if we didn't build the stores sequentially. */
417 			curr_mem = new_r_Sync(irg, bl, n_pos, in);
418 		obstack_free(obst, in);
421 	/* Collect caller save registers */
422 	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
424 		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
425 		for(j = 0; j < cls->n_regs; ++j) {
426 			const arch_register_t *reg = arch_register_for_index(cls, j);
427 			if(arch_register_type_is(reg, caller_save))
428 				pset_insert_ptr(caller_save, (void *) reg);
432 	/* search the greatest result proj number */
433 	foreach_out_edge(irn, edge) {
434 		const ir_edge_t *res_edge;
435 		ir_node *irn = get_edge_src_irn(edge);
437 		if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
439 			foreach_out_edge(irn, res_edge) {
441 				be_abi_call_arg_t *arg;
442 				ir_node *res = get_edge_src_irn(res_edge);
444 				assert(is_Proj(res));
445 				proj = get_Proj_proj(res);
446 				arg = get_call_arg(call, 1, proj);
447 				if(proj > curr_res_proj)
448 					curr_res_proj = proj;
				/* register carrying this result is not clobbered for us */
450 					pset_remove_ptr(caller_save, arg->reg);
456 	/* make the back end call node and set its register requirements. */
457 	for(i = 0; i < n_low_args; ++i)
458 		obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
460 	in = obstack_finish(obst);
461 	low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
462 	obstack_free(obst, in);
463 	exchange(irn, low_call);
465 	/* Make additional projs for the caller save registers
466 	   and the Keep node which keeps them alive. */
467 	if(pset_count(caller_save) > 0) {
468 		const arch_register_t *reg;
472 			res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
474 		for(reg = pset_first(caller_save); reg; reg = pset_next(caller_save))
475 			obstack_ptr_grow(obst, new_r_Proj(irg, bl, res_proj, reg->reg_class->mode, curr_res_proj++));
477 		in = (ir_node **) obstack_finish(obst);
478 		be_new_Keep(NULL, irg, bl, pset_count(caller_save), in);
479 		obstack_free(obst, in);
482 	/* Clean up the stack. */
484 		ir_node *last_inc_sp;
486 		/* Get the result ProjT */
488 			res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
490 		/* Make a Proj for the stack pointer. */
491 		sp_proj     = new_r_Proj(irg, bl, res_proj, sp->reg_class->mode, curr_res_proj++);
492 		last_inc_sp = be_new_IncSP(sp, irg, bl, sp_proj, no_mem, stack_size, be_stack_dir_against);
493 		pset_insert_ptr(env->stack_ops, last_inc_sp);
496 	be_abi_call_free(call);
497 	obstack_free(obst, pos);
499 	del_pset(caller_save);
/* Graph walker: lower every Call node via adjust_call().
 * 'data' is the be_abi_irg_t environment. */
502 static void adjust_call_walker(ir_node *irn, void *data)
504 	if(get_irn_opcode(irn) == iro_Call)
505 		adjust_call(data, irn);
509  * Walker to implement alloca-style allocations.
510  * They are implemented using an add to the stack pointer
511  * and a copy instruction.
513 static void implement_stack_alloc(be_abi_irg_t *env, ir_node *irn)
515 	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
516 	ir_node *bl           = get_nodes_block(irn);
517 	ir_node *res          = env->init_sp;
520 	assert(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc);
522 	size = get_Alloc_size(irn);
	/* upward-growing stack: the allocated block starts at the old sp,
	 * so copy the sp before bumping it */
523 	if(isa->stack_dir > 0)
524 		res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
526 	res = be_new_AddSP(isa->sp, env->birg->irg, bl, res, size);
527 	pset_insert_ptr(env->stack_ops, res);
	/* downward-growing stack: the block starts at the new sp */
529 	if(isa->stack_dir < 0)
530 		res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
/* Graph walker: collect all Return nodes on the obstack passed as
 * 'data'. */
534 static void collect_return_walker(ir_node *irn, void *data)
536 	if(get_irn_opcode(irn) == iro_Return) {
537 		struct obstack *obst = data;
538 		obstack_ptr_grow(obst, irn);
/* Emit the function prologue in the start block: reserve the frame
 * (placeholder size BE_STACK_FRAME_SIZE, patched later), optionally
 * save the old frame pointer, establish the new frame pointer and
 * reroute uses of the old frame node to it.  Returns the new frame
 * pointer node. */
542 static ir_node *setup_frame(be_abi_irg_t *env)
544 	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
545 	const arch_register_t *sp = isa->sp;
546 	const arch_register_t *bp = isa->bp;
547 	ir_graph *irg      = env->birg->irg;
548 	ir_node *bl        = get_irg_start_block(irg);
549 	ir_node *no_mem    = get_irg_no_mem(irg);
550 	ir_node *old_frame = get_irg_frame(irg);
551 	int store_old_fp   = 1;
552 	int omit_fp        = env->omit_fp;
553 	ir_node *stack     = pmap_get(env->regs, (void *) sp);
554 	ir_node *frame     = pmap_get(env->regs, (void *) bp);
556 	int stack_nr       = get_Proj_proj(stack);
	/* frame pointer omitted: just reserve the frame on the stack */
559 		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
	/* save the old frame pointer and advance sp past the save slot */
567 			irn = new_r_Store(irg, bl, get_irg_initial_mem(irg), stack, frame);
568 			env->store_bp_mem = new_r_Proj(irg, bl, irn, mode_M, pn_Store_M);
569 			stack = be_new_IncSP(sp, irg, bl, stack, env->store_bp_mem,
570 					get_mode_size_bytes(bp->reg_class->mode), be_stack_dir_along);
	/* new frame pointer = copy of the current stack pointer */
573 		frame = be_new_Copy(bp->reg_class, irg, bl, stack);
575 		be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
576 		if(env->dedicated_fp) {
577 			be_set_constr_single_reg(frame, -1, bp);
578 			be_node_set_flags(frame, -1, arch_irn_flags_ignore);
581 		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
584 	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
585 	env->init_sp  = stack;
586 	set_irg_frame(irg, frame);
587 	edges_reroute(old_frame, frame, irg);
/* Emit the epilogue for one Return block: unwind the stack frame
 * (either by a reverse IncSP or by restoring sp from the frame
 * pointer), reload the saved old frame pointer if one was stored,
 * and append the callee-save register values to the obstack so they
 * become additional Return inputs. */
592 static void clearup_frame(be_abi_irg_t *env, ir_node *bl, struct obstack *obst)
594 	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
595 	const arch_register_t *sp = isa->sp;
596 	const arch_register_t *bp = isa->bp;
597 	ir_graph *irg      = env->birg->irg;
598 	ir_node *no_mem    = get_irg_no_mem(irg);
599 	ir_node *frame     = get_irg_frame(irg);
600 	ir_node *stack     = env->init_sp;
601 	int store_old_fp   = 1;
	/* frame pointer omitted: undo the frame reservation */
607 		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
	/* otherwise restore sp from the frame pointer */
611 		stack = be_new_Copy(sp->reg_class, irg, bl, frame);
612 		be_set_constr_single_reg(stack, -1, sp);
613 		be_node_set_flags(stack, -1, arch_irn_flags_ignore);
616 			ir_mode *mode = sp->reg_class->mode;
		/* pop and reload the saved old frame pointer */
619 			stack = be_new_IncSP(sp, irg, bl, stack, no_mem, get_mode_size_bytes(mode), be_stack_dir_against);
620 			irn   = new_r_Load(irg, bl, env->store_bp_mem, stack, mode);
621 			irn   = new_r_Proj(irg, bl, irn, mode, pn_Load_res);
622 			frame = be_new_Copy(bp->reg_class, irg, bl, irn);
	/* hand every recorded register node to the Return as an input */
626 	pmap_foreach(env->regs, ent) {
627 		const arch_register_t *reg = ent->key;
628 		ir_node *irn               = ent->value;
635 			obstack_ptr_grow(obst, irn);
/* Build a class type describing the stack-argument area of the current
 * function: one member entity per stack-passed parameter, laid out in
 * stack order (direction derived from stack_dir and left_to_right).
 * The entities are remembered in the arg records (stack_ent). */
639 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
641 	int inc  = env->birg->main_env->arch_env->isa->stack_dir * (env->left_to_right ? 1 : -1);
642 	int n    = get_method_n_params(method_type);
643 	int curr = inc > 0 ? 0 : n - 1;
	/* name the type after the function entity */
650 	snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
651 	res = new_type_class(new_id_from_str(buf));
653 	for(i = 0; i < n; ++i, curr += inc) {
654 		type *param_type       = get_method_param_type(method_type, curr);
655 		be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
658 			snprintf(buf, sizeof(buf), "param_%d", i);
659 			arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
660 			add_class_member(res, arg->stack_ent);
661 			set_entity_offset_bytes(arg->stack_ent, ofs);
662 			ofs += get_type_size_bytes(param_type);
666 	set_type_size_bytes(res, ofs);
/* Return a (lazily created, cached) primitive type matching the frame
 * pointer register's mode.
 * NOTE(review): the static cache makes this non-reentrant and assumes
 * a single bp register class per process — confirm that is acceptable. */
670 static type *get_bp_type(const arch_register_t *bp)
672 	static type *bp_type = NULL;
674 		bp_type = new_type_primitive(new_id_from_str("bp_type"), bp->reg_class->mode);
675 		set_type_size_bytes(bp_type, get_mode_size_bytes(bp->reg_class->mode));
681  * Modify the irg itself and the frame type: lower frame Sels, build
 * the argument-area type, create the RegParams node with Projs for all
 * register parameters and callee-save registers, set up the frame,
 * replace argument Projs by StackParam/FrameAddr/RegParams Projs, and
 * rebuild every Return as a backend Return carrying the callee-save
 * registers.
683 static void modify_irg(be_abi_irg_t *env)
685 	firm_dbg_module_t *dbg    = env->dbg;
686 	be_abi_call_t *call       = be_abi_call_new();
687 	const arch_isa_t *isa     = env->birg->main_env->arch_env->isa;
688 	const arch_register_t *sp = arch_isa_sp(isa);
689 	ir_graph *irg             = env->birg->irg;
690 	ir_node *bl               = get_irg_start_block(irg);
691 	ir_node *end              = get_irg_end_block(irg);
692 	ir_node *arg_tuple        = get_irg_args(irg);
693 	ir_node *no_mem           = get_irg_no_mem(irg);
694 	type *method_type         = get_entity_type(get_irg_entity(irg));
695 	int n_params              = get_method_n_params(method_type);
697 	int reg_params_nr   = 0;
702 	ir_node *frame_pointer;
703 	ir_node *reg_params, *reg_params_bl;
704 	ir_node **args, **args_repl;
705 	const ir_edge_t *edge;
710 	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
712 	/* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
713 	irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
715 	env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
716 	env->regs  = pmap_create();
718 	/* Find the maximum proj number of the argument tuple proj */
719 	foreach_out_edge(arg_tuple, edge)  {
720 		ir_node *irn = get_edge_src_irn(edge);
721 		int nr       = get_Proj_proj(irn);
722 		max_arg      = MAX(max_arg, nr);
	/* NOTE(review): arrays are sized max_arg entries; check elsewhere
	 * that max_arg was incremented past the largest proj number. */
725 	args      = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
726 	args_repl = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
727 	memset(args, 0, max_arg * sizeof(args[0]));
728 	memset(args_repl, 0, max_arg * sizeof(args[0]));
730 	/* Fill the argument vector */
731 	foreach_out_edge(arg_tuple, edge) {
732 		ir_node *irn = get_edge_src_irn(edge);
733 		int nr       = get_Proj_proj(irn);
735 		DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
738 	/* Get the ABI constraints from the ISA */
739 	arch_isa_get_call_abi(isa, method_type, call);
741 	arg_type = compute_arg_type(env, call, method_type);
742 	stack_frame_init(env->frame, arg_type, call->between_type, get_irg_frame_type(irg), isa->stack_dir);
744 	/* Count the register params and add them to the number of Projs for the RegParams node */
745 	for(i = 0; i < n_params; ++i) {
746 		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
748 			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
749 			pmap_insert(env->regs, (void *) arg->reg, NULL);
750 			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
754 	/* Collect all callee-save registers */
755 	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
756 		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
757 		for(j = 0; j < cls->n_regs; ++j) {
758 			const arch_register_t *reg = &cls->regs[j];
759 			if(arch_register_type_is(reg, callee_save))
760 				pmap_insert(env->regs, (void *) reg, NULL);
	/* sp and bp always get RegParams Projs */
764 	pmap_insert(env->regs, (void *) sp, NULL);
765 	pmap_insert(env->regs, (void *) isa->bp, NULL);
766 	reg_params_bl   = get_irg_start_block(irg);
767 	env->reg_params = reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
771 	 * make proj nodes for the callee save registers.
772 	 * memorize them, since Return nodes get those as inputs.
774 	for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs)) {
775 		arch_register_t *reg = ent->key;
776 		int pos = -(reg_params_nr + 1);
777 		ent->value = new_r_Proj(irg, reg_params_bl, reg_params, reg->reg_class->mode, reg_params_nr);
778 		be_set_constr_single_reg(reg_params, pos, reg);
781 		 * If the register is an ignore register,
782 		 * The Proj for that register shall also be ignored during register allocation.
784 		if(arch_register_type_is(reg, ignore))
785 			be_node_set_flags(reg_params, pos, arch_irn_flags_ignore);
789 		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", reg_params_nr - 1, reg->name));
792 	/* Insert the code to set up the stack frame */
793 	frame_pointer = setup_frame(env);
795 	/* Now, introduce stack param nodes for all parameters passed on the stack */
796 	for(i = 0; i < max_arg; ++i) {
797 		ir_node *arg_proj = args[i];
798 		if(arg_proj != NULL) {
799 			be_abi_call_arg_t *arg;
801 			int nr = get_Proj_proj(arg_proj);
			/* NOTE(review): clamping to n_params looks like overflow
			 * protection for the variadic case — confirm. */
803 			nr         = MIN(nr, n_params);
804 			arg        = get_call_arg(call, 0, nr);
805 			param_type = get_method_param_type(method_type, nr);
			/* register-passed: replace by a RegParams Proj */
808 				args_repl[i] = new_r_Proj(irg, reg_params_bl, reg_params, get_irn_mode(arg_proj), reg_params_nr);
809 				be_set_constr_single_reg(reg_params, -(reg_params_nr + 1), arg->reg);
813 				/* when the (stack) parameter is primitive, we insert a StackParam
814 				node representing the load of that parameter */
817 				/* For atomic parameters which are actually used, we create a StackParam node. */
818 				if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
819 					ir_mode *mode                    = get_type_mode(param_type);
820 					const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
821 					args_repl[i] = be_new_StackParam(cls, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
824 				/* The stack parameter is not primitive (it is a struct or array),
825 				we thus will create a node representing the parameter's address
828 					args_repl[i] = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
834 	/* reroute the edges from the original argument projs to the RegParam ones. */
835 	for(i = 0; i < max_arg; ++i) {
836 		if(args[i] != NULL) {
837 			assert(args_repl[i] != NULL);
838 			edges_reroute(args[i], args_repl[i], irg);
842 	/* All Return nodes hang on the End node, so look for them there. */
843 	for(i = 0, n = get_irn_arity(end); i < n; ++i) {
844 		ir_node *irn = get_irn_n(end, i);
846 		if(get_irn_opcode(irn) == iro_Return) {
847 			ir_node *bl    = get_nodes_block(irn);
848 			int n_res      = get_Return_n_ress(irn);
849 			pmap *reg_map  = pmap_create_ex(n_res);
854 			/* collect all arguments of the return */
855 			for(i = 0; i < n_res; ++i) {
856 				ir_node *res           = get_Return_res(irn, i);
857 				be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
859 				assert(arg->in_reg && "return value must be passed in register");
860 				pmap_insert(reg_map, res, (void *) arg->reg);
861 				obstack_ptr_grow(&env->obst, res);
864 			/* generate the clean up code and add additional parameters to the return. */
865 			clearup_frame(env, bl, &env->obst);
867 			/* The in array for the new back end return is now ready. */
868 			n   = obstack_object_size(&env->obst) / sizeof(in[0]);
869 			in  = obstack_finish(&env->obst);
870 			ret = be_new_Return(irg, bl, n, in);
872 			/* Set the constraints for some arguments of the return. */
873 			for(i = 0; i < n; i++) {
874 				const arch_register_t *reg = pmap_get(reg_map, in[i]);
876 					be_set_constr_single_reg(ret, i, reg);
879 			obstack_free(&env->obst, in);
880 			pmap_destroy(reg_map);
884 	obstack_free(&env->obst, args);
885 	be_abi_call_free(call);
889  * Walker: puts all Alloc(stack_alloc) on a obstack
891 static void collect_alloca_walker(ir_node *irn, void *data)
893 	be_abi_irg_t *env = data;
894 	if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
895 		obstack_ptr_grow(&env->obst, irn);
/* Entry point: build the ABI environment for one backend irg.
 * Queries the ISA for the calling convention, caches the convention
 * flags, collects and lowers stack Allocs, and lowers all Calls.
 * NOTE(review): the malloc result is used without a NULL check —
 * verify the project's OOM policy. */
898 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
900 	be_abi_irg_t *env = malloc(sizeof(env[0]));
903 	ir_node **stack_allocs;
905 	env->method_type = get_entity_type(get_irg_entity(birg->irg));
906 	env->call        = be_abi_call_new();
907 	arch_isa_get_call_abi(birg->main_env->arch_env->isa, env->method_type, env->call);
	/* cache the convention flags as bitfields */
909 	env->omit_fp       = (env->call->flags & BE_ABI_TRY_OMIT_FRAME_POINTER) != 0;
910 	env->dedicated_fp  = (env->call->flags & BE_ABI_FRAME_POINTER_DEDICATED) != 0;
911 	env->left_to_right = (env->call->flags & BE_ABI_LEFT_TO_RIGHT) != 0;
912 	env->save_old_fp   = (env->call->flags & BE_ABI_SAVE_OLD_FRAME_POINTER) != 0;
914 	env->stack_ops     = pset_new_ptr(32);
915 	env->dbg           = firm_dbg_register("firm.be.abi");
916 	obstack_init(&env->obst);
918 	/* search for stack allocation nodes and record them */
919 	irg_walk_graph(env->birg->irg, collect_alloca_walker, NULL, env);
920 	obstack_ptr_grow(&env->obst, NULL);
921 	stack_allocs = obstack_finish(&env->obst);
923 	/* If there are stack allocations in the irg, we need a frame pointer */
924 	if(stack_allocs[0] != NULL)
	/* lower each recorded Alloc (NULL-terminated list) */
929 	for(i = 0; stack_allocs[i] != NULL; ++i)
930 		implement_stack_alloc(env, stack_allocs[i]);
932 	irg_walk_graph(env->birg->irg, NULL, adjust_call_walker, env);
/* Graph walker: collect backend nodes that produce a stack pointer
 * value into the pset passed as 'data' (opcode cases elided here). */
936 static void collect_stack_nodes(ir_node *irn, void *data)
940 	switch(be_get_irn_opcode(irn)) {
943 		pset_insert_ptr(s, irn);
/* Fix up the SSA form of all stack-pointer-producing nodes: collect
 * them, then let the SSA constructor (driven by dominance frontiers)
 * insert the necessary Phis. */
947 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
949 	dom_front_info_t *df;
952 	/* We need dominance frontiers for fix up */
953 	df = be_compute_dominance_frontiers(env->birg->irg);
955 	stack_ops = pset_new_ptr_default();
956 	pset_insert_ptr(env->stack_ops, env->init_sp);
957 	irg_walk_graph(env->birg->irg, collect_stack_nodes, NULL, stack_ops);
958 	be_ssa_constr_set(df, stack_ops);
961 	/* free these dominance frontiers */
962 	be_free_dominance_frontiers(df);
/* Map an IncSP's direction to a sign: +1 for be_stack_dir_along,
 * -1 for be_stack_dir_against. */
965 static int get_dir(ir_node *irn)
967 	return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
/* Walk the schedule of block 'bl', patching IncSP placeholder sizes
 * (BE_STACK_FRAME_SIZE -> real frame type size) and propagating the
 * running stack bias into every scheduled node.  Returns the bias at
 * the end of the block. */
970 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
972 	const arch_env_t *aenv = env->birg->main_env->arch_env;
974 	int start_bias = bias;
976 	sched_foreach(bl, irn) {
977 		if(be_is_IncSP(irn)) {
978 			int ofs = be_get_IncSP_offset(irn);
979 			int dir = get_dir(irn);
			/* replace the prologue/epilogue placeholder by the real frame size */
981 			if(ofs == BE_STACK_FRAME_SIZE) {
982 				ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
983 				be_set_IncSP_offset(irn, ofs);
990 		arch_set_stack_bias(aenv, irn, bias);
/* Block walker: apply process_stack_bias() to every block except the
 * start block, starting from the bias computed for the start block.
 * NOTE(review): each block restarts from start_block_bias, not from
 * its predecessor's end bias — confirm that is intended. */
997 static void stack_bias_walker(ir_node *bl, void *data)
999 	if(bl != get_irg_start_block(get_irn_irg(bl))) {
1000 		be_abi_irg_t *env = data;
1001 		process_stack_bias(env, bl, env->start_block_bias);
/* Fix the stack bias of all nodes in the irg: process the start block
 * first, then every other block via the walker. */
1005 void be_abi_fix_stack_bias(be_abi_irg_t *env)
1007 	ir_graph *irg = env->birg->irg;
1009 	/* Determine the stack bias at the end of the start block. */
1010 	env->start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
1012 	/* fix the bias in all other blocks */
1013 	irg_block_walk_graph(irg, stack_bias_walker, NULL, env);
/* Release the ABI environment's resources (the env struct itself
 * appears to be freed on a line not visible here — confirm). */
1016 void be_abi_free(be_abi_irg_t *env)
1018 	del_pset(env->stack_ops);
1019 	obstack_free(&env->obst, NULL);
1023 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1025 assert(arch_register_type_is(reg, callee_save));
1026 assert(pmap_contains(abi->regs, (void *) reg));
1027 return pmap_get(abi->regs, (void *) reg);