8 #include "firm_config.h"
13 #include "irgraph_t.h"
16 #include "iredges_t.h"
24 #include "besched_t.h"
/* Generic max/min helpers. NOTE: each argument is evaluated twice — do not
   pass expressions with side effects (e.g. MAX(i++, j)). */
26 #define MAX(x, y) ((x) > (y) ? (x) : (y))
27 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/* ABI data structures. NOTE(review): this is a numbered listing with elided
   lines — gaps in the left-hand numbering mean fields are missing from view,
   so these declarations are not complete as shown. */
/* Descriptor for one call argument/result: where it lives (register vs. stack). */
29 typedef struct _be_abi_call_arg_t {
/* Register assigned to this argument; presumably NULL/unused for stack args — confirm in full source. */
34 const arch_register_t *reg;
/* Per-call ABI description: flags plus a set of be_abi_call_arg_t (fields elided). */
38 struct _be_abi_call_t {
39 be_abi_call_flags_t flags;
/* Layout of a stack frame: argument area, "between" area and local frame. */
44 typedef struct _be_stack_frame_t {
49 type *order[3]; /**< arg, between and frame types ordered. */
/* A single slot inside a stack frame (entity + frame back-pointer; rest elided). */
55 struct _be_stack_slot_t {
56 struct _be_stack_frame_t *frame;
/* Per-irg ABI environment threaded through all passes in this file. */
60 struct _be_abi_irg_t {
66 ir_node *init_sp; /**< The node representing the stack pointer
67 at the start of the function. */
71 pset *stack_ops; /**< Contains all nodes modifying the stack pointer. */
/* ABI option bits derived from the call flags in be_abi_introduce(). */
77 unsigned dedicated_fp : 1;
78 unsigned left_to_right : 1;
79 unsigned save_old_fp : 1;
/* Memory Proj of the Store that saved the old frame pointer (see setup_frame). */
81 ir_node *store_bp_mem;
82 be_stack_frame_t *frame;
84 firm_dbg_module_t *dbg; /**< The debugging module. */
/* set-compare callback for call-argument descriptors: two entries are equal
   iff both is_res and pos match. Returns 0 on equality (set API convention);
   the size parameter n is unused. */
87 static int cmp_call_arg(const void *a, const void *b, size_t n)
89 const be_abi_call_arg_t *p = a, *q = b;
90 return !(p->is_res == q->is_res && p->pos == q->pos);
/* Look up the argument descriptor keyed by (is_res, pos) in call->params.
   If do_insert is non-zero a fresh (stack-allocated template) entry is
   inserted; otherwise a pure lookup is performed (may return NULL).
   The hash deliberately mixes both key components. */
93 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
95 be_abi_call_arg_t arg;
/* Simple combined hash; assumes pos < 100 keeps buckets distinct — TODO confirm. */
101 hash = is_res * 100 + pos;
104 ? set_insert(call->params, &arg, sizeof(arg), hash)
105 : set_find(call->params, &arg, sizeof(arg), hash);
/* Read-only lookup of an argument descriptor; returns NULL when the
   (is_res, pos) pair was never registered. */
108 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
110 return get_or_set_call_arg(call, is_res, pos, 0);
/* Record ABI flags and the "between" type (the area between arguments and
   locals, e.g. return address / saved FP) on the call description.
   The flags assignment itself is on an elided line of this listing. */
113 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, ir_type *between_type)
116 call->between_type = between_type;
/* Mark parameter arg_pos as passed on the stack (creates the descriptor;
   the field assignments are elided from this listing). */
119 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
121 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Mark parameter arg_pos as passed in register reg. */
124 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
126 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/* Mark result arg_pos as returned in register reg (is_res == 1). */
131 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
133 be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/* Allocate and initialize an empty call description.
   NOTE(review): the malloc result is used unchecked — a NULL return would
   crash on the next line; consider an xmalloc wrapper in the full source. */
138 be_abi_call_t *be_abi_call_new(void)
140 be_abi_call_t *call = malloc(sizeof(call[0]));
141 call->flags = BE_ABI_NONE;
142 call->params = new_set(cmp_call_arg, 16);
/* Release a call description; frees the parameter set (the free of the
   struct itself is on an elided line). */
146 void be_abi_call_free(be_abi_call_t *call)
148 del_set(call->params);
/* Compute the frame-pointer-relative offset of entity ent within the stack
   frame described by frame, additionally applying the current stack bias.
   The entity offset is relative to its containing type; sizes of all types
   ordered below it in frame->order are added on top. */
152 static int get_stack_entity_offset(be_stack_frame_t *frame, entity *ent, int bias)
154 type *t = get_entity_type(ent);
155 int ofs = get_entity_offset_bytes(ent);
159 /* Find the type the entity is contained in. */
160 for(index = 0; index < 3; ++index) {
161 if(frame->order[index] == t)
/* NOTE(review): if ent's type is not in order[], index stays 3 here — presumably
   callers guarantee membership; confirm in the full source. */
165 /* Add the size of all the types below the one of the entity to the entity's offset */
166 for(i = 0; i < index; ++i)
167 ofs += get_type_size_bytes(frame->order[i]);
169 /* correct the offset by the initial position of the frame pointer */
170 ofs -= frame->initial_offset;
172 /* correct the offset with the current bias. */
/* Determine the initial frame-pointer offset from a reference entity ent.
   initial_offset is zeroed first so that get_stack_entity_offset() returns
   the raw (uncorrected) offset, which then becomes the baseline. */
178 static int stack_frame_compute_initial_offset(be_stack_frame_t *frame, entity *ent)
180 frame->initial_offset = 0;
181 frame->initial_offset = get_stack_entity_offset(frame, ent, 0);
182 return frame->initial_offset;
/* Initialize a stack frame layout: the "between" area always sits in the
   middle; whether args or locals come first depends on the stack growth
   direction (the branching lines are elided from this listing). */
185 static be_stack_frame_t *stack_frame_init(be_stack_frame_t *frame, type *args, type *between, type *locals, int stack_dir)
187 frame->arg_type = args;
188 frame->between_type = between;
189 frame->frame_type = locals;
190 frame->initial_offset = 0;
191 frame->stack_dir = stack_dir;
192 frame->order[1] = between;
/* One ordering: args below, locals on top… */
195 frame->order[0] = args;
196 frame->order[2] = locals;
/* …and the mirrored ordering for the opposite stack direction. */
200 frame->order[0] = locals;
201 frame->order[2] = args;
/* If irn is a Sel node addressing the graph's frame, return the selected
   entity; otherwise presumably returns NULL (the else path is elided). */
207 static INLINE entity *get_sel_ent(ir_node *irn)
209 if(get_irn_opcode(irn) == iro_Sel
210 && get_Sel_ptr(irn) == get_irg_frame(get_irn_irg(irn))) {
212 return get_Sel_entity(irn);
/* Graph walker: rewrite Loads/Stores/Sels that go through the frame pointer
   into backend FrameLoad/FrameStore/FrameAddr nodes. data is the be_abi_irg_t
   environment. Exchange of the old node with nw happens on elided lines. */
218 static void lower_frame_sels_walker(ir_node *irn, void *data)
220 const arch_register_class_t *cls;
221 be_abi_irg_t *env = data;
222 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
223 ir_graph *irg = get_irn_irg(irn);
224 ir_node *frame = get_irg_frame(irg);
226 opcode opc = get_irn_opcode(irn);
/* Case 1: a Load whose pointer is a frame Sel → FrameLoad. */
228 if(opc == iro_Load) {
229 ir_node *bl = get_nodes_block(irn);
230 ir_node *sel = get_Load_ptr(irn);
231 entity *ent = get_sel_ent(sel);
232 cls = arch_isa_get_reg_class_for_mode(isa, get_Load_mode(irn));
234 nw = be_new_FrameLoad(isa->sp->reg_class, cls, irg, bl, get_Load_mem(irn), frame, ent);
/* Case 2: a Store whose pointer is a frame Sel → FrameStore. */
237 else if(opc == iro_Store) {
238 ir_node *bl = get_nodes_block(irn);
239 ir_node *val = get_Store_value(irn);
240 ir_node *sel = get_Store_ptr(irn);
241 entity *ent = get_sel_ent(sel);
242 cls = arch_isa_get_reg_class_for_mode(isa, get_irn_mode(val));
244 nw = be_new_FrameStore(isa->sp->reg_class, cls, irg, bl, get_Store_mem(irn), frame, val, ent);
/* Case 3: a bare frame Sel → FrameAddr (address of the entity). */
248 entity *ent = get_sel_ent(irn);
250 ir_node *bl = get_nodes_block(irn);
251 nw = be_new_FrameAddr(isa->sp->reg_class, irg, bl, frame, ent);
/* Non-zero iff parameter pos was registered and is NOT passed in a register,
   i.e. it is passed on the stack. */
259 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
261 be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
262 return arg && !arg->in_reg;
/* Lower a firm Call node to a backend Call:
   - query the ISA for the call's ABI (register vs. stack per argument),
   - materialize stack-passed arguments via Store/CopyB (or sequential
     pushes when BE_ABI_USE_PUSH is set),
   - compute caller-save registers and keep them alive with a Keep node,
   - replace the Call with be_new_Call and fix up the stack afterwards.
   NOTE(review): many intermediate lines are elided from this listing. */
265 static void adjust_call(be_abi_irg_t *env, ir_node *irn)
267 ir_graph *irg = env->birg->irg;
268 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
269 be_abi_call_t *call = be_abi_call_new();
270 ir_type *mt = get_Call_type(irn);
271 int n_params = get_method_n_params(mt);
272 ir_node *curr_sp = get_irg_frame(irg);
273 ir_node *curr_mem = get_Call_mem(irn);
274 ir_node *bl = get_nodes_block(irn);
275 pset *results = pset_new_ptr(8);
276 pset *caller_save = pset_new_ptr(8);
278 int stack_dir = arch_isa_stack_dir(isa);
279 const arch_register_t *sp = arch_isa_sp(isa);
280 ir_mode *mach_mode = sp->reg_class->mode;
281 struct obstack *obst = &env->obst;
282 ir_node *no_mem = get_irg_no_mem(irg);
284 ir_node *res_proj = NULL;
285 int curr_res_proj = -1;
292 const ir_edge_t *edge;
297 /* Let the isa fill out the abi description for that call node. */
298 arch_isa_get_call_abi(isa, mt, call);
300 // assert(get_method_variadicity(mt) == variadicity_non_variadic);
302 /* Insert code to put the stack arguments on the stack. */
304 assert(get_Call_n_params(irn) == n_params);
305 for(i = 0; i < n_params; ++i) {
306 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
309 stack_size += get_type_size_bytes(get_method_param_type(mt, i));
310 obstack_int_grow(obst, i);
/* pos = array of parameter indices that go on the stack. */
314 pos = obstack_finish(obst);
316 /* Collect all arguments which are passed in registers. */
317 for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
318 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
319 if(arg && arg->in_reg) {
320 obstack_int_grow(obst, i);
324 low_args = obstack_finish(obst);
326 /* If there are some parameters shich shall be passed on the stack. */
329 int do_seq = (call->flags & BE_ABI_USE_PUSH);
331 /* Reverse list of stack parameters if call arguments are from left to right */
332 if(call->flags & BE_ABI_LEFT_TO_RIGHT) {
333 for(i = 0; i < n_pos / 2; ++i) {
334 int other = n_pos - i - 1;
342 * If the stack is decreasing and we do not want to store sequentially,
343 * we allocate as much space on the stack all parameters need, by
344 * moving the stack pointer along the stack's direction.
346 if(stack_dir < 0 && !do_seq) {
347 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
348 pset_insert_ptr(env->stack_ops, curr_sp);
351 assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
/* Emit one Store (atomic) or CopyB (compound) per stack parameter. */
352 for(i = 0; i < n_pos; ++i) {
354 ir_node *param = get_Call_param(irn, p);
355 ir_node *addr = curr_sp;
357 type *param_type = get_method_param_type(mt, p);
358 int param_size = get_type_size_bytes(param_type);
360 /* Make the expression to compute the argument's offset. */
362 addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
363 addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
366 /* Insert a store for primitive arguments. */
367 if(is_atomic_type(param_type)) {
368 mem = new_r_Store(irg, bl, curr_mem, addr, param);
369 mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
372 /* Make a memcopy for compound arguments. */
374 assert(mode_is_reference(get_irn_mode(param)));
375 mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
376 mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
379 obstack_ptr_grow(obst, mem);
381 curr_ofs += param_size;
384 * If we wanted to build the arguments sequentially,
385 * the stack pointer for the next must be incremented,
386 * and the memory value propagated.
390 curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
394 * only put the first IncSP to the stack fixup set since the other
395 * ones are correctly connected to other nodes and do not need
399 pset_insert_ptr(env->stack_ops, curr_sp);
403 in = (ir_node **) obstack_finish(obst);
405 /* We need the sync only, if we didn't build the stores sequentially. */
407 curr_mem = new_r_Sync(irg, bl, n_pos, in);
408 obstack_free(obst, in);
411 /* Collect caller save registers */
412 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
414 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
415 for(j = 0; j < cls->n_regs; ++j) {
416 const arch_register_t *reg = arch_register_for_index(cls, j);
417 if(arch_register_type_is(reg, caller_save))
418 pset_insert_ptr(caller_save, (void *) reg);
422 /* search the greatest result proj number */
423 foreach_out_edge(irn, edge) {
424 const ir_edge_t *res_edge;
/* NOTE: shadows the outer 'irn' (the Call) with the Proj being inspected. */
425 ir_node *irn = get_edge_src_irn(edge);
427 if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
429 foreach_out_edge(irn, res_edge) {
431 be_abi_call_arg_t *arg;
432 ir_node *res = get_edge_src_irn(res_edge);
434 assert(is_Proj(res));
435 proj = get_Proj_proj(res);
436 arg = get_call_arg(call, 1, proj);
437 if(proj > curr_res_proj)
438 curr_res_proj = proj;
/* A register used for a result is not clobbered → drop from caller_save. */
440 pset_remove_ptr(caller_save, arg->reg);
446 /* make the back end call node and set its register requirements. */
447 for(i = 0; i < n_low_args; ++i)
448 obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
450 in = obstack_finish(obst);
451 low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
452 obstack_free(obst, in);
453 exchange(irn, low_call);
455 /* Make additional projs for the caller save registers
456 and the Keep node which keeps them alive. */
457 if(pset_count(caller_save) > 0) {
458 const arch_register_t *reg;
462 res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
464 for(reg = pset_first(caller_save); reg; reg = pset_next(caller_save))
465 obstack_ptr_grow(obst, new_r_Proj(irg, bl, res_proj, reg->reg_class->mode, curr_res_proj++));
467 in = (ir_node **) obstack_finish(obst);
468 be_new_Keep(NULL, irg, bl, pset_count(caller_save), in);
469 obstack_free(obst, in);
472 /* Clean up the stack. */
474 ir_node *last_inc_sp;
476 /* Get the result ProjT */
478 res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
480 /* Make a Proj for the stack pointer. */
481 sp_proj = new_r_Proj(irg, bl, res_proj, sp->reg_class->mode, curr_res_proj++);
482 last_inc_sp = be_new_IncSP(sp, irg, bl, sp_proj, no_mem, stack_size, be_stack_dir_against);
483 pset_insert_ptr(env->stack_ops, last_inc_sp);
486 be_abi_call_free(call);
487 obstack_free(obst, pos);
489 del_pset(caller_save);
/* Graph walker: lower every Call node via adjust_call(); data is the
   be_abi_irg_t environment. */
492 static void adjust_call_walker(ir_node *irn, void *data)
494 if(get_irn_opcode(irn) == iro_Call)
495 adjust_call(data, irn);
499 * Walker to implement alloca-style allocations.
500 * They are implemented using an add to the stack pointer
501 * and a copy instruction.
503 static void implement_stack_alloc(be_abi_irg_t *env, ir_node *irn)
505 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
506 ir_node *bl = get_nodes_block(irn);
507 ir_node *res = env->init_sp;
510 assert(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc);
512 size = get_Alloc_size(irn);
/* Upward-growing stack: the result address is the SP *before* the bump. */
513 if(isa->stack_dir > 0)
514 res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
516 res = be_new_AddSP(isa->sp, env->birg->irg, bl, res, size);
517 pset_insert_ptr(env->stack_ops, res);
/* Downward-growing stack: the result address is the SP *after* the bump. */
519 if(isa->stack_dir < 0)
520 res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
/* Graph walker: append every Return node to the obstack passed as data. */
524 static void collect_return_walker(ir_node *irn, void *data)
526 if(get_irn_opcode(irn) == iro_Return) {
527 struct obstack *obst = data;
528 obstack_ptr_grow(obst, irn);
/* Build the function prologue in the start block: optionally save the old
   frame pointer, establish the new frame pointer (a Copy of SP, possibly
   pinned to the dedicated bp register) and reserve the frame area via IncSP
   with the symbolic BE_STACK_FRAME_SIZE (patched later in process_stack_bias).
   Returns the new frame pointer node; also reroutes all users of the old
   frame to it and records the final SP in env->init_sp. */
532 static ir_node *setup_frame(be_abi_irg_t *env)
534 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
535 const arch_register_t *sp = isa->sp;
536 const arch_register_t *bp = isa->bp;
537 ir_graph *irg = env->birg->irg;
538 ir_node *bl = get_irg_start_block(irg);
539 ir_node *no_mem = get_irg_no_mem(irg);
540 ir_node *old_frame = get_irg_frame(irg);
541 int store_old_fp = 1;
542 int omit_fp = env->omit_fp;
543 ir_node *stack = pmap_get(env->regs, (void *) sp);
544 ir_node *frame = pmap_get(env->regs, (void *) bp);
546 int stack_nr = get_Proj_proj(stack);
/* Frame-pointer-omitted path: just reserve the frame. */
549 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
/* Save the old frame pointer on the stack and remember the Store's memory. */
557 irn = new_r_Store(irg, bl, get_irg_initial_mem(irg), stack, frame);
558 env->store_bp_mem = new_r_Proj(irg, bl, irn, mode_M, pn_Store_M);
559 stack = be_new_IncSP(sp, irg, bl, stack, env->store_bp_mem,
560 get_mode_size_bytes(bp->reg_class->mode), be_stack_dir_along);
/* The new frame pointer is a copy of the current SP; must never be spilled. */
563 frame = be_new_Copy(bp->reg_class, irg, bl, stack);
565 be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
566 if(env->dedicated_fp) {
567 be_set_constr_single_reg(frame, -1, bp);
568 be_node_set_flags(frame, -1, arch_irn_flags_ignore);
571 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
574 be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
575 env->init_sp = stack;
576 set_irg_frame(irg, frame);
577 edges_reroute(old_frame, frame, irg);
/* Build the epilogue for one Return block: undo the frame reservation,
   restore SP from the frame pointer (or IncSP-against when FP is omitted),
   reload the saved old frame pointer, and append all callee-save register
   Projs to obst — they become extra inputs of the backend Return node. */
582 static void clearup_frame(be_abi_irg_t *env, ir_node *bl, struct obstack *obst)
584 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
585 const arch_register_t *sp = isa->sp;
586 const arch_register_t *bp = isa->bp;
587 ir_graph *irg = env->birg->irg;
588 ir_node *no_mem = get_irg_no_mem(irg);
589 ir_node *frame = get_irg_frame(irg);
590 ir_node *stack = env->init_sp;
591 int store_old_fp = 1;
/* FP-omitted: release the frame with a symbolic-size IncSP. */
597 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
/* With an FP: SP := FP (Copy pinned to the sp register). */
601 stack = be_new_Copy(sp->reg_class, irg, bl, frame);
602 be_set_constr_single_reg(stack, -1, sp);
603 be_node_set_flags(stack, -1, arch_irn_flags_ignore);
606 ir_mode *mode = sp->reg_class->mode;
/* Pop and reload the saved old frame pointer (uses the prologue's Store memory). */
609 stack = be_new_IncSP(sp, irg, bl, stack, no_mem, get_mode_size_bytes(mode), be_stack_dir_against);
610 irn = new_r_Load(irg, bl, env->store_bp_mem, stack, mode);
611 irn = new_r_Proj(irg, bl, irn, mode, pn_Load_res);
612 frame = be_new_Copy(bp->reg_class, irg, bl, irn);
/* Hand every recorded register Proj to the Return as an additional input. */
616 pmap_foreach(env->regs, ent) {
617 const arch_register_t *reg = ent->key;
618 ir_node *irn = ent->value;
625 obstack_ptr_grow(obst, irn);
/* Build a class type describing the stack argument area of method_type:
   one entity per stack-passed parameter, laid out in ABI order (direction
   derived from stack_dir and left_to_right). Each entity is stored into the
   corresponding be_abi_call_arg_t as arg->stack_ent for later StackParam/
   FrameAddr creation. Returns the new type; its size is the area's size. */
629 static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
631 int inc = env->birg->main_env->arch_env->isa->stack_dir * (env->left_to_right ? 1 : -1);
632 int n = get_method_n_params(method_type);
633 int curr = inc > 0 ? 0 : n - 1;
640 snprintf(buf, sizeof(buf), "%s_arg_type", get_entity_name(get_irg_entity(env->birg->irg)));
641 res = new_type_class(new_id_from_str(buf));
/* Walk parameters in layout order; curr steps forward or backward. */
643 for(i = 0; i < n; ++i, curr += inc) {
644 type *param_type = get_method_param_type(method_type, curr);
645 be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
648 snprintf(buf, sizeof(buf), "param_%d", i);
649 arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
650 add_class_member(res, arg->stack_ent);
651 set_entity_offset_bytes(arg->stack_ent, ofs);
652 ofs += get_type_size_bytes(param_type);
656 set_type_size_bytes(res, ofs);
/* Lazily create (and cache in a function-local static) a primitive type for
   the frame-pointer register's mode — used as the "between" slot type.
   NOTE(review): the static cache is not thread-safe; fine for a single-
   threaded compiler driver. */
660 static type *get_bp_type(const arch_register_t *bp)
662 static type *bp_type = NULL;
664 bp_type = new_type_primitive(new_id_from_str("bp_type"), bp->reg_class->mode);
665 set_type_size_bytes(bp_type, get_mode_size_bytes(bp->reg_class->mode));
671 * Modify the irg itself and the frame type.
/* Main per-graph ABI transformation:
   1. lower frame Sels, 2. compute the register/stack split for this
   function's own signature, 3. create the RegParams node with one Proj per
   parameter/callee-save register, 4. build the prologue (setup_frame),
   5. replace argument Projs by register Projs / StackParam / FrameAddr
   nodes, 6. rewrite every Return with epilogue code (clearup_frame) and
   register constraints. NOTE(review): listing elides many lines. */
673 static void modify_irg(be_abi_irg_t *env)
675 firm_dbg_module_t *dbg = env->dbg;
676 be_abi_call_t *call = be_abi_call_new();
677 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
678 const arch_register_t *sp = arch_isa_sp(isa);
679 ir_graph *irg = env->birg->irg;
680 ir_node *bl = get_irg_start_block(irg);
681 ir_node *end = get_irg_end_block(irg);
682 ir_node *arg_tuple = get_irg_args(irg);
683 ir_node *no_mem = get_irg_no_mem(irg);
684 type *method_type = get_entity_type(get_irg_entity(irg));
685 int n_params = get_method_n_params(method_type);
687 int reg_params_nr = 0;
692 ir_node *frame_pointer;
693 ir_node *reg_params, *reg_params_bl;
694 ir_node **args, **args_repl;
695 const ir_edge_t *edge;
700 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
702 /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
703 irg_walk_graph(irg, lower_frame_sels_walker, NULL, env);
705 env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
706 env->regs = pmap_create();
708 /* Find the maximum proj number of the argument tuple proj */
709 foreach_out_edge(arg_tuple, edge) {
710 ir_node *irn = get_edge_src_irn(edge);
711 int nr = get_Proj_proj(irn);
712 max_arg = MAX(max_arg, nr);
/* NOTE(review): arrays sized max_arg while projs range up to max_arg — off-by-one
   unless max_arg is bumped on an elided line; verify in the full source. */
715 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
716 args_repl = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
717 memset(args, 0, max_arg * sizeof(args[0]));
718 memset(args_repl, 0, max_arg * sizeof(args[0]));
720 /* Fill the argument vector */
721 foreach_out_edge(arg_tuple, edge) {
722 ir_node *irn = get_edge_src_irn(edge);
723 int nr = get_Proj_proj(irn);
725 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
728 /* Get the ABI constraints from the ISA */
729 arch_isa_get_call_abi(isa, method_type, call);
731 arg_type = compute_arg_type(env, call, method_type);
732 stack_frame_init(env->frame, arg_type, call->between_type, get_irg_frame_type(irg), isa->stack_dir);
734 /* Count the register params and add them to the number of Projs for the RegParams node */
735 for(i = 0; i < n_params; ++i) {
736 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
738 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
739 pmap_insert(env->regs, (void *) arg->reg, NULL);
740 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
744 /* Collect all callee-save registers */
745 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
746 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
747 for(j = 0; j < cls->n_regs; ++j) {
748 const arch_register_t *reg = &cls->regs[j];
749 if(arch_register_type_is(reg, callee_save))
750 pmap_insert(env->regs, (void *) reg, NULL);
/* SP and BP always get Projs as well. */
754 pmap_insert(env->regs, (void *) sp, NULL);
755 pmap_insert(env->regs, (void *) isa->bp, NULL);
756 reg_params_bl = get_irg_start_block(irg);
757 env->reg_params = reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
761 * make proj nodes for the callee save registers.
762 * memorize them, since Return nodes get those as inputs.
764 for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs)) {
765 arch_register_t *reg = ent->key;
766 int pos = -(reg_params_nr + 1);
767 ent->value = new_r_Proj(irg, reg_params_bl, reg_params, reg->reg_class->mode, reg_params_nr);
768 be_set_constr_single_reg(reg_params, pos, reg);
771 * If the register is an ignore register,
772 * The Proj for that register shall also be ignored during register allocation.
774 if(arch_register_type_is(reg, ignore))
775 be_node_set_flags(reg_params, pos, arch_irn_flags_ignore);
779 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", reg_params_nr - 1, reg->name));
782 /* Insert the code to set up the stack frame */
783 frame_pointer = setup_frame(env);
785 /* Now, introduce stack param nodes for all parameters passed on the stack */
786 for(i = 0; i < max_arg; ++i) {
787 ir_node *arg_proj = args[i];
788 if(arg_proj != NULL) {
789 be_abi_call_arg_t *arg;
791 int nr = get_Proj_proj(arg_proj);
/* Clamp variadic-argument projs to the last declared parameter. */
793 nr = MIN(nr, n_params);
794 arg = get_call_arg(call, 0, nr);
795 param_type = get_method_param_type(method_type, nr);
/* Register-passed parameter: another RegParams Proj with a single-register constraint. */
798 args_repl[i] = new_r_Proj(irg, reg_params_bl, reg_params, get_irn_mode(arg_proj), reg_params_nr);
799 be_set_constr_single_reg(reg_params, -(reg_params_nr + 1), arg->reg);
803 /* when the (stack) parameter is primitive, we insert a StackParam
804 node representing the load of that parameter */
807 /* For atomic parameters which are actually used, we create a StackParam node. */
808 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
809 ir_mode *mode = get_type_mode(param_type);
810 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
811 args_repl[i] = be_new_StackParam(cls, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
814 /* The stack parameter is not primitive (it is a struct or array),
815 we thus will create a node representing the parameter's address
818 args_repl[i] = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
824 /* reroute the edges from the original argument projs to the RegParam ones. */
825 for(i = 0; i < max_arg; ++i) {
826 if(args[i] != NULL) {
827 assert(args_repl[i] != NULL);
828 edges_reroute(args[i], args_repl[i], irg);
832 /* All Return nodes hang on the End node, so look for them there. */
833 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
834 ir_node *irn = get_irn_n(end, i);
836 if(get_irn_opcode(irn) == iro_Return) {
837 ir_node *bl = get_nodes_block(irn);
838 int n_res = get_Return_n_ress(irn);
839 pmap *reg_map = pmap_create_ex(n_res);
844 /* collect all arguments of the return */
/* NOTE: reuses the outer loop variable i — restored on an elided line, presumably. */
845 for(i = 0; i < n_res; ++i) {
846 ir_node *res = get_Return_res(irn, i);
847 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
849 assert(arg->in_reg && "return value must be passed in register");
850 pmap_insert(reg_map, res, (void *) arg->reg);
851 obstack_ptr_grow(&env->obst, res);
854 /* generate the clean up code and add additional parameters to the return. */
855 clearup_frame(env, bl, &env->obst);
857 /* The in array for the new back end return is now ready. */
858 n = obstack_object_size(&env->obst) / sizeof(in[0]);
859 in = obstack_finish(&env->obst);
860 ret = be_new_Return(irg, bl, n, in);
862 /* Set the constraints for some arguments of the return. */
863 for(i = 0; i < n; i++) {
864 const arch_register_t *reg = pmap_get(reg_map, in[i]);
866 be_set_constr_single_reg(ret, i, reg);
869 obstack_free(&env->obst, in);
870 pmap_destroy(reg_map);
874 obstack_free(&env->obst, args);
875 be_abi_call_free(call);
/* Graph walker: collect stack-allocating Alloc nodes onto env->obst. */
878 static void collect_alloca_walker(ir_node *irn, void *data)
880 be_abi_irg_t *env = data;
881 if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
882 obstack_ptr_grow(&env->obst, irn);
/* Entry point: create the ABI environment for a graph, query the ISA for
   the function's own calling convention, lower alloca-style Allocs, and
   lower all Call nodes. Returns the new environment (freed by be_abi_free).
   NOTE(review): malloc is unchecked and modify_irg() is presumably invoked
   on an elided line — confirm in the full source. */
885 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
887 be_abi_irg_t *env = malloc(sizeof(env[0]));
890 ir_node **stack_allocs;
892 env->method_type = get_entity_type(get_irg_entity(birg->irg));
893 env->call = be_abi_call_new();
894 arch_isa_get_call_abi(birg->main_env->arch_env->isa, env->method_type, env->call);
/* Decode the ABI flag bits into the environment's bitfields. */
896 env->omit_fp = (env->call->flags & BE_ABI_TRY_OMIT_FRAME_POINTER) != 0;
897 env->dedicated_fp = (env->call->flags & BE_ABI_FRAME_POINTER_DEDICATED) != 0;
898 env->left_to_right = (env->call->flags & BE_ABI_LEFT_TO_RIGHT) != 0;
899 env->save_old_fp = (env->call->flags & BE_ABI_SAVE_OLD_FRAME_POINTER) != 0;
901 env->stack_ops = pset_new_ptr(32);
902 env->dbg = firm_dbg_register("firm.be.abi");
903 obstack_init(&env->obst);
905 /* search for stack allocation nodes and record them */
906 irg_walk_graph(env->birg->irg, collect_alloca_walker, NULL, env);
907 obstack_ptr_grow(&env->obst, NULL);
908 stack_allocs = obstack_finish(&env->obst);
910 /* If there are stack allocations in the irg, we need a frame pointer */
911 if(stack_allocs[0] != NULL)
916 for(i = 0; stack_allocs[i] != NULL; ++i)
917 implement_stack_alloc(env, stack_allocs[i]);
/* Lower calls post-order so nested calls are handled bottom-up. */
919 irg_walk_graph(env->birg->irg, NULL, adjust_call_walker, env);
/* Graph walker: gather backend nodes that produce a stack pointer value
   (the opcode cases are elided from this listing) into the pset passed
   via data. */
923 static void collect_stack_nodes(ir_node *irn, void *data)
927 switch(be_get_irn_opcode(irn)) {
930 pset_insert_ptr(s, irn);
/* Put all stack-pointer-producing nodes (plus the initial SP) into SSA form
   using dominance frontiers, so every use sees the correct SP definition. */
934 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
936 dom_front_info_t *df;
939 /* We need dominance frontiers for fix up */
940 df = be_compute_dominance_frontiers(env->birg->irg);
942 stack_ops = pset_new_ptr_default();
943 pset_insert_ptr(env->stack_ops, env->init_sp);
944 irg_walk_graph(env->birg->irg, collect_stack_nodes, NULL, stack_ops);
945 be_ssa_constr_set(df, stack_ops);
948 /* free these dominance frontiers */
949 be_free_dominance_frontiers(df);
/* Map an IncSP's direction to a sign: +1 for be_stack_dir_along (grow),
   -1 for be_stack_dir_against (shrink). */
952 static int get_dir(ir_node *irn)
954 return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
/* Walk the schedule of block bl, materializing the symbolic
   BE_STACK_FRAME_SIZE on IncSP nodes as the real frame type size and
   accumulating the running stack bias, which is pushed into every scheduled
   node via arch_set_stack_bias. Returns the bias at the end of the block
   (used as the starting bias for successor blocks). */
957 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
959 const arch_env_t *aenv = env->birg->main_env->arch_env;
961 int start_bias = bias;
963 sched_foreach(bl, irn) {
964 if(be_is_IncSP(irn)) {
965 int ofs = be_get_IncSP_offset(irn);
966 int dir = get_dir(irn);
/* Replace the frame-size placeholder with the now-known frame type size. */
968 if(ofs == BE_STACK_FRAME_SIZE) {
969 ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
970 be_set_IncSP_offset(irn, ofs);
977 arch_set_stack_bias(aenv, irn, bias);
/* Block walker: apply stack biasing to every non-start block, seeding each
   with the bias computed for the start block.
   NOTE(review): all blocks start from start_block_bias rather than their
   control-flow predecessor's end bias — presumably valid because only the
   start block changes the net bias; confirm in the full source. */
984 static void stack_bias_walker(ir_node *bl, void *data)
986 if(bl != get_irg_start_block(get_irn_irg(bl))) {
987 be_abi_irg_t *env = data;
988 process_stack_bias(env, bl, env->start_block_bias);
/* Fix the stack bias of all nodes: first the start block (from bias 0),
   then all remaining blocks starting from the resulting bias. */
992 void be_abi_fix_stack_bias(be_abi_irg_t *env)
994 ir_graph *irg = env->birg->irg;
996 /* Determine the stack bias at the and of the start block. */
997 env->start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
999 /* fix the bias is all other blocks */
1000 irg_block_walk_graph(irg, stack_bias_walker, NULL, env);
/* Destroy an ABI environment: release the stack-op set and the obstack
   (the free of env itself is on an elided line of this listing). */
1003 void be_abi_free(be_abi_irg_t *env)
1005 del_pset(env->stack_ops);
1006 obstack_free(&env->obst, NULL);
/* Return the RegParams Proj created in modify_irg() for callee-save
   register reg. The register must be callee-save and must have been
   recorded in abi->regs (both enforced by assertions). */
1010 ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
1012 assert(arch_register_type_is(reg, callee_save));
1013 assert(pmap_contains(abi->regs, (void *) reg));
1014 return pmap_get(abi->regs, (void *) reg);