8 #include "firm_config.h"
13 #include "irgraph_t.h"
16 #include "iredges_t.h"
24 #include "besched_t.h"
/* Local min/max helpers.
 * NOTE(review): both macros evaluate each argument twice — do not pass
 * expressions with side effects (e.g. MAX(i++, j)). */
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
/* Record describing how one argument/result of a call is passed.
 * Keyed by (is_res, pos) in the call's param set. */
typedef struct _be_abi_call_arg_t {
	const arch_register_t *reg; /**< The register the value is passed in
	                                 (only meaningful when passed in a register). */
/* Per-call ABI description, filled in by the ISA via arch_isa_get_call_abi(). */
struct _be_abi_call_t {
	be_abi_call_flags_t flags; /**< BE_ABI_* flags: push vs. store, argument order, ... */
/* Per-irg environment used while lowering the ABI of a graph. */
struct _be_abi_irg_t {
	ir_node *init_sp; /**< The node representing the stack pointer
	                     at the start of the function. */
	pset *stack_ops; /**< Contains all nodes modifying the stack pointer. */
	unsigned dedicated_fp : 1; /**< The frame pointer lives in a dedicated register. */
	unsigned left_to_right : 1; /**< Arguments are laid out left to right. */
	firm_dbg_module_t *dbg; /**< The debugging module. */
/**
 * Set compare function for call-argument records.
 * Two entries are considered equal iff they agree in both the is_res
 * flag and the position; returns 0 on equality (libfirm set convention).
 */
static int cmp_call_arg(const void *a, const void *b, size_t n)
	const be_abi_call_arg_t *p = a, *q = b;
	return !(p->is_res == q->is_res && p->pos == q->pos);
/**
 * Look up the argument record for (is_res, pos) in the call description,
 * inserting a fresh record when do_insert is non-zero.
 * @param is_res    1 for a result, 0 for a parameter.
 * @param pos       Position of the argument/result.
 * @param do_insert Insert the record if it is not present yet.
 */
static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
	be_abi_call_arg_t arg;
	/* Cheap hash combining the two key fields; collisions are resolved
	 * by cmp_call_arg above. */
	hash = is_res * 100 + pos;
		? set_insert(call->params, &arg, sizeof(arg), hash)
		: set_find(call->params, &arg, sizeof(arg), hash);
/** Read-only lookup of an argument record; returns NULL if not recorded. */
static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
	return get_or_set_call_arg(call, is_res, pos, 0);
/**
 * Set the ABI flags and the gap between stack arguments for a call description.
 * NOTE(review): only the arg_gap assignment is visible here; the flags
 * parameter is presumably stored on a line not shown — verify.
 */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, unsigned arg_gap)
	call->arg_gap = arg_gap;
/** Mark parameter arg_pos as being passed on the stack. */
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/** Mark parameter arg_pos as being passed in register reg. */
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
/** Mark result arg_pos as being returned in register reg. */
void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
/**
 * Allocate a fresh, empty call description.
 * NOTE(review): the malloc result is used unchecked — an OOM here
 * dereferences NULL; consider an xmalloc-style wrapper.
 */
be_abi_call_t *be_abi_call_new(void)
	be_abi_call_t *call = malloc(sizeof(call[0]));
	call->flags = BE_ABI_NONE;
	call->params = new_set(cmp_call_arg, 16);
/**
 * Free a call description created with be_abi_call_new().
 * NOTE(review): only the param set deletion is visible here; the call
 * struct itself is presumably free()d on a line not shown — verify.
 */
void be_abi_call_free(be_abi_call_t *call)
	del_set(call->params);
/** Return non-zero iff parameter pos of the call is passed on the stack
 *  (i.e. it has a record and that record is not marked in_reg). */
static INLINE int is_on_stack(be_abi_call_t *call, int pos)
	be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
	return arg && !arg->in_reg;
/**
 * Lower one Call node to the backend ABI:
 *  - ask the ISA for the call's ABI description,
 *  - emit Stores/CopyBs (or sequential push-style IncSPs) for stack arguments,
 *  - add Projs and a Keep for caller-save registers,
 *  - emit the stack clean-up IncSP,
 *  - finally replace the Call by a backend Call node (be_new_Call).
 * Nodes that modify the stack pointer are recorded in env->stack_ops for
 * the later fix-up passes.
 */
static void adjust_call(be_abi_irg_t *env, ir_node *irn)
	ir_graph *irg = env->birg->irg;
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	be_abi_call_t *call = be_abi_call_new();
	ir_type *mt = get_Call_type(irn);
	int n_params = get_method_n_params(mt);
	/* NOTE(review): curr_sp starts as the frame node, not an sp Proj —
	 * presumably rewired later; confirm against the full file. */
	ir_node *curr_sp = get_irg_frame(irg);
	ir_node *curr_mem = get_Call_mem(irn);
	ir_node *bl = get_nodes_block(irn);
	/* NOTE(review): 'results' is allocated but neither used nor del_pset()d
	 * in the lines shown here — possible leak or dead variable; verify. */
	pset *results = pset_new_ptr(8);
	pset *caller_save = pset_new_ptr(8);
	int stack_dir = arch_isa_stack_dir(isa);
	const arch_register_t *sp = arch_isa_sp(isa);
	ir_mode *mach_mode = sp->reg_class->mode;
	struct obstack *obst = &env->obst;
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *res_proj = NULL;
	int curr_res_proj = -1;
	const ir_edge_t *edge;

	/* Let the isa fill out the abi description for that call node. */
	arch_isa_get_call_abi(isa, mt, call);

	assert(get_method_variadicity(mt) == variadicity_non_variadic);

	/* Insert code to put the stack arguments on the stack. */
	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg && !arg->in_reg) {
			stack_size += get_type_size_bytes(get_method_param_type(mt, i));
			obstack_int_grow(obst, i);
	pos = obstack_finish(obst);

	/* Collect all arguments which are passed in registers. */
	for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
		if(arg && arg->in_reg) {
			obstack_int_grow(obst, i);
	low_args = obstack_finish(obst);

	/* If there are some parameters which shall be passed on the stack. */
		int do_seq = (call->flags & BE_ABI_USE_PUSH);

		/* Reverse list of stack parameters if call arguments are from left to right */
		if(call->flags & BE_ABI_LEFT_TO_RIGHT) {
			for(i = 0; i < n_pos / 2; ++i) {
				int other = n_pos - i - 1;

		/* If the stack is decreasing and we do not want to store sequentially,
		 * we allocate as much space on the stack all parameters need, by
		 * moving the stack pointer along the stack's direction. */
		if(stack_dir < 0 && !do_seq) {
			curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
			pset_insert_ptr(env->stack_ops, curr_sp);

		assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
		for(i = 0; i < n_pos; ++i) {
			ir_node *param = get_Call_param(irn, p);
			ir_node *addr = curr_sp;
			type *param_type = get_method_param_type(mt, p);
			int param_size = get_type_size_bytes(param_type);

			/* Make the expression to compute the argument's offset. */
				addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
				addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);

			/* Insert a store for primitive arguments. */
			if(is_atomic_type(param_type)) {
				mem = new_r_Store(irg, bl, curr_mem, addr, param);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);

			/* Make a memcopy for compound arguments. */
				assert(mode_is_reference(get_irn_mode(param)));
				mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
				mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);

			obstack_ptr_grow(obst, mem);

			curr_ofs += param_size;

			/* If we wanted to build the arguments sequentially,
			 * the stack pointer for the next must be incremented,
			 * and the memory value propagated. */
				curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);

				/* only put the first IncSP to the stack fixup set since the other
				 * ones are correctly connected to other nodes and do not need */
					pset_insert_ptr(env->stack_ops, curr_sp);

		in = (ir_node **) obstack_finish(obst);

		/* We need the sync only, if we didn't build the stores sequentially. */
			curr_mem = new_r_Sync(irg, bl, n_pos, in);
		obstack_free(obst, in);

	/* Collect caller save registers */
	for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
		const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
		for(j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if(arch_register_type_is(reg, caller_save))
				pset_insert_ptr(caller_save, (void *) reg);

	/* search the greatest result proj number */
	foreach_out_edge(irn, edge) {
		const ir_edge_t *res_edge;
		ir_node *irn = get_edge_src_irn(edge);

		if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
			foreach_out_edge(irn, res_edge) {
				be_abi_call_arg_t *arg;
				ir_node *res = get_edge_src_irn(res_edge);

				assert(is_Proj(res));
				proj = get_Proj_proj(res);
				arg = get_call_arg(call, 1, proj);
				if(proj > curr_res_proj)
					curr_res_proj = proj;
				/* registers already used by results are not caller-save here */
					pset_remove_ptr(caller_save, arg->reg);

	/* Make additional projs for the caller save registers
	   and the Keep node which keeps them alive. */
	if(pset_count(caller_save) > 0) {
		const arch_register_t *reg;

		res_proj = new_r_Proj(irg, bl, irn, mode_T, pn_Call_T_result);

		for(reg = pset_first(caller_save); reg; reg = pset_next(caller_save))
			obstack_ptr_grow(obst, new_r_Proj(irg, bl, res_proj, reg->reg_class->mode, curr_res_proj++));

		in = (ir_node **) obstack_finish(obst);
		be_new_Keep(NULL, irg, bl, pset_count(caller_save), in);
		obstack_free(obst, in);

	/* Clean up the stack. */
		ir_node *last_inc_sp;

		/* Get the result ProjT */
			res_proj = new_r_Proj(irg, bl, irn, mode_T, pn_Call_T_result);

		/* Make a Proj for the stack pointer. */
		sp_proj = new_r_Proj(irg, bl, res_proj, sp->reg_class->mode, curr_res_proj++);
		last_inc_sp = be_new_IncSP(sp, irg, bl, sp_proj, no_mem, stack_size, be_stack_dir_against);
		pset_insert_ptr(env->stack_ops, last_inc_sp);

	/* at last make the backend call node and set its register requirements. */
	for(i = 0; i < n_low_args; ++i)
		obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));

	in = obstack_finish(obst);
	low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
	obstack_free(obst, in);

	exchange(irn, low_call);

	be_abi_call_free(call);
	obstack_free(obst, pos);
	del_pset(caller_save);
/** irg walker callback: lower every Call node via adjust_call().
 *  data is the be_abi_irg_t environment. */
static void adjust_call_walker(ir_node *irn, void *data)
	if(get_irn_opcode(irn) == iro_Call)
		adjust_call(data, irn);
/**
 * Walker to implement alloca-style allocations.
 * They are implemented using an add to the stack pointer
 * and a copy instruction.
 */
static void implement_stack_alloc(be_abi_irg_t *env, ir_node *irn)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	ir_node *bl = get_nodes_block(irn);
	ir_node *res = env->init_sp;

	assert(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc);

	size = get_Alloc_size(irn);
	/* On an upwards-growing stack the old sp is the block's address:
	 * copy it before moving the stack pointer. */
	if(isa->stack_dir > 0)
		res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);

	res = be_new_AddSP(isa->sp, env->birg->irg, bl, res, size);
	pset_insert_ptr(env->stack_ops, res);

	/* On a downwards-growing stack the new sp is the block's address. */
	if(isa->stack_dir < 0)
		res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
/** irg walker callback: append every Return node to the obstack passed
 *  in data (used to gather all returns of a graph). */
static void collect_return_walker(ir_node *irn, void *data)
	if(get_irn_opcode(irn) == iro_Return) {
		struct obstack *obst = data;
		obstack_ptr_grow(obst, irn);
/**
 * Emit the function prologue in the start block: allocate the frame with
 * IncSP, optionally store the old frame pointer and set up the new one,
 * and reroute users of the old frame node to the new frame pointer.
 * @return the node to be used as frame pointer (per the return rewired
 *         via set_irg_frame below).
 */
static ir_node *setup_frame(be_abi_irg_t *env)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	ir_graph *irg = env->birg->irg;
	ir_node *bl = get_irg_start_block(irg);
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *old_frame = get_irg_frame(irg);
	int store_old_fp = 1;
	int omit_fp = env->omit_fp;
	/* Projs for sp/bp produced earlier by the RegParams node (see modify_irg). */
	ir_node *stack = pmap_get(env->regs, (void *) sp);
	ir_node *frame = pmap_get(env->regs, (void *) bp);
	int stack_nr = get_Proj_proj(stack);

		/* BE_STACK_FRAME_SIZE is a placeholder patched to the real frame
		 * size in be_abi_fix_stack_bias/process_stack_bias. */
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);

			/* Save the old frame pointer to the stack, then bump sp past it. */
			irn = new_r_Store(irg, bl, get_irg_initial_mem(irg), stack, frame);
			irn = new_r_Proj(irg, bl, irn, mode_M, pn_Store_M);
			stack = be_new_IncSP(sp, irg, bl, stack, irn, get_mode_size_bytes(bp->reg_class->mode), be_stack_dir_along);

		frame = be_new_Copy(bp->reg_class, irg, bl, stack);

		/* The frame pointer must survive: never spill it. */
		be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
		if(env->dedicated_fp) {
			be_set_constr_single_reg(frame, -1, bp);
			be_node_set_flags(frame, -1, arch_irn_flags_ignore);

		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);

	/* Exclude the stack pointer Proj from register allocation. */
	be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
	env->init_sp = stack;
	set_irg_frame(irg, frame);
	edges_reroute(old_frame, frame, irg);
/**
 * Emit the function epilogue in block bl: tear down the stack frame
 * (restore sp, reload the old frame pointer) and push all register Projs
 * that must be live at the Return onto obst, to become extra Return inputs.
 */
static void clearup_frame(be_abi_irg_t *env, ir_node *bl, struct obstack *obst)
	const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
	const arch_register_t *sp = isa->sp;
	const arch_register_t *bp = isa->bp;
	ir_graph *irg = env->birg->irg;
	ir_node *no_mem = get_irg_no_mem(irg);
	ir_node *frame = get_irg_frame(irg);
	ir_node *stack = env->init_sp;
	int store_old_fp = 1;

		/* Undo the (symbolic) frame allocation. */
		stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);

		/* With a frame pointer, sp can simply be restored from it. */
		stack = be_new_Copy(sp->reg_class, irg, bl, frame);

		ir_mode *mode = sp->reg_class->mode;

			/* Reload the saved old frame pointer from the stack.
			 * NOTE(review): the Load here uses no_mem and its memory Proj is
			 * discarded — confirm the memory dependency is handled elsewhere. */
			stack = be_new_IncSP(sp, irg, bl, stack, no_mem, get_mode_size_bytes(mode), be_stack_dir_against);
			irn = new_r_Load(irg, bl, no_mem, stack, mode);
			irn = new_r_Proj(irg, bl, irn, mode, pn_Load_res);
			frame = be_new_Copy(bp->reg_class, irg, bl, irn);

		if(env->dedicated_fp) {
			be_set_constr_single_reg(frame, -1, bp);

	/* Hand all recorded register nodes to the caller as Return inputs. */
	pmap_foreach(env->regs, ent) {
		const arch_register_t *reg = ent->key;
		ir_node *irn = ent->value;
		obstack_ptr_grow(obst, irn);
501 * Modify the irg itself and the frame type.
503 static void modify_irg(be_abi_irg_t *env)
505 firm_dbg_module_t *dbg = env->dbg;
506 be_abi_call_t *call = be_abi_call_new();
507 const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
508 const arch_register_t *sp = arch_isa_sp(isa);
509 ir_graph *irg = env->birg->irg;
510 ir_node *bl = get_irg_start_block(irg);
511 ir_node *end = get_irg_end_block(irg);
512 ir_node *arg_tuple = get_irg_args(irg);
513 ir_node *no_mem = get_irg_no_mem(irg);
514 type *method_type = get_entity_type(get_irg_entity(irg));
515 int n_params = get_method_n_params(method_type);
518 int reg_params_nr = 0;
523 ir_node *frame_pointer;
524 ir_node *reg_params, *reg_params_bl;
525 ir_node **args, **args_repl;
526 const ir_edge_t *edge;
530 env->regs = pmap_create();
532 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
534 /* Find the maximum proj number of the argument tuple proj */
535 foreach_out_edge(arg_tuple, edge) {
536 ir_node *irn = get_edge_src_irn(edge);
537 int nr = get_Proj_proj(irn);
538 max_arg = MAX(max_arg, nr);
541 args = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
542 args_repl = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
543 memset(args, 0, max_arg * sizeof(args[0]));
544 memset(args_repl, 0, max_arg * sizeof(args[0]));
546 /* Fill the argument vector */
547 foreach_out_edge(arg_tuple, edge) {
548 ir_node *irn = get_edge_src_irn(edge);
549 int nr = get_Proj_proj(irn);
551 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
554 /* Get the ABI constraints from the ISA */
555 arch_isa_get_call_abi(isa, method_type, call);
557 /* Count the register params and add them to the number of Projs for the RegParams node */
558 for(i = 0; i < n_params; ++i) {
559 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
561 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
562 pmap_insert(env->regs, (void *) arg->reg, NULL);
563 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
567 /* Collect all callee-save registers */
568 for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
569 const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
570 for(j = 0; j < cls->n_regs; ++j) {
571 const arch_register_t *reg = &cls->regs[j];
572 if(arch_register_type_is(reg, callee_save))
573 pmap_insert(env->regs, (void *) reg, NULL);
577 pmap_insert(env->regs, (void *) sp, NULL);
578 pmap_insert(env->regs, (void *) isa->bp, NULL);
579 reg_params_bl = get_irg_start_block(irg);
580 env->reg_params = reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
584 * make proj nodes for the callee save registers.
585 * memorize them, since Return nodes get those as inputs.
587 for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs)) {
588 arch_register_t *reg = ent->key;
589 int pos = -(reg_params_nr + 1);
590 ent->value = new_r_Proj(irg, reg_params_bl, reg_params, reg->reg_class->mode, reg_params_nr);
591 be_set_constr_single_reg(reg_params, pos, reg);
594 * If the register is an ignore register,
595 * The Proj for that register shall also be ignored during register allocation.
597 if(arch_register_type_is(reg, ignore))
598 be_node_set_flags(reg_params, pos, arch_irn_flags_ignore);
602 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", reg_params_nr - 1, reg->name));
605 /* Insert the code to set up the stack frame */
606 frame_pointer = setup_frame(env);
609 proj_sp = pmap_get(regs, (void *) sp);
610 proj_bp = pmap_get(regs, (void *) bp);
611 assert(proj_sp != NULL && "There must be a Proj for the stack pointer");
612 assert(proj_sp != NULL && "There must be a Proj for the base pointer");
614 /* Set the Proj for the stack pointer to ignore. */
615 be_node_set_flags(reg_params, -(get_Proj_proj(proj_sp) + 1), arch_irn_flags_ignore);
618 * If a frame pointer is needed and the frame pointer is in a dedicated register,
619 * also exclude that from register allocation by setting the corresponding
622 if(!env->omit_fp && env->dedicated_fp)
623 be_node_set_flags(reg_params, -(get_Proj_proj(proj_bp) + 1), arch_irn_flags_ignore);
627 /* This is the stack pointer add/sub which allocates the frame. remind it for later fix up. */
628 env->init_sp = be_new_IncSP(sp, irg, reg_params_bl, proj_sp, no_mem, 0, be_stack_dir_along);
629 frame_pointer = env->init_sp;
633 env->init_sp = proj_sp;
634 frame_pointer = be_new_Copy(sp->reg_class, irg, reg_params_bl, proj_sp);
637 /* Set the new frame pointer. */
638 exchange(get_irg_frame(irg), frame_pointer);
639 set_irg_frame(irg, frame_pointer);
642 /* compute the start offset for the stack parameters. */
646 int inc_dir = isa->stack_dir * (env->left_to_right ? 1 : -1);
648 for(i = 0; i < n_params; ++i) {
649 be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
651 arg_size += get_type_size_bytes(get_method_param_type(method_type, i));
654 arg_offset = -isa->stack_dir * call->arg_gap + env->left_to_right * arg_size;
656 /* Now, introduce stack param nodes for all parameters passed on the stack */
657 for(i = 0; i < max_arg; ++i) {
658 ir_node *arg_proj = args[i];
659 if(arg_proj != NULL) {
660 be_abi_call_arg_t *arg;
662 int nr = get_Proj_proj(arg_proj);
664 nr = MIN(nr, n_params);
665 arg = get_call_arg(call, 0, nr);
666 param_type = get_method_param_type(method_type, nr);
669 args_repl[i] = new_r_Proj(irg, reg_params_bl, reg_params, get_irn_mode(arg_proj), reg_params_nr);
670 be_set_constr_single_reg(reg_params, -(reg_params_nr + 1), arg->reg);
674 /* when the (stack) parameter is primitive, we insert a StackParam
675 node representing the load of that parameter */
677 int size = get_type_size_bytes(param_type) * isa->stack_dir;
682 /* For atomic parameters which are actually used, we create a StackParam node. */
683 if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
684 ir_mode *mode = get_type_mode(param_type);
685 const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
686 args_repl[i] = be_new_StackParam(cls, irg, reg_params_bl, mode, frame_pointer, arg_offset);
689 /* The stack parameter is not primitive (it is a struct or array),
690 we thus will create a node representing the parameter's address
693 assert(0 && "struct parameters are not supported");
703 /* reroute the edges from the original argument projs to the RegParam ones. */
704 for(i = 0; i < max_arg; ++i) {
705 if(args[i] != NULL) {
706 assert(args_repl[i] != NULL);
707 edges_reroute(args[i], args_repl[i], irg);
711 /* All Return nodes hang on the End node, so look for them there. */
712 for(i = 0, n = get_irn_arity(end); i < n; ++i) {
713 ir_node *irn = get_irn_n(end, i);
715 if(get_irn_opcode(irn) == iro_Return) {
716 ir_node *bl = get_nodes_block(irn);
717 int n_res = get_Return_n_ress(irn);
718 pmap *reg_map = pmap_create_ex(n_res);
723 /* collect all arguments of the return */
724 for(i = 0; i < n_res; ++i) {
725 ir_node *res = get_Return_res(irn, i);
726 be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
728 assert(arg->in_reg && "return value must be passed in register");
729 pmap_insert(reg_map, res, (void *) arg->reg);
730 obstack_ptr_grow(&env->obst, res);
733 /* generate the clean up code and add additional parameters to the return. */
734 clearup_frame(env, bl, &env->obst);
736 /* The in array for the new back end return is now ready. */
737 n = obstack_object_size(&env->obst) / sizeof(in[0]);
738 in = obstack_finish(&env->obst);
739 ret = be_new_Return(irg, bl, n, in);
741 /* Set the constraints for some arguments of the return. */
742 for(i = 0; i < n; i++) {
743 const arch_register_t *reg = pmap_get(reg_map, in[i]);
745 be_set_constr_single_reg(ret, i, reg);
748 obstack_free(&env->obst, in);
749 pmap_destroy(reg_map);
753 obstack_free(&env->obst, args);
754 be_abi_call_free(call);
/** irg walker callback: record every stack-based Alloc node on the
 *  environment's obstack for later lowering. */
static void collect_alloca_walker(ir_node *irn, void *data)
	be_abi_irg_t *env = data;
	if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
		obstack_ptr_grow(&env->obst, irn);
/**
 * Entry point: build the ABI environment for a backend irg, lower all
 * stack Allocs, and lower all Calls to backend calls.
 * NOTE(review): malloc result is used unchecked, and env->birg is read
 * below but its assignment is not visible in this excerpt — verify both
 * against the full file.
 */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
	be_abi_irg_t *env = malloc(sizeof(env[0]));

	ir_node **stack_allocs;

	env->method_type = get_entity_type(get_irg_entity(birg->irg));
	env->call = be_abi_call_new();
	arch_isa_get_call_abi(birg->main_env->arch_env->isa, env->method_type, env->call);

	env->omit_fp = (env->call->flags & BE_ABI_TRY_OMIT_FRAME_POINTER) != 0;
	env->dedicated_fp = (env->call->flags & BE_ABI_FRAME_POINTER_DEDICATED) != 0;
	env->left_to_right = (env->call->flags & BE_ABI_LEFT_TO_RIGHT) != 0;

	env->stack_ops = pset_new_ptr(32);
	env->dbg = firm_dbg_register("firm.be.abi");
	obstack_init(&env->obst);

	/* search for stack allocation nodes and record them */
	irg_walk_graph(env->birg->irg, collect_alloca_walker, NULL, env);
	/* NULL-terminate the collected vector. */
	obstack_ptr_grow(&env->obst, NULL);
	stack_allocs = obstack_finish(&env->obst);

	/* If there are stack allocations in the irg, we need a frame pointer */
	if(stack_allocs[0] != NULL)

	for(i = 0; stack_allocs[i] != NULL; ++i)
		implement_stack_alloc(env, stack_allocs[i]);

	irg_walk_graph(env->birg->irg, NULL, adjust_call_walker, env);
/** irg walker callback: collect backend nodes that produce a stack
 *  pointer value into the pset passed in data. */
static void collect_stack_nodes(ir_node *irn, void *data)
	switch(be_get_irn_opcode(irn)) {
		pset_insert_ptr(s, irn);
/**
 * Put all stack-pointer-defining nodes into proper SSA form using
 * dominance frontiers.
 * NOTE(review): env->init_sp is inserted into env->stack_ops, but the
 * walker collects into the *local* stack_ops set which is then passed to
 * be_ssa_constr_set — confirm the insertion targets the intended set.
 */
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
	dom_front_info_t *df;

	/* We need dominance frontiers for fix up */
	df = be_compute_dominance_frontiers(env->birg->irg);

	stack_ops = pset_new_ptr_default();
	pset_insert_ptr(env->stack_ops, env->init_sp);
	irg_walk_graph(env->birg->irg, collect_stack_nodes, NULL, stack_ops);
	be_ssa_constr_set(df, stack_ops);

	/* free these dominance frontiers */
	be_free_dominance_frontiers(df);
/** Map an IncSP's direction to a sign: +1 for be_stack_dir_along,
 *  -1 for be_stack_dir_against. */
static int get_dir(ir_node *irn)
	return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
/**
 * Walk the schedule of block bl, patching the symbolic BE_STACK_FRAME_SIZE
 * on IncSP nodes to the real frame size and propagating the running stack
 * bias to every scheduled node.
 * @param bias  the stack bias on entry to the block.
 * NOTE(review): start_bias is unused in the visible lines — presumably
 * the hidden return uses bias/start_bias; verify.
 */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
	const arch_env_t *aenv = env->birg->main_env->arch_env;
	int start_bias = bias;

	sched_foreach(bl, irn) {
		if(be_is_IncSP(irn)) {
			int ofs = be_get_IncSP_offset(irn);
			int dir = get_dir(irn);

			/* Replace the frame-size placeholder with the concrete size. */
			if(ofs == BE_STACK_FRAME_SIZE) {
				ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
				be_set_IncSP_offset(irn, ofs);

		arch_set_stack_bias(aenv, irn, bias);
/** Block walker: apply the start block's final bias to every other block.
 *  The start block itself is processed separately in be_abi_fix_stack_bias. */
static void stack_bias_walker(ir_node *bl, void *data)
	if(bl != get_irg_start_block(get_irn_irg(bl))) {
		be_abi_irg_t *env = data;
		process_stack_bias(env, bl, env->start_block_bias);
/** Fix the stack bias for the whole graph: start block first, then all
 *  other blocks relative to the start block's resulting bias. */
void be_abi_fix_stack_bias(be_abi_irg_t *env)
	ir_graph *irg = env->birg->irg;

	/* Determine the stack bias at the end of the start block. */
	env->start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);

	/* fix the bias in all other blocks */
	irg_block_walk_graph(irg, stack_bias_walker, NULL, env);
/**
 * Release the resources of an ABI environment.
 * NOTE(review): env->call, env->regs and env itself are not freed in the
 * visible lines — presumably handled on lines not shown; verify.
 */
void be_abi_free(be_abi_irg_t *env)
	del_pset(env->stack_ops);
	obstack_free(&env->obst, NULL);
/** Return the Proj node produced for callee-save register reg
 *  (must exist; created in modify_irg and recorded in abi->regs). */
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
	assert(arch_register_type_is(reg, callee_save));
	assert(pmap_contains(abi->regs, (void *) reg));
	return pmap_get(abi->regs, (void *) reg);