/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Backend ABI implementation.
 * @author      Sebastian Hack, Michael Beck
 */

#include "irgraph_t.h"
#include "iredges_t.h"
#include "raw_bitset.h"

#include "beabihelper.h"
#include "bessaconstr.h"
#include "betranshlp.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)

typedef struct be_abi_call_arg_t {
	unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
	unsigned in_reg : 1; /**< 1: this argument is transmitted in registers, 0: on the stack. */
	unsigned callee : 1; /**< 1: someone called us. 0: we call another function */

	int                    pos;
	const arch_register_t *reg;
	ir_entity             *stack_ent;
	ir_mode               *load_mode;
	unsigned               alignment;    /**< stack alignment */
	unsigned               space_before; /**< allocate space before */
	unsigned               space_after;  /**< allocate space after */
} be_abi_call_arg_t;

struct be_abi_call_t {
	be_abi_call_flags_t       flags;  /**< Flags describing the ABI behavior on calls */
	int                       pop;    /**< number of bytes the stack frame is shrunk by the callee on return. */
	const be_abi_callbacks_t *cb;
	set                      *params;
};

/**
 * The ABI information for the current graph.
 */
typedef struct be_abi_irg_t {
	be_abi_call_t *call;     /**< The ABI call information. */

	ir_node *init_sp;        /**< The node representing the stack pointer
	                              at the start of the function. */

	pmap *regs;              /**< A map of all callee-save and ignore regs to
	                              their Projs to the RegParams node. */
	pmap *keep_map;          /**< mapping blocks to keep nodes. */

	ir_node **calls;         /**< flexible array containing all be_Call nodes */
} be_abi_irg_t;

static ir_heights_t *ir_heights;

static ir_node *be_abi_reg_map_get(pmap *map, const arch_register_t *reg)
{
	return pmap_get(ir_node, map, reg);
}

static void be_abi_reg_map_set(pmap *map, const arch_register_t* reg,
                               ir_node *node)
{
	pmap_insert(map, reg, node);
}

/**
 * Check if the given register is callee save, i.e. will be saved by the callee.
 */
static bool arch_register_is_callee_save(
	const arch_env_t      *arch_env,
	const arch_register_t *reg)
{
	if (arch_env->impl->register_saved_by)
		return arch_env->impl->register_saved_by(reg, /*callee=*/1);
	return false;
}

/**
 * Check if the given register is caller save, i.e. must be saved by the caller.
 */
static bool arch_register_is_caller_save(
	const arch_env_t      *arch_env,
	const arch_register_t *reg)
{
	if (arch_env->impl->register_saved_by)
		return arch_env->impl->register_saved_by(reg, /*callee=*/0);
	return false;
}
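
/* A backend supplies the save convention through its register_saved_by hook.
 * A minimal sketch of such a hook (the register indices and the convention
 * are made up for illustration; this is not part of libFirm):
 *
 *     static bool demo_register_saved_by(const arch_register_t *reg, int callee)
 *     {
 *         if (callee)
 *             return reg->index >= 4 && reg->index <= 11; // callee saves r4..r11
 *         return reg->index <= 3;                         // caller saves r0..r3
 *     }
 */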

/*
      _    ____ ___    ____      _ _ _                _
     / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
    / _ \ |  _ \| |  | |   / _` | | | '_ \ / _` |/ __| |/ / __|
   / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
  /_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/

  These callbacks are used by the backend to set the parameters
  for a specific call type.
*/

/**
 * Set compare function: compares two ABI call object arguments.
 */
static int cmp_call_arg(const void *a, const void *b, size_t n)
{
	const be_abi_call_arg_t *p = (const be_abi_call_arg_t*)a;
	const be_abi_call_arg_t *q = (const be_abi_call_arg_t*)b;
	(void) n;
	return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
}

/**
 * Get an ABI call object argument.
 *
 * @param call   the abi call
 * @param is_res true for call results, false for call arguments
 * @param pos    position of the argument
 * @param callee context type - if we are callee or caller
 */
static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
{
	be_abi_call_arg_t arg;
	unsigned hash;

	memset(&arg, 0, sizeof(arg));
	arg.is_res = is_res;
	arg.pos    = pos;
	arg.callee = callee;

	hash = is_res * 128 + pos;

	return set_find(be_abi_call_arg_t, call->params, &arg, sizeof(arg), hash);
}

/**
 * Set an ABI call object argument.
 */
static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
{
	unsigned hash = arg->is_res * 128 + arg->pos;
	if (context & ABI_CONTEXT_CALLEE) {
		arg->callee = 1;
		(void)set_insert(be_abi_call_arg_t, call->params, arg, sizeof(*arg), hash);
	}
	if (context & ABI_CONTEXT_CALLER) {
		arg->callee = 0;
		(void)set_insert(be_abi_call_arg_t, call->params, arg, sizeof(*arg), hash);
	}
}
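
/* Registering an argument for both contexts thus stores two set entries that
 * differ only in the callee bit: get_call_arg(call, 0, pos, 1) retrieves the
 * callee's view of the parameter, get_call_arg(call, 0, pos, 0) the caller's. */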

/* Set the flags for a call. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
{
	call->flags = flags;
	call->cb    = cb;
}

/* Sets the number of bytes the stack frame is shrunk by the callee on return. */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)
{
	assert(pop >= 0);
	call->pop = pop;
}

void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
                             ir_mode *load_mode, unsigned alignment,
                             unsigned space_before, unsigned space_after,
                             be_abi_context_t context)
{
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));
	assert(alignment > 0 && "Alignment must be greater than 0");
	arg.load_mode    = load_mode;
	arg.alignment    = alignment;
	arg.space_before = space_before;
	arg.space_after  = space_after;
	arg.is_res       = 0;
	arg.pos          = arg_pos;

	remember_call_arg(&arg, call, context);
}

void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));

	arg.in_reg = 1;
	arg.reg    = reg;
	arg.is_res = 0;
	arg.pos    = arg_pos;

	remember_call_arg(&arg, call, context);
}

void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));

	arg.in_reg = 1;
	arg.reg    = reg;
	arg.is_res = 1;
	arg.pos    = arg_pos;

	remember_call_arg(&arg, call, context);
}

/* Get the flags of an ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
{
	return call->flags;
}

/**
 * Constructor for a new ABI call object.
 *
 * @return the new ABI call object
 */
static be_abi_call_t *be_abi_call_new(void)
{
	be_abi_call_t *call = XMALLOCZ(be_abi_call_t);

	call->params = new_set(cmp_call_arg, 16);

	call->flags.try_omit_fp = be_options.omit_fp;
	return call;
}

/**
 * Destructor for an ABI call object.
 */
static void be_abi_call_free(be_abi_call_t *call)
{
	del_set(call->params);
	free(call);
}

/**
 * Initializes the frame layout from parts.
 *
 * @param frame   the stack layout that will be initialized
 * @param args    the stack argument layout type
 * @param between the between layout type
 * @param locals  the method frame type
 *
 * @return the initialized stack layout
 */
static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
                                           ir_type *between, ir_type *locals)
{
	frame->arg_type       = args;
	frame->between_type   = between;
	frame->frame_type     = locals;
	frame->initial_offset = 0;
	frame->initial_bias   = 0;
	frame->order[1]       = between;

	/* typical decreasing stack: locals have the
	 * lowest addresses, arguments the highest */
	frame->order[0] = locals;
	frame->order[2] = args;
	return frame;
}
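
/* The resulting order on a typical downwards-growing stack (higher addresses
 * on top; the between type usually holds the return address and the saved
 * frame pointer):
 *
 *     order[2]: stack arguments   (highest addresses)
 *     order[1]: between type
 *     order[0]: local variables   (lowest addresses)
 */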

/*
   Adjustment of the calls inside a graph.
*/

/**
 * Transform a call node into a be_Call node.
 *
 * @param env     The ABI environment for the current irg.
 * @param irn     The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
	ir_graph *irg              = get_irn_irg(irn);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	ir_type *call_tp           = get_Call_type(irn);
	ir_node *call_ptr          = get_Call_ptr(irn);
	size_t   n_params          = get_method_n_params(call_tp);
	ir_node *curr_mem          = get_Call_mem(irn);
	ir_node *bl                = get_nodes_block(irn);
	int      stack_size        = 0;
	const arch_register_t *sp  = arch_env->sp;
	be_abi_call_t *call        = be_abi_call_new();
	ir_mode *mach_mode         = sp->reg_class->mode;
	int      n_res             = get_method_n_ress(call_tp);

	ir_node *res_proj       = NULL;
	int      n_reg_params   = 0;
	int      n_stack_params = 0;
	int      n_ins;

	const arch_register_t **states         = NEW_ARR_F(const arch_register_t*, 0);
	const arch_register_t **destroyed_regs = NEW_ARR_F(const arch_register_t*, 0);
	ir_node  *low_call;
	ir_node **in;
	ir_node **res_projs;
	int       n_reg_results = 0;
	int      *reg_param_idxs;
	int      *stack_param_idx;
	int       i, n;
	int       throws_exception;
	size_t    s;
	size_t    p;
	dbg_info *dbgi;

	/* Let the isa fill out the abi description for that call node. */
	arch_env_get_call_abi(arch_env, call_tp, call);

	/* Insert code to put the stack arguments on the stack. */
	assert((size_t)get_Call_n_params(irn) == n_params);
	stack_param_idx = ALLOCAN(int, n_params);
	for (p = 0; p < n_params; ++p) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
		assert(arg);
		if (!arg->in_reg) {
			int arg_size = get_type_size_bytes(get_method_param_type(call_tp, p));

			stack_size += round_up2(arg->space_before, arg->alignment);
			stack_size += round_up2(arg_size, arg->alignment);
			stack_size += round_up2(arg->space_after, arg->alignment);

			stack_param_idx[n_stack_params++] = p;
		}
	}

	/* Collect all arguments which are passed in registers. */
	reg_param_idxs = ALLOCAN(int, n_params);
	for (p = 0; p < n_params; ++p) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
		if (arg && arg->in_reg) {
			reg_param_idxs[n_reg_params++] = p;
		}
	}

	/*
	 * If the stack is decreasing and we do not want to store sequentially,
	 * or someone else allocated the call frame,
	 * we allocate as much space on the stack as all parameters need, by
	 * moving the stack pointer along the stack's direction.
	 *
	 * Note: we also have to do this for stack_size == 0, because we may have
	 * to adjust stack alignment for the call.
	 */
	curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
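
	/* Example: two stack parameters of 4 bytes each, with 4-byte alignment and
	 * no extra space before/after, yield stack_size == 8; the IncSP above then
	 * reserves those 8 bytes before the stores below fill in the slots. */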

	dbgi = get_irn_dbg_info(irn);
	/* If there are some parameters which shall be passed on the stack. */
	if (n_stack_params > 0) {
		int       curr_ofs = 0;
		ir_node **in       = ALLOCAN(ir_node*, n_stack_params+1);
		unsigned  n_in     = 0;

		curr_mem   = get_Call_mem(irn);
		in[n_in++] = curr_mem;

		for (i = 0; i < n_stack_params; ++i) {
			int p                  = stack_param_idx[i];
			be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
			ir_node *param         = get_Call_param(irn, p);
			ir_node *addr          = curr_sp;
			ir_node *mem           = NULL;
			ir_type *param_type    = get_method_param_type(call_tp, p);
			int param_size         = get_type_size_bytes(param_type) + arg->space_after;

			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next one must be incremented,
			 * and the memory value propagated.
			 */
			curr_ofs += arg->space_before;
			curr_ofs  = round_up2(curr_ofs, arg->alignment);

			/* Make the expression to compute the argument's offset. */
			if (curr_ofs > 0) {
				ir_mode *constmode = mach_mode;
				if (mode_is_reference(mach_mode)) {
					constmode = mode_Is;
				}
				addr = new_r_Const_long(irg, constmode, curr_ofs);
				addr = new_r_Add(bl, curr_sp, addr, mach_mode);
			}

			/* Insert a store for primitive arguments. */
			if (is_atomic_type(param_type)) {
				ir_node *nomem     = get_irg_no_mem(irg);
				ir_node *mem_input = nomem;
				ir_node *store     = new_rd_Store(dbgi, bl, mem_input, addr, param, cons_none);
				mem = new_r_Proj(store, mode_M, pn_Store_M);
			} else {
				/* Make a mem copy for compound arguments. */
				ir_node *copy;

				assert(mode_is_reference(get_irn_mode(param)));
				copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
				mem  = new_r_Proj(copy, mode_M, pn_CopyB_M);
			}

			curr_ofs += param_size;

			in[n_in++] = mem;
		}

		/* We only need the sync if we didn't build the stores sequentially. */
		if (n_stack_params >= 1) {
			curr_mem = new_r_Sync(bl, n_in, in);
		} else {
			curr_mem = get_Call_mem(irn);
		}
	}

	/* Put caller save into the destroyed set and state registers in the states
	 * set */
	for (i = 0, n = arch_env->n_register_classes; i < n; ++i) {
		unsigned j;
		const arch_register_class_t *cls = &arch_env->register_classes[i];
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);

			/* even if destroyed all is specified, neither SP nor FP are
			 * destroyed (else bad things will happen) */
			if (reg == arch_env->sp || reg == arch_env->bp)
				continue;

			if (reg->type & arch_register_type_state) {
				ARR_APP1(const arch_register_t*, destroyed_regs, reg);
				ARR_APP1(const arch_register_t*, states, reg);
				/* we're already in the destroyed set so no need for further
				 * checking */
				continue;
			}

			if (arch_register_is_caller_save(arch_env, reg))
				ARR_APP1(const arch_register_t*, destroyed_regs, reg);
		}
	}

	/* search the largest result proj number */
	res_projs = ALLOCANZ(ir_node*, n_res);
	foreach_out_edge(irn, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
			continue;

		res_proj = irn;
		foreach_out_edge(irn, res_edge) {
			int      proj;
			ir_node *res = get_edge_src_irn(res_edge);

			assert(is_Proj(res));

			proj = get_Proj_proj(res);
			assert(proj < n_res);
			assert(res_projs[proj] == NULL);
			res_projs[proj] = res;
		}
	}

	/* TODO: this is not correct for cases where return values are passed
	 * on the stack, but no known ABI does this currently... */
	n_reg_results = n_res;

	n_ins = 0;
	in    = ALLOCAN(ir_node*, n_reg_params + ARR_LEN(states));

	/* make the back end call node and set its register requirements. */
	for (i = 0; i < n_reg_params; ++i) {
		in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
	}

	/* add state registers ins */
	for (s = 0; s < ARR_LEN(states); ++s) {
		const arch_register_t       *reg = states[s];
		const arch_register_class_t *cls = reg->reg_class;
		ir_node *regnode = new_r_Unknown(irg, cls->mode);
		in[n_ins++]      = regnode;
	}
	assert(n_ins == (int) (n_reg_params + ARR_LEN(states)));

	/* ins collected, build the call */
	throws_exception = ir_throws_exception(irn);
	if (env->call->flags.call_has_imm && is_SymConst(call_ptr)) {
		/* direct call */
		low_call = be_new_Call(dbgi, irg, bl, curr_mem, sp->single_req, curr_sp,
		                       sp->single_req, curr_sp,
		                       n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
		                       n_ins, in, get_Call_type(irn));
		be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
	} else {
		/* indirect call */
		low_call = be_new_Call(dbgi, irg, bl, curr_mem, sp->single_req, curr_sp,
		                       sp->reg_class->class_req, call_ptr,
		                       n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
		                       n_ins, in, get_Call_type(irn));
	}
	ir_set_throws_exception(low_call, throws_exception);
	be_Call_set_pop(low_call, call->pop);

	/* put the call into the list of all calls for later processing */
	ARR_APP1(ir_node *, env->calls, low_call);

	/* create new stack pointer */
	curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
	be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
			arch_register_req_type_ignore | arch_register_req_type_produces_sp);
	arch_set_irn_register(curr_sp, sp);

	/* now handle results */
	for (i = 0; i < n_res; ++i) {
		ir_node           *proj = res_projs[i];
		be_abi_call_arg_t *arg  = get_call_arg(call, 1, i, 0);

		/* returning values on the stack is not supported yet */
		assert(arg->in_reg);

		/*
			shift the proj number to the right, since we will drop the
			unspeakable Proj_T from the Call. Therefore, all real argument
			Proj numbers must be increased by pn_be_Call_first_res
		*/
		long pn = i + pn_be_Call_first_res;

		if (proj == NULL) {
			ir_type *res_type = get_method_res_type(call_tp, i);
			ir_mode *mode     = get_type_mode(res_type);
			proj              = new_r_Proj(low_call, mode, pn);
			res_projs[i]      = proj;
		} else {
			set_Proj_pred(proj, low_call);
			set_Proj_proj(proj, pn);
		}

		if (arg->in_reg) {
			/* remove register from destroyed regs */
			size_t j;
			size_t n = ARR_LEN(destroyed_regs);
			for (j = 0; j < n; ++j) {
				if (destroyed_regs[j] == arg->reg) {
					destroyed_regs[j] = destroyed_regs[n-1];
					ARR_SHRINKLEN(destroyed_regs, n-1);
					break;
				}
			}
		}
	}

	DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));

	/* Set the register classes and constraints of the Call parameters. */
	for (i = 0; i < n_reg_params; ++i) {
		int index              = reg_param_idxs[i];
		be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
		assert(arg->reg != NULL);

		be_set_constr_single_reg_in(low_call, n_be_Call_first_arg + i,
		                            arg->reg, arch_register_req_type_none);
	}

	/* Set the register constraints of the results. */
	for (i = 0; i < n_res; ++i) {
		ir_node                 *proj = res_projs[i];
		const be_abi_call_arg_t *arg  = get_call_arg(call, 1, i, 0);
		int                      pn   = get_Proj_proj(proj);

		assert(arg->in_reg);
		be_set_constr_single_reg_out(low_call, pn, arg->reg,
		                             arch_register_req_type_none);
		arch_set_irn_register(proj, arg->reg);
	}
	exchange(irn, low_call);

	/* kill the ProjT node */
	if (res_proj != NULL) {
		kill_node(res_proj);
	}

	/* Make additional projs for the caller save registers
	   and the Keep node which keeps them alive. */
	{
		ir_node *keep;
		size_t   d;
		int      n = 0;
		int      curr_res_proj = pn_be_Call_first_res + n_reg_results;
		int      n_ins;

		n_ins = ARR_LEN(destroyed_regs) + n_reg_results + 1;
		in    = ALLOCAN(ir_node *, n_ins);

		/* also keep the stack pointer */
		set_irn_link(curr_sp, (void*) sp);
		in[n++] = curr_sp;

		for (d = 0; d < ARR_LEN(destroyed_regs); ++d) {
			const arch_register_t *reg = destroyed_regs[d];
			ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);

			/* memorize the register in the link field; we need it afterwards
			 * to set the register class of the Keep correctly. */
			be_set_constr_single_reg_out(low_call, curr_res_proj, reg,
			                             arch_register_req_type_none);
			arch_set_irn_register(proj, reg);

			set_irn_link(proj, (void*) reg);
			in[n++] = proj;
			++curr_res_proj;
		}

		for (i = 0; i < n_reg_results; ++i) {
			ir_node *proj              = res_projs[i];
			const arch_register_t *reg = arch_get_irn_register(proj);
			set_irn_link(proj, (void*) reg);
			in[n++] = proj;
		}

		/* create the Keep for the caller save registers */
		keep = be_new_Keep(bl, n, in);
		for (i = 0; i < n; ++i) {
			const arch_register_t *reg = (const arch_register_t*)get_irn_link(in[i]);
			be_node_set_reg_class_in(keep, i, reg->reg_class);
		}
	}

	/* Clean up the stack. */
	assert(stack_size >= call->pop);
	stack_size -= call->pop;

	if (stack_size > 0) {
		ir_node *mem_proj = NULL;

		foreach_out_edge(low_call, edge) {
			ir_node *irn = get_edge_src_irn(edge);
			if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
				mem_proj = irn;
				break;
			}
		}

		if (mem_proj == NULL) {
			mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M);
			keep_alive(mem_proj);
		}
	}
	/* Clean up the stack frame or revert alignment fixes if we allocated it */
	curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);

	be_abi_call_free(call);
	DEL_ARR_F(states);
	DEL_ARR_F(destroyed_regs);

	return curr_sp;
}

/**
 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
 *
 * @param stack_alignment the minimum stack alignment
 * @param size            the node containing the non-aligned size
 * @param block           the block where new nodes are allocated on
 * @param dbg             debug info for new nodes
 *
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
                                  ir_node *block, dbg_info *dbg)
{
	if (stack_alignment > 1) {
		ir_mode   *mode;
		ir_tarval *tv;
		ir_node   *mask;
		ir_graph  *irg;

		assert(is_po2(stack_alignment));

		mode = get_irn_mode(size);
		tv   = new_tarval_from_long(stack_alignment-1, mode);
		irg  = get_Block_irg(block);
		mask = new_r_Const(irg, tv);
		size = new_rd_Add(dbg, block, size, mask, mode);

		tv   = new_tarval_from_long(-(long)stack_alignment, mode);
		mask = new_r_Const(irg, tv);
		size = new_rd_And(dbg, block, size, mask, mode);
	}
	return size;
}
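
/* The nodes built above compute size = (size + alignment-1) & -alignment.
 * E.g. with stack_alignment == 16 this is (size + 15) & -16, rounding the
 * byte count up to the next multiple of 16. This only works for powers of
 * two, which the assertion enforces. */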

/**
 * Adjust an alloca.
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
{
	ir_node *block             = get_nodes_block(alloc);
	ir_graph *irg              = get_Block_irg(block);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	ir_node *alloc_mem         = NULL;
	ir_node *alloc_res         = NULL;
	ir_type *type              = get_Alloc_type(alloc);
	dbg_info *dbg;

	ir_node *new_alloc;
	ir_node *count;
	ir_node *size;
	ir_node *ins[2];
	unsigned stack_alignment;

	/* all non-stack Alloc nodes should already be lowered before the backend */
	assert(get_Alloc_where(alloc) == stack_alloc);

	foreach_out_edge(alloc, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		assert(is_Proj(irn));
		switch (get_Proj_proj(irn)) {
		case pn_Alloc_M:
			alloc_mem = irn;
			break;
		case pn_Alloc_res:
			alloc_res = irn;
			break;
		default:
			break;
		}
	}

	/* Beware: currently Alloc nodes without a result might happen;
	   only escape analysis kills them, and that phase runs only for
	   object-oriented source. We kill the Alloc here. */
	if (alloc_res == NULL && alloc_mem) {
		exchange(alloc_mem, get_Alloc_mem(alloc));
		return curr_sp;
	}

	dbg   = get_irn_dbg_info(alloc);
	count = get_Alloc_count(alloc);

	/* we might need to multiply the count with the element size */
	if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) {
		ir_mode   *mode = get_irn_mode(count);
		ir_tarval *tv   = new_tarval_from_long(get_type_size_bytes(type),
		                                       mode);
		ir_node   *cnst = new_rd_Const(dbg, irg, tv);
		size = new_rd_Mul(dbg, block, count, cnst, mode);
	} else {
		size = count;
	}

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit it. */
	env->call->flags.try_omit_fp = 0;

	stack_alignment = 1 << arch_env->stack_alignment;
	size            = adjust_alloc_size(stack_alignment, size, block, dbg);
	new_alloc       = be_new_AddSP(arch_env->sp, block, curr_sp, size);
	set_irn_dbg_info(new_alloc, dbg);

	if (alloc_mem != NULL) {
		ir_node *addsp_mem;
		ir_node *sync;

		addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);

		/* We need to sync the output mem of the AddSP with the input mem
		   edge into the alloc node. */
		ins[0] = get_Alloc_mem(alloc);
		ins[1] = addsp_mem;
		sync   = new_r_Sync(block, 2, ins);

		exchange(alloc_mem, sync);
	}

	exchange(alloc, new_alloc);

	/* fix projnum of alloca res */
	set_Proj_proj(alloc_res, pn_be_AddSP_res);

	curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);

	return curr_sp;
}

/**
 * Adjust a Free.
 * The Free is transformed into a back end free node and connected to the stack nodes.
 */
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
{
	ir_node *block             = get_nodes_block(free);
	ir_graph *irg              = get_irn_irg(free);
	ir_type *type              = get_Free_type(free);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	ir_mode *sp_mode           = arch_env->sp->reg_class->mode;
	dbg_info *dbg              = get_irn_dbg_info(free);
	ir_node *subsp, *mem, *res, *size, *sync;
	ir_node *in[2];
	unsigned stack_alignment;

	/* all non-stack-alloc Free nodes should already be lowered before the
	 * backend phase */
	assert(get_Free_where(free) == stack_alloc);

	/* we might need to multiply the size with the element size */
	if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) {
		ir_tarval *tv   = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
		ir_node   *cnst = new_rd_Const(dbg, irg, tv);
		ir_node   *mul  = new_rd_Mul(dbg, block, get_Free_count(free),
		                             cnst, mode_Iu);
		size = mul;
	} else {
		size = get_Free_count(free);
	}

	stack_alignment = 1 << arch_env->stack_alignment;
	size            = adjust_alloc_size(stack_alignment, size, block, dbg);

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit it. */
	env->call->flags.try_omit_fp = 0;
	subsp = be_new_SubSP(arch_env->sp, block, curr_sp, size);
	set_irn_dbg_info(subsp, dbg);

	mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
	res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);

	/* we need to sync the memory */
	in[0] = get_Free_mem(free);
	in[1] = mem;
	sync  = new_r_Sync(block, 2, in);

	/* and make the SubSP dependent on the former memory */
	add_irn_dep(subsp, get_Free_mem(free));

	/* kill the free */
	exchange(free, sync);
	curr_sp = res;

	return curr_sp;
}

/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 *
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(ir_node *n1, ir_node *n2)
{
	assert(get_nodes_block(n1) == get_nodes_block(n2));

	return heights_reachable_in_block(ir_heights, n1, n2);
}

/**
 * A qsort comparator that orders two nodes according to their data
 * dependencies, used to sequence the Call/Alloc/Free nodes of a block.
 */
static int cmp_call_dependency(const void *c1, const void *c2)
{
	ir_node *n1 = *(ir_node **) c1;
	ir_node *n2 = *(ir_node **) c2;
	unsigned h1, h2;

	/*
		Classical qsort() comparison function behavior:
		 0 if both elements are equal
		 1 if the second is "smaller" than the first
		-1 if the first is "smaller" than the second
	*/
	if (dependent_on(n1, n2))
		return -1;

	if (dependent_on(n2, n1))
		return 1;

	/* The nodes have no depth order, but we need a total order because qsort()
	 * is not stable.
	 *
	 * Additionally, we need to respect transitive dependencies. Consider a
	 * Call a depending on Call b and an independent Call c.
	 * We MUST NOT order c > a and b > c. */
	h1 = get_irn_height(ir_heights, n1);
	h2 = get_irn_height(ir_heights, n2);
	if (h1 < h2) return -1;
	if (h1 > h2) return 1;
	/* Same height, so use an arbitrary but deterministic order */
	return get_irn_idx(n1) - get_irn_idx(n2);
}
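
/* Example: if Call b computes an argument of Call a, dependent_on(a, b)
 * holds and the comparator returns -1, so a sorts before b. The loop in
 * process_ops_in_block() below walks the sorted array backwards, so b is
 * wired into the stack pointer chain before a. */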

/**
 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
 */
static void link_ops_in_block_walker(ir_node *irn, void *data)
{
	be_abi_irg_t *env  = (be_abi_irg_t*)data;
	unsigned      code = get_irn_opcode(irn);

	if (code == iro_Call ||
	    (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
	    (code == iro_Free  && get_Free_where(irn) == stack_alloc)) {
		ir_node *bl   = get_nodes_block(irn);
		void    *save = get_irn_link(bl);

		set_irn_link(irn, save);
		set_irn_link(bl, irn);
	}

	if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
		ir_node       *param = get_Builtin_param(irn, 0);
		ir_tarval     *tv    = get_Const_tarval(param);
		unsigned long  value = get_tarval_long(tv);
		/* use ebp, so the climbframe algo works... */
		if (value > 0) {
			env->call->flags.try_omit_fp = 0;
		}
	}
}
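
/* The walker threads a singly linked list through the link fields: each
 * block's link points to the node linked last, and every node's link points
 * to the node linked before it. process_ops_in_block() consumes this list. */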

/**
 * Process all Call/Alloc/Free nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * nodes inside the block. We first order this list according to data
 * dependency and then connect the nodes together.
 */
static void process_ops_in_block(ir_node *bl, void *data)
{
	be_abi_irg_t *env     = (be_abi_irg_t*)data;
	ir_node      *curr_sp = env->init_sp;
	ir_node      *irn;
	ir_node     **nodes;
	int           n;
	int           n_nodes;

	n_nodes = 0;
	for (irn = (ir_node*)get_irn_link(bl); irn != NULL;
	     irn = (ir_node*)get_irn_link(irn)) {
		++n_nodes;
	}

	nodes = ALLOCAN(ir_node*, n_nodes);
	for (irn = (ir_node*)get_irn_link(bl), n = 0; irn != NULL;
	     irn = (ir_node*)get_irn_link(irn), ++n) {
		nodes[n] = irn;
	}

	/* If there were call nodes in the block. */
	if (n_nodes > 0) {
		ir_node *keep;
		int      i;

		/* order the call nodes according to data dependency */
		qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);

		for (i = n_nodes - 1; i >= 0; --i) {
			ir_node *irn = nodes[i];

			DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
			switch (get_irn_opcode(irn)) {
			case iro_Call:
				curr_sp = adjust_call(env, irn, curr_sp);
				break;
			case iro_Alloc:
				if (get_Alloc_where(irn) == stack_alloc)
					curr_sp = adjust_alloc(env, irn, curr_sp);
				break;
			case iro_Free:
				if (get_Free_where(irn) == stack_alloc)
					curr_sp = adjust_free(env, irn, curr_sp);
				break;
			default:
				panic("invalid call");
			}
		}

		/* Keep the last stack state in the block by tying it to a Keep node;
		 * the proj from calls is already kept */
		if (curr_sp != env->init_sp &&
		    !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
			nodes[0] = curr_sp;
			keep     = be_new_Keep(bl, 1, nodes);
			pmap_insert(env->keep_map, bl, keep);
		}
	}

	set_irn_link(bl, curr_sp);
}

/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(ir_graph *const irg, be_abi_irg_t *const abi)
{
	irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi);

	ir_heights = heights_new(irg);
	irg_block_walk_graph(irg, NULL, process_ops_in_block, abi);
	heights_free(ir_heights);
}

/**
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param call        the current call ABI
 * @param method_type the method type
 *
 * @return the stack argument layout type
 */
static ir_type *compute_arg_type(ir_graph *irg, be_abi_call_t *call,
                                 ir_type *method_type)
{
	struct obstack *obst       = be_get_be_obst(irg);
	ir_type    *frame_type     = get_irg_frame_type(irg);
	size_t     n_params        = get_method_n_params(method_type);
	size_t     n_frame_members = get_compound_n_members(frame_type);
	ir_entity *va_start_entity = NULL;
	size_t     f;
	int        ofs = 0;

	ir_type *res;
	size_t i;
	ir_entity **map = OALLOCNZ(obst, ir_entity*, n_params);
	res = new_type_struct(new_id_from_chars("arg_type", 8));

	/* collect existing entities for value_param_types */
	for (f = n_frame_members; f > 0; ) {
		ir_entity *entity = get_compound_member(frame_type, --f);
		size_t     num;

		set_entity_link(entity, NULL);
		if (!is_parameter_entity(entity))
			continue;
		num = get_entity_parameter_number(entity);
		if (num == IR_VA_START_PARAMETER_NUMBER) {
			/* move entity to new arg_type */
			set_entity_owner(entity, res);
			va_start_entity = entity;
			continue;
		}
		assert(num < n_params);
		if (map[num] != NULL)
			panic("multiple entities for parameter %u in %+F found", num, irg);

		if (num != n_params && get_call_arg(call, 0, num, 1)->in_reg) {
			/* don't move this entity */
			continue;
		}

		map[num] = entity;
		/* move entity to new arg_type */
		set_entity_owner(entity, res);
	}

	for (i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg        = get_call_arg(call, 0, i, 1);
		ir_type           *param_type = get_method_param_type(method_type, i);
		ir_entity         *entity;

		if (arg->in_reg)
			continue;
		entity = map[i];
		if (entity == NULL) {
			/* create a new entity */
			entity = new_parameter_entity(res, i, param_type);
		}
		ofs += arg->space_before;
		ofs  = round_up2(ofs, arg->alignment);
		set_entity_offset(entity, ofs);
		ofs += arg->space_after;
		ofs += get_type_size_bytes(param_type);
		arg->stack_ent = entity;
	}
	if (va_start_entity != NULL) {
		set_entity_offset(va_start_entity, ofs);
	}
	set_type_size_bytes(res, ofs);
	set_type_state(res, layout_fixed);

	return res;
}

static int cmp_regs(const void *a, const void *b)
{
	arch_register_t const *const p = *(arch_register_t const**)a;
	arch_register_t const *const q = *(arch_register_t const**)b;

	if (p->reg_class == q->reg_class)
		return p->index - q->index;
	return p->reg_class < q->reg_class ? -1 : +1;
}

static void reg_map_to_arr(arch_register_t const **const res, pmap *const reg_map)
{
	size_t i = 0;
	size_t n = pmap_count(reg_map);

	foreach_pmap(reg_map, ent) {
		res[i++] = (arch_register_t const*)ent->key;
	}

	qsort(res, n, sizeof(res[0]), cmp_regs);
}

/**
 * Creates a be_Return for a Return node.
 *
 * @param env the abi environment
 * @param irn the Return node
 */
static ir_node *create_be_return(be_abi_irg_t *const env, ir_node *const irn)
{
	ir_node          *const bl = get_nodes_block(irn);
	be_abi_call_t    *call     = env->call;
	ir_graph         *irg      = get_Block_irg(bl);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	pmap             *reg_map  = pmap_create();
	ir_node          *keep     = pmap_get(ir_node, env->keep_map, bl);
	int       in_max;
	int       i, n;
	ir_node **in;
	ir_node  *stack;
	const arch_register_t **regs;

	/*
		Get the valid stack node in this block.
		If we had a call in that block there is a Keep constructed by
		process_calls() which points to the last stack modification in that
		block; we'll use it then. Otherwise we use the stack from the start
		block and let the SSA construction fix the usage.
	*/
	stack = be_abi_reg_map_get(env->regs, arch_env->sp);
	if (keep) {
		stack = get_irn_n(keep, 0);
		kill_node(keep);
		remove_End_keepalive(get_irg_end(irg), keep);
	}

	int const n_res = get_Return_n_ress(irn);
	/* Insert results for Return into the register map. */
	for (i = 0; i < n_res; ++i) {
		ir_node *res           = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
		assert(arg->in_reg && "return value must be passed in register");
		pmap_insert(reg_map, (void *) arg->reg, res);
	}

	/* Add uses of the callee save registers. */
	foreach_pmap(env->regs, ent) {
		const arch_register_t *reg = (const arch_register_t*)ent->key;
		if (arch_register_is_callee_save(arch_env, reg))
			pmap_insert(reg_map, ent->key, ent->value);
	}

	be_abi_reg_map_set(reg_map, arch_env->sp, stack);

	/*
		The maximum size of the in array for Return nodes is
		return args + callee save/ignore registers + memory + stack pointer
	*/
	in_max = pmap_count(reg_map) + n_res + 2;

	in   = ALLOCAN(ir_node*, in_max);
	regs = ALLOCAN(arch_register_t const*, in_max);

	in[0]   = get_Return_mem(irn);
	in[1]   = be_abi_reg_map_get(reg_map, arch_env->sp);
	regs[0] = NULL;
	regs[1] = arch_env->sp;
	n       = 2;

	/* clear the SP entry, since it has already been added to the in array. */
	pmap_insert(reg_map, (void *) arch_env->sp, NULL);
	for (i = 0; i < n_res; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);

		in[n]     = be_abi_reg_map_get(reg_map, arg->reg);
		regs[n++] = arg->reg;

		/* Clear the map entry to mark the register as processed. */
		be_abi_reg_map_set(reg_map, arg->reg, NULL);
	}

	/* add the remaining registers from the map. */
	foreach_pmap(reg_map, ent) {
		if (ent->value) {
			in[n]     = (ir_node*)ent->value;
			regs[n++] = (const arch_register_t*)ent->key;
		}
	}

	/* The in array for the new back end return is now ready. */
	dbg_info *const dbgi = get_irn_dbg_info(irn);
	ir_node  *const ret  = be_new_Return(dbgi, irg, bl, n_res, call->pop, n, in);

	/* Set the register classes of the return's parameters accordingly. */
	for (i = 0; i < n; ++i) {
		if (regs[i] == NULL)
			continue;

		be_set_constr_single_reg_in(ret, i, regs[i], arch_register_req_type_none);
	}

	/* Free the space of the Epilog's in array and the register <-> proj map. */
	pmap_destroy(reg_map);

	return ret;
}
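
/* Resulting in-array layout of the be_Return: in[0] is the memory, in[1] the
 * stack pointer, followed by the result registers and finally the remaining
 * (callee-save) registers left in the map. */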

typedef struct lower_frame_sels_env_t {
	ir_node                     *frame;    /**< the current frame */
	const arch_register_class_t *sp_class; /**< register class of the stack pointer */
} lower_frame_sels_env_t;

/**
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddress.
 * Links all used entities.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
{
	lower_frame_sels_env_t *ctx = (lower_frame_sels_env_t*)data;

	if (is_Sel(irn)) {
		ir_node *ptr = get_Sel_ptr(irn);

		if (ptr == ctx->frame) {
			ir_entity *ent = get_Sel_entity(irn);
			ir_node   *bl  = get_nodes_block(irn);
			ir_node   *nw;

			nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
			exchange(irn, nw);
		}
	}
}

/**
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
 */
static void fix_start_block(ir_graph *irg)
{
	ir_node *initial_X   = get_irg_initial_exec(irg);
	ir_node *start_block = get_irg_start_block(irg);
	ir_node *jmp         = new_r_Jmp(start_block);

	assert(is_Proj(initial_X));
	exchange(initial_X, jmp);
	set_irg_initial_exec(irg, new_r_Bad(irg, mode_X));

	/* merge start block with successor if possible */
	{
		foreach_out_edge(jmp, edge) {
			ir_node *succ = get_edge_src_irn(edge);
			if (!is_Block(succ))
				continue;

			if (get_irn_arity(succ) == 1) {
				exchange(succ, start_block);
			}
			break;
		}
	}
}

/**
 * Modify the irg itself and the frame type.
 */
static void modify_irg(ir_graph *const irg, be_abi_irg_t *const env)
{
	be_abi_call_t         *call         = env->call;
	const arch_env_t      *arch_env     = be_get_irg_arch_env(irg);
	const arch_register_t *sp           = arch_env->sp;
	ir_type               *method_type  = get_entity_type(get_irg_entity(irg));
	be_irg_t              *birg         = be_birg_from_irg(irg);
	struct obstack        *obst         = be_get_be_obst(irg);
	be_stack_layout_t     *stack_layout = be_get_irg_stack_layout(irg);
	ir_node *end;
	ir_node *old_mem;
	ir_node *new_mem_proj;
	ir_node *mem;

	int n_params;
	int i, n;
	unsigned j;

	const arch_register_t *fp_reg;
	ir_node *frame_pointer;
	ir_node *start_bl;
	ir_node **args;
	ir_node *arg_tuple;
	ir_type *arg_type, *bet_type;
	lower_frame_sels_env_t ctx;
1364 old_mem = get_irg_initial_mem(irg);
1366 irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
1368 arg_type = compute_arg_type(irg, call, method_type);
1370 /* Convert the Sel nodes in the irg to frame addr nodes: */
1371 ctx.frame = get_irg_frame(irg);
1372 ctx.sp_class = arch_env->sp->reg_class;
1374 ir_type *const frame_tp = get_irg_frame_type(irg);
1375 /* layout the stackframe now */
1376 if (get_type_state(frame_tp) == layout_undefined) {
1377 default_layout_compound_type(frame_tp);
1380 /* align stackframe */
1381 unsigned const alignment = 1U << arch_env->stack_alignment;
1382 unsigned const frame_size = round_up2(get_type_size_bytes(frame_tp), alignment);
1383 set_type_size_bytes(frame_tp, frame_size);

	env->regs = pmap_create();

	n_params = get_method_n_params(method_type);
	args     = OALLOCNZ(obst, ir_node*, n_params);

	be_add_parameter_entity_stores(irg);

	irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);

	irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	/* Fill the argument vector */
	arg_tuple = get_irg_args(irg);
	foreach_out_edge(arg_tuple, edge) {
		ir_node *irn = get_edge_src_irn(edge);
		if (!is_Anchor(irn)) {
			int nr   = get_Proj_proj(irn);
			args[nr] = irn;
			DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
		}
	}

	stack_layout->sp_relative = call->flags.try_omit_fp;
	bet_type = call->cb->get_between_type(irg);
	stack_frame_init(stack_layout, arg_type, bet_type,
	                 get_irg_frame_type(irg));

	/* Count the register params and add them to the number of Projs for the RegParams node */
	for (i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
		if (arg->in_reg && args[i]) {
			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
			assert(i == get_Proj_proj(args[i]));

			/* For now, associate the register with the old Proj from Start representing that argument. */
			pmap_insert(env->regs, (void *) arg->reg, args[i]);
			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
		}
	}

	/* Collect all callee-save registers */
	for (i = 0, n = arch_env->n_register_classes; i < n; ++i) {
		const arch_register_class_t *cls = &arch_env->register_classes[i];
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = &cls->regs[j];
			if ((reg->type & arch_register_type_state) || arch_register_is_callee_save(arch_env, reg)) {
				pmap_insert(env->regs, (void *) reg, NULL);
			}
		}
	}

	fp_reg = call->flags.try_omit_fp ? arch_env->sp : arch_env->bp;
	rbitset_clear(birg->allocatable_regs, fp_reg->global_index);

	/* handle start block here (place a jump in the block) */
	fix_start_block(irg);

	pmap_insert(env->regs, (void *) sp, NULL);
	pmap_insert(env->regs, (void *) arch_env->bp, NULL);
	start_bl = get_irg_start_block(irg);
	ir_node *const start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
	set_irg_start(irg, start);

	/*
	 * make proj nodes for the callee save registers.
	 * memorize them, since Return nodes get those as inputs.
	 *
	 * Note that if a register corresponds to an argument, the regs map
	 * contains the old Proj from start for that argument.
	 */
	arch_register_t const **const regs = ALLOCAN(arch_register_t const*, pmap_count(env->regs));
	reg_map_to_arr(regs, env->regs);
	for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
		const arch_register_t *reg  = regs[i];
		ir_mode               *mode = reg->reg_class->mode;
		long                   nr   = i;
		arch_register_req_type_t add_type = arch_register_req_type_none;
		ir_node *proj;

		if (reg == sp)
			add_type |= arch_register_req_type_produces_sp;
		if (!rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
			add_type |= arch_register_req_type_ignore;
		}

		proj = new_r_Proj(start, mode, nr + 1);
		pmap_insert(env->regs, (void *) reg, proj);
		be_set_constr_single_reg_out(start, nr + 1, reg, add_type);
		arch_set_irn_register(proj, reg);

		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
	}

	/* create a new initial memory proj */
	assert(is_Proj(old_mem));
	arch_set_irn_register_req_out(start, 0, arch_no_register_req);
	new_mem_proj = new_r_Proj(start, mode_M, 0);
	mem = new_mem_proj;
	set_irg_initial_mem(irg, mem);

	env->init_sp = be_abi_reg_map_get(env->regs, sp);

	/* set new frame_pointer */
	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
	set_irg_frame(irg, frame_pointer);

	/* rewire old mem users to new mem */
	exchange(old_mem, mem);

	/* keep the mem (for functions with an endless loop = no return) */
	keep_alive(mem);

	set_irg_initial_mem(irg, mem);

	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for (i = 0; i < n_params; ++i) {
		ir_node *arg_proj = args[i];
		ir_node *repl     = NULL;

		if (arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			ir_type *param_type;
			int      nr = get_Proj_proj(arg_proj);
			ir_mode *mode;

			nr         = MIN(nr, n_params);
			arg        = get_call_arg(call, 0, nr, 1);
			param_type = get_method_param_type(method_type, nr);

			if (arg->in_reg) {
				repl = pmap_get(ir_node, env->regs, arg->reg);
			} else {
				ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);

				/* For atomic parameters which are actually used, we create a Load node. */
				if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
					ir_mode *mode      = get_type_mode(param_type);
					ir_mode *load_mode = arg->load_mode;
					ir_node *nomem     = get_irg_no_mem(irg);

					ir_node *load = new_r_Load(start_bl, nomem, addr, load_mode, cons_floats);
					repl = new_r_Proj(load, load_mode, pn_Load_res);

					if (mode != load_mode) {
						repl = new_r_Conv(start_bl, repl, mode);
					}
				} else {
					/* The stack parameter is not primitive (it is a struct or array),
					 * so we create a node representing the parameter's address
					 * on the stack instead. */
					repl = addr;
				}
			}

			assert(repl != NULL);

			/* Beware: the mode of the register parameters is always the mode of
			   the register class, which may be wrong. Add Conv's then. */
			mode = get_irn_mode(args[i]);
			if (mode != get_irn_mode(repl)) {
				repl = new_r_Conv(get_nodes_block(repl), repl, mode);
			}
			exchange(args[i], repl);
		}
	}

	/* the arg proj is not needed anymore now and should only be used by the anchor */
	assert(get_irn_n_edges(arg_tuple) == 1);
	kill_node(arg_tuple);
	set_irg_args(irg, new_r_Bad(irg, mode_T));

	/* All Return nodes hang on the End node, so look for them there. */
	end = get_irg_end_block(irg);
	for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
		ir_node *irn = get_Block_cfgpred(end, i);

		if (is_Return(irn)) {
			ir_node *const ret = create_be_return(env, irn);
			exchange(irn, ret);
		}
	}

	/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return
	   then; the code is dead and will never be executed. */
}

/** Fix the state inputs of calls that still hang on unknowns */
static void fix_call_state_inputs(ir_graph *const irg, be_abi_irg_t *const env)
{
	const arch_env_t *arch_env  = be_get_irg_arch_env(irg);
	int i, n, n_states;
	arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);

	/* Collect state registers */
	n = arch_env->n_register_classes;
	for (i = 0; i < n; ++i) {
		unsigned j;
		const arch_register_class_t *cls = &arch_env->register_classes[i];
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if (reg->type & arch_register_type_state) {
				ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
			}
		}
	}

	n        = ARR_LEN(env->calls);
	n_states = ARR_LEN(stateregs);
	for (i = 0; i < n; ++i) {
		int      s, arity;
		ir_node *call = env->calls[i];

		arity = get_irn_arity(call);

		/* the state reg inputs are the last n_states inputs of the calls */
		for (s = 0; s < n_states; ++s) {
			int inp = arity - n_states + s;
			const arch_register_t *reg = stateregs[s];
			ir_node *regnode = be_abi_reg_map_get(env->regs, reg);

			set_irn_n(call, inp, regnode);
		}
	}

	DEL_ARR_F(stateregs);
}

/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
	ir_type   *type   = get_entity_type(method);
	ident     *old_id = get_entity_ld_ident(method);
	ident     *id     = id_mangle3("", old_id, "$stub");
	ir_type   *parent = be->pic_trampolines_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

/**
 * Returns the trampoline entity for the given method.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
	ir_entity *result = pmap_get(ir_entity, env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
	}

	return result;
}

static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *old_id = get_entity_ld_ident(entity);
	ident     *id     = id_mangle3("", old_id, "$non_lazy_ptr");
	ir_type   *e_type = get_entity_type(entity);
	ir_type   *type   = new_type_pointer(e_type);
	ir_type   *parent = be->pic_symbols_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
	ir_entity *result = pmap_get(ir_entity, env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
	}

	return result;
}

/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
	return entity_has_definition(entity) && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}

static ir_node *get_pic_base(ir_graph *irg)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	if (arch_env->impl->get_pic_base == NULL)
		return NULL;
	return arch_env->impl->get_pic_base(irg);
}

/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
	ir_graph      *irg = get_irn_irg(node);
	be_main_env_t *be  = be_get_irg_main_env(irg);
	ir_node       *pic_base;
	ir_node       *add;
	ir_node       *block;
	ir_mode       *mode;
	ir_node       *load;
	ir_node       *load_res;
	int            arity, i;
	(void) data;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		dbg_info  *dbgi;
		ir_node   *pred = get_irn_n(node, i);
		ir_entity *entity;
		ir_entity *pic_symbol;
		ir_node   *pic_symconst;

		if (!is_SymConst(pred))
			continue;

		entity = get_SymConst_entity(pred);
		block  = get_nodes_block(pred);

		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node   *trampoline_const;

			if (can_address_relative(entity))
				continue;

			dbgi             = get_irn_dbg_info(pred);
			trampoline       = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			                                            trampoline);
			set_irn_n(node, i, trampoline_const);
			continue;
		}

		/* everything else is accessed relative to EIP */
		mode     = get_irn_mode(pred);
		pic_base = get_pic_base(irg);

		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			ir_node *add = new_r_Add(block, pic_base, pred, mode);

			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
			continue;
		}

		/* get entry from pic symbol segment */
		dbgi         = get_irn_dbg_info(pred);
		pic_symbol   = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		                                        pic_symbol);
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);

		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load     = new_r_Load(block, get_irg_no_mem(irg), add, mode, cons_floats);
		load_res = new_r_Proj(load, mode, pn_Load_res);

		set_irn_n(node, i, load_res);
	}
}
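
/* Sketch of the resulting access patterns (pseudo assembly; the mnemonics
 * and register names are illustrative only):
 *
 *     locally defined entity:  lea  reg, [pic_base + entity]
 *     external entity:         mov  reg, [pic_base + entity$non_lazy_ptr]
 *     non-relative call:       call entity$stub
 */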

void be_abi_introduce(ir_graph *irg)
{
	ir_node          *old_frame   = get_irg_frame(irg);
	const arch_env_t *arch_env    = be_get_irg_arch_env(irg);
	ir_entity        *entity      = get_irg_entity(irg);
	ir_type          *method_type = get_entity_type(entity);
	be_irg_t         *birg        = be_birg_from_irg(irg);
	struct obstack   *obst        = &birg->obst;
	ir_node          *dummy       = new_r_Dummy(irg,
	                                            arch_env->sp->reg_class->mode);
	unsigned          r;
	be_abi_irg_t      env;

	/* determine allocatable registers */
	assert(birg->allocatable_regs == NULL);
	birg->allocatable_regs = rbitset_obstack_alloc(obst, arch_env->n_registers);
	for (r = 0; r < arch_env->n_registers; ++r) {
		const arch_register_t *reg = &arch_env->registers[r];
		if (!(reg->type & arch_register_type_ignore)) {
			rbitset_set(birg->allocatable_regs, r);
		}
	}

	/* Break here if backend provides a custom API. */

	env.keep_map = pmap_create();
	env.call     = be_abi_call_new();
	arch_env_get_call_abi(arch_env, method_type, env.call);

	env.init_sp = dummy;
	env.calls   = NEW_ARR_F(ir_node*, 0);

	assure_edges(irg);

	if (be_options.pic) {
		irg_walk_graph(irg, fix_pic_symconsts, NULL, NULL);
	}

	/* Lower all call nodes in the IRG. */
	process_calls(irg, &env);

	/* Process the IRG */
	modify_irg(irg, &env);

	/* fix call inputs for state registers */
	fix_call_state_inputs(irg, &env);

	be_abi_call_free(env.call);

	/* We don't need the keep map anymore. */
	pmap_destroy(env.keep_map);

	/* calls array is not needed anymore */
	DEL_ARR_F(env.calls);

	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env.init_sp);
	exchange(old_frame, get_irg_frame(irg));

	pmap_destroy(env.regs);
}

void be_put_allocatable_regs(const ir_graph *irg,
                             const arch_register_class_t *cls, bitset_t *bs)
{
	be_irg_t *birg             = be_birg_from_irg(irg);
	unsigned *allocatable_regs = birg->allocatable_regs;
	unsigned  i;

	assert(bitset_size(bs) == cls->n_regs);
	bitset_clear_all(bs);
	for (i = 0; i < cls->n_regs; ++i) {
		const arch_register_t *reg = &cls->regs[i];
		if (rbitset_is_set(allocatable_regs, reg->global_index))
			bitset_set(bs, i);
	}
}

unsigned be_get_n_allocatable_regs(const ir_graph *irg,
                                   const arch_register_class_t *cls)
{
	bitset_t *bs = bitset_alloca(cls->n_regs);
	be_put_allocatable_regs(irg, cls, bs);
	return bitset_popcount(bs);
}

void be_set_allocatable_regs(const ir_graph *irg,
                             const arch_register_class_t *cls,
                             unsigned *raw_bitset)
{
	be_irg_t *birg             = be_birg_from_irg(irg);
	unsigned *allocatable_regs = birg->allocatable_regs;
	unsigned  i;

	rbitset_clear_all(raw_bitset, cls->n_regs);
	for (i = 0; i < cls->n_regs; ++i) {
		const arch_register_t *reg = &cls->regs[i];
		if (rbitset_is_set(allocatable_regs, reg->global_index))
			rbitset_set(raw_bitset, i);
	}
}
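
/* Usage sketch: counting the registers the allocator may use for a class
 * (this is exactly what be_get_n_allocatable_regs() above does):
 *
 *     bitset_t *bs = bitset_alloca(cls->n_regs);
 *     be_put_allocatable_regs(irg, cls, bs);
 *     unsigned n_free = bitset_popcount(bs);
 */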

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi)
void be_init_abi(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}