2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Backend ABI implementation.
23 * @author Sebastian Hack, Michael Beck
31 #include "irgraph_t.h"
34 #include "iredges_t.h"
44 #include "raw_bitset.h"
50 #include "beabihelper.h"
56 #include "bessaconstr.h"
58 #include "betranshlp.h"
60 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
62 typedef struct be_abi_call_arg_t {
63 unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
64 unsigned in_reg : 1; /**< 1: this argument is transmitted in registers, 0: on the stack. */
65 unsigned callee : 1; /**< 1: someone called us. 0: We call another function */
68 const arch_register_t *reg;
71 unsigned alignment; /**< stack alignment */
72 unsigned space_before; /**< allocate space before */
73 unsigned space_after; /**< allocate space after */
76 struct be_abi_call_t {
77 be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
78 int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
79 const be_abi_callbacks_t *cb;
84 * The ABI information for the current graph.
87 be_abi_call_t *call; /**< The ABI call information. */
89 ir_node *init_sp; /**< The node representing the stack pointer
90 at the start of the function. */
92 pmap *regs; /**< A map of all callee-save and ignore regs to
93 their Projs to the RegParams node. */
94 pmap *keep_map; /**< mapping blocks to keep nodes. */
96 ir_node **calls; /**< flexible array containing all be_Call nodes */
99 static ir_heights_t *ir_heights;
101 static ir_node *be_abi_reg_map_get(pmap *map, const arch_register_t *reg)
103 return pmap_get(ir_node, map, reg);
106 static void be_abi_reg_map_set(pmap *map, const arch_register_t* reg,
109 pmap_insert(map, reg, node);
113 * Check if the given register is callee save, i.e. will be saved by the callee.
115 static bool arch_register_is_callee_save(
116 const arch_env_t *arch_env,
117 const arch_register_t *reg)
119 if (arch_env->impl->register_saved_by)
120 return arch_env->impl->register_saved_by(reg, /*callee=*/1);
125 * Check if the given register is caller save, i.e. must be saved by the caller.
127 static bool arch_register_is_caller_save(
128 const arch_env_t *arch_env,
129 const arch_register_t *reg)
131 if (arch_env->impl->register_saved_by)
132 return arch_env->impl->register_saved_by(reg, /*callee=*/0);
139 _ ____ ___ ____ _ _ _ _
140 / \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
141 / _ \ | _ \| | | | / _` | | | '_ \ / _` |/ __| |/ / __|
142 / ___ \| |_) | | | |__| (_| | | | |_) | (_| | (__| <\__ \
143 /_/ \_\____/___| \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
145 These callbacks are used by the backend to set the parameters
146 for a specific call type.
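/*
 * A minimal sketch (not part of libFirm) of how a backend's call-ABI hook
 * might fill in a be_abi_call_t. The hook name, the param_regs array (of
 * const arch_register_t*) and result_reg are assumptions for illustration;
 * a real backend is dispatched through arch_env_get_call_abi().
 *
 *     static void hypothetical_get_call_abi(ir_type *method_type, be_abi_call_t *call)
 *     {
 *         int n_params = get_method_n_params(method_type);
 *         for (int i = 0; i < n_params; ++i) {
 *             if (i < 2) {
 *                 // first two parameters in (hypothetical) registers
 *                 be_abi_call_param_reg(call, i, param_regs[i],
 *                                       ABI_CONTEXT_CALLER | ABI_CONTEXT_CALLEE);
 *             } else {
 *                 // remaining parameters on the stack, 4-byte aligned, no padding
 *                 be_abi_call_param_stack(call, i, mode_Iu, 4, 0, 0,
 *                                         ABI_CONTEXT_CALLER | ABI_CONTEXT_CALLEE);
 *             }
 *         }
 *         if (get_method_n_ress(method_type) > 0)
 *             be_abi_call_res_reg(call, 0, result_reg,
 *                                 ABI_CONTEXT_CALLER | ABI_CONTEXT_CALLEE);
 *     }
 */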
150 * Set compare function: compares two ABI call object arguments.
152 static int cmp_call_arg(const void *a, const void *b, size_t n)
154 const be_abi_call_arg_t *p = (const be_abi_call_arg_t*)a;
155 const be_abi_call_arg_t *q = (const be_abi_call_arg_t*)b;
157 return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
161 * Get an ABI call object argument.
163 * @param call the abi call
164 * @param is_res true for call results, false for call arguments
165 * @param pos position of the argument
166 * @param callee context: nonzero for the callee view, zero for the caller view
168 static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
170 be_abi_call_arg_t arg;
173 memset(&arg, 0, sizeof(arg));
178 hash = is_res * 128 + pos;
180 return set_find(be_abi_call_arg_t, call->params, &arg, sizeof(arg), hash);
184 * Set an ABI call object argument.
186 static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
188 unsigned hash = arg->is_res * 128 + arg->pos;
189 if (context & ABI_CONTEXT_CALLEE) {
191 (void)set_insert(be_abi_call_arg_t, call->params, arg, sizeof(*arg), hash);
193 if (context & ABI_CONTEXT_CALLER) {
195 (void)set_insert(be_abi_call_arg_t, call->params, arg, sizeof(*arg), hash);
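/*
 * Example of the keying scheme used by get_call_arg() and remember_call_arg():
 * the caller- and callee-side descriptions of parameter 3 both hash to
 * 0 * 128 + 3 = 3 and are distinguished by cmp_call_arg() via the callee bit,
 * while the description of result 3 hashes to 1 * 128 + 3 = 131.
 */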
199 /* Set the flags for a call. */
200 void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
206 /* Sets the number of bytes the stack frame is shrunk by the callee on return. */
207 void be_abi_call_set_pop(be_abi_call_t *call, int pop)
213 void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
214 ir_mode *load_mode, unsigned alignment,
215 unsigned space_before, unsigned space_after,
216 be_abi_context_t context)
218 be_abi_call_arg_t arg;
219 memset(&arg, 0, sizeof(arg));
220 assert(alignment > 0 && "Alignment must be greater than 0");
221 arg.load_mode = load_mode;
222 arg.alignment = alignment;
223 arg.space_before = space_before;
224 arg.space_after = space_after;
228 remember_call_arg(&arg, call, context);
231 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
233 be_abi_call_arg_t arg;
234 memset(&arg, 0, sizeof(arg));
241 remember_call_arg(&arg, call, context);
244 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
246 be_abi_call_arg_t arg;
247 memset(&arg, 0, sizeof(arg));
254 remember_call_arg(&arg, call, context);
257 /* Get the flags of an ABI call object. */
258 be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
264 * Constructor for a new ABI call object.
266 * @return the new ABI call object
268 static be_abi_call_t *be_abi_call_new(void)
270 be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
272 call->params = new_set(cmp_call_arg, 16);
274 call->flags.try_omit_fp = be_options.omit_fp;
280 * Destructor for an ABI call object.
282 static void be_abi_call_free(be_abi_call_t *call)
284 del_set(call->params);
289 * Initializes the frame layout from parts
291 * @param frame the stack layout that will be initialized
292 * @param args the stack argument layout type
293 * @param between the between layout type
294 * @param locals the method frame type
296 * @return the initialized stack layout
298 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
299 ir_type *between, ir_type *locals)
301 frame->arg_type = args;
302 frame->between_type = between;
303 frame->frame_type = locals;
304 frame->initial_offset = 0;
305 frame->initial_bias = 0;
306 frame->order[1] = between;
308 /* typical decreasing stack: locals have the
309 * lowest addresses, arguments the highest */
310 frame->order[0] = locals;
311 frame->order[2] = args;
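/*
 * Resulting order for a typical downward-growing stack (a sketch; the exact
 * contents of the between type are target-specific):
 *
 *     high addresses   order[2] = args      incoming stack arguments
 *                      order[1] = between   e.g. return address, saved FP
 *     low addresses    order[0] = locals    the function's own frame
 */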
322 Adjustment of the calls inside a graph.
327 * Transform a call node into a be_Call node.
329 * @param env The ABI environment for the current irg.
330 * @param irn The call node.
331 * @param curr_sp The stack pointer node to use.
332 * @return The stack pointer after the call.
334 static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
336 ir_graph *irg = get_irn_irg(irn);
337 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
338 ir_type *call_tp = get_Call_type(irn);
339 ir_node *call_ptr = get_Call_ptr(irn);
340 size_t n_params = get_method_n_params(call_tp);
341 ir_node *curr_mem = get_Call_mem(irn);
342 ir_node *bl = get_nodes_block(irn);
344 const arch_register_t *sp = arch_env->sp;
345 be_abi_call_t *call = be_abi_call_new();
346 ir_mode *mach_mode = sp->reg_class->mode;
347 int n_res = get_method_n_ress(call_tp);
349 ir_node *res_proj = NULL;
350 int n_reg_params = 0;
351 int n_stack_params = 0;
354 const arch_register_t **states = NEW_ARR_F(const arch_register_t*, 0);
355 const arch_register_t **destroyed_regs = NEW_ARR_F(const arch_register_t*, 0);
359 int n_reg_results = 0;
361 int *stack_param_idx;
363 int throws_exception;
368 /* Let the isa fill out the abi description for that call node. */
369 arch_env_get_call_abi(arch_env, call_tp, call);
371 /* Insert code to put the stack arguments on the stack. */
372 assert((size_t)get_Call_n_params(irn) == n_params);
373 stack_param_idx = ALLOCAN(int, n_params);
374 for (p = 0; p < n_params; ++p) {
375 be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
378 int arg_size = get_type_size_bytes(get_method_param_type(call_tp, p));
380 stack_size += round_up2(arg->space_before, arg->alignment);
381 stack_size += round_up2(arg_size, arg->alignment);
382 stack_size += round_up2(arg->space_after, arg->alignment);
384 stack_param_idx[n_stack_params++] = p;
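/*
 * Worked example for the size accumulation above: a 6-byte argument with
 * alignment 4, space_before = 0 and space_after = 2 contributes
 * round_up2(0, 4) + round_up2(6, 4) + round_up2(2, 4) = 0 + 8 + 4 = 12 bytes
 * to stack_size.
 */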
388 /* Collect all arguments which are passed in registers. */
389 reg_param_idxs = ALLOCAN(int, n_params);
390 for (p = 0; p < n_params; ++p) {
391 be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
392 if (arg && arg->in_reg) {
393 reg_param_idxs[n_reg_params++] = p;
398 * If the stack is decreasing and we do not want to store sequentially,
399 * or someone else allocated the call frame,
400 * we allocate as much space on the stack as all parameters need, by
401 * moving the stack pointer along the stack's direction.
403 * Note: we also have to do this for stack_size == 0, because we may have
404 * to adjust stack alignment for the call.
406 curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
408 dbgi = get_irn_dbg_info(irn);
409 /* If there are some parameters which shall be passed on the stack. */
410 if (n_stack_params > 0) {
412 ir_node **in = ALLOCAN(ir_node*, n_stack_params+1);
415 curr_mem = get_Call_mem(irn);
416 in[n_in++] = curr_mem;
418 for (i = 0; i < n_stack_params; ++i) {
419 int p = stack_param_idx[i];
420 be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
421 ir_node *param = get_Call_param(irn, p);
422 ir_node *addr = curr_sp;
424 ir_type *param_type = get_method_param_type(call_tp, p);
425 int param_size = get_type_size_bytes(param_type) + arg->space_after;
428 * If we wanted to build the arguments sequentially,
429 * the stack pointer for the next argument must be incremented,
430 * and the memory value propagated.
432 curr_ofs += arg->space_before;
433 curr_ofs = round_up2(curr_ofs, arg->alignment);
435 /* Make the expression to compute the argument's offset. */
437 ir_mode *constmode = mach_mode;
438 if (mode_is_reference(mach_mode)) {
441 addr = new_r_Const_long(irg, constmode, curr_ofs);
442 addr = new_r_Add(bl, curr_sp, addr, mach_mode);
445 /* Insert a store for primitive arguments. */
446 if (is_atomic_type(param_type)) {
447 ir_node *nomem = get_irg_no_mem(irg);
448 ir_node *mem_input = nomem;
449 ir_node *store = new_rd_Store(dbgi, bl, mem_input, addr, param, cons_none);
450 mem = new_r_Proj(store, mode_M, pn_Store_M);
452 /* Make a mem copy for compound arguments. */
455 assert(mode_is_reference(get_irn_mode(param)));
456 copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
457 mem = new_r_Proj(copy, mode_M, pn_CopyB_M);
460 curr_ofs += param_size;
465 /* We only need the Sync if we didn't build the stores sequentially. */
466 if (n_stack_params >= 1) {
467 curr_mem = new_r_Sync(bl, n_in, in);
469 curr_mem = get_Call_mem(irn);
473 /* Put caller-save registers into the destroyed set and state registers into the states array. */
475 for (i = 0, n = arch_env->n_register_classes; i < n; ++i) {
477 const arch_register_class_t *cls = &arch_env->register_classes[i];
478 for (j = 0; j < cls->n_regs; ++j) {
479 const arch_register_t *reg = arch_register_for_index(cls, j);
481 /* even if destroyed all is specified, neither SP nor FP are
482 * destroyed (else bad things will happen) */
483 if (reg == arch_env->sp || reg == arch_env->bp)
486 if (reg->type & arch_register_type_state) {
487 ARR_APP1(const arch_register_t*, destroyed_regs, reg);
488 ARR_APP1(const arch_register_t*, states, reg);
489 /* we're already in the destroyed set, so no need for further checking */
493 if (arch_register_is_caller_save(arch_env, reg))
494 ARR_APP1(const arch_register_t*, destroyed_regs, reg);
498 /* search the largest result proj number */
499 res_projs = ALLOCANZ(ir_node*, n_res);
501 foreach_out_edge(irn, edge) {
502 ir_node *irn = get_edge_src_irn(edge);
504 if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
507 foreach_out_edge(irn, res_edge) {
508 ir_node *const res = get_edge_src_irn(res_edge);
509 long const proj = get_Proj_proj(res);
510 assert(proj < n_res);
511 assert(res_projs[proj] == NULL);
512 res_projs[proj] = res;
518 /** TODO: this is not correct for cases where return values are passed
519 * on the stack, but no known ABI does this currently...
521 n_reg_results = n_res;
524 in = ALLOCAN(ir_node*, n_reg_params + ARR_LEN(states));
526 /* make the back end call node and set its register requirements. */
527 for (i = 0; i < n_reg_params; ++i) {
528 in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
531 /* add state registers ins */
532 for (s = 0; s < ARR_LEN(states); ++s) {
533 const arch_register_t *reg = states[s];
534 const arch_register_class_t *cls = reg->reg_class;
535 ir_node *regnode = new_r_Unknown(irg, cls->mode);
536 in[n_ins++] = regnode;
538 assert(n_ins == (int) (n_reg_params + ARR_LEN(states)));
540 /* ins collected, build the call */
541 throws_exception = ir_throws_exception(irn);
542 if (env->call->flags.call_has_imm && is_SymConst(call_ptr)) {
544 low_call = be_new_Call(dbgi, bl, curr_mem, sp->single_req, curr_sp,
545 sp->single_req, curr_sp,
546 n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
547 n_ins, in, get_Call_type(irn));
548 be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
551 low_call = be_new_Call(dbgi, bl, curr_mem, sp->single_req, curr_sp,
552 sp->reg_class->class_req, call_ptr,
553 n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
554 n_ins, in, get_Call_type(irn));
556 ir_set_throws_exception(low_call, throws_exception);
557 be_Call_set_pop(low_call, call->pop);
559 /* put the call into the list of all calls for later processing */
560 ARR_APP1(ir_node *, env->calls, low_call);
562 /* create new stack pointer */
563 curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
564 be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
565 arch_register_req_type_ignore | arch_register_req_type_produces_sp);
566 arch_set_irn_register(curr_sp, sp);
568 /* now handle results */
569 for (i = 0; i < n_res; ++i) {
570 ir_node *proj = res_projs[i];
571 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
573 /* returns values on stack not supported yet */
577 shift the proj number to the right, since we will drop the
578 unspeakable Proj_T from the Call. Therefore, all real argument
579 Proj numbers must be increased by pn_be_Call_first_res
581 long pn = i + pn_be_Call_first_res;
584 ir_type *res_type = get_method_res_type(call_tp, i);
585 ir_mode *mode = get_type_mode(res_type);
586 proj = new_r_Proj(low_call, mode, pn);
589 set_Proj_pred(proj, low_call);
590 set_Proj_proj(proj, pn);
594 /* remove register from destroyed regs */
596 size_t n = ARR_LEN(destroyed_regs);
597 for (j = 0; j < n; ++j) {
598 if (destroyed_regs[j] == arg->reg) {
599 destroyed_regs[j] = destroyed_regs[n-1];
600 ARR_SHRINKLEN(destroyed_regs,n-1);
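/* The two lines above use the usual swap-with-last removal idiom: removing
 * index 1 from {A, B, C} copies C over B and shrinks the array to {A, C};
 * the order of the destroyed set does not matter. */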
607 DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
609 /* Set the register classes and constraints of the Call parameters. */
610 for (i = 0; i < n_reg_params; ++i) {
611 int index = reg_param_idxs[i];
612 be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
613 assert(arg->reg != NULL);
615 be_set_constr_single_reg_in(low_call, n_be_Call_first_arg + i,
616 arg->reg, arch_register_req_type_none);
619 /* Set the register constraints of the results. */
620 for (i = 0; i < n_res; ++i) {
621 ir_node *proj = res_projs[i];
622 const be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
623 int pn = get_Proj_proj(proj);
626 be_set_constr_single_reg_out(low_call, pn, arg->reg,
627 arch_register_req_type_none);
628 arch_set_irn_register(proj, arg->reg);
630 exchange(irn, low_call);
632 /* kill the ProjT node */
633 if (res_proj != NULL) {
637 /* Make additional projs for the caller save registers
638 and the Keep node which keeps them alive. */
644 int curr_res_proj = pn_be_Call_first_res + n_reg_results;
647 n_ins = ARR_LEN(destroyed_regs) + n_reg_results + 1;
648 in = ALLOCAN(ir_node *, n_ins);
650 /* also keep the stack pointer */
651 set_irn_link(curr_sp, (void*) sp);
654 for (d = 0; d < ARR_LEN(destroyed_regs); ++d) {
655 const arch_register_t *reg = destroyed_regs[d];
656 ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
658 /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
659 be_set_constr_single_reg_out(low_call, curr_res_proj, reg,
660 arch_register_req_type_none);
661 arch_set_irn_register(proj, reg);
663 set_irn_link(proj, (void*) reg);
668 for (i = 0; i < n_reg_results; ++i) {
669 ir_node *proj = res_projs[i];
670 const arch_register_t *reg = arch_get_irn_register(proj);
671 set_irn_link(proj, (void*) reg);
676 /* create the Keep for the caller save registers */
677 keep = be_new_Keep(bl, n, in);
678 for (i = 0; i < n; ++i) {
679 const arch_register_t *reg = (const arch_register_t*)get_irn_link(in[i]);
680 be_node_set_reg_class_in(keep, i, reg->reg_class);
684 /* Clean up the stack. */
685 assert(stack_size >= call->pop);
686 stack_size -= call->pop;
688 if (stack_size > 0) {
689 ir_node *mem_proj = NULL;
691 foreach_out_edge(low_call, edge) {
692 ir_node *irn = get_edge_src_irn(edge);
693 if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
700 mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M);
701 keep_alive(mem_proj);
704 /* Clean up the stack frame or revert alignment fixes if we allocated it */
705 curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
707 be_abi_call_free(call);
710 DEL_ARR_F(destroyed_regs);
716 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
718 * @param alignment the minimum stack alignment
719 * @param size the node containing the non-aligned size
720 * @param block the block where new nodes are allocated
721 * @param dbg debug info for new nodes
723 * @return a node representing the aligned size
725 static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
726 ir_node *block, dbg_info *dbg)
728 if (stack_alignment > 1) {
734 assert(is_po2(stack_alignment));
736 mode = get_irn_mode(size);
737 tv = new_tarval_from_long(stack_alignment-1, mode);
738 irg = get_Block_irg(block);
739 mask = new_r_Const(irg, tv);
740 size = new_rd_Add(dbg, block, size, mask, mode);
742 tv = new_tarval_from_long(-(long)stack_alignment, mode);
743 mask = new_r_Const(irg, tv);
744 size = new_rd_And(dbg, block, size, mask, mode);
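/*
 * Worked example for the rounding above: with stack_alignment = 16 and a
 * dynamic size of 13, the graph computes (13 + 15) & -16 = 28 & ~15 = 16,
 * i.e. the size is rounded up to the next multiple of the alignment.
 */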
750 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
752 static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
754 ir_node *block = get_nodes_block(alloc);
755 ir_graph *irg = get_Block_irg(block);
756 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
757 ir_node *alloc_mem = NULL;
758 ir_node *alloc_res = NULL;
759 ir_type *type = get_Alloc_type(alloc);
766 unsigned stack_alignment;
768 /* all non-stack Alloc nodes should already be lowered before the backend */
769 assert(get_Alloc_where(alloc) == stack_alloc);
771 foreach_out_edge(alloc, edge) {
772 ir_node *irn = get_edge_src_irn(edge);
774 switch (get_Proj_proj(irn)) {
786 /* Beware: Alloc nodes without a result can currently occur; only escape
787 analysis removes them, and that phase runs only for object-oriented
788 source. We kill the Alloc here. */
789 if (alloc_res == NULL && alloc_mem) {
790 exchange(alloc_mem, get_Alloc_mem(alloc));
794 dbg = get_irn_dbg_info(alloc);
795 count = get_Alloc_count(alloc);
797 /* we might need to multiply the count with the element size */
798 if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) {
799 ir_mode *mode = get_irn_mode(count);
800 ir_tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
802 ir_node *cnst = new_rd_Const(dbg, irg, tv);
803 size = new_rd_Mul(dbg, block, count, cnst, mode);
808 /* The stack pointer will be modified in an unknown manner.
809 We cannot omit the frame pointer. */
810 env->call->flags.try_omit_fp = 0;
812 stack_alignment = 1 << arch_env->stack_alignment;
813 size = adjust_alloc_size(stack_alignment, size, block, dbg);
814 new_alloc = be_new_AddSP(arch_env->sp, block, curr_sp, size);
815 set_irn_dbg_info(new_alloc, dbg);
817 if (alloc_mem != NULL) {
821 addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);
823 /* We need to sync the output mem of the AddSP with the input mem
824 edge into the alloc node. */
825 ins[0] = get_Alloc_mem(alloc);
827 sync = new_r_Sync(block, 2, ins);
829 exchange(alloc_mem, sync);
832 exchange(alloc, new_alloc);
834 /* fix projnum of alloca res */
835 set_Proj_proj(alloc_res, pn_be_AddSP_res);
837 curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);
844 * The Free is transformed into a back end free node and connected to the stack nodes.
846 static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
848 ir_node *block = get_nodes_block(free);
849 ir_graph *irg = get_irn_irg(free);
850 ir_type *type = get_Free_type(free);
851 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
852 ir_mode *sp_mode = arch_env->sp->reg_class->mode;
853 dbg_info *dbg = get_irn_dbg_info(free);
854 ir_node *subsp, *mem, *res, *size, *sync;
856 unsigned stack_alignment;
858 /* all non-stack-alloc Free nodes should already be lowered before the backend */
860 assert(get_Free_where(free) == stack_alloc);
862 /* we might need to multiply the size with the element size */
863 if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) {
864 ir_tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
865 ir_node *cnst = new_rd_Const(dbg, irg, tv);
866 ir_node *mul = new_rd_Mul(dbg, block, get_Free_count(free),
870 size = get_Free_count(free);
873 stack_alignment = 1 << arch_env->stack_alignment;
874 size = adjust_alloc_size(stack_alignment, size, block, dbg);
876 /* The stack pointer will be modified in an unknown manner.
877 We cannot omit the frame pointer. */
878 env->call->flags.try_omit_fp = 0;
879 subsp = be_new_SubSP(arch_env->sp, block, curr_sp, size);
880 set_irn_dbg_info(subsp, dbg);
882 mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
883 res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);
885 /* we need to sync the memory */
886 in[0] = get_Free_mem(free);
888 sync = new_r_Sync(block, 2, in);
890 /* and make the SubSP dependent on the former memory */
891 add_irn_dep(subsp, get_Free_mem(free));
894 exchange(free, sync);
901 * Check if a node is somehow data dependent on another one.
902 * Both nodes must be in the same basic block.
903 * @param n1 The first node.
904 * @param n2 The second node.
905 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
907 static int dependent_on(ir_node *n1, ir_node *n2)
909 assert(get_nodes_block(n1) == get_nodes_block(n2));
911 return heights_reachable_in_block(ir_heights, n1, n2);
914 static int cmp_call_dependency(const void *c1, const void *c2)
916 ir_node *n1 = *(ir_node **) c1;
917 ir_node *n2 = *(ir_node **) c2;
921 Classical qsort() comparison function behavior:
922 0 if both elements are equal
923 1 if the second is "smaller" than the first
924 -1 if the first is "smaller" than the second
926 if (dependent_on(n1, n2))
929 if (dependent_on(n2, n1))
932 /* The nodes have no depth order, but we need a total order because qsort() is not stable.
935 * Additionally, we need to respect transitive dependencies. Consider a
936 * Call a depending on Call b and an independent Call c.
937 * We MUST NOT order c > a and b > c. */
938 h1 = get_irn_height(ir_heights, n1);
939 h2 = get_irn_height(ir_heights, n2);
940 if (h1 < h2) return -1;
941 if (h1 > h2) return 1;
942 /* Same height, so use a random (but stable) order */
943 return get_irn_idx(n1) - get_irn_idx(n2);
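/*
 * Example of the resulting order: if Call a uses a result of Call b, then a is
 * data dependent on b and b is sorted before a; an independent Call c is ranked
 * by its height and, as a tie-breaker, by its node index, which keeps the order
 * total and consistent with all transitive dependencies.
 */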
947 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
949 static void link_ops_in_block_walker(ir_node *irn, void *data)
951 be_abi_irg_t *env = (be_abi_irg_t*)data;
952 unsigned code = get_irn_opcode(irn);
954 if (code == iro_Call ||
955 (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
956 (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
957 ir_node *bl = get_nodes_block(irn);
958 void *save = get_irn_link(bl);
960 set_irn_link(irn, save);
961 set_irn_link(bl, irn);
964 if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
965 ir_node *param = get_Builtin_param(irn, 0);
966 ir_tarval *tv = get_Const_tarval(param);
967 unsigned long value = get_tarval_long(tv);
968 /* use ebp, so the climbframe algo works... */
970 env->call->flags.try_omit_fp = 0;
977 * Process all Call/Alloc/Free nodes inside a basic block.
978 * Note that the link field of the block must contain a linked list of all
979 * nodes inside the Block. We first order this list according to data dependency
980 * and then connect the nodes together.
982 static void process_ops_in_block(ir_node *bl, void *data)
984 be_abi_irg_t *env = (be_abi_irg_t*)data;
985 ir_node *curr_sp = env->init_sp;
992 for (irn = (ir_node*)get_irn_link(bl); irn != NULL;
993 irn = (ir_node*)get_irn_link(irn)) {
997 nodes = ALLOCAN(ir_node*, n_nodes);
998 for (irn = (ir_node*)get_irn_link(bl), n = 0; irn != NULL;
999 irn = (ir_node*)get_irn_link(irn), ++n) {
1003 /* If there were call nodes in the block. */
1008 /* order the call nodes according to data dependency */
1009 qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
1011 for (i = n_nodes - 1; i >= 0; --i) {
1012 ir_node *irn = nodes[i];
1014 DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
1015 switch (get_irn_opcode(irn)) {
1017 curr_sp = adjust_call(env, irn, curr_sp);
1020 if (get_Alloc_where(irn) == stack_alloc)
1021 curr_sp = adjust_alloc(env, irn, curr_sp);
1024 if (get_Free_where(irn) == stack_alloc)
1025 curr_sp = adjust_free(env, irn, curr_sp);
1028 panic("invalid call");
1032 /* Keep the last stack state in the block by tying it to a Keep node;
1033 * the Proj from calls is already kept */
1034 if (curr_sp != env->init_sp &&
1035 !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
1037 keep = be_new_Keep(bl, 1, nodes);
1038 pmap_insert(env->keep_map, bl, keep);
1042 set_irn_link(bl, curr_sp);
1046 * Adjust all call nodes in the graph to the ABI conventions.
1048 static void process_calls(ir_graph *const irg, be_abi_irg_t *const abi)
1050 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi);
1052 ir_heights = heights_new(irg);
1053 irg_block_walk_graph(irg, NULL, process_ops_in_block, abi);
1054 heights_free(ir_heights);
1058 * Computes the stack argument layout type.
1059 * Changes a possibly allocated value param type by moving
1060 * entities to the stack layout type.
1062 * @param call the current call ABI
1063 * @param method_type the method type
1065 * @return the stack argument layout type
1067 static ir_type *compute_arg_type(ir_graph *irg, be_abi_call_t *call,
1068 ir_type *method_type)
1070 struct obstack *obst = be_get_be_obst(irg);
1071 ir_type *frame_type = get_irg_frame_type(irg);
1072 size_t n_params = get_method_n_params(method_type);
1073 size_t n_frame_members = get_compound_n_members(frame_type);
1074 ir_entity *va_start_entity = NULL;
1080 ir_entity **map = OALLOCNZ(obst, ir_entity*, n_params);
1081 res = new_type_struct(new_id_from_chars("arg_type", 8));
1083 /* collect existing entities for value_param_types */
1084 for (f = n_frame_members; f > 0; ) {
1085 ir_entity *entity = get_compound_member(frame_type, --f);
1088 set_entity_link(entity, NULL);
1089 if (!is_parameter_entity(entity))
1091 num = get_entity_parameter_number(entity);
1092 if (num == IR_VA_START_PARAMETER_NUMBER) {
1093 /* move entity to new arg_type */
1094 set_entity_owner(entity, res);
1095 va_start_entity = entity;
1098 assert(num < n_params);
1099 if (map[num] != NULL)
1100 panic("multiple entities for parameter %u in %+F found", f, irg);
1102 if (num != n_params && get_call_arg(call, 0, num, 1)->in_reg) {
1103 /* don't move this entity */
1108 /* move entity to new arg_type */
1109 set_entity_owner(entity, res);
1112 for (i = 0; i < n_params; ++i) {
1113 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
1114 ir_type *param_type = get_method_param_type(method_type, i);
1120 if (entity == NULL) {
1121 /* create a new entity */
1122 entity = new_parameter_entity(res, i, param_type);
1124 ofs += arg->space_before;
1125 ofs = round_up2(ofs, arg->alignment);
1126 set_entity_offset(entity, ofs);
1127 ofs += arg->space_after;
1128 ofs += get_type_size_bytes(param_type);
1129 arg->stack_ent = entity;
1131 if (va_start_entity != NULL) {
1132 set_entity_offset(va_start_entity, ofs);
1134 set_type_size_bytes(res, ofs);
1135 set_type_state(res, layout_fixed);
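/*
 * Offset example for the loop above (a sketch): with a first parameter of
 * 4 bytes and alignment 4 and a second parameter of 8 bytes and alignment 8
 * (no space_before/space_after), the first entity is placed at offset 0, the
 * second at round_up2(4, 8) = 8, and the resulting arg_type has size 16.
 */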
1140 static int cmp_regs(const void *a, const void *b)
1142 arch_register_t const *const p = *(arch_register_t const**)a;
1143 arch_register_t const *const q = *(arch_register_t const**)b;
1145 if (p->reg_class == q->reg_class)
1146 return p->index - q->index;
1148 return p->reg_class < q->reg_class ? -1 : +1;
1151 static void reg_map_to_arr(arch_register_t const **const res, pmap *const reg_map)
1154 size_t n = pmap_count(reg_map);
1157 foreach_pmap(reg_map, ent) {
1158 res[i++] = (arch_register_t const*)ent->key;
1161 qsort(res, n, sizeof(res[0]), cmp_regs);
1165 * Creates a be_Return for a Return node.
1167 * @param env the ABI environment
1168 * @param irn the Return node
1170 static ir_node *create_be_return(be_abi_irg_t *const env, ir_node *const irn)
1172 ir_node *const bl = get_nodes_block(irn);
1173 be_abi_call_t *call = env->call;
1174 ir_graph *irg = get_Block_irg(bl);
1175 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1176 pmap *reg_map = pmap_create();
1177 ir_node *keep = pmap_get(ir_node, env->keep_map, bl);
1182 const arch_register_t **regs;
1186 Get the valid stack node in this block.
1187 If we had a call in that block, there is a Keep constructed by process_calls()
1188 which points to the last stack modification in that block; we'll use
1189 it then. Otherwise we use the stack from the start block and let
1190 the SSA construction fix the usage.
1192 stack = be_abi_reg_map_get(env->regs, arch_env->sp);
1194 stack = get_irn_n(keep, 0);
1196 remove_End_keepalive(get_irg_end(irg), keep);
1199 int const n_res = get_Return_n_ress(irn);
1200 /* Insert results for Return into the register map. */
1201 for (i = 0; i < n_res; ++i) {
1202 ir_node *res = get_Return_res(irn, i);
1203 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1204 assert(arg->in_reg && "return value must be passed in register");
1205 pmap_insert(reg_map, (void *) arg->reg, res);
1208 /* Add uses of the callee save registers. */
1209 foreach_pmap(env->regs, ent) {
1210 const arch_register_t *reg = (const arch_register_t*)ent->key;
1211 if (arch_register_is_callee_save(arch_env, reg))
1212 pmap_insert(reg_map, ent->key, ent->value);
1215 be_abi_reg_map_set(reg_map, arch_env->sp, stack);
1218 Maximum size of the in array for Return nodes is
1219 return args + callee save/ignore registers + memory + stack pointer
1221 in_max = pmap_count(reg_map) + n_res + 2;
1223 in = ALLOCAN(ir_node*, in_max);
1224 regs = ALLOCAN(arch_register_t const*, in_max);
1226 in[0] = get_Return_mem(irn);
1227 in[1] = be_abi_reg_map_get(reg_map, arch_env->sp);
1229 regs[1] = arch_env->sp;
1232 /* clear SP entry, since it has already been grown. */
1233 pmap_insert(reg_map, (void *) arch_env->sp, NULL);
1234 for (i = 0; i < n_res; ++i) {
1235 be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
1237 in[n] = be_abi_reg_map_get(reg_map, arg->reg);
1238 regs[n++] = arg->reg;
1240 /* Clear the map entry to mark the register as processed. */
1241 be_abi_reg_map_set(reg_map, arg->reg, NULL);
1244 /* grow the rest of the stuff. */
1245 foreach_pmap(reg_map, ent) {
1247 in[n] = (ir_node*)ent->value;
1248 regs[n++] = (const arch_register_t*)ent->key;
1252 /* The in array for the new back end return is now ready. */
1253 dbg_info *const dbgi = get_irn_dbg_info(irn);
1254 ir_node *const ret = be_new_Return(dbgi, bl, n_res, call->pop, n, in);
1256 /* Set the register classes of the return's parameters accordingly. */
1257 for (i = 0; i < n; ++i) {
1258 if (regs[i] == NULL)
1261 be_set_constr_single_reg_in(ret, i, regs[i], arch_register_req_type_none);
1264 /* Free the space of the Epilog's in array and the register <-> proj map. */
1265 pmap_destroy(reg_map);
1270 typedef struct lower_frame_sels_env_t {
1271 ir_node *frame; /**< the current frame */
1272 const arch_register_class_t *sp_class; /**< register class of the stack pointer */
1273 } lower_frame_sels_env_t;
1276 * Walker: Replaces Sels of frame type and
1277 * value param type entities by FrameAddress.
1278 * Links all used entities.
1280 static void lower_frame_sels_walker(ir_node *irn, void *data)
1282 lower_frame_sels_env_t *ctx = (lower_frame_sels_env_t*)data;
1285 ir_node *ptr = get_Sel_ptr(irn);
1287 if (ptr == ctx->frame) {
1288 ir_entity *ent = get_Sel_entity(irn);
1289 ir_node *bl = get_nodes_block(irn);
1292 nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
1299 * The start block has no jump, instead it has an initial exec Proj.
1300 * The backend wants to handle all blocks the same way, so we replace
1301 * the out cfg edge with a real jump.
1303 static void fix_start_block(ir_graph *irg)
1305 ir_node *initial_X = get_irg_initial_exec(irg);
1306 ir_node *start_block = get_irg_start_block(irg);
1307 ir_node *jmp = new_r_Jmp(start_block);
1309 assert(is_Proj(initial_X));
1310 exchange(initial_X, jmp);
1311 set_irg_initial_exec(irg, new_r_Bad(irg, mode_X));
1313 /* merge start block with successor if possible */
1315 foreach_out_edge(jmp, edge) {
1316 ir_node *succ = get_edge_src_irn(edge);
1317 if (!is_Block(succ))
1320 if (get_irn_arity(succ) == 1) {
1321 exchange(succ, start_block);
1329 * Modify the irg itself and the frame type.
1331 static void modify_irg(ir_graph *const irg, be_abi_irg_t *const env)
1333 be_abi_call_t *call = env->call;
1334 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1335 const arch_register_t *sp = arch_env->sp;
1336 ir_type *method_type = get_entity_type(get_irg_entity(irg));
1337 be_irg_t *birg = be_birg_from_irg(irg);
1338 struct obstack *obst = be_get_be_obst(irg);
1339 be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
1342 ir_node *new_mem_proj;
1349 const arch_register_t *fp_reg;
1350 ir_node *frame_pointer;
1354 ir_type *arg_type, *bet_type;
1355 lower_frame_sels_env_t ctx;
1357 DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1359 old_mem = get_irg_initial_mem(irg);
1361 irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
1363 arg_type = compute_arg_type(irg, call, method_type);
1365 /* Convert the Sel nodes in the irg to frame addr nodes: */
1366 ctx.frame = get_irg_frame(irg);
1367 ctx.sp_class = arch_env->sp->reg_class;
1369 ir_type *const frame_tp = get_irg_frame_type(irg);
1370 /* layout the stackframe now */
1371 if (get_type_state(frame_tp) == layout_undefined) {
1372 default_layout_compound_type(frame_tp);
1375 /* align stackframe */
1376 unsigned const alignment = 1U << arch_env->stack_alignment;
1377 unsigned const frame_size = round_up2(get_type_size_bytes(frame_tp), alignment);
1378 set_type_size_bytes(frame_tp, frame_size);
1380 env->regs = pmap_create();
1382 n_params = get_method_n_params(method_type);
1383 args = OALLOCNZ(obst, ir_node*, n_params);
1385 be_add_parameter_entity_stores(irg);
1387 irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1389 irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
1391 /* Fill the argument vector */
1392 arg_tuple = get_irg_args(irg);
1393 foreach_out_edge(arg_tuple, edge) {
1394 ir_node *irn = get_edge_src_irn(edge);
1395 if (! is_Anchor(irn)) {
1396 int nr = get_Proj_proj(irn);
1398 DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1402 stack_layout->sp_relative = call->flags.try_omit_fp;
1403 bet_type = call->cb->get_between_type(irg);
1404 stack_frame_init(stack_layout, arg_type, bet_type,
1405 get_irg_frame_type(irg));
1407 /* Count the register params and add them to the number of Projs for the RegParams node */
1408 for (i = 0; i < n_params; ++i) {
1409 be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
1410 if (arg->in_reg && args[i]) {
1411 assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1412 assert(i == get_Proj_proj(args[i]));
1414 /* For now, associate the register with the old Proj from Start representing that argument. */
1415 pmap_insert(env->regs, (void *) arg->reg, args[i]);
1416 DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1420 /* Collect all callee-save registers */
1421 for (i = 0, n = arch_env->n_register_classes; i < n; ++i) {
1422 const arch_register_class_t *cls = &arch_env->register_classes[i];
1423 for (j = 0; j < cls->n_regs; ++j) {
1424 const arch_register_t *reg = &cls->regs[j];
1425 if ((reg->type & arch_register_type_state) || arch_register_is_callee_save(arch_env, reg)) {
1426 pmap_insert(env->regs, (void *) reg, NULL);
1431 fp_reg = call->flags.try_omit_fp ? arch_env->sp : arch_env->bp;
1432 rbitset_clear(birg->allocatable_regs, fp_reg->global_index);
1434 /* handle start block here (place a jump in the block) */
1435 fix_start_block(irg);
1437 pmap_insert(env->regs, (void *) sp, NULL);
1438 pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1439 start_bl = get_irg_start_block(irg);
1440 ir_node *const start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1441 set_irg_start(irg, start);
1444 * make proj nodes for the callee save registers.
1445 * memorize them, since Return nodes get those as inputs.
1447 * Note that if a register corresponds to an argument, the regs map
1448 * contains the old Proj from start for that argument.
1450 arch_register_t const **const regs = ALLOCAN(arch_register_t const*, pmap_count(env->regs));
1451 reg_map_to_arr(regs, env->regs);
1452 for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1453 const arch_register_t *reg = regs[i];
1454 ir_mode *mode = reg->reg_class->mode;
1456 arch_register_req_type_t add_type = arch_register_req_type_none;
1460 add_type |= arch_register_req_type_produces_sp;
1461 if (!rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
1462 add_type |= arch_register_req_type_ignore;
1466 proj = new_r_Proj(start, mode, nr + 1);
1467 pmap_insert(env->regs, (void *) reg, proj);
1468 be_set_constr_single_reg_out(start, nr + 1, reg, add_type);
1469 arch_set_irn_register(proj, reg);
1471 DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1474 /* create a new initial memory proj */
1475 assert(is_Proj(old_mem));
1476 arch_set_irn_register_req_out(start, 0, arch_no_register_req);
1477 new_mem_proj = new_r_Proj(start, mode_M, 0);
1479 set_irg_initial_mem(irg, mem);
1481 env->init_sp = be_abi_reg_map_get(env->regs, sp);
1483 /* set new frame_pointer */
1484 frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1485 set_irg_frame(irg, frame_pointer);
1487 /* rewire old mem users to new mem */
1488 exchange(old_mem, mem);
1490 /* keep the mem (for functions with an endless loop = no return) */
1493 set_irg_initial_mem(irg, mem);
1495 /* Now, introduce stack param nodes for all parameters passed on the stack */
1496 for (i = 0; i < n_params; ++i) {
1497 ir_node *arg_proj = args[i];
1498 ir_node *repl = NULL;
1500 if (arg_proj != NULL) {
1501 be_abi_call_arg_t *arg;
1502 ir_type *param_type;
1503 int nr = get_Proj_proj(arg_proj);
1506 nr = MIN(nr, n_params);
1507 arg = get_call_arg(call, 0, nr, 1);
1508 param_type = get_method_param_type(method_type, nr);
1511 repl = pmap_get(ir_node, env->regs, arg->reg);
1513 ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);
1515 /* For atomic parameters which are actually used, we create a Load node. */
1516 if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
1517 ir_mode *mode = get_type_mode(param_type);
1518 ir_mode *load_mode = arg->load_mode;
1519 ir_node *nomem = get_irg_no_mem(irg);
1521 ir_node *load = new_r_Load(start_bl, nomem, addr, load_mode, cons_floats);
1522 repl = new_r_Proj(load, load_mode, pn_Load_res);
1524 if (mode != load_mode) {
1525 repl = new_r_Conv(start_bl, repl, mode);
1528 /* The stack parameter is not primitive (it is a struct or array),
1529 * so we create a node representing the parameter's address
1535 assert(repl != NULL);
1537 /* Beware: the mode of the register parameters is always the mode of the register class
1538 which may be wrong. Add Conv's then. */
1539 mode = get_irn_mode(args[i]);
1540 if (mode != get_irn_mode(repl)) {
1541 repl = new_r_Conv(get_nodes_block(repl), repl, mode);
1543 exchange(args[i], repl);
1547 /* the arg proj is not needed anymore and should only be used by the anchor */
1548 assert(get_irn_n_edges(arg_tuple) == 1);
1549 kill_node(arg_tuple);
1550 set_irg_args(irg, new_r_Bad(irg, mode_T));
1552 /* All Return nodes hang on the End node, so look for them there. */
1553 end = get_irg_end_block(irg);
1554 for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
1555 ir_node *irn = get_Block_cfgpred(end, i);
1557 if (is_Return(irn)) {
1558 ir_node *const ret = create_be_return(env, irn);
1563 /* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
1564 the code is dead and will never be executed. */
1567 /** Fix the state inputs of calls that still hang on unknowns */
1568 static void fix_call_state_inputs(ir_graph *const irg, be_abi_irg_t *const env)
1570 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1572 arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
1574 /* Collect state registers */
1575 n = arch_env->n_register_classes;
1576 for (i = 0; i < n; ++i) {
1578 const arch_register_class_t *cls = &arch_env->register_classes[i];
1579 for (j = 0; j < cls->n_regs; ++j) {
1580 const arch_register_t *reg = arch_register_for_index(cls, j);
1581 if (reg->type & arch_register_type_state) {
1582 ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
1587 n = ARR_LEN(env->calls);
1588 n_states = ARR_LEN(stateregs);
1589 for (i = 0; i < n; ++i) {
1591 ir_node *call = env->calls[i];
1593 arity = get_irn_arity(call);
1595 /* the state reg inputs are the last n inputs of the calls */
1596 for (s = 0; s < n_states; ++s) {
1597 int inp = arity - n_states + s;
1598 const arch_register_t *reg = stateregs[s];
1599 ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
1601 set_irn_n(call, inp, regnode);
1605 DEL_ARR_F(stateregs);
1609 * Create a trampoline entity for the given method.
1611 static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
1613 ir_type *type = get_entity_type(method);
1614 ident *old_id = get_entity_ld_ident(method);
1615 ident *id = id_mangle3("", old_id, "$stub");
1616 ir_type *parent = be->pic_trampolines_type;
1617 ir_entity *ent = new_entity(parent, old_id, type);
1618 set_entity_ld_ident(ent, id);
1619 set_entity_visibility(ent, ir_visibility_private);
1625 * Returns the trampoline entity for the given method.
1627 static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
1629 ir_entity *result = pmap_get(ir_entity, env->ent_trampoline_map, method);
1630 if (result == NULL) {
1631 result = create_trampoline(env, method);
1632 pmap_insert(env->ent_trampoline_map, method, result);
1638 static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
1640 ident *old_id = get_entity_ld_ident(entity);
1641 ident *id = id_mangle3("", old_id, "$non_lazy_ptr");
1642 ir_type *e_type = get_entity_type(entity);
1643 ir_type *type = new_type_pointer(e_type);
1644 ir_type *parent = be->pic_symbols_type;
1645 ir_entity *ent = new_entity(parent, old_id, type);
1646 set_entity_ld_ident(ent, id);
1647 set_entity_visibility(ent, ir_visibility_private);
1652 static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
1654 ir_entity *result = pmap_get(ir_entity, env->ent_pic_symbol_map, entity);
1655 if (result == NULL) {
1656 result = create_pic_symbol(env, entity);
1657 pmap_insert(env->ent_pic_symbol_map, entity, result);
1666 * Returns non-zero if a given entity can be accessed using a relative address.
1668 static int can_address_relative(ir_entity *entity)
1670 return entity_has_definition(entity) && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
1673 static ir_node *get_pic_base(ir_graph *irg)
1675 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1676 if (arch_env->impl->get_pic_base == NULL)
1678 return arch_env->impl->get_pic_base(irg);
1681 /** patches SymConsts to work in position independent code */
1682 static void fix_pic_symconsts(ir_node *node, void *data)
1684 ir_graph *irg = get_irn_irg(node);
1685 be_main_env_t *be = be_get_irg_main_env(irg);
1695 arity = get_irn_arity(node);
1696 for (i = 0; i < arity; ++i) {
1698 ir_node *pred = get_irn_n(node, i);
1700 ir_entity *pic_symbol;
1701 ir_node *pic_symconst;
1703 if (!is_SymConst(pred))
1706 entity = get_SymConst_entity(pred);
1707 block = get_nodes_block(pred);
1709 /* calls can jump to relative addresses, so we can directly jump to
1710 the (relatively) known call address or the trampoline */
1711 if (i == 1 && is_Call(node)) {
1712 ir_entity *trampoline;
1713 ir_node *trampoline_const;
1715 if (can_address_relative(entity))
1718 dbgi = get_irn_dbg_info(pred);
1719 trampoline = get_trampoline(be, entity);
1720 trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
1722 set_irn_n(node, i, trampoline_const);
1726 /* everything else is accessed relative to EIP */
1727 mode = get_irn_mode(pred);
1728 pic_base = get_pic_base(irg);
1730 /* all ok now for locally constructed stuff */
1731 if (can_address_relative(entity)) {
1732 ir_node *add = new_r_Add(block, pic_base, pred, mode);
1734 /* make sure the walker doesn't visit this add again */
1735 mark_irn_visited(add);
1736 set_irn_n(node, i, add);
1740 /* get entry from pic symbol segment */
1741 dbgi = get_irn_dbg_info(pred);
1742 pic_symbol = get_pic_symbol(be, entity);
1743 pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
1745 add = new_r_Add(block, pic_base, pic_symconst, mode);
1746 mark_irn_visited(add);
1748 /* we need an extra indirection for global data outside our current
1749 module. The loads are always safe and can therefore float
1750 and need no memory input */
1751 load = new_r_Load(block, get_irg_no_mem(irg), add, mode, cons_floats);
1752 load_res = new_r_Proj(load, mode, pn_Load_res);
1754 set_irn_n(node, i, load_res);
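/*
 * Summary of the PIC rewrites performed by this walker (a sketch):
 *
 *     Call(SymConst &f, ...) with &f not addressable relatively
 *         -> Call(SymConst &"f$stub" trampoline, ...)
 *     SymConst &local_entity
 *         -> Add(pic_base, SymConst &local_entity)
 *     SymConst &external_entity
 *         -> Proj(Load(Add(pic_base, SymConst &"external_entity$non_lazy_ptr")))
 */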
1758 void be_abi_introduce(ir_graph *irg)
1760 ir_node *old_frame = get_irg_frame(irg);
1761 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
1762 ir_entity *entity = get_irg_entity(irg);
1763 ir_type *method_type = get_entity_type(entity);
1764 be_irg_t *birg = be_birg_from_irg(irg);
1765 struct obstack *obst = &birg->obst;
1766 ir_node *dummy = new_r_Dummy(irg,
1767 arch_env->sp->reg_class->mode);
1770 /* determine allocatable registers */
1771 assert(birg->allocatable_regs == NULL);
1772 birg->allocatable_regs = rbitset_obstack_alloc(obst, arch_env->n_registers);
1773 for (r = 0; r < arch_env->n_registers; ++r) {
1774 const arch_register_t *reg = &arch_env->registers[r];
1775 if ( !(reg->type & arch_register_type_ignore)) {
1776 rbitset_set(birg->allocatable_regs, r);
1780 /* Break here if backend provides a custom API. */
1783 env.keep_map = pmap_create();
1784 env.call = be_abi_call_new();
1785 arch_env_get_call_abi(arch_env, method_type, env.call);
1787 env.init_sp = dummy;
1788 env.calls = NEW_ARR_F(ir_node*, 0);
1792 if (be_options.pic) {
1793 irg_walk_graph(irg, fix_pic_symconsts, NULL, NULL);
1796 /* Lower all call nodes in the IRG. */
1797 process_calls(irg, &env);
1799 /* Process the IRG */
1800 modify_irg(irg, &env);
1802 /* fix call inputs for state registers */
1803 fix_call_state_inputs(irg, &env);
1805 be_abi_call_free(env.call);
1807 /* We don't need the keep map anymore. */
1808 pmap_destroy(env.keep_map);
1810 /* calls array is not needed anymore */
1811 DEL_ARR_F(env.calls);
1813 /* reroute the stack origin of the calls to the true stack origin. */
1814 exchange(dummy, env.init_sp);
1815 exchange(old_frame, get_irg_frame(irg));
1817 pmap_destroy(env.regs);
1820 void be_put_allocatable_regs(const ir_graph *irg,
1821 const arch_register_class_t *cls, bitset_t *bs)
1823 be_irg_t *birg = be_birg_from_irg(irg);
1824 unsigned *allocatable_regs = birg->allocatable_regs;
1827 assert(bitset_size(bs) == cls->n_regs);
1828 bitset_clear_all(bs);
1829 for (i = 0; i < cls->n_regs; ++i) {
1830 const arch_register_t *reg = &cls->regs[i];
1831 if (rbitset_is_set(allocatable_regs, reg->global_index))
1836 unsigned be_get_n_allocatable_regs(const ir_graph *irg,
1837 const arch_register_class_t *cls)
1839 bitset_t *bs = bitset_alloca(cls->n_regs);
1840 be_put_allocatable_regs(irg, cls, bs);
1841 return bitset_popcount(bs);
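/*
 * Usage sketch for the helpers above (cls is assumed to be some register
 * class of the current backend):
 *
 *     bitset_t *bs = bitset_alloca(cls->n_regs);
 *     be_put_allocatable_regs(irg, cls, bs);
 *     // bs now has one bit per class-local register index the allocator may use
 *     unsigned n_free = bitset_popcount(bs);   // == be_get_n_allocatable_regs(irg, cls)
 */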
1844 void be_set_allocatable_regs(const ir_graph *irg,
1845 const arch_register_class_t *cls,
1846 unsigned *raw_bitset)
1848 be_irg_t *birg = be_birg_from_irg(irg);
1849 unsigned *allocatable_regs = birg->allocatable_regs;
1852 rbitset_clear_all(raw_bitset, cls->n_regs);
1853 for (i = 0; i < cls->n_regs; ++i) {
1854 const arch_register_t *reg = &cls->regs[i];
1855 if (rbitset_is_set(allocatable_regs, reg->global_index))
1856 rbitset_set(raw_bitset, i);
1860 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi)
1861 void be_init_abi(void)
1863 FIRM_DBG_REGISTER(dbg, "firm.be.abi");