/* [libfirm] ir/be/sparc/sparc_stackframe.c
 * (unrelated page-header residue removed: commit subject "becopyopt: Inline
 *  the thin wrapper nodes_interfere(), so we do not need to fetch...") */
1 /*
2  * This file is part of libFirm.
3  * Copyright (C) 2012 University of Karlsruhe.
4  */
5
6 /**
7  * @file
8  * @brief   Manage addressing into the stackframe
9  * @author  Matthias Braun
10  */
11 #include "config.h"
12
13 #include "beirg.h"
14 #include "error.h"
15 #include "firm_types.h"
16 #include "irnode_t.h"
17 #include "bearch_sparc_t.h"
18 #include "sparc_new_nodes.h"
19 #include "sparc_cconv.h"
20 #include "bitfiddle.h"
21 #include "bearch.h"
22 #include "benode.h"
23 #include "besched.h"
24
25 static void set_irn_sp_bias(ir_node *node, int new_bias)
26 {
27         if (be_is_IncSP(node)) {
28                 be_set_IncSP_offset(node, new_bias);
29         } else if (is_sparc_Save(node)) {
30                 sparc_attr_t *attr = get_sparc_attr(node);
31                 attr->immediate_value = -new_bias;
32         } else if (is_sparc_Restore(node)) {
33                 sparc_attr_t *attr = get_sparc_attr(node);
34                 attr->immediate_value = new_bias;
35         }
36 }
37
/**
 * Walk the schedule of @p block and recursively of its not-yet-visited
 * successors: assign final frame offsets to nodes with frame entities and
 * write the accumulated stack-pointer bias into each stack-modifying node.
 *
 * @param block        block to process; it is marked visited on entry
 * @param sp_relative  if true, entity offsets are addressed relative to the
 *                     stack pointer and must include the current bias
 * @param bias         stack-pointer bias at the entry of @p block
 * @param free_bytes   padding bytes introduced by the previous alignment
 *                     round-up that the next adjustment may reuse
 */
static void process_bias(ir_node *block, bool sp_relative, int bias,
                         int free_bytes)
{
	mark_Block_block_visited(block);

	/* process schedule */
	sched_foreach(block, irn) {
		int irn_bias;

		/* set bias to nodes with entities */
		ir_entity *entity = arch_get_frame_entity(irn);
		if (entity != NULL) {
			int offset = get_entity_offset(entity);
			if (sp_relative)
				offset += bias + SPARC_MIN_STACKSIZE;
			arch_set_frame_offset(irn, offset);
		}

		/* The additional alignment bytes cannot be used
		 * anymore after alloca. */
		if (is_sparc_SubSP(irn)) {
			free_bytes = 0;
		} else if (is_sparc_AddSP(irn)) {
			assert(free_bytes == 0);
		}

		irn_bias = arch_get_sp_bias(irn);
		if (irn_bias == 0) {
			/* do nothing */
		} else if (irn_bias == SP_BIAS_RESET) {
			bias = 0;
		} else {
			/* adjust values to respect stack alignment */
			int new_bias_unaligned;
			int new_bias_aligned;
			/* first consume padding left over from the last round-up */
			irn_bias -= free_bytes;

			new_bias_unaligned = bias + irn_bias;
			new_bias_aligned
				= round_up2(new_bias_unaligned, SPARC_STACK_ALIGNMENT);
			/* remember how much padding the round-up introduced */
			free_bytes = new_bias_aligned - new_bias_unaligned;
			set_irn_sp_bias(irn, new_bias_aligned - bias);
			bias = new_bias_aligned;
		}
	}

#ifndef NDEBUG
	/* all stack adjustments must cancel out at the end of the graph */
	if (block == get_irg_end_block(get_irn_irg(block))) {
		assert(bias == 0);
	}
#endif

	/* continue at the successor blocks */
	foreach_block_succ(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		if (Block_block_visited(succ))
			continue;
		process_bias(succ, sp_relative, bias, free_bytes);
	}
}
98
99 static void adjust_entity_offsets(ir_type *type, long offset)
100 {
101         size_t n_members = get_compound_n_members(type);
102         size_t i;
103
104         for (i = 0; i < n_members; ++i) {
105                 ir_entity *member        = get_compound_member(type, i);
106                 int        member_offset = get_entity_offset(member);
107                 set_entity_offset(member, member_offset + offset);
108         }
109 }
110
111 /**
112  * Perform some fixups for variadic functions.
113  * To make the rest of the frontend code easier to understand we add
114  * "dummy" parameters until the number of parameters transmitted in registers.
115  * (because otherwise the backend wouldn't store the value of the register
116  *  parameters into memory for the VLA magic)
117  */
118 bool sparc_variadic_fixups(ir_graph *irg, calling_convention_t *cconv)
119 {
120         ir_entity *entity = get_irg_entity(irg);
121         ir_type   *mtp    = get_entity_type(entity);
122         if (get_method_variadicity(mtp) != variadicity_variadic)
123                 return false;
124
125         if (cconv->n_param_regs >= SPARC_N_PARAM_REGS)
126                 return false;
127
128         {
129         size_t         n_params     = get_method_n_params(mtp);
130         type_dbg_info *dbgi         = get_type_dbg_info(mtp);
131         size_t         n_ress       = get_method_n_ress(mtp);
132         size_t         new_n_params
133                 = n_params + (SPARC_N_PARAM_REGS - cconv->n_param_regs);
134         ir_type       *new_mtp      = new_d_type_method(new_n_params, n_ress, dbgi);
135         ir_mode       *gp_reg_mode  = sparc_reg_classes[CLASS_sparc_gp].mode;
136         ir_type       *gp_reg_type  = get_type_for_mode(gp_reg_mode);
137         ir_type       *frame_type   = get_irg_frame_type(irg);
138         size_t         i;
139
140         for (i = 0; i < n_ress; ++i) {
141                 ir_type *type = get_method_res_type(mtp, i);
142                 set_method_res_type(new_mtp, i, type);
143         }
144         for (i = 0; i < n_params; ++i) {
145                 ir_type *type = get_method_param_type(mtp, i);
146                 set_method_param_type(new_mtp, i, type);
147         }
148         for ( ; i < new_n_params; ++i) {
149                 set_method_param_type(new_mtp, i, gp_reg_type);
150                 new_parameter_entity(frame_type, i, gp_reg_type);
151         }
152
153         set_method_variadicity(new_mtp, get_method_variadicity(mtp));
154         set_method_calling_convention(new_mtp, get_method_calling_convention(mtp));
155         set_method_additional_properties(new_mtp, get_method_additional_properties(mtp));
156         set_higher_type(new_mtp, mtp);
157
158         set_entity_type(entity, new_mtp);
159         }
160         return true;
161 }
162
163 static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv,
164                                  ir_type *between_type)
165 {
166         ir_entity       *va_start_entity = NULL;
167         const ir_entity *entity          = get_irg_entity(irg);
168         const ir_type   *mtp             = get_entity_type(entity);
169         size_t           n_params        = get_method_n_params(mtp);
170         ir_entity      **param_map       = ALLOCANZ(ir_entity*, n_params);
171
172         ir_type *frame_type      = get_irg_frame_type(irg);
173         size_t   n_frame_members = get_compound_n_members(frame_type);
174         size_t   f;
175         size_t   i;
176
177         ir_type *res = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8)));
178
179         /* search for existing value_param entities */
180         for (f = n_frame_members; f > 0; ) {
181                 ir_entity *member = get_compound_member(frame_type, --f);
182                 size_t     num;
183
184                 if (!is_parameter_entity(member))
185                         continue;
186                 num = get_entity_parameter_number(member);
187                 if (num == IR_VA_START_PARAMETER_NUMBER) {
188                         if (va_start_entity != NULL)
189                                 panic("multiple va_start entities found (%+F,%+F)",
190                                       va_start_entity, member);
191                         va_start_entity = member;
192                         continue;
193                 }
194                 assert(num < n_params);
195                 if (param_map[num] != NULL)
196                         panic("multiple entities for parameter %u in %+F found", f, irg);
197
198                 param_map[num] = member;
199                 /* move to new arg_type */
200                 set_entity_owner(member, res);
201         }
202
203         /* calculate offsets/create missing entities */
204         for (i = 0; i < n_params; ++i) {
205                 reg_or_stackslot_t *param  = &cconv->parameters[i];
206                 ir_entity          *entity = param_map[i];
207
208                 if (param->reg0 != NULL) {
209                         /* use reserved spill space on between type */
210                         if (entity != NULL) {
211                                 long offset = SPARC_PARAMS_SPILL_OFFSET + i * SPARC_REGISTER_SIZE;
212                                 assert(i < SPARC_N_PARAM_REGS);
213                                 set_entity_owner(entity, between_type);
214                                 set_entity_offset(entity, offset);
215                         }
216                         continue;
217                 }
218
219                 if (entity == NULL)
220                         entity = new_parameter_entity(res, i, param->type);
221                 param->entity = entity;
222                 set_entity_offset(entity, param->offset);
223         }
224
225         if (va_start_entity != NULL) {
226                 /* sparc_variadic_fixups() fiddled with our type, find out the
227                  * original number of parameters */
228                 ir_type *non_lowered   = get_higher_type(mtp);
229                 size_t   orig_n_params = get_method_n_params(non_lowered);
230                 long     offset;
231                 assert(get_method_variadicity(mtp) == variadicity_variadic);
232                 if (orig_n_params < n_params) {
233                         assert(param_map[orig_n_params] != NULL);
234                         offset = get_entity_offset(param_map[orig_n_params]);
235                         set_entity_owner(va_start_entity, between_type);
236                         set_entity_offset(va_start_entity, offset);
237                 } else {
238                         set_entity_owner(va_start_entity, res);
239                         set_entity_offset(va_start_entity, cconv->param_stack_size);
240                 }
241         }
242         set_type_size_bytes(res, cconv->param_stack_size);
243
244         return res;
245 }
246
247 void sparc_create_stacklayout(ir_graph *irg, calling_convention_t *cconv)
248 {
249         be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
250         ir_type           *between_type;
251         memset(layout, 0, sizeof(*layout));
252
253         between_type = new_type_class(new_id_from_str("sparc_between_type"));
254         if (cconv->omit_fp) {
255                 set_type_size_bytes(between_type, 0);
256         } else {
257                 set_type_size_bytes(between_type, SPARC_MIN_STACKSIZE);
258         }
259
260         layout->frame_type     = get_irg_frame_type(irg);
261         layout->between_type   = between_type;
262         layout->arg_type       = compute_arg_type(irg, cconv, between_type);
263         layout->initial_offset = 0;
264         layout->initial_bias   = 0;
265         layout->sp_relative    = cconv->omit_fp;
266
267         assert(N_FRAME_TYPES == 3);
268         layout->order[0] = layout->frame_type;
269         layout->order[1] = layout->between_type;
270         layout->order[2] = layout->arg_type;
271 }
272
273 /* Assign entity offsets, to all stack-related entities.
274  * The offsets are relative to the begin of the stack frame.
275  */
void sparc_adjust_stack_entity_offsets(ir_graph *irg)
{
	be_stack_layout_t *layout = be_get_irg_stack_layout(irg);

	/* initially the stackpointer points to the begin of our stackframe.
	 * Situation at the begin of our function:
	 *
	 *      high address |-----------------------------|
	 *                   |            ...              |
	 *          arg-type |         stackarg 1          |
	 *                   |         stackarg 0          |
	 *                   |-----------------------------|
	 *                   | space for storing regarg0-5 |
	 *      between type | pointer to aggregate return |
	 *                   |      16 words save area     |
	 *  stack pointer -> |-----------------------------|
	 *                   |    high end of stackframe   |
	 *                   |            ...              |
	 *                   |    low end of stackframe    |
	 *      low address  |-----------------------------|
	 */
	ir_type *between_type = layout->between_type;
	unsigned between_size = get_type_size_bytes(between_type);

	ir_type *frame_type  = get_irg_frame_type(irg);
	unsigned frame_size  = get_type_size_bytes(frame_type);
	unsigned frame_align = get_type_alignment_bytes(frame_type);

	/* There's the tricky case of the stackframe size not being a multiple
	 * of the alignment. There are 2 variants:
	 *
	 * - frame-pointer relative addressing:
	 *   Increase frame_size in case it is not a multiple of the alignment as we
	 *   address entities from the "top" with negative offsets
	 * - stack-pointer relative addressing:
	 *   Stackframesize + SPARC_MIN_STACKSIZE has to be aligned. Increase
	 *   frame_size accordingly.
	 */
	if (!layout->sp_relative) {
		/* round up to the next multiple of frame_align (a power of two) */
		frame_size = (frame_size + frame_align-1) & ~(frame_align-1);
	} else {
		/* NOTE(review): adding misalign only makes
		 * SPARC_MIN_STACKSIZE+frame_size aligned when misalign is 0 or
		 * frame_align/2 (e.g. sizes that are multiples of frame_align/2);
		 * a general round-up would add frame_align-misalign instead —
		 * confirm the implied invariant always holds. */
		unsigned misalign = (SPARC_MIN_STACKSIZE+frame_size) % frame_align;
		frame_size += misalign;
	}
	set_type_size_bytes(frame_type, frame_size);

	ir_type *arg_type = layout->arg_type;

	/* frame entities are addressed downwards from the frame top */
	adjust_entity_offsets(frame_type, -(long)frame_size);
	/* no need to adjust between type, it's already at 0 */
	adjust_entity_offsets(arg_type, between_size);
}
328
329 void sparc_fix_stack_bias(ir_graph *irg)
330 {
331         bool sp_relative = be_get_irg_stack_layout(irg)->sp_relative;
332
333         ir_node *start_block = get_irg_start_block(irg);
334
335         ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
336         inc_irg_block_visited(irg);
337         process_bias(start_block, sp_relative, 0, 0);
338         ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);
339 }