sparc: fix wrong stack-offset in omit-fp commits
[libfirm] / ir / be / sparc / sparc_stackframe.c
/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License Agreement
 * provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Manage addressing into the stackframe
 * @author  Matthias Braun
 * @version $Id$
 */
#include "config.h"

#include "firm_types.h"
#include "irnode_t.h"
#include "bearch_sparc_t.h"
#include "sparc_new_nodes.h"
#include "sparc_cconv.h"
#include "bitfiddle.h"
#include "bearch.h"
#include "benode.h"
#include "besched.h"

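/**
 * Record a stack-pointer bias on a node that modifies the stack pointer.
 *
 * be_IncSP nodes carry the bias as their offset; sparc Save/Restore nodes
 * carry it in their immediate value (negated for Save, since a Save grows
 * the stack downwards: save %sp, -size, %sp).
 */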
static void set_irn_sp_bias(ir_node *node, int new_bias)
{
        if (be_is_IncSP(node)) {
                be_set_IncSP_offset(node, new_bias);
        } else if (is_sparc_Save(node)) {
                sparc_attr_t *attr = get_sparc_attr(node);
                attr->immediate_value = -new_bias;
        } else if (is_sparc_Restore(node)) {
                sparc_attr_t *attr = get_sparc_attr(node);
                attr->immediate_value = new_bias;
        }
}

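/**
 * Walk the schedule of @p block (and, recursively, of its not yet visited
 * successor blocks) and assign the final frame offsets and stack-pointer
 * biases.
 *
 * @p bias is the stack-pointer bias at the entry of the block; a node with
 * SP_BIAS_RESET resets it to zero. Whenever a node changes the stack pointer,
 * the new bias is rounded up to SPARC_STACK_ALIGNMENT and the padding
 * introduced by the rounding is remembered in @p free_bytes so the next
 * adjustment can reuse it. Example (assuming an 8-byte SPARC_STACK_ALIGNMENT):
 * a bias of 96 followed by an adjustment of 4 is rounded up to 104, leaving
 * 4 free bytes that a later 4-byte adjustment can consume without growing
 * the stack any further.
 */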
static void process_bias(ir_node *block, bool sp_relative, int bias,
                         int free_bytes)
{
        const ir_edge_t *edge;
        ir_node         *irn;

        mark_Block_block_visited(block);

        /* process schedule */
        sched_foreach(block, irn) {
                int irn_bias;

                /* set bias to nodes with entities */
                ir_entity *entity = arch_get_frame_entity(irn);
                if (entity != NULL) {
                        int offset = get_entity_offset(entity);
                        if (sp_relative)
                                offset += bias;
                        arch_set_frame_offset(irn, offset);
                }

                irn_bias = arch_get_sp_bias(irn);
                if (irn_bias == 0) {
                        /* do nothing */
                } else if (irn_bias == SP_BIAS_RESET) {
                        bias = 0;
                } else {
                        /* adjust values to respect stack alignment */
                        int new_bias_unaligned;
                        int new_bias_aligned;
                        irn_bias -= free_bytes;

                        new_bias_unaligned = bias + irn_bias;
                        new_bias_aligned
                                = round_up2(new_bias_unaligned, SPARC_STACK_ALIGNMENT);
                        free_bytes = new_bias_aligned - new_bias_unaligned;
                        set_irn_sp_bias(irn, new_bias_aligned - bias);
                        bias = new_bias_aligned;
                }
        }

        /* continue at the successor blocks */
        foreach_block_succ(block, edge) {
                ir_node *succ = get_edge_src_irn(edge);
                if (Block_block_visited(succ))
                        continue;
                process_bias(succ, sp_relative, bias, free_bytes);
        }
}

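/**
 * Shift the offsets of all members of the compound type @p type by
 * @p offset bytes.
 */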
static void adjust_entity_offsets(ir_type *type, long offset)
{
        size_t n_members = get_compound_n_members(type);
        size_t i;

        for (i = 0; i < n_members; ++i) {
                ir_entity *member        = get_compound_member(type, i);
                int        member_offset = get_entity_offset(member);
                set_entity_offset(member, member_offset + offset);
        }
}

/**
 * Perform some fixups for variadic functions.
 * To make the rest of the frontend code easier to understand, we add "dummy"
 * parameters until the number of parameters matches the number of parameters
 * transmitted in registers (otherwise the backend wouldn't store the values
 * of the register parameters into memory for the va_arg magic).
 */
bool sparc_variadic_fixups(ir_graph *irg, calling_convention_t *cconv)
{
        ir_entity *entity = get_irg_entity(irg);
        ir_type   *mtp    = get_entity_type(entity);
        if (get_method_variadicity(mtp) != variadicity_variadic)
                return false;

        if (cconv->n_param_regs >= SPARC_N_PARAM_REGS)
                return false;

        {
        size_t         n_params     = get_method_n_params(mtp);
        type_dbg_info *dbgi         = get_type_dbg_info(mtp);
        size_t         n_ress       = get_method_n_ress(mtp);
        size_t         new_n_params
                = n_params + (SPARC_N_PARAM_REGS - cconv->n_param_regs);
        ir_type       *new_mtp      = new_d_type_method(new_n_params, n_ress, dbgi);
        ir_mode       *gp_reg_mode  = sparc_reg_classes[CLASS_sparc_gp].mode;
        ir_type       *gp_reg_type  = get_type_for_mode(gp_reg_mode);
        ir_type       *frame_type   = get_irg_frame_type(irg);
        size_t         i;

        for (i = 0; i < n_ress; ++i) {
                ir_type *type = get_method_res_type(mtp, i);
                set_method_res_type(new_mtp, i, type);
        }
        for (i = 0; i < n_params; ++i) {
                ir_type *type = get_method_param_type(mtp, i);
                set_method_param_type(new_mtp, i, type);
        }
        for ( ; i < new_n_params; ++i) {
                set_method_param_type(new_mtp, i, gp_reg_type);
                new_parameter_entity(frame_type, i, gp_reg_type);
        }

        set_method_variadicity(new_mtp, get_method_variadicity(mtp));
        set_method_calling_convention(new_mtp, get_method_calling_convention(mtp));
        set_method_additional_properties(new_mtp, get_method_additional_properties(mtp));
        set_higher_type(new_mtp, mtp);

        set_entity_type(entity, new_mtp);
        }
        return true;
}

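/**
 * Create the type describing the incoming arguments on the stack
 * ("arg_type") for @p irg.
 *
 * Existing parameter entities are moved off the frame type: entities for
 * parameters passed in registers go to the reserved spill slots in the
 * between type, all others get their stack offset from the calling
 * convention. Missing stack-parameter entities are created on the fly, and
 * the va_start entity (if any) is placed at the location of the first
 * variadic argument.
 */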
static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv,
                                 ir_type *between_type)
{
        ir_entity       *va_start_entity = NULL;
        const ir_entity *entity          = get_irg_entity(irg);
        const ir_type   *mtp             = get_entity_type(entity);
        size_t           n_params        = get_method_n_params(mtp);
        ir_entity      **param_map       = ALLOCANZ(ir_entity*, n_params);

        ir_type *frame_type      = get_irg_frame_type(irg);
        size_t   n_frame_members = get_compound_n_members(frame_type);
        size_t   f;
        size_t   i;

        ir_type *res = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8)));

        /* search for existing value_param entities */
        for (f = n_frame_members; f > 0; ) {
                ir_entity *member = get_compound_member(frame_type, --f);
                size_t     num;

                if (!is_parameter_entity(member))
                        continue;
                num = get_entity_parameter_number(member);
                if (num == IR_VA_START_PARAMETER_NUMBER) {
                        if (va_start_entity != NULL)
                                panic("multiple va_start entities found (%+F,%+F)",
                                      va_start_entity, member);
                        va_start_entity = member;
                        continue;
                }
                assert(num < n_params);
                if (param_map[num] != NULL)
                        panic("multiple entities for parameter %u in %+F found", num, irg);

                param_map[num] = member;
                /* move to new arg_type */
                set_entity_owner(member, res);
        }

        /* calculate offsets/create missing entities */
        for (i = 0; i < n_params; ++i) {
                reg_or_stackslot_t *param  = &cconv->parameters[i];
                ir_entity          *entity = param_map[i];

                if (param->reg0 != NULL) {
                        /* use reserved spill space on between type */
                        if (entity != NULL) {
                                long offset = SPARC_PARAMS_SPILL_OFFSET + i * SPARC_REGISTER_SIZE;
                                assert(i < SPARC_N_PARAM_REGS);
                                set_entity_owner(entity, between_type);
                                set_entity_offset(entity, offset);
                        }
                        continue;
                }

                if (entity == NULL)
                        entity = new_parameter_entity(res, i, param->type);
                param->entity = entity;
                set_entity_offset(entity, param->offset);
        }

        if (va_start_entity != NULL) {
                /* sparc_variadic_fixups() fiddled with our type, find out the
                 * original number of parameters */
                ir_type *non_lowered   = get_higher_type(mtp);
                size_t   orig_n_params = get_method_n_params(non_lowered);
                long     offset;
                assert(get_method_variadicity(mtp) == variadicity_variadic);
                if (orig_n_params < n_params) {
                        assert(param_map[orig_n_params] != NULL);
                        offset = get_entity_offset(param_map[orig_n_params]);
                        set_entity_owner(va_start_entity, between_type);
                        set_entity_offset(va_start_entity, offset);
                } else {
                        set_entity_owner(va_start_entity, res);
                        set_entity_offset(va_start_entity, cconv->param_stack_size);
                }
        }
        set_type_size_bytes(res, cconv->param_stack_size);

        return res;
}

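/**
 * Set up the be_stack_layout_t for @p irg: frame type, between type and
 * argument type. The between type covers the reserved SPARC_MIN_STACKSIZE
 * area, or is empty when the frame pointer is omitted.
 */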
void sparc_create_stacklayout(ir_graph *irg, calling_convention_t *cconv)
{
        be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
        ir_type           *between_type;
        memset(layout, 0, sizeof(*layout));

        between_type = new_type_class(new_id_from_str("sparc_between_type"));
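        /* In the omit-fp case the code is stack-pointer relative and the
         * reserved SPARC_MIN_STACKSIZE area is not modelled as a between
         * type; it is accounted for via the initial bias in
         * sparc_fix_stack_bias() instead. */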
        if (cconv->omit_fp) {
                set_type_size_bytes(between_type, 0);
        } else {
                set_type_size_bytes(between_type, SPARC_MIN_STACKSIZE);
        }

        layout->frame_type     = get_irg_frame_type(irg);
        layout->between_type   = between_type;
        layout->arg_type       = compute_arg_type(irg, cconv, between_type);
        layout->initial_offset = 0;
        layout->initial_bias   = 0;
        layout->sp_relative    = cconv->omit_fp;

        assert(N_FRAME_TYPES == 3);
        layout->order[0] = layout->frame_type;
        layout->order[1] = layout->between_type;
        layout->order[2] = layout->arg_type;
}

/**
 * Assign entity offsets to all stack-related entities.
 * The offsets are relative to the beginning of the stack frame.
 */
static void process_frame_types(ir_graph *irg)
{
        be_stack_layout_t *layout = be_get_irg_stack_layout(irg);

        /* initially the stack pointer points to the beginning of our stackframe.
         * Situation at the beginning of our function:
         *
         *      high address |-----------------------------|
         *                   |            ...              |
         *          arg-type |         stackarg 1          |
         *                   |         stackarg 0          |
         *                   |-----------------------------|
         *                   | space for storing regarg0-5 |
         *      between type | pointer to aggregate return |
         *                   |      16 word save area      |
         *  stack pointer -> |-----------------------------|
         *                   |    high end of stackframe   |
         *                   |            ...              |
         *                   |    low end of stackframe    |
         *      low address  |-----------------------------|
         */
        ir_type *between_type = layout->between_type;
        unsigned between_size = get_type_size_bytes(between_type);

        ir_type *frame_type = get_irg_frame_type(irg);
        unsigned frame_size = get_type_size_bytes(frame_type);

        ir_type *arg_type = layout->arg_type;

        adjust_entity_offsets(frame_type, -(long)frame_size);
        /* no need to adjust between type, it's already at 0 */
        adjust_entity_offsets(arg_type, between_size);
}

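/**
 * Fix up all stack-relative offsets in @p irg: assign final offsets to the
 * frame entities and propagate the stack-pointer bias through all blocks.
 * With an omitted frame pointer everything is addressed relative to the
 * stack pointer, so the initial bias starts at the reserved
 * SPARC_MIN_STACKSIZE area.
 */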
void sparc_fix_stack_bias(ir_graph *irg)
{
        int initial_bias;
        bool sp_relative = be_get_irg_stack_layout(irg)->sp_relative;

        ir_node *start_block = get_irg_start_block(irg);

        process_frame_types(irg);

        ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
        inc_irg_block_visited(irg);
        initial_bias = 0;
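        /* In the omit-fp (sp-relative) case the between area was not reserved
         * in sparc_create_stacklayout(), so the bias must start at
         * SPARC_MIN_STACKSIZE to skip the reserved area (register window save
         * area, aggregate return pointer and register argument spill slots)
         * that sits directly above the stack pointer. */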
        if (sp_relative)
                initial_bias = SPARC_MIN_STACKSIZE;
        process_bias(start_block, sp_relative, initial_bias, 0);
        ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);
}