/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Manage addressing into the stackframe
 * @author  Matthias Braun
 */
27 #include "firm_types.h"
29 #include "bearch_sparc_t.h"
30 #include "sparc_new_nodes.h"
31 #include "sparc_cconv.h"
32 #include "bitfiddle.h"
37 static void set_irn_sp_bias(ir_node *node, int new_bias)
39 if (be_is_IncSP(node)) {
40 be_set_IncSP_offset(node, new_bias);
41 } else if (is_sparc_Save(node)) {
42 sparc_attr_t *attr = get_sparc_attr(node);
43 attr->immediate_value = -new_bias;
44 } else if (is_sparc_Restore(node)) {
45 sparc_attr_t *attr = get_sparc_attr(node);
46 attr->immediate_value = new_bias;
50 static void process_bias(ir_node *block, bool sp_relative, int bias,
53 mark_Block_block_visited(block);
55 /* process schedule */
56 sched_foreach(block, irn) {
59 /* set bias to nodes with entities */
60 ir_entity *entity = arch_get_frame_entity(irn);
62 int offset = get_entity_offset(entity);
64 offset += bias + SPARC_MIN_STACKSIZE;
65 arch_set_frame_offset(irn, offset);
68 /* The additional alignment bytes cannot be used
69 * anymore after alloca. */
70 if (is_sparc_SubSP(irn)) {
72 } else if (is_sparc_AddSP(irn)) {
73 assert(free_bytes == 0);
76 irn_bias = arch_get_sp_bias(irn);
79 } else if (irn_bias == SP_BIAS_RESET) {
82 /* adjust values to respect stack alignment */
83 int new_bias_unaligned;
85 irn_bias -= free_bytes;
87 new_bias_unaligned = bias + irn_bias;
89 = round_up2(new_bias_unaligned, SPARC_STACK_ALIGNMENT);
90 free_bytes = new_bias_aligned - new_bias_unaligned;
91 set_irn_sp_bias(irn, new_bias_aligned - bias);
92 bias = new_bias_aligned;
97 if (block == get_irg_end_block(get_irn_irg(block))) {
102 /* continue at the successor blocks */
103 foreach_block_succ(block, edge) {
104 ir_node *succ = get_edge_src_irn(edge);
105 if (Block_block_visited(succ))
107 process_bias(succ, sp_relative, bias, free_bytes);
111 static void adjust_entity_offsets(ir_type *type, long offset)
113 size_t n_members = get_compound_n_members(type);
116 for (i = 0; i < n_members; ++i) {
117 ir_entity *member = get_compound_member(type, i);
118 int member_offset = get_entity_offset(member);
119 set_entity_offset(member, member_offset + offset);
124 * Perform some fixups for variadic functions.
125 * To make the rest of the frontend code easier to understand we add
126 * "dummy" parameters until the number of parameters transmitted in registers.
127 * (because otherwise the backend wouldn't store the value of the register
128 * parameters into memory for the VLA magic)
130 bool sparc_variadic_fixups(ir_graph *irg, calling_convention_t *cconv)
132 ir_entity *entity = get_irg_entity(irg);
133 ir_type *mtp = get_entity_type(entity);
134 if (get_method_variadicity(mtp) != variadicity_variadic)
137 if (cconv->n_param_regs >= SPARC_N_PARAM_REGS)
141 size_t n_params = get_method_n_params(mtp);
142 type_dbg_info *dbgi = get_type_dbg_info(mtp);
143 size_t n_ress = get_method_n_ress(mtp);
145 = n_params + (SPARC_N_PARAM_REGS - cconv->n_param_regs);
146 ir_type *new_mtp = new_d_type_method(new_n_params, n_ress, dbgi);
147 ir_mode *gp_reg_mode = sparc_reg_classes[CLASS_sparc_gp].mode;
148 ir_type *gp_reg_type = get_type_for_mode(gp_reg_mode);
149 ir_type *frame_type = get_irg_frame_type(irg);
152 for (i = 0; i < n_ress; ++i) {
153 ir_type *type = get_method_res_type(mtp, i);
154 set_method_res_type(new_mtp, i, type);
156 for (i = 0; i < n_params; ++i) {
157 ir_type *type = get_method_param_type(mtp, i);
158 set_method_param_type(new_mtp, i, type);
160 for ( ; i < new_n_params; ++i) {
161 set_method_param_type(new_mtp, i, gp_reg_type);
162 new_parameter_entity(frame_type, i, gp_reg_type);
165 set_method_variadicity(new_mtp, get_method_variadicity(mtp));
166 set_method_calling_convention(new_mtp, get_method_calling_convention(mtp));
167 set_method_additional_properties(new_mtp, get_method_additional_properties(mtp));
168 set_higher_type(new_mtp, mtp);
170 set_entity_type(entity, new_mtp);
175 static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv,
176 ir_type *between_type)
178 ir_entity *va_start_entity = NULL;
179 const ir_entity *entity = get_irg_entity(irg);
180 const ir_type *mtp = get_entity_type(entity);
181 size_t n_params = get_method_n_params(mtp);
182 ir_entity **param_map = ALLOCANZ(ir_entity*, n_params);
184 ir_type *frame_type = get_irg_frame_type(irg);
185 size_t n_frame_members = get_compound_n_members(frame_type);
189 ir_type *res = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8)));
191 /* search for existing value_param entities */
192 for (f = n_frame_members; f > 0; ) {
193 ir_entity *member = get_compound_member(frame_type, --f);
196 if (!is_parameter_entity(member))
198 num = get_entity_parameter_number(member);
199 if (num == IR_VA_START_PARAMETER_NUMBER) {
200 if (va_start_entity != NULL)
201 panic("multiple va_start entities found (%+F,%+F)",
202 va_start_entity, member);
203 va_start_entity = member;
206 assert(num < n_params);
207 if (param_map[num] != NULL)
208 panic("multiple entities for parameter %u in %+F found", f, irg);
210 param_map[num] = member;
211 /* move to new arg_type */
212 set_entity_owner(member, res);
215 /* calculate offsets/create missing entities */
216 for (i = 0; i < n_params; ++i) {
217 reg_or_stackslot_t *param = &cconv->parameters[i];
218 ir_entity *entity = param_map[i];
220 if (param->reg0 != NULL) {
221 /* use reserved spill space on between type */
222 if (entity != NULL) {
223 long offset = SPARC_PARAMS_SPILL_OFFSET + i * SPARC_REGISTER_SIZE;
224 assert(i < SPARC_N_PARAM_REGS);
225 set_entity_owner(entity, between_type);
226 set_entity_offset(entity, offset);
232 entity = new_parameter_entity(res, i, param->type);
233 param->entity = entity;
234 set_entity_offset(entity, param->offset);
237 if (va_start_entity != NULL) {
238 /* sparc_variadic_fixups() fiddled with our type, find out the
239 * original number of parameters */
240 ir_type *non_lowered = get_higher_type(mtp);
241 size_t orig_n_params = get_method_n_params(non_lowered);
243 assert(get_method_variadicity(mtp) == variadicity_variadic);
244 if (orig_n_params < n_params) {
245 assert(param_map[orig_n_params] != NULL);
246 offset = get_entity_offset(param_map[orig_n_params]);
247 set_entity_owner(va_start_entity, between_type);
248 set_entity_offset(va_start_entity, offset);
250 set_entity_owner(va_start_entity, res);
251 set_entity_offset(va_start_entity, cconv->param_stack_size);
254 set_type_size_bytes(res, cconv->param_stack_size);
259 void sparc_create_stacklayout(ir_graph *irg, calling_convention_t *cconv)
261 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
262 ir_type *between_type;
263 memset(layout, 0, sizeof(*layout));
265 between_type = new_type_class(new_id_from_str("sparc_between_type"));
266 if (cconv->omit_fp) {
267 set_type_size_bytes(between_type, 0);
269 set_type_size_bytes(between_type, SPARC_MIN_STACKSIZE);
272 layout->frame_type = get_irg_frame_type(irg);
273 layout->between_type = between_type;
274 layout->arg_type = compute_arg_type(irg, cconv, between_type);
275 layout->initial_offset = 0;
276 layout->initial_bias = 0;
277 layout->sp_relative = cconv->omit_fp;
279 assert(N_FRAME_TYPES == 3);
280 layout->order[0] = layout->frame_type;
281 layout->order[1] = layout->between_type;
282 layout->order[2] = layout->arg_type;
285 /* Assign entity offsets, to all stack-related entities.
286 * The offsets are relative to the begin of the stack frame.
288 void sparc_adjust_stack_entity_offsets(ir_graph *irg)
290 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
292 /* initially the stackpointer points to the begin of our stackframe.
293 * Situation at the begin of our function:
295 * high address |-----------------------------|
297 * arg-type | stackarg 1 |
299 * |-----------------------------|
300 * | space for storing regarg0-5 |
301 * between type | pointer to aggregate return |
302 * | 16 words save are |
303 * stack pointer -> |-----------------------------|
304 * | high end of stackframe |
306 * | low end of stackframe |
307 * low address |-----------------------------|
309 ir_type *between_type = layout->between_type;
310 unsigned between_size = get_type_size_bytes(between_type);
312 ir_type *frame_type = get_irg_frame_type(irg);
313 unsigned frame_size = get_type_size_bytes(frame_type);
314 unsigned frame_align = get_type_alignment_bytes(frame_type);
316 /* There's the tricky case of the stackframe size not being a multiple
317 * of the alignment. There are 2 variants:
319 * - frame-pointer relative addressing:
320 * Increase frame_size in case it is not a multiple of the alignment as we
321 * address entities from the "top" with negative offsets
322 * - stack-pointer relative addressing:
323 * Stackframesize + SPARC_MIN_STACK_SIZE has to be aligned. Increase
324 * frame_size accordingly.
326 if (!layout->sp_relative) {
327 frame_size = (frame_size + frame_align-1) & ~(frame_align-1);
329 unsigned misalign = (SPARC_MIN_STACKSIZE+frame_size) % frame_align;
330 frame_size += misalign;
332 set_type_size_bytes(frame_type, frame_size);
334 ir_type *arg_type = layout->arg_type;
336 adjust_entity_offsets(frame_type, -(long)frame_size);
337 /* no need to adjust between type, it's already at 0 */
338 adjust_entity_offsets(arg_type, between_size);
341 void sparc_fix_stack_bias(ir_graph *irg)
343 bool sp_relative = be_get_irg_stack_layout(irg)->sp_relative;
345 ir_node *start_block = get_irg_start_block(irg);
347 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
348 inc_irg_block_visited(irg);
349 process_bias(start_block, sp_relative, 0, 0);
350 ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);