* @file
* @brief Manage addressing into the stackframe
* @author Matthias Braun
- * @version $Id$
*/
#include "config.h"
+#include "beirg.h"
+#include "error.h"
#include "firm_types.h"
#include "irnode_t.h"
#include "bearch_sparc_t.h"
#include "sparc_new_nodes.h"
#include "sparc_cconv.h"
#include "bitfiddle.h"
-#include "../bearch.h"
-#include "../benode.h"
-#include "../besched.h"
+#include "bearch.h"
+#include "benode.h"
+#include "besched.h"
static void set_irn_sp_bias(ir_node *node, int new_bias)
{
}
}
-static void process_bias(ir_node *block, bool sp_relative, int bias, int free_bytes)
+static void process_bias(ir_node *block, bool sp_relative, int bias,
+ int free_bytes)
{
- const ir_edge_t *edge;
- ir_node *irn;
-
mark_Block_block_visited(block);
/* process schedule */
if (entity != NULL) {
int offset = get_entity_offset(entity);
if (sp_relative)
- offset -= bias;
+ offset += bias + SPARC_MIN_STACKSIZE;
arch_set_frame_offset(irn, offset);
}
+ /* The additional alignment bytes cannot be used
+ * anymore after alloca. */
+ if (is_sparc_SubSP(irn)) {
+ free_bytes = 0;
+ } else if (is_sparc_AddSP(irn)) {
+ assert(free_bytes == 0);
+ }
+
irn_bias = arch_get_sp_bias(irn);
if (irn_bias == 0) {
/* do nothing */
irn_bias -= free_bytes;
new_bias_unaligned = bias + irn_bias;
- new_bias_aligned = round_up2(new_bias_unaligned, 8);
+ new_bias_aligned
+ = round_up2(new_bias_unaligned, SPARC_STACK_ALIGNMENT);
free_bytes = new_bias_aligned - new_bias_unaligned;
set_irn_sp_bias(irn, new_bias_aligned - bias);
bias = new_bias_aligned;
}
}
-static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv)
+/**
+ * Perform some fixups for variadic functions.
+ * To make the rest of the frontend code easier to understand we add
+ * "dummy" parameters until the number of parameters transmitted in registers.
+ * (because otherwise the backend wouldn't store the value of the register
+ * parameters into memory for the VLA magic)
+ *
+ * @param irg    graph of the function to fix up
+ * @param cconv  calling convention computed for the function
+ * @return       true if the method type of the entity was replaced,
+ *               false if nothing had to be done
+ */
+bool sparc_variadic_fixups(ir_graph *irg, calling_convention_t *cconv)
+{
+	ir_entity *entity = get_irg_entity(irg);
+	ir_type *mtp = get_entity_type(entity);
+	/* only variadic functions need the dummy parameters */
+	if (get_method_variadicity(mtp) != variadicity_variadic)
+		return false;
+
+	/* nothing to do when the fixed parameters already use up all
+	 * parameter registers */
+	if (cconv->n_param_regs >= SPARC_N_PARAM_REGS)
+		return false;
+
+	{
+		size_t n_params = get_method_n_params(mtp);
+		type_dbg_info *dbgi = get_type_dbg_info(mtp);
+		size_t n_ress = get_method_n_ress(mtp);
+		/* pad the parameter count up to the number of parameter registers */
+		size_t new_n_params
+			= n_params + (SPARC_N_PARAM_REGS - cconv->n_param_regs);
+		ir_type *new_mtp = new_d_type_method(new_n_params, n_ress, dbgi);
+		ir_mode *gp_reg_mode = sparc_reg_classes[CLASS_sparc_gp].mode;
+		ir_type *gp_reg_type = get_type_for_mode(gp_reg_mode);
+		ir_type *frame_type = get_irg_frame_type(irg);
+		size_t i;
+
+		/* copy the result types unchanged */
+		for (i = 0; i < n_ress; ++i) {
+			ir_type *type = get_method_res_type(mtp, i);
+			set_method_res_type(new_mtp, i, type);
+		}
+		/* copy the original parameter types unchanged */
+		for (i = 0; i < n_params; ++i) {
+			ir_type *type = get_method_param_type(mtp, i);
+			set_method_param_type(new_mtp, i, type);
+		}
+		/* append register-sized dummy parameters and create parameter
+		 * entities for them on the frame */
+		for ( ; i < new_n_params; ++i) {
+			set_method_param_type(new_mtp, i, gp_reg_type);
+			new_parameter_entity(frame_type, i, gp_reg_type);
+		}
+
+		set_method_variadicity(new_mtp, get_method_variadicity(mtp));
+		set_method_calling_convention(new_mtp, get_method_calling_convention(mtp));
+		set_method_additional_properties(new_mtp, get_method_additional_properties(mtp));
+		/* remember the original type so later phases (e.g. the va_start
+		 * handling in compute_arg_type) can recover it */
+		set_higher_type(new_mtp, mtp);
+
+		set_entity_type(entity, new_mtp);
+	}
+	return true;
+}
+
+/**
+ * Build the argument type of the function: collect the parameter entities
+ * from the frame type and move them either into the argument type or — for
+ * parameters passed in registers — into the reserved spill slots on the
+ * between type, assigning their final offsets.
+ *
+ * @param irg           the graph
+ * @param cconv         calling convention with the per-parameter locations
+ * @param between_type  type containing the register-parameter spill area
+ */
+static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv,
+                                 ir_type *between_type)
{
-	ir_entity *entity = get_irg_entity(irg);
-	ir_type *mtp = get_entity_type(entity);
-	size_t n_params = get_method_n_params(mtp);
-	ir_entity **param_map = ALLOCANZ(ir_entity*, n_params);
+	ir_entity *va_start_entity = NULL;
+	const ir_entity *entity = get_irg_entity(irg);
+	const ir_type *mtp = get_entity_type(entity);
+	size_t n_params = get_method_n_params(mtp);
+	ir_entity **param_map = ALLOCANZ(ir_entity*, n_params);
	ir_type *frame_type = get_irg_frame_type(irg);
	size_t n_frame_members = get_compound_n_members(frame_type);
	/* collect the existing parameter entities from the frame type
	 * (iterating backwards because members get removed) */
	for (f = n_frame_members; f > 0; ) {
		ir_entity *member = get_compound_member(frame_type, --f);
		size_t num;
-		const reg_or_stackslot_t *param;
		if (!is_parameter_entity(member))
			continue;
		num = get_entity_parameter_number(member);
+		/* the va_start entity is placed separately below */
+		if (num == IR_VA_START_PARAMETER_NUMBER) {
+			if (va_start_entity != NULL)
+				panic("multiple va_start entities found (%+F,%+F)",
+				      va_start_entity, member);
+			va_start_entity = member;
+			continue;
+		}
		assert(num < n_params);
		if (param_map[num] != NULL)
			panic("multiple entities for parameter %u in %+F found", f, irg);
-		param = &cconv->parameters[num];
-		if (param->reg0 != NULL)
-			continue;
-
		param_map[num] = member;
		/* move to new arg_type */
		set_entity_owner(member, res);
	}
+	/* calculate offsets/create missing entities */
	for (i = 0; i < n_params; ++i) {
-		reg_or_stackslot_t *param = &cconv->parameters[i];
-		ir_entity *entity;
-
-		if (param->reg0 != NULL)
+		reg_or_stackslot_t *param = &cconv->parameters[i];
+		ir_entity *entity = param_map[i];
+
+		if (param->reg0 != NULL) {
+			/* use reserved spill space on between type */
+			if (entity != NULL) {
+				long offset = SPARC_PARAMS_SPILL_OFFSET + i * SPARC_REGISTER_SIZE;
+				assert(i < SPARC_N_PARAM_REGS);
+				set_entity_owner(entity, between_type);
+				set_entity_offset(entity, offset);
+			}
			continue;
-		entity = param_map[i];
+		}
+
		if (entity == NULL)
			entity = new_parameter_entity(res, i, param->type);
		param->entity = entity;
		set_entity_offset(entity, param->offset);
	}
+
+	/* place the va_start entity at the position of the first variadic
+	 * parameter */
+	if (va_start_entity != NULL) {
+		/* sparc_variadic_fixups() fiddled with our type, find out the
+		 * original number of parameters */
+		ir_type *non_lowered = get_higher_type(mtp);
+		size_t orig_n_params = get_method_n_params(non_lowered);
+		long offset;
+		assert(get_method_variadicity(mtp) == variadicity_variadic);
+		if (orig_n_params < n_params) {
+			/* dummy parameters were appended: the first one marks the
+			 * variadic part, reuse its offset on the between type */
+			assert(param_map[orig_n_params] != NULL);
+			offset = get_entity_offset(param_map[orig_n_params]);
+			set_entity_owner(va_start_entity, between_type);
+			set_entity_offset(va_start_entity, offset);
+		} else {
+			/* all parameter registers in use: the variadic part starts
+			 * right behind the stack parameters */
+			set_entity_owner(va_start_entity, res);
+			set_entity_offset(va_start_entity, cconv->param_stack_size);
+		}
+	}
	set_type_size_bytes(res, cconv->param_stack_size);
	return res;
memset(layout, 0, sizeof(*layout));
between_type = new_type_class(new_id_from_str("sparc_between_type"));
- set_type_size_bytes(between_type, SPARC_MIN_STACKSIZE);
+ if (cconv->omit_fp) {
+ set_type_size_bytes(between_type, 0);
+ } else {
+ set_type_size_bytes(between_type, SPARC_MIN_STACKSIZE);
+ }
layout->frame_type = get_irg_frame_type(irg);
layout->between_type = between_type;
- layout->arg_type = compute_arg_type(irg, cconv);
+ layout->arg_type = compute_arg_type(irg, cconv, between_type);
layout->initial_offset = 0;
layout->initial_bias = 0;
layout->sp_relative = cconv->omit_fp;
/* Assign entity offsets, to all stack-related entities.
* The offsets are relative to the begin of the stack frame.
*/
-static void process_frame_types(ir_graph *irg)
+void sparc_adjust_stack_entity_offsets(ir_graph *irg)
{
be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ir_type *between_type = layout->between_type;
unsigned between_size = get_type_size_bytes(between_type);
- ir_type *frame_type = get_irg_frame_type(irg);
- unsigned frame_size = get_type_size_bytes(frame_type);
+ ir_type *frame_type = get_irg_frame_type(irg);
+ unsigned frame_size = get_type_size_bytes(frame_type);
+ unsigned frame_align = get_type_alignment_bytes(frame_type);
+
+ /* There's the tricky case of the stackframe size not being a multiple
+ * of the alignment. There are 2 variants:
+ *
+ * - frame-pointer relative addressing:
+ * Increase frame_size in case it is not a multiple of the alignment as we
+ * address entities from the "top" with negative offsets
+ * - stack-pointer relative addressing:
+ * Stackframesize + SPARC_MIN_STACK_SIZE has to be aligned. Increase
+ * frame_size accordingly.
+ */
+ if (!layout->sp_relative) {
+ frame_size = (frame_size + frame_align-1) & ~(frame_align-1);
+ } else {
+ unsigned misalign = (SPARC_MIN_STACKSIZE+frame_size) % frame_align;
+ frame_size += misalign;
+ }
+ set_type_size_bytes(frame_type, frame_size);
ir_type *arg_type = layout->arg_type;
+/**
+ * Entry point of the stack bias fixup: processes the blocks starting at
+ * the start block and assigns stackpointer biases / frame entity offsets.
+ */
void sparc_fix_stack_bias(ir_graph *irg)
{
-	ir_node *start_block = get_irg_start_block(irg);
+	/* true when entities are addressed stackpointer-relative, i.e. the
+	 * frame pointer is omitted (see layout->sp_relative = cconv->omit_fp) */
+	bool sp_relative = be_get_irg_stack_layout(irg)->sp_relative;
-	process_frame_types(irg);
+	ir_node *start_block = get_irg_start_block(irg);
	ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
	inc_irg_block_visited(irg);
+	/* start with bias 0 and no free alignment bytes */
-	process_bias(start_block, be_get_irg_stack_layout(irg)->sp_relative, 0, 0);
+	process_bias(start_block, sp_relative, 0, 0);
	ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);
}