#include "irdump.h"
#include "lowering.h"
#include "lower_dw.h"
+#include "lower_alloc.h"
#include "lower_builtins.h"
#include "lower_calls.h"
#include "lower_softfloat.h"
ir_graph *irg = get_irp_irg(i);
ir_lower_mode_b(irg, &lower_mode_b_config);
lower_switch(irg, 4, 256, false);
+ lower_alloc(irg, SPARC_STACK_ALIGNMENT, false, -SPARC_MIN_STACKSIZE);
}
for (i = 0; i < n_irgs; ++i) {
#define SPARC_AGGREGATE_RETURN_OFFSET 64
#define SPARC_PARAMS_SPILL_OFFSET 68
#define SPARC_N_PARAM_REGS 6
+#define SPARC_STACK_ALIGNMENT 8
static inline bool sparc_is_value_imm_encodeable(int32_t value)
{
},
RestoreZero => {
- emit => '. restore',
reg_req => { in => [ "frame_pointer" ], out => [ "sp:I|S" ] },
ins => [ "frame_pointer" ],
outs => [ "stack" ],
+ emit => '. restore',
mode => $mode_gp,
},
SubSP => {
- reg_req => { in => [ "sp", "gp", "none" ], out => [ "sp:I|S", "gp", "none" ] },
- ins => [ "stack", "size", "mem" ],
- outs => [ "stack", "addr", "M" ],
- emit => ". sub %S0, %S1, %D0\n",
+ reg_req => { in => [ "sp", "gp" ], out => [ "sp:I|S" ] },
+ ins => [ "stack", "size" ],
+ outs => [ "stack" ],
+ emit => ". sub %S0, %S1, %D0\n",
+ mode => $mode_gp,
},
AddSP => {
- reg_req => { in => [ "sp", "gp", "none" ], out => [ "sp:I|S", "none" ] },
- ins => [ "stack", "size", "mem" ],
- outs => [ "stack", "M" ],
- emit => ". add %S0, %S1, %D0\n",
+ reg_req => { in => [ "sp", "gp" ], out => [ "sp:I|S" ] },
+ ins => [ "stack", "size" ],
+ outs => [ "stack" ],
+ emit => ". add %S0, %S1, %D0\n",
+ mode => $mode_gp,
},
FrameAddr => {
irn_bias -= free_bytes;
new_bias_unaligned = bias + irn_bias;
- new_bias_aligned = round_up2(new_bias_unaligned, 8);
+ new_bias_aligned
+ = round_up2(new_bias_unaligned, SPARC_STACK_ALIGNMENT);
free_bytes = new_bias_aligned - new_bias_unaligned;
set_irn_sp_bias(irn, new_bias_aligned - bias);
bias = new_bias_aligned;
return new_bd_sparc_FrameAddr(dbgi, new_block, new_ptr, entity, 0);
}
+/**
+ * Transform a (stack-) Alloc node into sparc stack-pointer arithmetic.
+ *
+ * The stack pointer is decremented by the requested byte size (sparc stack
+ * grows downwards): a constant size becomes a be_IncSP, a variable size
+ * becomes a sparc SubSP node. The new stack value is recorded in
+ * node_to_stack so later stack users find it; the Alloc result itself is
+ * the stack value *before* the decrement, i.e. the address of the freshly
+ * allocated area.
+ */
+static ir_node *gen_Alloc(ir_node *node)
+{
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_type *type = get_Alloc_type(node);
+ ir_node *size = get_Alloc_count(node);
+ ir_node *stack_pred = get_stack_pointer_for(node);
+ ir_node *subsp;
+ if (get_Alloc_where(node) != stack_alloc)
+ panic("only stack-alloc supported in sparc backend (at %+F)", node);
+ /* lowerer should have transformed all allocas to byte size */
+ if (type != get_unknown_type() && get_type_size_bytes(type) != 1)
+ panic("Found non-byte alloc in sparc backend (at %+F)", node);
+
+ if (is_Const(size)) {
+ /* constant size: fold into a plain stack-pointer increment node */
+ ir_tarval *tv = get_Const_tarval(size);
+ long sizel = get_tarval_long(tv);
+ subsp = be_new_IncSP(sp_reg, new_block, stack_pred, sizel, 0);
+ set_irn_dbg_info(subsp, dbgi);
+ } else {
+ /* variable size: emit an explicit SubSP on the transformed size */
+ ir_node *new_size = be_transform_node(size);
+ subsp = new_bd_sparc_SubSP(dbgi, new_block, stack_pred, new_size);
+ arch_set_irn_register(subsp, sp_reg);
+ }
+
+ /* if we are the last IncSP producer in a block then we have to keep
+ * the stack value.
+ * Note: This here keeps all producers which is more than necessary */
+ keep_alive(subsp);
+
+ pmap_insert(node_to_stack, node, subsp);
+ /* the "result" is the unmodified sp value */
+ return stack_pred;
+}
+
+/**
+ * Transform a Proj hanging off an Alloc node.
+ *
+ * The memory Proj maps to the transformed Alloc memory input (the stack
+ * manipulation itself produces no memory), the result Proj maps to the
+ * transformed Alloc (the pre-decrement stack value, see gen_Alloc).
+ * Exception Projs are rejected: stack allocation cannot fault here.
+ */
+static ir_node *gen_Proj_Alloc(ir_node *node)
+{
+ ir_node *alloc = get_Proj_pred(node);
+ long pn = get_Proj_proj(node);
+
+ switch ((pn_Alloc)pn) {
+ case pn_Alloc_M: {
+ ir_node *alloc_mem = get_Alloc_mem(alloc);
+ return be_transform_node(alloc_mem);
+ }
+ case pn_Alloc_res: {
+ ir_node *new_alloc = be_transform_node(alloc);
+ return new_alloc;
+ }
+ case pn_Alloc_X_regular:
+ case pn_Alloc_X_except:
+ panic("sparc backend: exception output of alloc not supported (at %+F)",
+ node);
+ }
+ panic("sparc backend: invalid Proj->Alloc");
+}
+
+/**
+ * Transform a (stack-) Free node into sparc stack-pointer arithmetic.
+ *
+ * Mirror image of gen_Alloc: the stack pointer is incremented again by the
+ * freed byte size (constant sizes via be_IncSP with a negated offset,
+ * variable sizes via an AddSP node). The new stack value is recorded in
+ * node_to_stack; the transformed memory is handed back since a Free has no
+ * data result.
+ */
+static ir_node *gen_Free(ir_node *node)
+{
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_type *type = get_Free_type(node);
+ ir_node *size = get_Free_count(node);
+ ir_node *mem = get_Free_mem(node);
+ ir_node *new_mem = be_transform_node(mem);
+ ir_node *stack_pred = get_stack_pointer_for(node);
+ ir_node *addsp;
+ /* use the Free accessor here: this is a Free node, not an Alloc */
+ if (get_Free_where(node) != stack_alloc)
+ panic("only stack-alloc supported in sparc backend (at %+F)", node);
+ /* lowerer should have transformed all allocas to byte size */
+ if (type != get_unknown_type() && get_type_size_bytes(type) != 1)
+ panic("Found non-byte free in sparc backend (at %+F)", node);
+
+ if (is_Const(size)) {
+ ir_tarval *tv = get_Const_tarval(size);
+ long sizel = get_tarval_long(tv);
+ addsp = be_new_IncSP(sp_reg, new_block, stack_pred, -sizel, 0);
+ set_irn_dbg_info(addsp, dbgi);
+ } else {
+ ir_node *new_size = be_transform_node(size);
+ addsp = new_bd_sparc_AddSP(dbgi, new_block, stack_pred, new_size);
+ arch_set_irn_register(addsp, sp_reg);
+ }
+
+ /* if we are the last IncSP producer in a block then we have to keep
+ * the stack value.
+ * Note: This here keeps all producers which is more than necessary */
+ keep_alive(addsp);
+
+ pmap_insert(node_to_stack, node, addsp);
+ /* a Free produces no value; hand back the transformed memory */
+ return new_mem;
+}
+
static const arch_register_req_t float1_req = {
arch_register_req_type_normal,
&sparc_reg_classes[CLASS_sparc_fp],
ir_node *pred = get_Proj_pred(node);
switch (get_irn_opcode(pred)) {
+ case iro_Alloc:
+ return gen_Proj_Alloc(node);
case iro_Store:
return gen_Proj_Store(node);
case iro_Load:
be_start_transform_setup();
be_set_transform_function(op_Add, gen_Add);
+ be_set_transform_function(op_Alloc, gen_Alloc);
be_set_transform_function(op_And, gen_And);
be_set_transform_function(op_Call, gen_Call);
be_set_transform_function(op_Cmp, gen_Cmp);
be_set_transform_function(op_Conv, gen_Conv);
be_set_transform_function(op_Div, gen_Div);
be_set_transform_function(op_Eor, gen_Eor);
+ be_set_transform_function(op_Free, gen_Free);
be_set_transform_function(op_Jmp, gen_Jmp);
be_set_transform_function(op_Load, gen_Load);
be_set_transform_function(op_Minus, gen_Minus);
--- /dev/null
+/*
+ * Copyright (C) 2011 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief Lower (stack-) Alloc nodes to allocate an aligned number of bytes
+ * @author Matthias Braun
+ */
+#include "config.h"
+
+#include "lower_alloc.h"
+#include "irgwalk.h"
+#include "irnode_t.h"
+#include "ircons.h"
+#include "error.h"
+#include "irgmod.h"
+#include "irnodeset.h"
+
+static bool lower_constant_sizes;
+static unsigned stack_alignment;
+static long addr_delta;
+static ir_nodeset_t transformed;
+
+/**
+ * Adjust the size of a node representing a stack alloc to a certain
+ * stack_alignment.
+ *
+ * @param size the node containing the non-aligned size
+ * @param block the block where new nodes are allocated on
+ * @return a node representing the aligned size
+ */
+/**
+ * Adjust the size of a node representing a stack alloc to a certain
+ * stack_alignment.
+ *
+ * Implemented as the usual round-up-to-power-of-2 bit trick:
+ * (size + align-1) & -align.
+ *
+ * @param dbgi debug info attached to the generated Add/And nodes
+ * @param size the node containing the non-aligned size
+ * @param block the block where new nodes are allocated on
+ * @return a node representing the aligned size
+ */
+static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size, ir_node *block)
+{
+ ir_mode *mode;
+ ir_tarval *tv;
+ ir_node *mask;
+ ir_graph *irg;
+
+ /* alignment of 0/1 means "no alignment required" */
+ if (stack_alignment <= 1)
+ return size;
+ /* constant sizes are left alone unless explicitly requested */
+ if (is_Const(size) && !lower_constant_sizes)
+ return size;
+
+ mode = get_irn_mode(size);
+ tv = new_tarval_from_long(stack_alignment-1, mode);
+ irg = get_Block_irg(block);
+ mask = new_r_Const(irg, tv);
+ size = new_rd_Add(dbgi, block, size, mask, mode);
+
+ tv = new_tarval_from_long(-(long)stack_alignment, mode);
+ mask = new_r_Const(irg, tv);
+ size = new_rd_And(dbgi, block, size, mask, mode);
+ return size;
+}
+
+/**
+ * Apply addr_delta to the result Proj of an Alloc.
+ *
+ * Rewires every Proj(Alloc, pn_Alloc_res) to Add(Proj, addr_delta) by first
+ * exchanging the Proj with an Add over a Dummy placeholder and then creating
+ * a fresh result Proj as the Add's left operand (the old Proj cannot be
+ * reused after exchange()). The fresh Proj is marked in `transformed` so the
+ * walker does not adjust it a second time.
+ */
+static void transform_Proj_Alloc(ir_node *node)
+{
+ ir_graph *irg;
+ dbg_info *dbgi;
+ ir_node *block;
+ ir_node *delta;
+ ir_node *add;
+ ir_node *dummy;
+ ir_node *alloc;
+ ir_node *new_proj;
+
+ /* we might need a result adjustment */
+ if (addr_delta == 0)
+ return;
+ if (get_Proj_proj(node) != pn_Alloc_res)
+ return;
+ if (ir_nodeset_contains(&transformed, node))
+ return;
+
+ alloc = get_Proj_pred(node);
+ dbgi = get_irn_dbg_info(alloc);
+ irg = get_irn_irg(node);
+ block = get_nodes_block(node);
+ delta = new_r_Const_long(irg, mode_P, addr_delta);
+ dummy = new_r_Dummy(irg, mode_P);
+ add = new_rd_Add(dbgi, block, dummy, delta, mode_P);
+
+ exchange(node, add);
+ new_proj = new_r_Proj(alloc, mode_P, pn_Alloc_res);
+ set_Add_left(add, new_proj);
+ ir_nodeset_insert(&transformed, new_proj);
+}
+
+/**
+ * lower Alloca nodes to allocate "bytes" instead of a certain type
+ */
+/**
+ * lower Alloca nodes to allocate "bytes" instead of a certain type
+ *
+ * Walker callback: for each Alloc/Free the typed count is turned into a byte
+ * count (count * type_size, then aligned via adjust_alloc_size) and the node
+ * is rebuilt with the unknown type. Result Projs of Allocs are adjusted by
+ * addr_delta through transform_Proj_Alloc. Replacement nodes are tracked in
+ * `transformed` so the walker does not lower them again.
+ */
+static void lower_alloca_free(ir_node *node, void *data)
+{
+ ir_type *type;
+ unsigned size;
+ ir_graph *irg;
+ ir_node *count;
+ ir_mode *mode;
+ ir_node *szconst;
+ ir_node *block;
+ ir_node *mem;
+ ir_type *new_type;
+ ir_node *mul;
+ ir_node *new_size;
+ dbg_info *dbgi;
+ ir_node *new_node;
+ ir_where_alloc where;
+ (void) data;
+ if (is_Alloc(node)) {
+ type = get_Alloc_type(node);
+ } else if (is_Free(node)) {
+ type = get_Free_type(node);
+ } else if (is_Proj(node)) {
+ ir_node *proj_pred = get_Proj_pred(node);
+ if (is_Alloc(proj_pred)) {
+ transform_Proj_Alloc(node);
+ }
+ return;
+ } else {
+ return;
+ }
+ if (ir_nodeset_contains(&transformed, node))
+ return;
+
+ ir_nodeset_insert(&transformed, node);
+ size = get_type_size_bytes(type);
+ /* the unknown type counts as a single byte */
+ if (type == get_unknown_type())
+ size = 1;
+ /* already byte-sized and no alignment required: nothing to do */
+ if (size == 1 && stack_alignment <= 1)
+ return;
+
+ if (is_Alloc(node)) {
+ count = get_Alloc_count(node);
+ mem = get_Alloc_mem(node);
+ where = get_Alloc_where(node);
+ } else {
+ count = get_Free_count(node);
+ mem = get_Free_mem(node);
+ where = get_Free_where(node);
+ }
+ mode = get_irn_mode(count);
+ block = get_nodes_block(node);
+ irg = get_irn_irg(node);
+ szconst = new_r_Const_long(irg, mode, (long)size);
+ /* byte size = element count * element size, then rounded up */
+ mul = new_r_Mul(block, count, szconst, mode);
+ dbgi = get_irn_dbg_info(node);
+ new_size = adjust_alloc_size(dbgi, mul, block);
+ new_type = get_unknown_type();
+ if (is_Alloc(node)) {
+ new_node = new_rd_Alloc(dbgi, block, mem, new_size, new_type, where);
+ } else {
+ ir_node *ptr = get_Free_ptr(node);
+ new_node
+ = new_rd_Free(dbgi, block, mem, ptr, new_size, new_type, where);
+ }
+ ir_nodeset_insert(&transformed, new_node);
+
+ if (new_node != node)
+ exchange(node, new_node);
+}
+
+/**
+ * Lower all Alloc/Free nodes of @p irg to byte allocations (see header).
+ *
+ * Stores the configuration in the file-scope globals used by the walker
+ * callbacks, then walks the graph once.
+ */
+void lower_alloc(ir_graph *irg, unsigned new_stack_alignment, bool lower_consts,
+                 long new_addr_delta)
+{
+	/* validate the requested alignment, not the stale global: the old code
+	 * tested stack_alignment before it was assigned */
+	if (!is_po2(new_stack_alignment))
+		panic("lower_alloc only supports stack alignments that are a power of 2");
+	addr_delta = new_addr_delta;
+	stack_alignment = new_stack_alignment;
+	lower_constant_sizes = lower_consts;
+	ir_nodeset_init(&transformed);
+	irg_walk_graph(irg, lower_alloca_free, NULL, NULL);
+	ir_nodeset_destroy(&transformed);
+}
--- /dev/null
+/*
+ * Copyright (C) 2011 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief Lower (stack-) Alloc nodes to allocate an aligned number of bytes
+ * @author Matthias Braun
+ */
+#ifndef FIRM_LOWER_ALLOC_H
+#define FIRM_LOWER_ALLOC_H
+
+#include <stdbool.h>
+#include "firm_types.h"
+
+/**
+ * Lower Alloc/Free nodes: This changes them to allocate bytes instead of
+ * objects of a certain type. It can also make sure that the resulting
+ * size is aligned.
+ *
+ * @param irg                  the graph to transform
+ * @param stack_alignment      required alignment (must be a power of 2)
+ * @param align_constant_sizes whether constant-size allocations are
+ *                             aligned as well
+ * @param addr_delta           offset added to each Alloc result address
+ */
+void lower_alloc(ir_graph *irg, unsigned stack_alignment,
+ bool align_constant_sizes,
+ long addr_delta);
+
+#endif