/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
- *
* This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
*/
/**
* @file
* @author Sebastian Hack, Matthias Braun
- * @version $Id$
*
* Handling of the stack frame. It is composed of three types:
* 1) The type of the arguments which are pushed on the stack.
return NULL;
}
-static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
+static void stack_frame_compute_initial_offset(be_stack_layout_t *frame)
{
ir_type *base = frame->between_type;
ir_entity *ent = search_ent_with_offset(base, 0);
} else {
frame->initial_offset = be_get_stack_entity_offset(frame, ent, 0);
}
-
- return frame->initial_offset;
}
/**
dbg_info *dbgi = get_irn_dbg_info(sel);
ir_mode *mode = get_irn_mode(sel);
ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
- ir_node *cnst = new_r_Const_long(current_ir_graph, mode_UInt, offset);
+ ir_node *cnst = new_r_Const_long(irg, mode_UInt, offset);
ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
}
* A helper struct for the bias walker.
*/
typedef struct bias_walk {
- int start_block_bias; /**< The bias at the end of the start block. */
- int between_size;
- ir_node *start_block; /**< The start block of the current graph. */
+ int start_block_bias; /**< The bias at the end of the start block. */
+ ir_node *start_block; /**< The start block of the current graph. */
} bias_walk;
/**
be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
bool sp_relative = layout->sp_relative;
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
- ir_node *irn;
sched_foreach(bl, irn) {
int ofs;
/* fill in real stack frame size */
if (be_get_IncSP_align(irn)) {
/* patch IncSP to produce an aligned stack pointer */
- ir_type *between_type = layout->between_type;
- int between_size = get_type_size_bytes(between_type);
- int alignment = 1 << arch_env->stack_alignment;
- int delta = (real_bias + ofs + between_size) & (alignment - 1);
+ int const between_size = get_type_size_bytes(layout->between_type);
+ int const alignment = 1 << arch_env->stack_alignment;
+ int const delta = (real_bias + ofs + between_size) & (alignment - 1);
assert(ofs >= 0);
if (delta > 0) {
be_set_IncSP_offset(irn, ofs + alignment - delta);
stack_frame_compute_initial_offset(stack_layout);
/* Determine the stack bias at the end of the start block. */
- bw.start_block_bias = process_stack_bias(get_irg_start_block(irg),
- stack_layout->initial_bias);
- bw.between_size = get_type_size_bytes(stack_layout->between_type);
+ bw.start_block = get_irg_start_block(irg);
+ bw.start_block_bias = process_stack_bias(bw.start_block, stack_layout->initial_bias);
/* fix the bias in all other blocks */
- bw.start_block = get_irg_start_block(irg);
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
/* now fix inner functions: these still have Sel nodes to outer
*/
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
- ir_node *insn = node;
- fix_stack_walker_env_t *env = (fix_stack_walker_env_t*)data;
- const arch_register_req_t *req;
+ fix_stack_walker_env_t *const env = (fix_stack_walker_env_t*)data;
- if (is_Proj(node)) {
- insn = get_Proj_pred(node);
- }
-
- if (arch_irn_get_n_outs(insn) == 0)
- return;
if (get_irn_mode(node) == mode_T)
return;
- req = arch_get_register_req_out(node);
- if (! (req->type & arch_register_req_type_produces_sp))
+ arch_register_req_t const *const req = arch_get_irn_register_req(node);
+ if (!arch_register_req_is(req, produces_sp))
return;
ARR_APP1(ir_node*, env->sp_nodes, node);
be_lv_t *lv = be_get_irg_liveness(irg);
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
be_irg_t *birg = be_birg_from_irg(irg);
- const arch_register_req_t *sp_req = birg->sp_req;
const arch_register_t *sp = arch_env->sp;
be_ssa_construction_env_t senv;
int i, len;
ir_node **phis;
fix_stack_walker_env_t walker_env;
+ arch_register_req_t const *sp_req = birg->sp_req;
if (sp_req == NULL) {
- struct obstack *obst = be_get_be_obst(irg);
- arch_register_req_t *new_sp_req;
- unsigned *limited_bitset;
-
- new_sp_req = OALLOCZ(obst, arch_register_req_t);
- new_sp_req->type = arch_register_req_type_limited
- | arch_register_req_type_produces_sp;
- new_sp_req->cls = arch_register_get_class(arch_env->sp);
- new_sp_req->width = 1;
-
- limited_bitset = rbitset_obstack_alloc(obst, new_sp_req->cls->n_regs);
- rbitset_set(limited_bitset, arch_register_get_index(sp));
- new_sp_req->limited = limited_bitset;
-
- if (!rbitset_is_set(birg->allocatable_regs, sp->global_index)) {
- new_sp_req->type |= arch_register_req_type_ignore;
- }
+ arch_register_req_type_t type = arch_register_req_type_produces_sp;
+ if (!rbitset_is_set(birg->allocatable_regs, sp->global_index))
+ type |= arch_register_req_type_ignore;
- sp_req = new_sp_req;
- birg->sp_req = new_sp_req;
+ struct obstack *const obst = be_get_be_obst(irg);
+ birg->sp_req = sp_req = be_create_reg_req(obst, sp, type);
}
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);