/**
 * @file
 * @brief Naive spilling algorithm
 * @author Matthias Braun
 * @date 20.09.2005
 * @version $Id: bespillbelady.c 13913 2007-05-18 12:48:56Z matze $
 * @summary
 * This implements a naive spilling algorithm. It is designed to produce similar
 * effects to the spill decisions produced by traditional graph coloring
 * register allocators that spill while they are coloring the graph.
 *
 * Spilling in this context means placing a spill instruction behind the
 * definition of the value and a reload before each usage.
 */
#include "config.h"

#include "debug.h"

/* Module-global state shared by the spilling routines in this file.
 * NOTE(review): set up in the spiller entry point (see the init code at the
 * bottom of this file); valid only for the duration of one spilling run. */
static spill_env_t                 *spill_env;     /* environment used to place spills/reloads */
static int                          n_regs;        /* presumably #registers of the class — confirm at init site */
static const arch_register_class_t *cls;           /* register class currently being processed */
static const be_lv_t               *lv;            /* liveness information of the graph */
static bitset_t                    *spilled_nodes; /* indexed by node idx; marks already-spilled values */
foreach_out_edge(node, edge) {
const ir_node *proj = get_edge_src_irn(edge);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ if (arch_irn_consider_in_reg_alloc(cls, proj)) {
++values_defined;
}
}
- } else if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+ } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
++values_defined;
}
arity = get_irn_arity(node);
for(i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, pred)
+ if (arch_irn_consider_in_reg_alloc(cls, pred)
&& !ir_nodeset_contains(live_nodes, pred)) {
++free_regs_needed;
}
return;
DBG((dbg, LEVEL_2, "\tspills needed after %+F: %d\n", node, spills_needed));
- candidates = alloca(n_live_nodes * sizeof(candidates[0]));
+ candidates = ALLOCAN(spill_candidate_t, n_live_nodes);
/* construct array with spill candidates and calculate their costs */
i = 0;
cand_node = candidate->node;
++cand_idx;
- if(arch_irn_is(arch_env, cand_node, dont_spill))
+ if (arch_irn_is(cand_node, dont_spill))
continue;
/* make sure the node is not an argument of the instruction */
foreach_out_edge(node, edge) {
const ir_node *proj = get_edge_src_irn(edge);
- if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ if (arch_irn_consider_in_reg_alloc(cls, proj)) {
ir_nodeset_remove(nodeset, proj);
}
}
}
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
- ir_nodeset_remove(nodeset, node);
- }
+ if (arch_irn_consider_in_reg_alloc(cls, node)) {
+ ir_nodeset_remove(nodeset, node);
+ }
}
/**
 * Adds all operands of @p node that belong to the current register class
 * and have not been spilled yet to @p nodeset.
 *
 * Reads the file-scope @c cls and @c spilled_nodes state; used to build the
 * set of values that must be live (in registers) before @p node executes.
 */
static void add_uses(ir_node *node, ir_nodeset_t *nodeset)
{
	int i, arity;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *op = get_irn_n(node, i);

		/* already-spilled values are reloaded on demand and need no slot here */
		if (arch_irn_consider_in_reg_alloc(cls, op) &&
		    !bitset_is_set(spilled_nodes, get_irn_idx(op))) {
			ir_nodeset_insert(nodeset, op);
		}
	}
}
static __attribute__((unused))
/* construct set of live nodes at end of block */
ir_nodeset_init(&live_nodes);
- be_liveness_end_of_block(lv, arch_env, cls, block, &live_nodes);
+ be_liveness_end_of_block(lv, cls, block, &live_nodes);
/* remove already spilled nodes from liveset */
foreach_ir_nodeset(&live_nodes, node, iter) {
be_liveness_assure_sets(be_assure_liveness(birg));
spill_env = be_new_spill_env(birg);
- arch_env = be_get_birg_arch_env(birg);
cls = new_cls;
lv = be_get_birg_liveness(birg);
spilled_nodes = bitset_malloc(get_irg_last_idx(irg));