/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
static unsigned *normal_regs;
static int *congruence_classes;
static ir_node **block_order;
-static int n_block_order;
+static size_t n_block_order;
static int create_preferences = true;
static int create_congruence_classes = true;
static int propagate_phi_registers = true;
*/
static allocation_info_t *get_allocation_info(ir_node *node)
{
- allocation_info_t *info = get_irn_link(node);
+ allocation_info_t *info = (allocation_info_t*)get_irn_link(node);
if (info == NULL) {
info = OALLOCFZ(&obst, allocation_info_t, prefs, n_regs);
info->current_value = node;
*/
static block_info_t *get_block_info(ir_node *block)
{
- block_info_t *info = get_irn_link(block);
+ block_info_t *info = (block_info_t*)get_irn_link(block);
assert(is_Block(block));
if (info == NULL) {
{
ir_nodeset_iterator_t iter;
unsigned r;
- unsigned n_allowed;
+ size_t n_allowed;
allocation_info_t *info = get_allocation_info(node);
ir_node *neighbor;
ir_node *original_insn;
ir_node *block;
ir_node *copy;
- unsigned r;
+ unsigned r = 0;
unsigned from_r;
unsigned i;
allocation_info_t *info = get_allocation_info(to_split);
reg_pref_t *prefs;
- float delta;
+ float delta = 0;
float split_threshold;
(void) pref;
ir_node *in_node;
unsigned i;
const unsigned *allowed_regs;
- unsigned r;
+ unsigned r = 0;
assert(!is_Phi(node));
/* preassigned register? */
/* create list of register candidates and sort by their preference */
DB((dbg, LEVEL_2, "Candidates for %+F:", node));
- reg_prefs = alloca(n_regs * sizeof(reg_prefs[0]));
+ reg_prefs = ALLOCAN(reg_pref_t, n_regs);
fill_sort_candidates(reg_prefs, info);
for (i = 0; i < n_regs; ++i) {
unsigned num = reg_prefs[i].num;
* First we count how many destinations a single value has. At the same time
* we can be sure that each destination register has at most 1 source register
* (it can have 0 which means we don't care what value is in it).
- * We ignore all fullfilled permuations (like 7->7)
+ * We ignore all fulfilled permutations (like 7->7)
* In a first pass we create as much copy instructions as possible as they
* are generally cheaper than exchanges. We do this by counting into how many
* destinations a register has to be copied (in the example it's 2 for register
* We can then create a copy into every destination register when the usecount
* of that register is 0 (= noone else needs the value in the register).
*
- * After this step we should have cycles left. We implement a cyclic permutation
- * of n registers with n-1 transpositions.
+ * After this step we should only have cycles left. We implement a cyclic
+ * permutation of n registers with n-1 transpositions.
*
* @param live_nodes the set of live nodes, updated due to live range split
* @param before the node before we add the permutation
permute_values(live_nodes, node, assignment);
}
-/** test wether a node @p n is a copy of the value of node @p of */
+/** test whether a node @p n is a copy of the value of node @p of */
static bool is_copy_of(ir_node *value, ir_node *test_value)
{
allocation_info_t *test_info;
static int cmp_block_costs(const void *d1, const void *d2)
{
- const ir_node * const *block1 = d1;
- const ir_node * const *block2 = d2;
- const block_costs_t *info1 = get_irn_link(*block1);
- const block_costs_t *info2 = get_irn_link(*block2);
+ const ir_node * const *block1 = (const ir_node**)d1;
+ const ir_node * const *block2 = (const ir_node**)d2;
+ const block_costs_t *info1 = (const block_costs_t*)get_irn_link(*block1);
+ const block_costs_t *info2 = (const block_costs_t*)get_irn_link(*block2);
return QSORT_CMP(info2->costs, info1->costs);
}
static void determine_block_order(void)
{
- int i;
+ size_t i;
ir_node **blocklist = be_get_cfgpostorder(irg);
- int n_blocks = ARR_LEN(blocklist);
+ size_t n_blocks = ARR_LEN(blocklist);
int dfs_num = 0;
pdeq *worklist = new_pdeq();
ir_node **order = XMALLOCN(ir_node*, n_blocks);
- int order_p = 0;
+ size_t order_p = 0;
/* clear block links... */
for (i = 0; i < n_blocks; ++i) {
/* walk blocks in reverse postorder, the costs for each block are the
* sum of the costs of its predecessors (excluding the costs on backedges
* which we can't determine) */
- for (i = n_blocks-1; i >= 0; --i) {
+ for (i = n_blocks; i > 0;) {
block_costs_t *cost_info;
- ir_node *block = blocklist[i];
+ ir_node *block = blocklist[--i];
float execfreq = (float)get_block_execfreq(execfreqs, block);
float costs = execfreq;
int p;
for (p = 0; p < n_cfgpreds; ++p) {
ir_node *pred_block = get_Block_cfgpred_block(block, p);
- block_costs_t *pred_costs = get_irn_link(pred_block);
+ block_costs_t *pred_costs = (block_costs_t*)get_irn_link(pred_block);
/* we don't have any info for backedges */
if (pred_costs == NULL)
continue;
/* continually add predecessors with highest costs to worklist
* (without using backedges) */
do {
- block_costs_t *info = get_irn_link(block);
+ block_costs_t *info = (block_costs_t*)get_irn_link(block);
ir_node *best_pred = NULL;
float best_costs = -1;
int n_cfgpred = get_Block_n_cfgpreds(block);
mark_Block_block_visited(block);
for (i = 0; i < n_cfgpred; ++i) {
ir_node *pred_block = get_Block_cfgpred_block(block, i);
- block_costs_t *pred_info = get_irn_link(pred_block);
+ block_costs_t *pred_info = (block_costs_t*)get_irn_link(pred_block);
/* ignore backedges */
if (pred_info->dfs_num > info->dfs_num)
/* now put all nodes in the worklist in our final order */
while (!pdeq_empty(worklist)) {
- ir_node *pblock = pdeq_getr(worklist);
+ ir_node *pblock = (ir_node*)pdeq_getr(worklist);
assert(order_p < n_blocks);
order[order_p++] = pblock;
}
*/
static void be_pref_alloc_cls(void)
{
- int i;
+ size_t i;
lv = be_assure_liveness(irg);
be_liveness_assure_sets(lv);
static void be_pref_alloc(ir_graph *new_irg)
{
const arch_env_t *arch_env = be_get_irg_arch_env(new_irg);
- int n_cls = arch_env_get_n_reg_class(arch_env);
+ int n_cls = arch_env->n_register_classes;
int c;
obstack_init(&obst);
determine_block_order();
for (c = 0; c < n_cls; ++c) {
- cls = arch_env_get_reg_class(arch_env, c);
+ cls = &arch_env->register_classes[c];
if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
continue;
n_regs = arch_register_class_n_regs(cls);
normal_regs = rbitset_malloc(n_regs);
- be_abi_set_non_ignore_regs(be_get_irg_abi(irg), cls, normal_regs);
+ be_set_allocatable_regs(irg, cls, normal_regs);
spill();
obstack_free(&obst, NULL);
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_pref_alloc);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_pref_alloc)
void be_init_pref_alloc(void)
{
static be_ra_t be_ra_pref = {