/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
- *
* This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
*/
/**
* @brief First simple copy minimization heuristics.
* @author Daniel Grund
* @date 12.04.2005
- * @version $Id$
*
* Heuristic for minimizing copies using a queue which holds 'qnodes' not yet
* examined. A qnode has a 'target color', nodes out of the opt unit and
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
+/** Defines an invalid register index. */
+#define NO_COLOR (-1)
+
#define SEARCH_FREE_COLORS
#define SLOTS_PINNED_GLOBAL 64
/**
* Modeling additional conflicts between nodes. NOT live range interference
*/
-typedef struct _conflict_t {
+typedef struct conflict_t {
const ir_node *n1, *n2;
} conflict_t;
* If an irn is changed, the changes first get stored in a node_stat_t,
* to allow undo of changes (=drop new data) in case of conflicts.
*/
-typedef struct _node_stat_t {
+typedef struct node_stat_t {
ir_node *irn;
- int new_color;
- int pinned_local :1;
+ int new_color;
+ unsigned pinned_local :1;
} node_stat_t;
/**
* Represents a node in the optimization queue.
*/
-typedef struct _qnode_t {
+typedef struct qnode_t {
struct list_head queue; /**< chaining of unit_t->queue */
- const unit_t *ou; /**< the opt unit this node belongs to */
int color; /**< target color */
set *conflicts; /**< contains conflict_t's. All internal conflicts */
int mis_costs; /**< costs of nodes/copies in the mis. */
set *changed_nodes; /**< contains node_stat_t's. */
} qnode_t;
-static pset *pinned_global; /**< optimized nodes should not be altered any more */
-
-static inline int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
-{
- if (env->ifg)
- return be_ifg_connected(env->ifg, a, b);
- else
- return be_values_interfere(env->birg->lv, a, b);
-}
+static pset *pinned_global; /**< optimized nodes should not be altered any more */
static int set_cmp_conflict_t(const void *x, const void *y, size_t size)
{
- const conflict_t *xx = x;
- const conflict_t *yy = y;
+ const conflict_t *xx = (const conflict_t*)x;
+ const conflict_t *yy = (const conflict_t*)y;
(void) size;
return xx->n1 != yy->n1 || xx->n2 != yy->n2;
c.n1 = n2;
c.n2 = n1;
}
- set_insert(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c));
+ (void)set_insert(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c));
}
/**
{
conflict_t c;
/* search for live range interference */
- if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2))
- return 1;
+ if (n1 != n2) {
+ be_lv_t *const lv = be_get_irg_liveness(get_irn_irg(n1));
+ if (be_values_interfere(lv, n1, n2))
+ return 1;
+ }
/* search for recoloring conflicts */
if (get_irn_idx(n1) < get_irn_idx(n2)) {
c.n1 = n1;
c.n1 = n2;
c.n2 = n1;
}
- return set_find(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0;
+ return set_find(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0;
}
static int set_cmp_node_stat_t(const void *x, const void *y, size_t size)
{
node_stat_t find;
find.irn = irn;
- return set_find(qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
+ return set_find(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
}
/**
find.irn = irn;
find.new_color = NO_COLOR;
find.pinned_local = 0;
- return set_insert(qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
+ return set_insert(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
}
/**
* Else the first conflicting ir_node encountered is returned.
*
*/
-static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger)
+static ir_node *qnode_color_irn(qnode_t const *const qn, ir_node *const irn, int const col, ir_node const *const trigger, bitset_t const *const allocatable_regs, be_ifg_t *const ifg)
{
- copy_opt_t *co = qn->ou->co;
- const be_chordal_env_t *chordal_env = co->cenv;
- const arch_register_class_t *cls = co->cls;
int irn_col = qnode_get_new_color(qn, irn);
- ir_node *sub_res, *curr;
- be_ifg_t *ifg = chordal_env->ifg;
- void *iter = be_ifg_neighbours_iter_alloca(ifg);
-
+ neighbours_iter_t iter;
DBG((dbg, LEVEL_3, "\t %+F \tcaused col(%+F) \t%2d --> %2d\n", trigger, irn, irn_col, col));
return irn;
}
+ arch_register_req_t const *const req = arch_get_irn_register_req(irn);
+ arch_register_class_t const *const cls = req->cls;
#ifdef SEARCH_FREE_COLORS
/* If we resolve conflicts (recursive calls) we can use any unused color.
* In case of the first call @p col must be used.
*/
if (irn != trigger) {
bitset_t *free_cols = bitset_alloca(cls->n_regs);
- const arch_register_req_t *req;
- ir_node *curr;
int free_col;
/* Get all possible colors */
- bitset_copy(free_cols, co->cenv->ignore_colors);
- bitset_flip_all(free_cols);
+ bitset_copy(free_cols, allocatable_regs);
/* Exclude colors not assignable to the irn */
- req = arch_get_register_req_out(irn);
- if (arch_register_req_is(req, limited)) {
- bitset_t *limited = bitset_alloca(cls->n_regs);
- rbitset_copy_to_bitset(req->limited, limited);
- bitset_and(free_cols, limited);
- }
+ if (arch_register_req_is(req, limited))
+ rbitset_and(free_cols->data, req->limited, free_cols->size);
/* Exclude the color of the irn, because it must _change_ its color */
bitset_clear(free_cols, irn_col);
/* Exclude all colors used by adjacent nodes */
- be_ifg_foreach_neighbour(ifg, iter, irn, curr)
+ be_ifg_foreach_neighbour(ifg, &iter, irn, curr)
bitset_clear(free_cols, qnode_get_new_color(qn, curr));
free_col = bitset_next_set(free_cols, 0);
#endif /* SEARCH_FREE_COLORS */
/* If target color is not allocatable changing color is impossible */
- if (!arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, col))) {
+ if (!arch_reg_is_allocatable(req, arch_register_for_index(cls, col))) {
DBG((dbg, LEVEL_3, "\t %+F impossible\n", irn));
return CHANGE_IMPOSSIBLE;
}
* If we arrive here changing color may be possible, but there may be conflicts.
* Try to color all conflicting nodes 'curr' with the color of the irn itself.
*/
- be_ifg_foreach_neighbour(ifg, iter, irn, curr) {
+ be_ifg_foreach_neighbour(ifg, &iter, irn, curr) {
DBG((dbg, LEVEL_3, "\t Confl %+F(%d)\n", curr, qnode_get_new_color(qn, curr)));
if (qnode_get_new_color(qn, curr) == col && curr != trigger) {
- sub_res = qnode_color_irn(qn, curr, irn_col, irn);
+ ir_node *const sub_res = qnode_color_irn(qn, curr, irn_col, irn, allocatable_regs, ifg);
if (sub_res != CHANGE_SAVE) {
- be_ifg_neighbours_break(ifg, iter);
+ be_ifg_neighbours_break(&iter);
return sub_res;
}
}
* @returns 1 iff all members colors could be set
* 0 else
*/
-static int qnode_try_color(const qnode_t *qn)
+static int qnode_try_color(qnode_t const *const qn, bitset_t const *const allocatable_regs, be_ifg_t *const ifg)
{
int i;
for (i=0; i<qn->mis_size; ++i) {
test_node = qn->mis[i];
DBG((dbg, LEVEL_3, "\t Testing %+F\n", test_node));
- confl_node = qnode_color_irn(qn, test_node, qn->color, test_node);
+ confl_node = qnode_color_irn(qn, test_node, qn->color, test_node, allocatable_regs, ifg);
if (confl_node == CHANGE_SAVE) {
DBG((dbg, LEVEL_3, "\t Save --> pin local\n"));
} else {
if (qnode_is_pinned_local(qn, confl_node)) {
/* changing test_node would change back a node of current ou */
- if (confl_node == qn->ou->nodes[0]) {
+ if (confl_node == qn->mis[0]) {
/* Adding a conflict edge between testnode and conflnode
* would introduce a root -- arg interference.
* So remove the arg of the qn */
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
bitset_t *curr, *best;
- bitset_pos_t pos;
int next, curr_weight, best_weight = 0;
/* assign the nodes into two groups.
unsafe = ALLOCAN(ir_node*, ou->node_count - 1);
unsafe_costs = ALLOCAN(int, ou->node_count - 1);
unsafe_count = 0;
- for(i=1; i<ou->node_count; ++i) {
+ for (i=1; i<ou->node_count; ++i) {
int is_safe = 1;
- for(o=1; o<ou->node_count; ++o) {
+ for (o=1; o<ou->node_count; ++o) {
if (qnode_are_conflicting(qn, ou->nodes[i], ou->nodes[o])) {
if (i!=o) {
unsafe_costs[unsafe_count] = ou->costs[i];
goto no_stable_set;
/* if we arrive here, we have a stable set */
- /* compute the weigth of the stable set*/
+ /* compute the weight of the stable set */
curr_weight = 0;
bitset_foreach(curr, pos)
curr_weight += unsafe_costs[pos];
}
/* transfer the best set into the qn */
- qn->mis_size = 1+safe_count+bitset_popcnt(best);
+ qn->mis_size = 1+safe_count+bitset_popcount(best);
qn->mis_costs = safe_costs+best_weight;
qn->mis[0] = ou->nodes[0]; /* the root is always in a max stable set */
next = 1;
static inline qnode_t *new_qnode(const unit_t *ou, int color)
{
qnode_t *qn = XMALLOC(qnode_t);
- qn->ou = ou;
qn->color = color;
qn->mis = XMALLOCN(ir_node*, ou->node_count);
qn->conflicts = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS);
* case for approximately 80% of all phi classes and 100% of register constrained
* nodes. (All other phi classes are reduced to this case.)
*/
-static void ou_optimize(unit_t *ou)
+static void ou_optimize(unit_t *ou, bitset_t const *const allocatable_regs, be_ifg_t *const ifg)
{
- qnode_t *curr = NULL;
- qnode_t *tmp;
- const arch_register_req_t *req;
- bitset_t const* ignore;
- bitset_pos_t n_regs;
- bitset_pos_t idx;
- int i;
-
DBG((dbg, LEVEL_1, "\tOptimizing unit:\n"));
- for (i=0; i<ou->node_count; ++i)
+ for (int i = 0; i < ou->node_count; ++i)
DBG((dbg, LEVEL_1, "\t %+F\n", ou->nodes[i]));
/* init queue */
INIT_LIST_HEAD(&ou->queue);
- req = arch_get_register_req_out(ou->nodes[0]);
- ignore = ou->co->cenv->ignore_colors;
- n_regs = req->cls->n_regs;
+ arch_register_req_t const *const req = arch_get_irn_register_req(ou->nodes[0]);
+ unsigned const n_regs = req->cls->n_regs;
if (arch_register_req_is(req, limited)) {
- rawbs_base_t const* limited = req->limited;
+ unsigned const* limited = req->limited;
- for (idx = 0; idx != n_regs; ++idx) {
- if (bitset_is_set(ignore, idx))
+ for (unsigned idx = 0; idx != n_regs; ++idx) {
+ if (!bitset_is_set(allocatable_regs, idx))
continue;
if (!rbitset_is_set(limited, idx))
continue;
ou_insert_qnode(ou, new_qnode(ou, idx));
}
} else {
- for (idx = 0; idx != n_regs; ++idx) {
- if (bitset_is_set(ignore, idx))
+ for (unsigned idx = 0; idx != n_regs; ++idx) {
+ if (!bitset_is_set(allocatable_regs, idx))
continue;
ou_insert_qnode(ou, new_qnode(ou, idx));
}
/* search best */
+ qnode_t *curr;
for (;;) {
assert(!list_empty(&ou->queue));
/* get head of queue */
DBG((dbg, LEVEL_2, "\t Examine qnode color %d with cost %d\n", curr->color, curr->mis_costs));
/* try */
- if (qnode_try_color(curr))
+ if (qnode_try_color(curr, allocatable_regs, ifg))
break;
/* no success, so re-insert */
/* apply the best found qnode */
if (curr->mis_size >= 2) {
- node_stat_t *ns;
int root_col = qnode_get_new_color(curr, ou->nodes[0]);
DBG((dbg, LEVEL_1, "\t Best color: %d Costs: %d << %d << %d\n", curr->color, ou->min_nodes_costs, ou->all_nodes_costs - curr->mis_costs, ou->all_nodes_costs));
/* globally pin root and all args which have the same color */
pset_insert_ptr(pinned_global, ou->nodes[0]);
- for (i=1; i<ou->node_count; ++i) {
+ for (int i = 1; i < ou->node_count; ++i) {
ir_node *irn = ou->nodes[i];
int nc = qnode_get_new_color(curr, irn);
if (nc != NO_COLOR && nc == root_col)
}
/* set color of all changed nodes */
- for (ns = set_first(curr->changed_nodes); ns; ns = set_next(curr->changed_nodes)) {
+ foreach_set(curr->changed_nodes, node_stat_t, ns) {
/* NO_COLOR is possible, if we had an undo */
if (ns->new_color != NO_COLOR) {
DBG((dbg, LEVEL_1, "\t color(%+F) := %d\n", ns->irn, ns->new_color));
- set_irn_col(ou->co, ns->irn, ns->new_color);
+ set_irn_col(req->cls, ns->irn, ns->new_color);
}
}
}
*/
int co_solve_heuristic(copy_opt_t *co)
{
- unit_t *curr;
-
ASSERT_OU_AVAIL(co);
pinned_global = pset_new_ptr(SLOTS_PINNED_GLOBAL);
- list_for_each_entry(unit_t, curr, &co->units, units)
+ bitset_t const *const allocatable_regs = co->cenv->allocatable_regs;
+ be_ifg_t *const ifg = co->cenv->ifg;
+ list_for_each_entry(unit_t, curr, &co->units, units) {
if (curr->node_count > 1)
- ou_optimize(curr);
+ ou_optimize(curr, allocatable_regs, ifg);
+ }
del_pset(pinned_global);
return 0;
}
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur)
void be_init_copyheur(void)
{
static co_algo_info copyheur = {
be_register_copyopt("heur1", ©heur);
FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur");
}
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur);