-/**
- * Author: Daniel Grund
- * Date: 12.04.2005
- * Copyright: (c) Universitaet Karlsruhe
- * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+/**
+ * @file
+ * @brief First simple copy minimization heuristics.
+ * @author Daniel Grund
+ * @date 12.04.2005
+ * @version $Id$
+ *
* Heuristic for minimizing copies using a queue which holds 'qnodes' not yet
* examined. A qnode has a 'target color', nodes out of the opt unit and
* a 'conflict graph'. 'Conflict graph' = "Interference graph' + 'conflict edges'
- * A 'max indep set' is determined form these. We try to color this mis using a
+ * A 'max indep set' is determined from these. We try to color this mis using a
* color-exchanging mechanism. Occuring conflicts are modeled with 'conflict edges'
* and the qnode is reinserted in the queue. The first qnode colored without
* conflicts is the best one.
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
-
-#ifdef HAVE_ALLOCA_H
-#include <alloca.h>
-#endif
-#ifdef HAVE_MALLOC_H
-#include <malloc.h>
-#endif
#include "debug.h"
+#include "bitset.h"
+#include "raw_bitset.h"
#include "xmalloc.h"
+
#include "becopyopt_t.h"
#include "becopystat.h"
-#include "benodesets.h"
-#include "bitset.h"
-#include "raw_bitset.h"
+#include "beintlive_t.h"
+#include "beirg_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
#define SLOTS_CHANGED_NODES 32
#define list_entry_queue(lh) list_entry(lh, qnode_t, queue)
-#define HASH_CONFLICT(c) (nodeset_hash(c.n1) ^ nodeset_hash(c.n2))
+#define HASH_CONFLICT(c) (hash_irn(c.n1) ^ hash_irn(c.n2))
/**
* Modeling additional conflicts between nodes. NOT live range interference
*/
typedef struct _node_stat_t {
ir_node *irn;
- int new_color;
- int pinned_local :1;
+ int new_color;
+ int pinned_local :1;
} node_stat_t;
/**
* Represents a node in the optimization queue.
*/
typedef struct _qnode_t {
- struct list_head queue; /**< chaining of unit_t->queue */
- const unit_t *ou; /**< the opt unit this qnode belongs to */
- int color; /**< target color */
- set *conflicts; /**< contains conflict_t's. All internal conflicts */
- int mis_costs; /**< costs of nodes/copies in the mis. */
- int mis_size; /**< size of the array below */
- ir_node **mis; /**< the nodes of unit_t->nodes[] being part of the max independent set */
- set *changed_nodes; /**< contains node_stat_t's. */
+ struct list_head queue; /**< chaining of unit_t->queue */
+	const unit_t          *ou;            /**< the opt unit this qnode belongs to */
+ int color; /**< target color */
+ set *conflicts; /**< contains conflict_t's. All internal conflicts */
+ int mis_costs; /**< costs of nodes/copies in the mis. */
+ int mis_size; /**< size of the array below */
+ ir_node **mis; /**< the nodes of unit_t->nodes[] being part of the max independent set */
+ set *changed_nodes; /**< contains node_stat_t's. */
} qnode_t;
static pset *pinned_global; /**< optimized nodes should not be altered any more */
-static INLINE int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
+static inline int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
{
- if(env->ifg)
+ if (env->ifg)
return be_ifg_connected(env->ifg, a, b);
else
- return values_interfere(env->birg->lv, a, b);
+ return values_interfere(env->birg, a, b);
}
static int set_cmp_conflict_t(const void *x, const void *y, size_t size) {
const conflict_t *xx = x;
const conflict_t *yy = y;
- return ! (xx->n1 == yy->n1 && xx->n2 == yy->n2);
+ (void) size;
+
+ return xx->n1 != yy->n1 || xx->n2 != yy->n2;
}
/**
* If a local pinned conflict occurs, a new edge in the conflict graph is added.
* The next maximum independent set build, will regard it.
*/
-static INLINE void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
conflict_t c;
DBG((dbg, LEVEL_4, "\t %+F -- %+F\n", n1, n2));
- if ((int)n1 < (int)n2) {
+ if (get_irn_idx(n1) < get_irn_idx(n2)) {
c.n1 = n1;
c.n2 = n2;
} else {
/**
* Checks if two nodes are in a conflict.
*/
-static INLINE int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
conflict_t c;
/* search for live range interference */
if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2))
return 1;
/* search for recoloring conflicts */
- if ((int)n1 < (int)n2) {
+ if (get_irn_idx(n1) < get_irn_idx(n2)) {
c.n1 = n1;
c.n2 = n2;
} else {
c.n1 = n2;
c.n2 = n1;
}
- return (int) set_find(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c));
+ return set_find(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0;
}
static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) {
- return ((node_stat_t *)x)->irn != ((node_stat_t *)y)->irn;
+ (void) size;
+ return ((const node_stat_t*)x)->irn != ((const node_stat_t*)y)->irn;
}
/**
* Finds a node status entry of a node if existent. Otherwise return NULL
*/
-static INLINE node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
+static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
node_stat_t find;
find.irn = irn;
- return set_find(qn->changed_nodes, &find, sizeof(find), nodeset_hash(irn));
+ return set_find(qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
}
/**
* Finds a node status entry of a node if existent. Otherwise it will return
* an initialized new entry for this node.
*/
-static INLINE node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) {
+static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) {
node_stat_t find;
find.irn = irn;
find.new_color = NO_COLOR;
find.pinned_local = 0;
- return set_insert(qn->changed_nodes, &find, sizeof(find), nodeset_hash(irn));
+ return set_insert(qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
}
/**
* Returns the virtual color of a node if set before, else returns the real color.
*/
-static INLINE int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
- node_stat_t *found = qnode_find_node(qn, irn);
+static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
+ const node_stat_t *found = qnode_find_node(qn, irn);
if (found)
return found->new_color;
else
- return get_irn_col(qn->ou->co, irn);
+ return get_irn_col(irn);
}
/**
* Sets the virtual color of a node.
*/
-static INLINE void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) {
+static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) {
node_stat_t *found = qnode_find_or_insert_node(qn, irn);
found->new_color = color;
DBG((dbg, LEVEL_3, "\t col(%+F) := %d\n", irn, color));
* to the same optimization unit and has been optimized before the current
* processed node.
*/
-static INLINE int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
- node_stat_t *found = qnode_find_node(qn, irn);
+static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
+ const node_stat_t *found = qnode_find_node(qn, irn);
if (found)
return found->pinned_local;
else
* Local-pins a node, so optimizations of further nodes of the same opt unit
* can handle situations in which a color change would undo prior optimizations.
*/
-static INLINE void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
+static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
node_stat_t *found = qnode_find_or_insert_node(qn, irn);
found->pinned_local = 1;
if (found->new_color == NO_COLOR)
- found->new_color = get_irn_col(qn->ou->co, irn);
+ found->new_color = get_irn_col(irn);
}
*/
#define CHANGE_SAVE NULL
#define CHANGE_IMPOSSIBLE (ir_node *)1
-#define is_conflicting_node(n) (((int)n) > 1)
/**
* Performs virtual re-coloring of node @p n to color @p col. Virtual colors of
copy_opt_t *co = qn->ou->co;
const be_chordal_env_t *chordal_env = co->cenv;
const arch_register_class_t *cls = co->cls;
- const arch_env_t *arch_env = co->aenv;
int irn_col = qnode_get_new_color(qn, irn);
ir_node *sub_res, *curr;
be_ifg_t *ifg = chordal_env->ifg;
bitset_flip_all(free_cols);
/* Exclude colors not assignable to the irn */
- req = arch_get_register_req(arch_env, irn, -1);
+ req = arch_get_register_req_out(irn);
if (arch_register_req_is(req, limited)) {
bitset_t *limited = bitset_alloca(cls->n_regs);
rbitset_copy_to_bitset(req->limited, limited);
#endif /* SEARCH_FREE_COLORS */
/* If target color is not allocatable changing color is impossible */
- if (!arch_reg_is_allocatable(arch_env, irn, -1, arch_register_for_index(cls, col))) {
+ if (!arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, col))) {
DBG((dbg, LEVEL_3, "\t %+F impossible\n", irn));
return CHANGE_IMPOSSIBLE;
}
} else if (confl_node == CHANGE_IMPOSSIBLE) {
DBG((dbg, LEVEL_3, "\t Impossible --> remove from qnode\n"));
qnode_add_conflict(qn, test_node, test_node);
+ return 0;
} else {
if (qnode_is_pinned_local(qn, confl_node)) {
/* changing test_node would change back a node of current ou */
DBG((dbg, LEVEL_3, "\t Conflicting global --> remove from qnode\n"));
qnode_add_conflict(qn, test_node, test_node);
}
- }
-
- if (confl_node != CHANGE_SAVE)
return 0;
+ }
}
return 1;
}
* Determines a maximum weighted independent set with respect to
* the interference and conflict edges of all nodes in a qnode.
*/
-static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
+static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
bitset_t *curr, *best;
- int max, next, pos, curr_weight, best_weight = 0;
+ bitset_pos_t pos;
+ int max, next, curr_weight, best_weight = 0;
/* assign the nodes into two groups.
* safe: node has no interference, hence it is in every max stable set.
* unsafe: node has an interference
*/
- safe = alloca((ou->node_count-1) * sizeof(*safe));
- safe_costs = 0;
- safe_count = 0;
- unsafe = alloca((ou->node_count-1) * sizeof(*unsafe));
- unsafe_costs = alloca((ou->node_count-1) * sizeof(*unsafe_costs));
+ safe = ALLOCAN(ir_node*, ou->node_count - 1);
+ safe_costs = 0;
+ safe_count = 0;
+ unsafe = ALLOCAN(ir_node*, ou->node_count - 1);
+ unsafe_costs = ALLOCAN(int, ou->node_count - 1);
unsafe_count = 0;
for(i=1; i<ou->node_count; ++i) {
int is_safe = 1;
/**
* Creates a new qnode
*/
-static INLINE qnode_t *new_qnode(const unit_t *ou, int color) {
- qnode_t *qn = xmalloc(sizeof(*qn));
- qn->ou = ou;
- qn->color = color;
- qn->mis = xmalloc(ou->node_count * sizeof(*qn->mis));
- qn->conflicts = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS);
+static inline qnode_t *new_qnode(const unit_t *ou, int color) {
+ qnode_t *qn = XMALLOC(qnode_t);
+ qn->ou = ou;
+ qn->color = color;
+ qn->mis = XMALLOCN(ir_node*, ou->node_count);
+ qn->conflicts = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS);
qn->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
return qn;
}
/**
* Frees space used by a queue node
*/
-static INLINE void free_qnode(qnode_t *qn) {
+static inline void free_qnode(qnode_t *qn) {
del_set(qn->conflicts);
del_set(qn->changed_nodes);
xfree(qn->mis);
* Inserts a qnode in the sorted queue of the optimization unit. Queue is
* ordered by field 'size' (the size of the mis) in decreasing order.
*/
-static INLINE void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
+static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
struct list_head *lh;
if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) {
* nodes. (All other phi classes are reduced to this case.)
*/
static void ou_optimize(unit_t *ou) {
- int i;
- qnode_t *curr = NULL, *tmp;
- const arch_env_t *aenv = ou->co->aenv;
- const arch_register_class_t *cls = ou->co->cls;
- bitset_t *pos_regs = bitset_alloca(cls->n_regs);
+ qnode_t *curr = NULL;
+ qnode_t *tmp;
+ const arch_register_req_t *req;
+ bitset_t const* ignore;
+ bitset_pos_t n_regs;
+ bitset_pos_t idx;
+ int i;
DBG((dbg, LEVEL_1, "\tOptimizing unit:\n"));
for (i=0; i<ou->node_count; ++i)
/* init queue */
INIT_LIST_HEAD(&ou->queue);
- arch_get_allocatable_regs(aenv, ou->nodes[0], -1, pos_regs);
+ req = arch_get_register_req_out(ou->nodes[0]);
+ ignore = ou->co->cenv->ignore_colors;
+ n_regs = req->cls->n_regs;
+ if (arch_register_req_is(req, limited)) {
+ rawbs_base_t const* limited = req->limited;
- /* exclude ingore colors */
- bitset_andnot(pos_regs, ou->co->cenv->ignore_colors);
+ for (idx = 0; idx != n_regs; ++idx) {
+ if (bitset_is_set(ignore, idx))
+ continue;
+ if (!rbitset_is_set(limited, idx))
+ continue;
- assert(bitset_popcnt(pos_regs) != 0 && "No register is allowed for this node !!?");
+ ou_insert_qnode(ou, new_qnode(ou, idx));
+ }
+ } else {
+ for (idx = 0; idx != n_regs; ++idx) {
+ if (bitset_is_set(ignore, idx))
+ continue;
- /* create new qnode */
- bitset_foreach(pos_regs, i)
- ou_insert_qnode(ou, new_qnode(ou, i));
+ ou_insert_qnode(ou, new_qnode(ou, idx));
+ }
+ }
/* search best */
- while (!list_empty(&ou->queue)) {
+ for (;;) {
+ assert(!list_empty(&ou->queue));
/* get head of queue */
curr = list_entry_queue(ou->queue.next);
list_del(&curr->queue);