#include "config.h"
#endif
-#ifdef HAVE_ALLOCA_H
-#include <alloca.h>
-#endif
-#ifdef HAVE_MALLOC_H
-#include <malloc.h>
-#endif
-
#include "debug.h"
#include "xmalloc.h"
#include "becopyopt_t.h"
#include "becopystat.h"
+#include "benodesets.h"
#include "bitset.h"
-#include "bearch.h"
+#include "raw_bitset.h"
+#include "xmalloc.h"
-static firm_dbg_module_t *dbg = NULL;
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
#define SEARCH_FREE_COLORS
-#define SLOTS_PINNED_GLOBAL 256
+#define SLOTS_PINNED_GLOBAL 64
#define SLOTS_CONFLICTS 8
#define SLOTS_CHANGED_NODES 32
-#define MIN(a,b) ((a<b)?(a):(b))
#define list_entry_queue(lh) list_entry(lh, qnode_t, queue)
-#define HASH_CONFLICT(c) (HASH_PTR(c.n1) ^ HASH_PTR(c.n2))
+#define HASH_CONFLICT(c) (nodeset_hash(c.n1) ^ nodeset_hash(c.n2))
/**
* Modeling additional conflicts between nodes. NOT live range interference
static pset *pinned_global; /**< optimized nodes should not be altered any more */
+static INLINE int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
+{
+ if(env->ifg)
+ return be_ifg_connected(env->ifg, a, b);
+ else
+ return values_interfere(env->birg->lv, a, b);
+}
+
static int set_cmp_conflict_t(const void *x, const void *y, size_t size) {
const conflict_t *xx = x;
const conflict_t *yy = y;
static INLINE node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
node_stat_t find;
find.irn = irn;
- return set_find(qn->changed_nodes, &find, sizeof(find), HASH_PTR(irn));
+ return set_find(qn->changed_nodes, &find, sizeof(find), nodeset_hash(irn));
}
/**
find.irn = irn;
find.new_color = NO_COLOR;
find.pinned_local = 0;
- return set_insert(qn->changed_nodes, &find, sizeof(find), HASH_PTR(irn));
+ return set_insert(qn->changed_nodes, &find, sizeof(find), nodeset_hash(irn));
}
/**
*/
if (irn != trigger) {
bitset_t *free_cols = bitset_alloca(cls->n_regs);
- arch_register_req_t req;
+ const arch_register_req_t *req;
ir_node *curr;
int free_col;
/* Get all possible colors */
- arch_put_non_ignore_regs(arch_env, cls, free_cols);
+ bitset_copy(free_cols, co->cenv->ignore_colors);
+ bitset_flip_all(free_cols);
/* Exclude colors not assignable to the irn */
- arch_get_register_req(arch_env, &req, irn, -1);
- if (arch_register_req_is(&req, limited)) {
+ req = arch_get_register_req(arch_env, irn, -1);
+ if (arch_register_req_is(req, limited)) {
bitset_t *limited = bitset_alloca(cls->n_regs);
- req.limited(req.limited_env, limited);
+ rbitset_copy_to_bitset(req->limited, limited);
bitset_and(free_cols, limited);
}
qnode_t *qn = xmalloc(sizeof(*qn));
qn->ou = ou;
qn->color = color;
- qn->mis = malloc(ou->node_count * sizeof(*qn->mis));
+ qn->mis = xmalloc(ou->node_count * sizeof(*qn->mis));
qn->conflicts = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS);
qn->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
return qn;
const arch_env_t *aenv = ou->co->aenv;
const arch_register_class_t *cls = ou->co->cls;
bitset_t *pos_regs = bitset_alloca(cls->n_regs);
- bitset_t *ign_regs = bitset_alloca(cls->n_regs);
DBG((dbg, LEVEL_1, "\tOptimizing unit:\n"));
for (i=0; i<ou->node_count; ++i)
arch_get_allocatable_regs(aenv, ou->nodes[0], -1, pos_regs);
	/* exclude ignore colors */
- arch_put_non_ignore_regs(aenv, cls, ign_regs);
- bitset_and(pos_regs, ign_regs);
+ bitset_andnot(pos_regs, ou->co->cenv->ignore_colors);
+
+ assert(bitset_popcnt(pos_regs) != 0 && "No register is allowed for this node !!?");
/* create new qnode */
bitset_foreach(pos_regs, i)
int co_solve_heuristic(copy_opt_t *co) {
unit_t *curr;
- dbg = firm_dbg_register("ir.be.copyoptheur");
+ FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur");
+
+ ASSERT_OU_AVAIL(co);
pinned_global = pset_new_ptr(SLOTS_PINNED_GLOBAL);
list_for_each_entry(unit_t, curr, &co->units, units)
ou_optimize(curr);
del_pset(pinned_global);
-
return 0;
}