#include "beirgmod.h"
#include "bearch.h"
#include "benodesets.h"
#include "beuses_t.h"
#define DBG_LEVEL SET_LEVEL_0
const ir_node *bl;
const ir_node *irn;
unsigned next_use;
- int is_set;
} be_use_t;
struct _be_uses_t {
set *uses;
ir_graph *irg;
+ const be_lv_t *lv;
const arch_env_t *arch_env;
DEBUG_ONLY(firm_dbg_module_t *dbg;)
};
/* The former sadd()/sdiv() helpers were removed by this change: the patch
 * markers showed both functions deleted and no remaining caller is visible
 * in this file. Re-introduce them locally if a future change needs
 * saturating arithmetic again. */
/* Set comparison callback for the uses set: compares two be_use_t entries.
 * NOTE(review): the body here looks truncated by the chunk view — `b` and
 * `n` are unused and there is no return statement visible; presumably the
 * full version compares (bl, irn) of both entries. Confirm against the
 * complete file before relying on this excerpt. */
static int cmp_use(const void *a, const void *b, size_t n)
{
	const be_use_t *p = a;
}
/**
 * Look up the cached next-use entry for @p def in block @p bl, creating
 * and computing it on first request.
 *
 * The entry is keyed by the (bl, def) pair; set_insert() returns the
 * already-present entry if one exists, so the computed template is only
 * stored on the first query for this pair.
 *
 * @param uses      the uses environment
 * @param bl        the block the query refers to
 * @param def       the defined/used node
 * @param next_use  unused; kept for interface compatibility with callers
 * @return the (possibly freshly inserted) cache entry
 */
static INLINE be_use_t *get_or_set_use(be_uses_t *uses,
		const ir_node *bl, const ir_node *def, unsigned next_use)
{
	unsigned hash = HASH_COMBINE(nodeset_hash(bl), nodeset_hash(def));
	be_use_t templ;
	be_use_t* result;

	templ.bl  = bl;
	templ.irn = def;
	/* Compute the next use by scanning from the block entry.
	 * NOTE(review): this is evaluated even when the entry is already
	 * cached (set_insert discards the template then) — presumably
	 * acceptable cost; confirm there is no recursion hazard via
	 * be_get_next_use -> get_next_use_bl -> here. */
	templ.next_use = be_get_next_use(uses, sched_first(bl), 0, def, 0);
	result = set_insert(uses->uses, &templ, sizeof(templ), hash);

	return result;
}
unsigned be_get_next_use(be_uses_t *uses, const ir_node *from,
/**
 * Get the distance (in scheduling steps) from the start of block @p bl
 * to the next use of @p def, using the cache in @p uses.
 *
 * @param uses  the uses environment
 * @param bl    the block to query
 * @param def   the node whose next use is sought
 * @return the cached next-use distance (USES_INFINITY if never used)
 */
static unsigned get_next_use_bl(be_uses_t *uses, const ir_node *bl,
		const ir_node *def)
{
	be_use_t *u = get_or_set_use(uses, bl, def, 0);

	return u->next_use;
}
sched_foreach_from(from, irn) {
int i, n;
- if(!skip_from_uses) {
- for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+ if (! skip_from_uses) {
+ for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *operand = get_irn_n(irn, i);
- if(operand == def) {
+ if (operand == def) {
DBG((uses->dbg, LEVEL_3, "found use of %+F at %+F\n", operand, irn));
return step;
}
step++;
}
+ /* FIXME: quick and dirty hack to prevent ignore nodes (like stack pointer) from being spilled */
+ return be_is_live_end(uses->lv, bl, def) ? step : USES_INFINITY;
+
next_use = USES_INFINITY;
foreach_block_succ(bl, succ_edge) {
const ir_node *succ_bl = succ_edge->src;
- if(get_irn_visited(succ_bl) < visited_nr && (is_live_in(succ_bl, def) || (get_irn_arity(succ_bl) > 1 && is_live_end(bl, def)))) {
+ if(get_irn_visited(succ_bl) < visited_nr && (be_is_live_in(uses->lv, succ_bl, def) || (get_irn_arity(succ_bl) > 1 && be_is_live_end(uses->lv, bl, def)))) {
unsigned next = get_next_use_bl(uses, succ_bl, def);
DBG((uses->dbg, LEVEL_2, "\t\tnext use in succ %+F: %d\n", succ_bl, next));
}
-be_uses_t *be_begin_uses(ir_graph *irg, const arch_env_t *arch_env, const arch_register_class_t *cls)
+be_uses_t *be_begin_uses(ir_graph *irg, const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls)
{
be_uses_t *uses = xmalloc(sizeof(uses[0]));
uses->arch_env = arch_env;
uses->uses = new_set(cmp_use, 512);
uses->irg = irg;
+ uses->lv = lv;
FIRM_DBG_REGISTER(uses->dbg, "firm.be.uses");
return uses;