4 * @author Sebastian Hack
6 * Methods to compute when a value will be used again.
8 * Copyright (C) 2005 Universitaet Karlsruhe
9 * Released under the GPL
23 #include "irgraph_t.h"
24 #include "iredges_t.h"
31 #include "besched_t.h"
36 #define DBG_LEVEL SET_LEVEL_0
/* Hash-set entry type: caches, per (block, node) pair, when that value is
 * next used (fields of this struct are not visible in this chunk). */
38 typedef struct _be_use_t {
/* Fields of the analysis environment (the enclosing struct's header is not
 * visible here): the target architecture description and a debug module. */
47 const arch_env_t *arch_env;
48 DEBUG_ONLY(firm_dbg_module_t *dbg;)
/* Saturating(?) addition of two distance values — body not visible in this
 * chunk; presumably clamps the sum at USES_INFINITY instead of wrapping.
 * TODO(review): confirm against the full definition. */
51 static INLINE unsigned sadd(unsigned a, unsigned b)
/* Saturating(?) division of two distance values — body not visible in this
 * chunk; presumably handles USES_INFINITY operands specially.
 * TODO(review): confirm against the full definition. */
56 static INLINE unsigned sdiv(unsigned a, unsigned b)
/* Comparison callback for the uses hash set: two be_use_t entries are equal
 * iff both the block and the node pointers match.  Returns 0 on equality,
 * non-zero otherwise, as the set API expects.  The size argument n is
 * required by the callback signature but unused here. */
61 static int cmp_use(const void *a, const void *b, size_t n)
63 const be_use_t *p = a;
64 const be_use_t *q = b;
65 return !(p->bl == q->bl && p->irn == q->irn);
/* Look up the cached next-use record for (bl, def) in the uses set,
 * computing and inserting it on demand. */
68 static INLINE be_use_t *get_or_set_use(be_uses_t *uses,
69 const ir_node *bl, const ir_node *def, unsigned next_use)
/* Combine both key pointers into a single hash value for the set lookup. */
71 unsigned hash = HASH_COMBINE(HASH_PTR(bl), HASH_PTR(def));
/* Distance is measured from the first scheduled node of the block;
 * skip_from_uses = 0, so a use in that very first node counts. */
77 templ.next_use = be_get_next_use(uses, sched_first(bl), 0, def, 0);
/* set_insert returns the already-present entry if one exists. */
78 result = set_insert(uses->uses, &templ, sizeof(templ), hash);
/* Forward declaration: number of steps from `from` until `def` is used next
 * (USES_INFINITY when no further use is found — see get_next_use below);
 * skip_from_uses suppresses matching uses inside `from` itself. */
83 unsigned be_get_next_use(be_uses_t *uses, const ir_node *from,
84 unsigned from_step, const ir_node *def, int skip_from_uses);
/* Next-use distance of def measured from the start of block bl; the answer
 * comes from (and is cached in) the uses set. */
86 static unsigned get_next_use_bl(be_uses_t *uses, const ir_node *bl,
/* next_use argument 0: get_or_set_use computes the real distance itself. */
89 be_use_t *u = get_or_set_use(uses, bl, def, 0);
/* Recursive worker: walk the schedule starting at `from`, looking for the
 * next use of `def`; if none is found in this block, recurse into successor
 * blocks where def is live.  visited_nr marks blocks already seen during
 * this particular query.  (Several body lines are not visible in this
 * chunk — comments below cover only what is shown.) */
94 static unsigned get_next_use(be_uses_t *uses, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses, unsigned long visited_nr)
/* No use found yet; step counts scheduled nodes walked from `from`. */
96 unsigned next_use = USES_INFINITY;
97 unsigned step = from_step;
99 ir_node *bl = get_nodes_block(from);
101 const ir_edge_t *succ_edge;
/* Mark this block as handled for the current query. */
103 set_irn_visited(bl, visited_nr);
/* Scan the schedule from `from` onwards. */
105 sched_foreach_from(from, irn) {
/* Check each operand of irn for a use of def, unless we were asked to
 * ignore uses in the starting node. */
108 if (! skip_from_uses) {
109 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
110 ir_node *operand = get_irn_n(irn, i);
112 if (operand == def) {
113 DBG((uses->dbg, LEVEL_3, "found use of %+F at %+F\n", operand, irn));
123 /* FIXME: quick and dirty hack to prevent ignore nodes (like stack pointer) from being spilled */
124 return is_live_end(bl, def) ? step : USES_INFINITY;
/* No use in this block: take the minimum next-use over all successor
 * blocks not yet visited in this query where def is still live. */
126 next_use = USES_INFINITY;
127 foreach_block_succ(bl, succ_edge) {
128 const ir_node *succ_bl = succ_edge->src;
129 if(get_irn_visited(succ_bl) < visited_nr && (is_live_in(succ_bl, def) || (get_irn_arity(succ_bl) > 1 && is_live_end(bl, def)))) {
130 unsigned next = get_next_use_bl(uses, succ_bl, def);
132 DBG((uses->dbg, LEVEL_2, "\t\tnext use in succ %+F: %d\n", succ_bl, next));
133 next_use = MIN(next_use, next);
/* NOTE(review): plain addition may wrap when next_use == USES_INFINITY;
 * the sadd() helper above looks intended for exactly this — confirm. */
138 return next_use + step;
/* Public entry point: start a fresh traversal by bumping the graph's visited
 * counter, then delegate to the recursive worker. */
141 unsigned be_get_next_use(be_uses_t *uses, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
/* A new visited number distinguishes this query's marks from earlier ones. */
143 unsigned long visited_nr = get_irg_visited(uses->irg) + 1;
145 set_irg_visited(uses->irg, visited_nr);
146 return get_next_use(uses, from, from_step, def, skip_from_uses, visited_nr);
/* Allocate and initialise a be_uses_t analysis environment for irg (not all
 * of the initialisation is visible in this chunk). */
150 be_uses_t *be_begin_uses(ir_graph *irg, const arch_env_t *arch_env, const arch_register_class_t *cls)
152 be_uses_t *uses = xmalloc(sizeof(uses[0]));
156 uses->arch_env = arch_env;
/* Hash set of be_use_t entries, compared by cmp_use; 512 initial slots. */
157 uses->uses = new_set(cmp_use, 512);
159 FIRM_DBG_REGISTER(uses->dbg, "firm.be.uses");
/* Tear down an environment created by be_begin_uses — body not visible in
 * this chunk; presumably frees the set and the struct.  TODO(review):
 * confirm against the full definition. */
164 void be_end_uses(be_uses_t *uses)
/* qsort-style comparator ordering entries ascending by their time field.
 * NOTE(review): the subtraction idiom can wrap for large or negative time
 * values — confirm the value range before relying on this ordering. */
170 int loc_compare(const void *a, const void *b)
174 return p->time - q->time;