4 * @author Sebastian Hack, Matthias Braun
6 * Methods to compute when a value will be used again.
8 * Copyright (C) 2005 Universitaet Karlsruhe
9 * Released under the GPL
26 #include "irgraph_t.h"
27 #include "iredges_t.h"
34 #include "besched_t.h"
38 #include "benodesets.h"
40 #define SCAN_INTERBLOCK_USES
/* Hash-set entry caching the next-use distance of a value as seen from a
 * given block.  NOTE(review): this is a sampled excerpt — several struct
 * members between these lines are not visible here; confirm against the
 * full file before relying on the layout. */
42 typedef struct _be_use_t {
/* Execution-frequency information, used below to prefer the most
 * frequently executed successor block when scanning across blocks. */
51 	const ir_exec_freq *execfreqs;
/* Debug-module handle; compiled in only for debug builds. */
53 	DEBUG_ONLY(firm_dbg_module_t *dbg;)
/* Comparison callback for the uses hash set: two entries are considered
 * equal iff both their block and node pointers match.  Returns 0 on
 * equality (firm `set` API convention: non-zero means "different").
 * The size parameter n is required by the callback signature but unused. */
56 static int cmp_use(const void *a, const void *b, size_t n)
58 	const be_use_t *p = a;
59 	const be_use_t *q = b;
60 	return !(p->block == q->block && p->node == q->node);
/* Look up the cached next-use distance of `def` at the start of `block`;
 * compute and cache it on a miss.
 * The entry is inserted with USES_INFINITY *before* the recursive
 * be_get_next_use call, so that a cyclic query (e.g. through a loop)
 * terminates by finding the provisional entry instead of recursing
 * forever — see the comment on the insert below. */
63 static const be_use_t *get_or_set_use_block(be_uses_t *uses,
/* Hash over both keys, matching the equality used in cmp_use. */
67 	unsigned hash = HASH_COMBINE(nodeset_hash(block), nodeset_hash(def));
73 	result = set_find(uses->uses, &temp, sizeof(temp), hash);
76 	// insert templ first as we might end in a loop in the get_next_use
78 	temp.next_use = USES_INFINITY;
79 	result = set_insert(uses->uses, &temp, sizeof(temp), hash);
/* Now compute the real distance, starting at the first scheduled node
 * of the block with step 0. */
81 	result->next_use = be_get_next_use(uses, sched_first(block), 0, def, 0);
/* Decide whether `node` counts as a "real" use, i.e. one that would force
 * the value into a register (and hence a reload if spilled).
 * NOTE(review): the body is not fully visible in this excerpt; judging from
 * the comment below, phi handling is deliberately disabled — verify against
 * the full file. */
87 static int is_real_use(const ir_node *node)
91 	/* we don't check for phi loops yet, so don't enable this
/* Compute the distance (in scheduling steps) from `from` to the next use
 * of `def`, or USES_INFINITY if `def` is never used again.
 *
 * Strategy visible in this excerpt:
 *  1. Walk the schedule of `from`'s block looking for a node that has
 *     `def` as an operand.
 *  2. If that node is not a "real" use (is_real_use), recurse with the
 *     user itself as the new def — the value is merely forwarded.
 *  3. If `def` is live at the end of the block, scan the successor blocks
 *     (only with SCAN_INTERBLOCK_USES) and take the cached distance of the
 *     successor with the highest execution frequency.
 *  4. Otherwise the value is dead: return USES_INFINITY.
 *
 * NOTE(review): several lines of this function are missing from the
 * excerpt; the summary above covers only what is visible. */
99 unsigned be_get_next_use(be_uses_t *uses, const ir_node *from,
100                          unsigned from_step, const ir_node *def,
103 	unsigned step = from_step;
104 	ir_node *block = get_nodes_block(from);
106 	const ir_edge_t *edge;
110 		from = sched_next(from);
/* Linear scan over the remaining schedule of this block. */
113 	sched_foreach_from(from, node) {
116 		arity = get_irn_arity(node);
117 		for (i = 0; i < arity; ++i) {
118 			const ir_node *operand = get_irn_n(node, i);
120 			if (operand == def) {
121 				DBG((uses->dbg, LEVEL_3, "found use of %+F at %+F\n", operand, node));
/* A non-real user (e.g. one that just forwards the value) does not end
 * the search: continue looking for uses of the user itself. */
123 				if(!is_real_use(node)) {
124 					return be_get_next_use(uses, node, step, node, 1);
/* No use inside this block — check whether the value survives it. */
134 	if(be_is_live_end(uses->lv, block, def)) {
135 		// TODO we really should continue searching the uses of the phi,
136 		// as a phi isn't a real use that implies a reload (because we could
137 		// easily spill the whole phi)
141 #ifdef SCAN_INTERBLOCK_USES
143 		double best_execfreq = -1;
144 		unsigned next_use = USES_INFINITY;
/* Heuristic: among all successors, trust the distance seen on the most
 * frequently executed one (a weighted average is commented out below). */
146 		foreach_block_succ(block, edge) {
148 			const ir_node *succ_block = get_edge_src_irn(edge);
149 			double execfreq = get_block_execfreq(uses->execfreqs, succ_block);
151 			//execfreq_sum += execfreq;
153 			if(execfreq > best_execfreq) {
154 				best_execfreq = execfreq;
/* Value not live into this successor: no use reachable this way. */
156 				if(!be_is_live_in(uses->lv, succ_block, def)) {
157 					next_use = USES_INFINITY;
/* Cached (or freshly computed) distance from the successor's start. */
161 				use = get_or_set_use_block(uses, succ_block, def);
162 				//if(USES_IS_INFINITE(use->next_use))
165 				next_use = use->next_use;
168 			//next_use += use->next_use / execfreq;
172 			return USES_INFINITY;*/
174 		//next_use /= execfreq_sum;
/* NOTE(review): if next_use is USES_INFINITY, adding step here may wrap;
 * presumably callers only compare magnitudes — confirm against
 * USES_INFINITY's definition in the header. */
176 		return ((unsigned) next_use) + step;
/* Dead past this point: no further use exists. */
179 	return USES_INFINITY;
/* Create a be_uses_t environment for `irg`: allocates the struct, creates
 * the (initially 512-slot) hash set of cached per-block next-use entries,
 * and stores the execution-frequency info used for successor selection.
 * Ownership: the caller must release the result with be_end_uses().
 * NOTE(review): lines storing irg/lv are not visible in this excerpt. */
183 be_uses_t *be_begin_uses(ir_graph *irg, const ir_exec_freq *execfreqs, const be_lv_t *lv)
185 	be_uses_t *uses = xmalloc(sizeof(uses[0]));
189 	uses->uses = new_set(cmp_use, 512);
191 	uses->execfreqs = execfreqs;
/* Register the debug module used by the DBG() traces above. */
193 	FIRM_DBG_REGISTER(uses->dbg, "firm.be.uses");
198 void be_end_uses(be_uses_t *uses)