2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Methods to compute when a value will be used again.
23 * @author Sebastian Hack, Matthias Braun
40 #include "irgraph_t.h"
41 #include "iredges_t.h"
/* Per-(block, def) record of the next use of a value, stored in the uses
 * hash set.  NOTE(review): this view is elided -- the be_use_t field list is
 * missing here, and the two fields below (visited_counter, dbg) look like
 * they belong to the analysis-environment struct (be_uses_t), not to
 * be_use_t itself; confirm against the full source. */
53 typedef struct be_use_t {
/* counter bumped per top-level query; used to invalidate cached results */
65 unsigned visited_counter;
/* debug module handle, present only in debug builds */
66 DEBUG_ONLY(firm_dbg_module_t *dbg;)
/**
 * Set compare callback for be_use_t entries.
 * Two entries are considered equal (return 0) iff both their block and node
 * pointers match; comparison is by pointer identity.  The size parameter n
 * is part of the set-callback signature and is intentionally unused.
 */
69 static int cmp_use(const void *a, const void *b, size_t n)
71 const be_use_t *p = (const be_use_t*)a;
72 const be_use_t *q = (const be_use_t*)b;
/* returns 0 on equality, nonzero otherwise, as the set interface expects */
75 return !(p->block == q->block && p->node == q->node);
/* Forward declaration: get_next_use and get_or_set_use_block are mutually
 * recursive (the block-cache lookup may trigger a fresh search). */
78 static be_next_use_t get_next_use(be_uses_t *env, ir_node *from,
79 unsigned from_step, const ir_node *def,
/**
 * Look up (or lazily create and compute) the cached next-use record for a
 * (block, def) pair in env->uses.
 *
 * A placeholder entry (next_use = USES_INFINITY, outermost_loop = -1) is
 * inserted BEFORE the recursive search runs, so that a cyclic control-flow
 * walk hitting the same (block, def) terminates instead of recursing forever.
 * The result is recomputed when the cached entry is still provisional
 * (outermost_loop < 0) and was not already visited in the current query
 * round (visited < env->visited_counter).
 *
 * NOTE(review): this view is elided -- the parameter list tail, the hash-set
 * miss branch and the function epilogue are not visible here.
 */
82 static const be_use_t *get_or_set_use_block(be_uses_t *env,
/* hash over both keys so distinct (block, def) pairs spread in the set */
86 unsigned hash = HASH_COMBINE(hash_irn(block), hash_irn(def));
92 result = (be_use_t*)set_find(env->uses, &temp, sizeof(temp), hash);
95 // insert templ first as we might end in a loop in the get_next_use
97 temp.next_use = USES_INFINITY;
/* -1 marks "not yet computed / provisional" */
98 temp.outermost_loop = -1;
100 result = (be_use_t*)set_insert(env->uses, &temp, sizeof(temp), hash);
/* recompute only provisional entries not yet visited in this query round */
103 if (result->outermost_loop < 0 && result->visited < env->visited_counter) {
104 be_next_use_t next_use;
/* stamp first, so a recursive re-entry sees this entry as in-progress */
106 result->visited = env->visited_counter;
107 next_use = get_next_use(env, sched_first(block), 0, def, 0);
/* only a definitive result (outermost_loop >= 0) is cached */
108 if (next_use.outermost_loop >= 0) {
109 result->next_use = next_use.time;
110 result->outermost_loop = next_use.outermost_loop;
111 DBG((env->dbg, LEVEL_5, "Setting nextuse of %+F in block %+F to %u (outermostloop %d)\n", def, block, result->next_use, result->outermost_loop));
/**
 * Test whether @p def is used as a Phi argument coming from @p block, i.e.
 * whether a Phi in block's (single) control-flow successor reads def along
 * the edge from block.
 *
 * Visible logic: bail out unless block has exactly one outgoing block edge,
 * fetch that successor, find the cfg-predecessor index of block within it,
 * then scan the successor's schedule checking Phi inputs at that index.
 * NOTE(review): elided view -- the early-return bodies, the loop breaks and
 * the final return are not visible; confirm the exact return convention.
 */
118 static int be_is_phi_argument(const ir_node *block, const ir_node *def)
121 ir_node *succ_block = NULL;
/* no successor at all -> cannot be a phi argument */
125 if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) < 1)
/* more than one successor: def could not feed a unique phi edge here */
127 if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) != 1)
131 succ_block = get_first_block_succ(block);
133 arity = get_Block_n_cfgpreds(succ_block);
/* locate which cfg-predecessor slot of succ_block corresponds to block */
137 for (i = 0; i < arity; ++i) {
138 if (get_Block_cfgpred_block(succ_block, i) == block)
/* walk the successor's schedule; phis sit at the start of a block */
143 sched_foreach(succ_block, node) {
/* the phi input flowing in over the edge from block */
149 arg = get_irn_n(node, i);
158 * Retrieve the scheduled index (the "step") of this node in its
161 * @param node the node
/* step numbers are stashed in the node's generic link field (see set_step) */
163 static inline unsigned get_step(const ir_node *node)
165 return (unsigned)PTR_TO_INT(get_irn_link(node));
169 * Set the scheduled index (the "step") of this node in its
172 * @param node the node
173 * @param step the scheduled index of the node
/* counterpart of get_step: encode the step into the node's link pointer */
175 static inline void set_step(ir_node *node, unsigned step)
177 set_irn_link(node, INT_TO_PTR(step));
/**
 * Core next-use search: starting at node @p from (schedule position
 * @p from_step) find the distance in schedule steps to the next use of
 * @p def, first inside the current block, then -- if no local use follows --
 * across control-flow successor blocks via the cached per-block results.
 *
 * Returns a be_next_use_t carrying the distance (time), the outermost loop
 * depth along the way, and the node/block the use occurs before.  An
 * outermost_loop of -1 marks a provisional result that must not be cached
 * (see get_or_set_use_block and the found_visited handling below).
 *
 * NOTE(review): the view of this function is elided; several statements
 * (increments, break/continue, closing braces, some declarations such as
 * next_use/found_use) are missing between the visible lines.
 */
180 static be_next_use_t get_next_use(be_uses_t *env, ir_node *from,
181 unsigned from_step, const ir_node *def,
184 unsigned step = from_step;
185 ir_node *block = get_nodes_block(from);
189 unsigned next_use_step;
190 const ir_edge_t *edge;
/* skip_from_uses is used as an arithmetic 0/1 value below, hence the assert */
192 assert(skip_from_uses == 0 || skip_from_uses == 1);
193 if (skip_from_uses) {
/* caller asked to ignore uses at 'from' itself: start at its successor */
194 from = sched_next(from);
/* --- phase 1: search for a use of def later in the same block --- */
198 next_use_step = INT_MAX;
199 timestep = get_step(from);
/* scan all users of def; only those scheduled in this block count */
200 foreach_out_edge(def, edge) {
201 ir_node *node = get_edge_src_irn(edge);
206 if (get_nodes_block(node) != block)
211 node_step = get_step(node);
/* users scheduled before 'from' are in the past; skip them */
212 if (node_step < timestep)
/* keep the earliest remaining use in this block */
214 if (node_step < next_use_step) {
216 next_use_step = node_step;
/* local use found: distance is simply the step difference */
220 if (next_use != NULL) {
221 be_next_use_t result;
222 result.time = next_use_step - timestep + skip_from_uses;
223 result.outermost_loop = get_loop_depth(get_irn_loop(block));
224 result.before = next_use;
/* --- phase 2: no local use; account for the rest of this block --- */
228 node = sched_last(block);
229 step = get_step(node) + 1 + timestep + skip_from_uses;
/* a phi argument in the successor counts as an immediate use at block end */
231 if (be_is_phi_argument(block, def)) {
232 // TODO we really should continue searching the uses of the phi,
233 // as a phi isn't a real use that implies a reload (because we could
234 // easily spill the whole phi)
236 be_next_use_t result;
238 result.outermost_loop = get_loop_depth(get_irn_loop(block));
239 result.before = block;
/* --- phase 3: take the minimum next-use over all successor blocks --- */
244 unsigned next_use = USES_INFINITY;
246 be_next_use_t result;
247 ir_loop *loop = get_irn_loop(block);
248 int loopdepth = get_loop_depth(loop);
/* set when a successor's cached entry was provisional (in-progress) */
249 int found_visited = 0;
251 ir_graph *irg = get_irn_irg(block);
252 ir_node *startblock = get_irg_start_block(irg);
254 result.before = NULL;
255 outermost_loop = loopdepth;
256 foreach_block_succ(block, edge) {
258 const ir_node *succ_block = get_edge_src_irn(edge);
/* the start block is never a real continuation; skip it */
262 if (succ_block == startblock)
265 DBG((env->dbg, LEVEL_5, "Checking succ of block %+F: %+F (for use of %+F)\n", block, succ_block, def));
/* def dead on this edge -> this successor contributes nothing */
266 if (!be_is_live_in(env->lv, succ_block, def)) {
267 //next_use = USES_INFINITY;
268 DBG((env->dbg, LEVEL_5, " not live in\n"));
/* cached (or freshly computed) per-block next-use of def in succ_block */
272 use = get_or_set_use_block(env, succ_block, def);
273 DBG((env->dbg, LEVEL_5, "Found %u (loopdepth %d) (we're in block %+F)\n", use->next_use,
274 use->outermost_loop, block));
275 if (USES_IS_INFINITE(use->next_use)) {
/* provisional entry: a search for this pair is already in progress */
276 if (use->outermost_loop < 0) {
283 use_dist = use->next_use;
/* penalize leaving a loop: uses outside the loop are "further away" */
285 succ_loop = get_irn_loop(succ_block);
286 if (get_loop_depth(succ_loop) < loopdepth) {
287 unsigned factor = (loopdepth - get_loop_depth(succ_loop)) * 5000;
/* NOTE(review): format-string bug -- three conversions (%d -> %d (%u))
 * but only one vararg (factor) is passed; undefined behavior when this
 * debug level is enabled.  Fix the specifiers/arguments to match. */
288 DBG((env->dbg, LEVEL_5, "Increase usestep because of loop out edge %d -> %d (%u)\n", factor));
289 // TODO we should use the number of nodes in the loop or so...
/* keep the nearest successor use seen so far */
293 if (use_dist < next_use) {
295 outermost_loop = use->outermost_loop;
296 result.before = use->node;
300 if (loopdepth < outermost_loop)
301 outermost_loop = loopdepth;
/* distance = steps to end of this block + best successor distance */
303 result.time = next_use + step;
304 result.outermost_loop = outermost_loop;
306 if (!found_use && found_visited) {
307 // the current result is correct for the current search, but isn't
308 // generally correct, so mark it
/* -1 = provisional: callers must not cache this value */
309 result.outermost_loop = -1;
311 DBG((env->dbg, LEVEL_5, "Result: %d (outerloop: %d)\n", result.time, result.outermost_loop));
/**
 * Public entry point of the next-use analysis: compute the next use of
 * @p def when standing at @p from / @p from_step.
 *
 * Bumps the per-query visited counter first so that cached per-block
 * entries from previous queries are re-validated, then delegates to the
 * internal recursive search.
 */
316 be_next_use_t be_get_next_use(be_uses_t *env, ir_node *from,
317 unsigned from_step, const ir_node *def,
/* new query round: invalidates 'visited' stamps of cached entries */
320 env->visited_counter++;
321 return get_next_use(env, from, from_step, def, skip_from_uses);
325 * Pre-block walker, set the step number for every scheduled node
326 * in increasing order.
328 * After this, two scheduled nodes can be easily compared for the
329 * "scheduled earlier in block" property.
/* data is the unused walker payload; steps are stored via set_step (link) */
331 static void set_sched_step_walker(ir_node *block, void *data)
337 sched_foreach(block, node) {
/* NOTE(review): elided view -- the per-iteration step increment and any
 * Phi special-casing between these lines are not visible here. */
338 set_step(node, step);
/**
 * Create and initialize a next-use analysis environment for @p irg.
 *
 * Precomputes the schedule step of every node (stored in the nodes' link
 * fields), allocates the (block, def) result cache, and registers the
 * debug module.  @p lv supplies liveness information for the inter-block
 * search.  NOTE(review): elided view -- env field assignments (irg, lv)
 * and the return statement are not visible here.
 */
345 be_uses_t *be_begin_uses(ir_graph *irg, const be_lv_t *lv)
347 be_uses_t *env = XMALLOC(be_uses_t);
351 //set_using_irn_link(irg);
353 /* precalculate sched steps */
354 irg_block_walk_graph(irg, set_sched_step_walker, NULL, NULL);
/* result cache keyed by (block, def); cmp_use defines entry equality */
356 env->uses = new_set(cmp_use, 512);
359 env->visited_counter = 0;
360 FIRM_DBG_REGISTER(env->dbg, "firm.be.uses");
/**
 * Tear down a next-use analysis environment created by be_begin_uses.
 * NOTE(review): elided view -- the cleanup body (freeing env->uses and env)
 * is not visible here; confirm against the full source.
 */
365 void be_end_uses(be_uses_t *env)
367 //clear_using_irn_link(env->irg);