Fixed size_t-related warnings by isolating the PTR_TO_INT macros.
[libfirm] / ir / be / beuses.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       Methods to compute when a value will be used again.
23  * @author      Sebastian Hack, Matthias Braun
24  * @date        27.06.2005
25  * @version     $Id$
26  */
27 #include "config.h"
28
29 #include <limits.h>
30 #include <stdlib.h>
31
32 #include "config.h"
33 #include "obst.h"
34 #include "pmap.h"
35 #include "debug.h"
36
37 #include "irgwalk.h"
38 #include "irnode_t.h"
39 #include "ircons_t.h"
40 #include "irgraph_t.h"
41 #include "iredges_t.h"
42 #include "irdom_t.h"
43
44 #include "be_t.h"
45 #include "beutil.h"
46 #include "belive_t.h"
47 #include "benode.h"
48 #include "besched.h"
49 #include "beirgmod.h"
50 #include "bearch.h"
51 #include "beuses.h"
52
/**
 * Cached next-use information for one value at the start of one block.
 * Entries are keyed by the (block, node) pair (see cmp_use()).
 */
typedef struct be_use_t {
	const ir_node *block;   /**< the block this entry describes */
	const ir_node *node;    /**< the value (definition) whose next use is cached */
	int outermost_loop;     /**< outermost loop depth of the next use;
	                             < 0 means "not (yet) valid" (see get_or_set_use_block()) */
	unsigned next_use;      /**< distance in steps to the next use;
	                             USES_INFINITY if there is none */
	unsigned visited;       /**< visited-counter snapshot, compared against
	                             be_uses_t::visited_counter to detect stale entries */
} be_use_t;
60
/**
 * Environment for the next-use analysis of one graph.
 */
struct be_uses_t {
	set *uses;                /**< cache of be_use_t entries, keyed by (block, def) */
	ir_graph *irg;            /**< the graph being analysed */
	const be_lv_t *lv;        /**< liveness information for the graph */
	unsigned visited_counter; /**< bumped on every be_get_next_use() query;
	                               invalidates per-entry visited marks */
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
};
68
69 static int cmp_use(const void *a, const void *b, size_t n)
70 {
71         const be_use_t *p = (const be_use_t*)a;
72         const be_use_t *q = (const be_use_t*)b;
73         (void) n;
74
75         return !(p->block == q->block && p->node == q->node);
76 }
77
78 static be_next_use_t get_next_use(be_uses_t *env, ir_node *from,
79                                                                   unsigned from_step, const ir_node *def,
80                                                                   int skip_from_uses);
81
/**
 * Return the cached next-use entry for @p def at the beginning of @p block,
 * computing and caching it on demand.
 *
 * @param env    the uses environment
 * @param block  the block at whose start the next use is queried
 * @param def    the value whose next use is sought
 * @return cached entry; outermost_loop < 0 marks an entry whose value is
 *         only valid for the current search (see get_next_use())
 */
static const be_use_t *get_or_set_use_block(be_uses_t *env,
                                            const ir_node *block,
                                            const ir_node *def)
{
	unsigned hash = HASH_COMBINE(hash_irn(block), hash_irn(def));
	be_use_t temp;
	be_use_t* result;

	temp.block = block;
	temp.node = def;
	result = (be_use_t*)set_find(env->uses, &temp, sizeof(temp), hash);

	if (result == NULL) {
		// insert templ first as we might end in a loop in the get_next_use
		// call otherwise
		temp.next_use = USES_INFINITY;
		temp.outermost_loop = -1;
		temp.visited = 0;
		result = (be_use_t*)set_insert(env->uses, &temp, sizeof(temp), hash);
	}

	/* (re)compute the entry if it is still marked invalid (outermost_loop < 0)
	 * and has not already been visited during the current query round */
	if (result->outermost_loop < 0 && result->visited < env->visited_counter) {
		be_next_use_t next_use;

		/* mark as visited BEFORE recursing, so cyclic control flow
		 * terminates at this entry instead of looping forever */
		result->visited = env->visited_counter;
		next_use = get_next_use(env, sched_first(block), 0, def, 0);
		if (next_use.outermost_loop >= 0) {
			result->next_use = next_use.time;
			result->outermost_loop = next_use.outermost_loop;
			DBG((env->dbg, LEVEL_5, "Setting nextuse of %+F in block %+F to %u (outermostloop %d)\n", def, block, result->next_use, result->outermost_loop));
		}
	}

	return result;
}
117
118 static int be_is_phi_argument(const ir_node *block, const ir_node *def)
119 {
120         ir_node *node;
121         ir_node *succ_block = NULL;
122         int arity, i;
123
124 #if 1
125         if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) < 1)
126 #else
127         if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) != 1)
128 #endif
129                 return 0;
130
131         succ_block = get_first_block_succ(block);
132
133         arity = get_Block_n_cfgpreds(succ_block);
134         if (arity <= 1)
135                 return 0;
136
137         for (i = 0; i < arity; ++i) {
138                 if (get_Block_cfgpred_block(succ_block, i) == block)
139                         break;
140         }
141         assert(i < arity);
142
143         sched_foreach(succ_block, node) {
144                 ir_node *arg;
145
146                 if (!is_Phi(node))
147                         break;
148
149                 arg = get_irn_n(node, i);
150                 if (arg == def)
151                         return 1;
152         }
153
154         return 0;
155 }
156
157 /**
158  * Retrieve the scheduled index (the "step") of this node in its
159  * block.
160  *
161  * @param node  the node
162  */
163 static inline unsigned get_step(const ir_node *node)
164 {
165         return (unsigned)PTR_TO_INT(get_irn_link(node));
166 }
167
168 /**
169  * Set the scheduled index (the "step") of this node in its
170  * block.
171  *
172  * @param node  the node
173  * @param step  the scheduled index of the node
174  */
175 static inline void set_step(ir_node *node, unsigned step)
176 {
177         set_irn_link(node, INT_TO_PTR(step));
178 }
179
180 static be_next_use_t get_next_use(be_uses_t *env, ir_node *from,
181                                                                   unsigned from_step, const ir_node *def,
182                                                                   int skip_from_uses)
183 {
184         unsigned  step  = from_step;
185         ir_node  *block = get_nodes_block(from);
186         ir_node  *next_use;
187         ir_node  *node;
188         unsigned  timestep;
189         unsigned  next_use_step;
190         const ir_edge_t *edge;
191
192         assert(skip_from_uses == 0 || skip_from_uses == 1);
193         if (skip_from_uses) {
194                 from = sched_next(from);
195         }
196
197         next_use      = NULL;
198         next_use_step = INT_MAX;
199         timestep      = get_step(from);
200         foreach_out_edge(def, edge) {
201                 ir_node  *node = get_edge_src_irn(edge);
202                 unsigned  node_step;
203
204                 if (is_Anchor(node))
205                         continue;
206                 if (get_nodes_block(node) != block)
207                         continue;
208                 if (is_Phi(node))
209                         continue;
210
211                 node_step = get_step(node);
212                 if (node_step < timestep)
213                         continue;
214                 if (node_step < next_use_step) {
215                         next_use      = node;
216                         next_use_step = node_step;
217                 }
218         }
219
220         if (next_use != NULL) {
221                 be_next_use_t result;
222                 result.time           = next_use_step - timestep + skip_from_uses;
223                 result.outermost_loop = get_loop_depth(get_irn_loop(block));
224                 result.before         = next_use;
225                 return result;
226         }
227
228         node = sched_last(block);
229         step = get_step(node) + 1 + timestep + skip_from_uses;
230
231         if (be_is_phi_argument(block, def)) {
232                 // TODO we really should continue searching the uses of the phi,
233                 // as a phi isn't a real use that implies a reload (because we could
234                 // easily spill the whole phi)
235
236                 be_next_use_t result;
237                 result.time           = step;
238                 result.outermost_loop = get_loop_depth(get_irn_loop(block));
239                 result.before         = block;
240                 return result;
241         }
242
243         {
244         unsigned next_use   = USES_INFINITY;
245         int outermost_loop;
246         be_next_use_t result;
247         ir_loop *loop       = get_irn_loop(block);
248         int loopdepth       = get_loop_depth(loop);
249         int found_visited   = 0;
250         int found_use       = 0;
251         ir_graph *irg       = get_irn_irg(block);
252         ir_node *startblock = get_irg_start_block(irg);
253
254         result.before  = NULL;
255         outermost_loop = loopdepth;
256         foreach_block_succ(block, edge) {
257                 const be_use_t *use;
258                 const ir_node *succ_block = get_edge_src_irn(edge);
259                 ir_loop *succ_loop;
260                 unsigned use_dist;
261
262                 if (succ_block == startblock)
263                         continue;
264
265                 DBG((env->dbg, LEVEL_5, "Checking succ of block %+F: %+F (for use of %+F)\n", block, succ_block, def));
266                 if (!be_is_live_in(env->lv, succ_block, def)) {
267                         //next_use = USES_INFINITY;
268                         DBG((env->dbg, LEVEL_5, "   not live in\n"));
269                         continue;
270                 }
271
272                 use = get_or_set_use_block(env, succ_block, def);
273                 DBG((env->dbg, LEVEL_5, "Found %u (loopdepth %d) (we're in block %+F)\n", use->next_use,
274                                         use->outermost_loop, block));
275                 if (USES_IS_INFINITE(use->next_use)) {
276                         if (use->outermost_loop < 0) {
277                                 found_visited = 1;
278                         }
279                         continue;
280                 }
281
282                 found_use = 1;
283                 use_dist = use->next_use;
284
285                 succ_loop = get_irn_loop(succ_block);
286                 if (get_loop_depth(succ_loop) < loopdepth) {
287                         unsigned factor = (loopdepth - get_loop_depth(succ_loop)) * 5000;
288                         DBG((env->dbg, LEVEL_5, "Increase usestep because of loop out edge %d -> %d (%u)\n", factor));
289                         // TODO we should use the number of nodes in the loop or so...
290                         use_dist += factor;
291                 }
292
293                 if (use_dist < next_use) {
294                         next_use       = use_dist;
295                         outermost_loop = use->outermost_loop;
296                         result.before  = use->node;
297                 }
298         }
299
300         if (loopdepth < outermost_loop)
301                 outermost_loop = loopdepth;
302
303         result.time           = next_use + step;
304         result.outermost_loop = outermost_loop;
305
306         if (!found_use && found_visited) {
307                 // the current result is correct for the current search, but isn't
308                 // generally correct, so mark it
309                 result.outermost_loop = -1;
310         }
311         DBG((env->dbg, LEVEL_5, "Result: %d (outerloop: %d)\n", result.time, result.outermost_loop));
312         return result;
313         }
314 }
315
316 be_next_use_t be_get_next_use(be_uses_t *env, ir_node *from,
317                          unsigned from_step, const ir_node *def,
318                          int skip_from_uses)
319 {
320         env->visited_counter++;
321         return get_next_use(env, from, from_step, def, skip_from_uses);
322 }
323
324 /**
325  * Pre-block walker, set the step number for every scheduled node
326  * in increasing order.
327  *
328  * After this, two scheduled nodes can be easily compared for the
329  * "scheduled earlier in block" property.
330  */
331 static void set_sched_step_walker(ir_node *block, void *data)
332 {
333         ir_node  *node;
334         unsigned step = 0;
335         (void) data;
336
337         sched_foreach(block, node) {
338                 set_step(node, step);
339                 if (is_Phi(node))
340                         continue;
341                 ++step;
342         }
343 }
344
345 be_uses_t *be_begin_uses(ir_graph *irg, const be_lv_t *lv)
346 {
347         be_uses_t *env = XMALLOC(be_uses_t);
348
349         edges_assure(irg);
350
351         //set_using_irn_link(irg);
352
353         /* precalculate sched steps */
354         irg_block_walk_graph(irg, set_sched_step_walker, NULL, NULL);
355
356         env->uses = new_set(cmp_use, 512);
357         env->irg = irg;
358         env->lv = lv;
359         env->visited_counter = 0;
360         FIRM_DBG_REGISTER(env->dbg, "firm.be.uses");
361
362         return env;
363 }
364
365 void be_end_uses(be_uses_t *env)
366 {
367         //clear_using_irn_link(env->irg);
368         del_set(env->uses);
369         free(env);
370 }