/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Register pressure node selector.
 * @author      Sebastian Hack
 * @date        29.08.2006
 * @version     $Id$
 */
#include "config.h"

#include <stdlib.h>
#include <limits.h>  /* INT_MAX */
#include <assert.h>

#include "iredges_t.h"
#include "irgwalk.h"
#include "irtools.h"

#include "besched.h"
#include "belistsched.h"
#include "benode.h"

typedef struct usage_stats_t {
	ir_node *irn;
	struct usage_stats_t *next;
	int max_hops;
	int uses_in_block;      /**< Number of uses inside the current block. */
	int already_consumed;   /**< Number of insns using this value already
	                             scheduled. */
} usage_stats_t;

typedef struct {
	const list_sched_selector_t *vtab;
} reg_pressure_main_env_t;

typedef struct {
	struct obstack obst;
	const reg_pressure_main_env_t *main_env;
	usage_stats_t *root;
	ir_nodeset_t already_scheduled;
} reg_pressure_selector_env_t;


#if 0
/*
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
 */
static ir_node *curr_bl = NULL;

static int cmp_usage(const void *a, const void *b)
{
	const ir_node *p = a;
	const ir_node *q = b;
	int res = 0;

	res = is_live_end(curr_bl, p) - is_live_end(curr_bl, q);

	/*
	 * One of them is live at the end of the block.
	 * Then, that one shall be scheduled after the other.
	 */
	if (res != 0)
		return res;

	return res;
}
#endif

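/**
 * Return the usage statistics record attached to @p irn, allocating and
 * initializing a fresh one on the environment's obstack if none exists yet.
 */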
static inline usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);

	if (!us) {
		us                   = OALLOC(&env->obst, usage_stats_t);
		us->irn              = irn;
		us->uses_in_block    = 0;
		us->already_consumed = 0;
		us->max_hops         = INT_MAX;
		us->next             = env->root;
		env->root            = us;
		set_irn_link(irn, us);
	}

	return us;
}

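/**
 * Return the usage statistics record attached to @p irn; the record must
 * already exist.
 */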
static inline usage_stats_t *get_usage_stats(ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);
	assert(us && "This node must have usage stats");
	return us;
}

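/**
 * Walk the operands of @p irn recursively and return the maximum number of
 * steps needed to reach an already scheduled node of the current block.
 * Values defined in a dominating block count as 0, other out-of-block
 * values as INT_MAX.
 */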
static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
{
	ir_node *bl = get_nodes_block(irn);
	/*
	 * If the reached node is not in the desired block,
	 * return the value appropriate for this situation.
	 */
	if (bl != curr_bl)
		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;

	/*
	 * If the node is in the current block but not
	 * yet scheduled, we keep on searching from that node.
	 */
	if (!ir_nodeset_contains(&env->already_scheduled, irn)) {
		int i, n;
		int res = 0;
		for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
			ir_node *operand = get_irn_in_or_dep(irn, i);

			if (get_irn_visited(operand) < visited_nr) {
				int tmp;

				set_irn_visited(operand, visited_nr);
				tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
				res = MAX(tmp, res);
			}
		}

		return res;
	}

	/*
	 * If the node is in the current block and scheduled, return
	 * the depth which indicates the number of steps to the
	 * region of scheduled nodes.
	 */
	return depth;
}

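/**
 * Compute, over all users of @p irn, the maximum distance (in not yet
 * scheduled nodes) from the user to the already scheduled region of its
 * block.
 */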
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
{
	ir_node *bl   = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);
	int res       = 0;

	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user       = get_edge_src_irn(edge);
		unsigned visited_nr = get_irg_visited(irg) + 1;
		int max_hops;

		set_irg_visited(irg, visited_nr);
		max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
		res      = MAX(res, max_hops);
	}

	return res;
}

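/**
 * Set up the selector for an irg: allocate the main environment and clear
 * the link fields of all nodes so they can carry usage statistics.
 */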
static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, ir_graph *irg)
{
	reg_pressure_main_env_t *main_env = XMALLOC(reg_pressure_main_env_t);

	main_env->vtab = vtab;
	irg_walk_graph(irg, firm_clear_link, NULL, NULL);

	return main_env;
}

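/**
 * Check whether @p irn has to appear in the schedule, asking the back end's
 * to_appear_in_schedule() callback first and falling back to the generic
 * test.
 */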
static inline int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
	int res = -1;

	if (sel->to_appear_in_schedule)
		res = sel->to_appear_in_schedule(block_env, irn);

	return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_Start(irn));
}

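/**
 * Initialize the per-block environment and collect usage statistics for the
 * operands of all nodes scheduled in @p bl.
 */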
static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
{
	ir_node *irn;
	reg_pressure_selector_env_t *env = XMALLOC(reg_pressure_selector_env_t);

	obstack_init(&env->obst);
	ir_nodeset_init(&env->already_scheduled);
	env->root     = NULL;
	env->main_env = graph_env;

	/*
	 * Collect usage statistics.
	 */
	sched_foreach(bl, irn) {
		if (must_appear_in_schedule(env->main_env->vtab, env, irn)) {
			int i, n;

			for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *op = get_irn_n(irn, i);
				if (must_appear_in_schedule(env->main_env->vtab, env, op)) {
					usage_stats_t *us = get_or_set_usage_stats(env, op);
#if 0 /* Liveness is not computed here! */
					if (is_live_end(bl, op))
						us->uses_in_block = 99999;
					else
#endif
						us->uses_in_block++;
				}
			}
		}
	}

	return env;
}

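/**
 * Free the per-block environment and reset the node links used for the
 * usage statistics.
 */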
static void reg_pressure_block_free(void *block_env)
{
	reg_pressure_selector_env_t *env = block_env;
	usage_stats_t *us;

	for (us = env->root; us; us = us->next)
		set_irn_link(us->irn, NULL);

	obstack_free(&env->obst, NULL);
	ir_nodeset_destroy(&env->already_scheduled);
	free(env);
}

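/**
 * Sum up compute_max_hops() over the data results of @p irn, descending
 * into the users (Projs) of mode_T nodes.
 */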
static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int res = 0;

	if (get_irn_mode(irn) == mode_T) {
		const ir_edge_t *edge;

		foreach_out_edge(irn, edge)
			res += get_result_hops_sum(env, get_edge_src_irn(edge));
	}
	else if (mode_is_data(get_irn_mode(irn)))
		res = compute_max_hops(env, irn);

	return res;
}

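/**
 * Cost of scheduling @p irn now: the sum of the maximum hop counts of its
 * operands and of its results.
 */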
static inline int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int i, n;
	int sum = 0;

	for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		if (must_appear_in_schedule(env->main_env->vtab, env, op))
			sum += compute_max_hops(env, op);
	}

	sum += get_result_hops_sum(env, irn);

	return sum;
}

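/**
 * Select the cheapest node from the ready set according to reg_pr_costs();
 * branch nodes are only chosen if nothing else is ready.
 */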
static ir_node *reg_pressure_select(void *block_env, ir_nodeset_t *ready_set,
                                    ir_nodeset_t *live_set)
{
	ir_nodeset_iterator_t iter;
	reg_pressure_selector_env_t *env = block_env;
	ir_node *irn, *res = NULL;
	int curr_cost      = INT_MAX;
	(void) live_set;

	assert(ir_nodeset_size(ready_set) > 0);

	ir_nodeset_iterator_init(&iter, ready_set);
	while ((irn = ir_nodeset_iterator_next(&iter)) != NULL) {
		/*
		 * Ignore branch instructions for the time being.
		 * They should only be scheduled if there is nothing else.
		 */
		if (!is_cfop(irn)) {
			int costs = reg_pr_costs(env, irn);
			if (costs <= curr_cost) {
				res       = irn;
				curr_cost = costs;
			}
		}
	}

	/*
	 * If there was no result, we only saw branches.
	 * Take one of them and finish.
	 */
	if (!res) {
		ir_nodeset_iterator_init(&iter, ready_set);
		res = ir_nodeset_iterator_next(&iter);

		assert(res && "There must be a node scheduled.");
	}

	ir_nodeset_insert(&env->already_scheduled, res);
	return res;
}

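/** The register pressure selector as a list scheduler callback table. */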
const list_sched_selector_t reg_pressure_selector = {
	reg_pressure_graph_init,
	reg_pressure_block_init,
	reg_pressure_select,
	NULL,                    /* to_appear_in_schedule */
	NULL,                    /* node_ready */
	NULL,                    /* node_selected */
	NULL,                    /* exectime */
	NULL,                    /* latency */
	reg_pressure_block_free,
	free
};