merge after_ra and finish phase, and move stack_bias fixing into backends
libfirm: ir/be/beschednormal.c
/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   Use the strong normal form theorem (though it does not hold)
 * @author  Christoph Mallon
 * @version $Id$
 */
#include "config.h"

#include <stdlib.h>

#include "besched.h"
#include "belistsched.h"
#include "belive_t.h"
#include "beutil.h"
#include "heights.h"
#include "irtools.h"
#include "irgwalk.h"
#include "benode.h"
#include "bemodule.h"
#include "array_t.h"

// XXX there is no one-time init for schedulers
//#define NORMAL_DBG
#include "irprintf.h"

/** An instance of the normal scheduler. */
typedef struct instance_t {
	ir_graph*      irg;          /**< the IR graph of this instance */
	struct obstack obst;         /**< obstack for temporary data */
	ir_node*       curr_list;    /**< current block schedule list */
} instance_t;

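/** Projs and Syncs are not scheduled on their own; everything else is. */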
static int must_be_scheduled(const ir_node* const irn)
{
	return !is_Proj(irn) && !is_Sync(irn);
}


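/**
 * Select callback of the list scheduler: return the first node of the
 * precomputed block order that is in the ready set and unlink it from the
 * list.  If no listed node is ready, fall back to an arbitrary ready node.
 */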
static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set)
{
	instance_t* inst = (instance_t*)block_env;
	ir_node*    irn;
	ir_node*    next;
	ir_node*    last = NULL;
	ir_nodeset_iterator_t iter;

	for (irn = inst->curr_list; irn != NULL; last = irn, irn = next) {
		next = (ir_node*)get_irn_link(irn);
		if (ir_nodeset_contains(ready_set, irn)) {
#if defined NORMAL_DBG
			ir_fprintf(stderr, "scheduling %+F\n", irn);
#endif
			if (last == NULL)
				inst->curr_list = next;
			else
				set_irn_link(last, next);
			return irn;
		}
	}

	ir_nodeset_iterator_init(&iter, ready_set);
	irn = ir_nodeset_iterator_next(&iter);
	return irn;
}


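/** A node paired with its cost, used for sorting operands and roots. */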
typedef struct irn_cost_pair {
	ir_node* irn;
	int      cost;
} irn_cost_pair;

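/**
 * qsort() comparator: order nodes by decreasing cost, breaking ties by node
 * index to keep the order deterministic.
 */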
static int cost_cmp(const void* a, const void* b)
{
	const irn_cost_pair* const a1 = (const irn_cost_pair*)a;
	const irn_cost_pair* const b1 = (const irn_cost_pair*)b;
	int ret = b1->cost - a1->cost;
	if (ret == 0)
		ret = (int)get_irn_idx(a1->irn) - (int)get_irn_idx(b1->irn);
#if defined NORMAL_DBG
	ir_fprintf(stderr, "cost %+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
#endif
	return ret;
}


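/**
 * Per-node data stored in the link field: whether the node was seen as an
 * operand inside its own block (and hence is no root) and its operands
 * sorted by decreasing cost.
 */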
typedef struct flag_and_cost {
	int no_root;
	irn_cost_pair costs[];
} flag_and_cost;

#define get_irn_fc(irn)     ((flag_and_cost*)get_irn_link(irn))
#define set_irn_fc(irn, fc) set_irn_link(irn, fc)


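/**
 * Returns 1 if the node produces a value that occupies a register, 0 for
 * memory, control flow and ignore-register results.  Tuples count as a
 * single result.
 */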
static int count_result(const ir_node* irn)
{
	const ir_mode* mode = get_irn_mode(irn);

	if (mode == mode_M || mode == mode_X)
		return 0;

	if (mode == mode_T)
		return 1;

	if (arch_get_register_req_out(irn)->type & arch_register_req_type_ignore)
		return 0;

	return 1;
}


/* TODO: high cost for store trees */

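/**
 * Recursively compute the cost of the tree rooted at irn, i.e. an estimate of
 * its register demand in the spirit of the strong normal form theorem.  The
 * operands sorted by decreasing cost are cached in the link field, and
 * same-block operands are marked as non-roots on the way.
 */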
static int normal_tree_cost(ir_node* irn, instance_t *inst)
{
	flag_and_cost* fc;
	int            arity;
	ir_node*       last;
	int            n_res;
	int            cost;
	int            n_op_res = 0;
	int            i;

	if (be_is_Keep(irn))
		return 0;

	if (is_Proj(irn)) {
		return normal_tree_cost(get_Proj_pred(irn), inst);
	}

	arity = get_irn_arity(irn);
	fc    = get_irn_fc(irn);

	if (fc == NULL) {
		irn_cost_pair* costs;
		ir_node*       block = get_nodes_block(irn);

		fc = OALLOCF(&inst->obst, flag_and_cost, costs, arity);
		fc->no_root = 0;
		costs = fc->costs;

		for (i = 0; i < arity; ++i) {
			ir_node* pred = get_irn_n(irn, i);

			if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) {
				cost = 0;
			} else if (get_nodes_block(pred) != block) {
				cost = 1;
			} else {
				flag_and_cost* pred_fc;
				ir_node*       real_pred;

				cost = normal_tree_cost(pred, inst);
				if (!arch_irn_is_ignore(pred)) {
					real_pred = (is_Proj(pred) ? get_Proj_pred(pred) : pred);
					pred_fc = get_irn_fc(real_pred);
					pred_fc->no_root = 1;
#if defined NORMAL_DBG
					ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, real_pred);
#endif
				}
			}

			costs[i].irn  = pred;
			costs[i].cost = cost;
		}

		qsort(costs, arity, sizeof(*costs), cost_cmp);
		set_irn_link(irn, fc);
	}

	cost = 0;
	last = NULL;
	for (i = 0; i < arity; ++i) {
		ir_node* op = fc->costs[i].irn;
		ir_mode* mode;
		if (op == last)
			continue;
		mode = get_irn_mode(op);
		if (mode == mode_M)
			continue;
		if (mode != mode_T && arch_irn_is_ignore(op))
			continue;
		cost = MAX(fc->costs[i].cost + n_op_res, cost);
		last = op;
		++n_op_res;
	}
	n_res = count_result(irn);
	cost = MAX(n_res, cost);

#if defined NORMAL_DBG
	ir_fprintf(stderr, "reguse of %+F is %d\n", irn, cost);
#endif

	return cost;
}


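/** Walker: compute the tree cost of every node that takes part in scheduling. */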
static void normal_cost_walker(ir_node* irn, void* env)
{
	instance_t *inst = (instance_t*)env;

#if defined NORMAL_DBG
	ir_fprintf(stderr, "cost walking node %+F\n", irn);
#endif
	if (is_Block(irn)) return;
	if (!must_be_scheduled(irn)) return;
	normal_tree_cost(irn, inst);
}


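/**
 * Walker: append every root (a Keep or a node never used as an operand inside
 * its block) to the root array stored in the link field of its block.
 */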
static void collect_roots(ir_node* irn, void* env)
{
	int is_root;

	(void)env;

	if (is_Block(irn)) return;
	if (!must_be_scheduled(irn)) return;

	is_root = be_is_Keep(irn) || !get_irn_fc(irn)->no_root;

#if defined NORMAL_DBG
	ir_fprintf(stderr, "%+F is %sroot\n", irn, is_root ? "" : "no ");
#endif

	if (is_root) {
		ir_node* block = get_nodes_block(irn);
		ir_node** roots = (ir_node**)get_irn_link(block);
		if (roots == NULL) {
			roots = NEW_ARR_F(ir_node*, 0);
		}
		ARR_APP1(ir_node*, roots, irn);
		set_irn_link(block, roots);
	}
}


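/**
 * Append irn to the schedule after recursively scheduling its not yet visited
 * same-block operands in the cost order computed before.  Phis and Keeps do
 * not pull in their operands.
 */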
static ir_node** sched_node(ir_node** sched, ir_node* irn)
{
	if (irn_visited_else_mark(irn)) return sched;
	if (is_End(irn))                return sched;

	if (!is_Phi(irn) && !be_is_Keep(irn)) {
		ir_node*       block = get_nodes_block(irn);
		int            arity = get_irn_arity(irn);
		flag_and_cost* fc    = get_irn_fc(irn);
		irn_cost_pair* irns  = fc->costs;
		int            i;

		for (i = 0; i < arity; ++i) {
			ir_node* pred = irns[i].irn;
			if (get_nodes_block(pred) != block) continue;
			if (get_irn_mode(pred) == mode_M) continue;
			if (is_Proj(pred)) pred = get_Proj_pred(pred);
			sched = sched_node(sched, pred);
		}
	}

	ARR_APP1(ir_node*, sched, irn);
	return sched;
}


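/**
 * qsort() comparator for roots: control flow forking nodes go last, the rest
 * is ordered by decreasing height; on ties, nodes with a register result
 * (live-out candidates) are placed later.
 */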
static int root_cmp(const void* a, const void* b)
{
	const irn_cost_pair* const a1 = (const irn_cost_pair*)a;
	const irn_cost_pair* const b1 = (const irn_cost_pair*)b;
	int ret;
	if (is_irn_forking(a1->irn)) {
		ret = 1;
	} else if (is_irn_forking(b1->irn)) {
		ret = -1;
	} else {
		ret = b1->cost - a1->cost;
		if (ret == 0) {
			/* place live-out nodes later */
			ret = (count_result(a1->irn) != 0) - (count_result(b1->irn) != 0);
		}
	}
#if defined NORMAL_DBG
	ir_fprintf(stderr, "root %+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
#endif
	return ret;
}


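/**
 * Block walker: sort the block's roots by their height in the dependence
 * graph and build the tentative block schedule from them; the result is
 * stored in the block's link field.
 */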
static void normal_sched_block(ir_node* block, void* env)
{
	ir_node**      roots = (ir_node**)get_irn_link(block);
	ir_heights_t*  heights = (ir_heights_t*)env;
	int            root_count;
	irn_cost_pair* root_costs;
	int i;
	ir_node**      sched;

#if defined NORMAL_DBG
	ir_fprintf(stderr, "sched walking block %+F\n", block);
#endif

	if (roots == NULL) {
#if defined NORMAL_DBG
		fprintf(stderr, "has no roots\n");
#endif
		return;
	}

	root_count = ARR_LEN(roots);
	NEW_ARR_A(irn_cost_pair, root_costs, root_count);
	for (i = 0; i < root_count; ++i) {
		root_costs[i].irn  = roots[i];
		root_costs[i].cost = get_irn_height(heights, roots[i]);
#if defined NORMAL_DBG
		ir_fprintf(stderr, "height of %+F is %u\n", roots[i], root_costs[i].cost);
#endif
	}
	qsort(root_costs, root_count, sizeof(*root_costs), root_cmp);
#if defined NORMAL_DBG
	{
		int n = root_count;
		int i;

		ir_fprintf(stderr, "Root Scheduling of %+F:\n", block);
		for (i = 0; i < n; ++i) {
			ir_fprintf(stderr, "  %+F\n", root_costs[i].irn);
		}
		fprintf(stderr, "\n");
	}
#endif

	sched = NEW_ARR_F(ir_node*, 0);
	for (i = 0; i < root_count; ++i) {
		ir_node* irn = root_costs[i].irn;
		assert(must_be_scheduled(irn));
		sched = sched_node(sched, irn);
	}
	set_irn_link(block, sched);
	DEL_ARR_F(roots);

#if defined NORMAL_DBG
	{
		int n = ARR_LEN(sched);
		int i;

		ir_fprintf(stderr, "Scheduling of %+F:\n", block);
		for (i = 0; i < n; ++i) {
			ir_fprintf(stderr, "  %+F\n", sched[i]);
		}
		fprintf(stderr, "\n");
	}
#endif
}


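/**
 * Per-graph setup of the scheduler: compute tree costs, collect the roots of
 * every block and precompute a tentative schedule for each block.
 */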
static void *normal_init_graph(ir_graph *irg)
{
	instance_t   *inst = XMALLOC(instance_t);
	ir_heights_t *heights;

	be_clear_links(irg);

	obstack_init(&inst->obst);
	inst->irg         = irg;

	heights = heights_new(irg);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	irg_walk_graph(irg, normal_cost_walker,  NULL, inst);
	irg_walk_graph(irg, collect_roots, NULL, NULL);
	inc_irg_visited(irg);
	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
	irg_block_walk_graph(irg, normal_sched_block, NULL, heights);
	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);

	heights_free(heights);

	return inst;
}

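/**
 * Per-block setup: thread the precomputed schedule into a singly linked list
 * through the link fields (control flow nodes excluded) so normal_select can
 * remove nodes cheaply.
 */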
static void *normal_init_block(void *graph_env, ir_node *block)
{
	instance_t* inst  = (instance_t*)graph_env;
	ir_node**   sched = (ir_node**)get_irn_link(block);
	ir_node*    first = NULL;
	int         i;

	/* Turn the schedule into a list, so we can easily remove nodes;
	   the link field is already in use for scheduling data anyway. */
	for (i = ARR_LEN(sched) - 1; i >= 0; --i) {
		ir_node* irn = sched[i];
		if (!is_cfop(irn)) {
			set_irn_link(irn, first);
			first = irn;
		}
	}
	/* Note: we can free sched here, as there should be no attempt to
	   schedule a block twice. */
	DEL_ARR_F(sched);
	set_irn_link(block, sched);
	inst->curr_list = first;
	return inst;
}

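/** Per-graph teardown: release the link fields and all temporary data. */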
static void normal_finish_graph(void *env)
{
	instance_t *inst = (instance_t*)env;

	/* block uses the link field to store the schedule */
	ir_free_resources(inst->irg, IR_RESOURCE_IRN_LINK);
	obstack_free(&inst->obst, NULL);
	xfree(inst);
}

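/** Schedule irg with the list scheduler, using the normal selector. */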
static void sched_normal(ir_graph *irg)
{
	static const list_sched_selector_t normal_selector = {
		normal_init_graph,
		normal_init_block,
		normal_select,
		NULL,              /* node_ready */
		NULL,              /* node_selected */
		NULL,              /* finish_block */
		normal_finish_graph
	};
	be_list_sched_graph(irg, &normal_selector);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_normal)
void be_init_sched_normal(void)
{
	be_register_scheduler("normal", sched_normal);
}