backend: cleanup queries for ignore regs
[libfirm] / ir / be / bespillbelady.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       Belady's spill algorithm.
23  * @author      Daniel Grund, Matthias Braun
24  * @date        20.09.2005
25  * @version     $Id$
26  */
27 #include "config.h"
28
29 #include <stdbool.h>
30
31 #include "obst.h"
32 #include "irprintf_t.h"
33 #include "irgraph.h"
34 #include "irnode.h"
35 #include "irmode.h"
36 #include "irgwalk.h"
37 #include "irloop.h"
38 #include "iredges_t.h"
39 #include "ircons_t.h"
40 #include "irprintf.h"
41 #include "irnodeset.h"
42
43 #include "beutil.h"
44 #include "bearch.h"
45 #include "beuses.h"
46 #include "besched.h"
47 #include "beirgmod.h"
48 #include "belive_t.h"
49 #include "benode.h"
50 #include "bechordal_t.h"
51 #include "bespill.h"
52 #include "beloopana.h"
53 #include "beirg.h"
54 #include "bespillutil.h"
55 #include "bemodule.h"
56
57 #define DBG_SPILL     1
58 #define DBG_WSETS     2
59 #define DBG_FIX       4
60 #define DBG_DECIDE    8
61 #define DBG_START    16
62 #define DBG_SLOTS    32
63 #define DBG_TRACE    64
64 #define DBG_WORKSET 128
65 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
66
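   /* sentinel next-use time for workset entries whose distance has not been computed yet */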
67 #define TIME_UNDEFINED 6666
68
69 /**
70  * An association between a node and a point in time.
71  */
72 typedef struct loc_t {
73         ir_node          *node;
74         unsigned          time;     /**< A use time (see beuses.h). */
75         bool              spilled;  /**< value was already spilled on this path */
76 } loc_t;
77
78 typedef struct workset_t {
79         unsigned len;     /**< current length */
80         loc_t    vals[0]; /**< array of the values/distances in this working set */
81 } workset_t;
82
83 static struct obstack               obst;
84 static const arch_register_class_t *cls;
85 static const be_lv_t               *lv;
86 static be_loopana_t                *loop_ana;
87 static unsigned                     n_regs;
88 static workset_t                   *ws;     /**< the main workset used while
89                                                      processing a block. */
90 static be_uses_t                   *uses;   /**< env for the next-use magic */
91 static ir_node                     *instr;  /**< current instruction */
92 static unsigned                     instr_nr; /**< current instruction number
93                                                        (relative to block start) */
94 static spill_env_t                 *senv;   /**< see bespill.h */
95 static ir_node                    **blocklist;
96
97 static int                          move_spills      = true;
98 static int                          respectloopdepth = true;
99 static int                          improve_known_preds = true;
100 /* factor to weight the different costs of reloading/rematerializing a node
101    (see bespill.h be_get_reload_costs_no_weight) */
102 static int                          remat_bonus      = 10;
103
104 static const lc_opt_table_entry_t options[] = {
105         LC_OPT_ENT_BOOL   ("movespills", "try to move spills out of loops", &move_spills),
106         LC_OPT_ENT_BOOL   ("respectloopdepth", "outermost loop cutting", &respectloopdepth),
107         LC_OPT_ENT_BOOL   ("improveknownpreds", "known preds cutting", &improve_known_preds),
108         LC_OPT_ENT_INT    ("rematbonus", "give bonus to rematerialisable nodes", &remat_bonus),
109         LC_OPT_LAST
110 };
111
112 /**
113  * Allocate a new workset on the global obstack with room for n_regs entries
114  */
115 static workset_t *new_workset(void)
116 {
117         return OALLOCFZ(&obst, workset_t, vals, n_regs);
118 }
119
120 /**
121  * Allocate a new workset on the obstack and initialise it as a copy of @param workset
122  */
123 static workset_t *workset_clone(workset_t *workset)
124 {
125         workset_t *res = OALLOCF(&obst, workset_t, vals, n_regs);
126         memcpy(res, workset, sizeof(*res) + n_regs * sizeof(res->vals[0]));
127         return res;
128 }
129
130 /**
131  * Copy workset @param src to @param dest
132  */
133 static void workset_copy(workset_t *dest, const workset_t *src)
134 {
135         size_t size = sizeof(*src) + n_regs * sizeof(src->vals[0]);
136         memcpy(dest, src, size);
137 }
138
139 /**
140  * Overwrites the current content array of @param workset with the
141  * @param count locations given at memory @param locs.
142  * Sets the length of @param workset to @p count.
143  */
144 static void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs)
145 {
146         workset->len = count;
147         memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
148 }
149
150 /**
151  * Inserts the value @p val into the workset, iff it is not
152  * already contained. The workset must not be full.
153  */
154 static void workset_insert(workset_t *workset, ir_node *val, bool spilled)
155 {
156         loc_t    *loc;
157         unsigned  i;
158         /* check for current regclass */
159         assert(arch_irn_consider_in_reg_alloc(cls, val));
160
161         /* check if val is already contained */
162         for (i = 0; i < workset->len; ++i) {
163                 loc = &workset->vals[i];
164                 if (loc->node == val) {
165                         if (spilled) {
166                                 loc->spilled = true;
167                         }
168                         return;
169                 }
170         }
171
172         /* insert val */
173         assert(workset->len < n_regs && "Workset already full!");
174         loc           = &workset->vals[workset->len];
175         loc->node     = val;
176         loc->spilled  = spilled;
177         loc->time     = TIME_UNDEFINED;
178         workset->len++;
179 }
180
181 /**
182  * Removes all entries from this workset
183  */
184 static void workset_clear(workset_t *workset)
185 {
186         workset->len = 0;
187 }
188
189 /**
190  * Removes the value @p val from the workset if present.
191  */
192 static void workset_remove(workset_t *workset, ir_node *val)
193 {
194         unsigned i;
195         for (i = 0; i < workset->len; ++i) {
196                 if (workset->vals[i].node == val) {
197                         workset->vals[i] = workset->vals[--workset->len];
198                         return;
199                 }
200         }
201 }
202
203 static const loc_t *workset_contains(const workset_t *ws, const ir_node *val)
204 {
205         unsigned i;
206         for (i = 0; i < ws->len; ++i) {
207                 if (ws->vals[i].node == val)
208                         return &ws->vals[i];
209         }
210
211         return NULL;
212 }
213
214 static int loc_compare(const void *a, const void *b)
215 {
216         const loc_t *p = a;
217         const loc_t *q = b;
218         return (p->time > q->time) - (p->time < q->time); /* avoid unsigned wrap-around */
219 }
220
221 static void workset_sort(workset_t *workset)
222 {
223         qsort(workset->vals, workset->len, sizeof(workset->vals[0]), loc_compare);
224 }
225
226 static inline unsigned workset_get_time(const workset_t *workset, unsigned idx)
227 {
228         return workset->vals[idx].time;
229 }
230
231 static inline void workset_set_time(workset_t *workset, unsigned idx,
232                                     unsigned time)
233 {
234         workset->vals[idx].time = time;
235 }
236
237 static inline unsigned workset_get_length(const workset_t *workset)
238 {
239         return workset->len;
240 }
241
242 static inline void workset_set_length(workset_t *workset, unsigned len)
243 {
244         workset->len = len;
245 }
246
247 static inline ir_node *workset_get_val(const workset_t *workset, unsigned idx)
248 {
249         return workset->vals[idx].node;
250 }
251
252 /**
253  * Iterates over all values in the working set.
254  * @p ws The workset to iterate
255  * @p v  A variable to put the current value in
256  * @p i  An integer for internal use
257  */
258 #define workset_foreach(ws, v, i) \
259         for (i=0; v=(i < ws->len) ? ws->vals[i].node : NULL, i < ws->len; ++i)
260
261 typedef struct block_info_t {
262         workset_t *start_workset;
263         workset_t *end_workset;
264 } block_info_t;
265
266 static block_info_t *new_block_info(void)
267 {
268         return OALLOCZ(&obst, block_info_t);
269 }
270
271 static inline block_info_t *get_block_info(const ir_node *block)
272 {
273         return get_irn_link(block);
274 }
275
276 static inline void set_block_info(ir_node *block, block_info_t *info)
277 {
278         set_irn_link(block, info);
279 }
280
281 /**
282  * @return The distance to the next use of @p def, or 0 if it has the dont_spill flag set
283  */
284 static unsigned get_distance(ir_node *from, unsigned from_step,
285                              const ir_node *def, int skip_from_uses)
286 {
287         be_next_use_t use;
288         unsigned      costs;
289         unsigned      time;
290
291         assert(!arch_irn_is_ignore(def));
292
293         use  = be_get_next_use(uses, from, from_step, def, skip_from_uses);
294         time = use.time;
295         if (USES_IS_INFINITE(time))
296                 return USES_INFINITY;
297
298         /* We have to keep non-spillable nodes in the working set */
299         if (arch_irn_get_flags(skip_Proj_const(def)) & arch_irn_flags_dont_spill)
300                 return 0;
301
302         /* give some bonus to rematerialisable nodes */
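    /* (the cheaper a value is to reload or rematerialise, the larger the offset
     *  added to its next-use time, so it becomes a preferred eviction candidate
     *  over values that are expensive to restore) */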
303         if (remat_bonus > 0) {
304                 costs = be_get_reload_costs_no_weight(senv, def, use.before);
305                 assert(costs * remat_bonus < 1000);
306                 time  += 1000 - (costs * remat_bonus);
307         }
308
309         return time;
310 }
311
312 /**
313  * Performs the actions necessary to grant the request that:
314  * - new_vals can be held in registers
315  * - as few other values as possible are disposed
316  * - the worst values get disposed
317  *
318  * @p is_usage indicates that the values in new_vals are used (not defined);
319  * in this case reloads must be performed.
320  */
321 static void displace(workset_t *new_vals, int is_usage)
322 {
323         ir_node **to_insert = ALLOCAN(ir_node*, n_regs);
324         bool     *spilled   = ALLOCAN(bool,     n_regs);
325         ir_node  *val;
326         int       i;
327         int       len;
328         int       spills_needed;
329         int       demand;
330         unsigned  iter;
331
332         /* 1. Identify the number of needed slots and the values to reload */
333         demand = 0;
334         workset_foreach(new_vals, val, iter) {
335                 bool reloaded = false;
336
337                 if (! workset_contains(ws, val)) {
338                         DB((dbg, DBG_DECIDE, "    insert %+F\n", val));
339                         if (is_usage) {
340                                 DB((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, instr));
341                                 be_add_reload(senv, val, instr, cls, 1);
342                                 reloaded = true;
343                         }
344                 } else {
345                         DB((dbg, DBG_DECIDE, "    %+F already in workset\n", val));
346                         assert(is_usage);
347                         /* remove the value from the current workset so it is not accidentally
348                          * spilled */
349                         workset_remove(ws, val);
350                 }
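                    /* a value that has just been reloaded already has a spill on this
                     * path, so mark it as spilled to avoid emitting a second spill later */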
351                 spilled[demand]   = reloaded;
352                 to_insert[demand] = val;
353                 ++demand;
354         }
355
356         /* 2. Make room for at least 'demand' slots */
357         len           = workset_get_length(ws);
358         spills_needed = len + demand - n_regs;
359         assert(spills_needed <= len);
360
361         /* Only make more free room if we do not have enough */
362         if (spills_needed > 0) {
363                 DB((dbg, DBG_DECIDE, "    disposing %d values\n", spills_needed));
364
365                 /* calculate current next-use distance for live values */
366                 for (i = 0; i < len; ++i) {
367                         ir_node  *val  = workset_get_val(ws, i);
368                         unsigned  dist = get_distance(instr, instr_nr, val, !is_usage);
369                         workset_set_time(ws, i, dist);
370                 }
371
372                 /* sort entries by increasing next-use distance */
373                 workset_sort(ws);
374
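                    /* Belady's rule: after sorting, the entries with the furthest next
                     * use sit at the end of the array; those are the ones we evict */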
375                 for (i = len - spills_needed; i < len; ++i) {
376                         ir_node *val = ws->vals[i].node;
377
378                         DB((dbg, DBG_DECIDE, "    disposing node %+F (%u)\n", val,
379                              workset_get_time(ws, i)));
380
381                         if (move_spills) {
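                                    /* only spill values that are still used later (finite
                                     * next-use distance) and were not already spilled on
                                     * this path */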
382                                 if (!USES_IS_INFINITE(ws->vals[i].time)
383                                                 && !ws->vals[i].spilled) {
384                                         ir_node *after_pos = sched_prev(instr);
385                                         DB((dbg, DBG_DECIDE, "Spill %+F after node %+F\n", val,
386                                                 after_pos));
387                                         be_add_spill(senv, val, after_pos);
388                                 }
389                         }
390                 }
391
392                 /* kill the last 'spills_needed' entries in the array */
393                 workset_set_length(ws, len - spills_needed);
394         }
395
396         /* 3. Insert the new values into the workset */
397         for (i = 0; i < demand; ++i) {
398                 ir_node *val = to_insert[i];
399
400                 workset_insert(ws, val, spilled[i]);
401         }
402 }
403
404 enum {
405         AVAILABLE_EVERYWHERE,
406         AVAILABLE_NOWHERE,
407         AVAILABLE_PARTLY,
408         AVAILABLE_UNKNOWN
409 };
410
411 static unsigned available_in_all_preds(workset_t* const* pred_worksets,
412                                        size_t n_pred_worksets,
413                                        const ir_node *value, bool is_local_phi)
414 {
415         size_t i;
416         bool   avail_everywhere = true;
417         bool   avail_nowhere    = true;
418
419         assert(n_pred_worksets > 0);
420
421         /* value available in all preds? */
422         for (i = 0; i < n_pred_worksets; ++i) {
423                 bool             found     = false;
424                 const workset_t *p_workset = pred_worksets[i];
425                 int              p_len     = workset_get_length(p_workset);
426                 int              p_i;
427                 const ir_node   *l_value;
428
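                    /* for a phi of the current block, the value that has to be available
                     * in predecessor i is the i-th phi argument, not the phi itself */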
429                 if (is_local_phi) {
430                         assert(is_Phi(value));
431                         l_value = get_irn_n(value, i);
432                 } else {
433                         l_value = value;
434                 }
435
436                 for (p_i = 0; p_i < p_len; ++p_i) {
437                         const loc_t *p_l = &p_workset->vals[p_i];
438                         if (p_l->node != l_value)
439                                 continue;
440
441                         found = true;
442                         break;
443                 }
444
445                 if (found) {
446                         avail_nowhere = false;
447                 } else {
448                         avail_everywhere = false;
449                 }
450         }
451
452         if (avail_everywhere) {
453                 assert(!avail_nowhere);
454                 return AVAILABLE_EVERYWHERE;
455         } else if (avail_nowhere) {
456                 return AVAILABLE_NOWHERE;
457         } else {
458                 return AVAILABLE_PARTLY;
459         }
460 }
461
462 /** Decides whether a specific node should be in the start workset or not
463  *
464  * @param first      first instruction of the block (used for next-use queries)
465  * @param node       the node to test
466  * @param loop       the loop of the node
467  * @param available  result of available_in_all_preds() for @p node
468  */
469 static loc_t to_take_or_not_to_take(ir_node* first, ir_node *node,
470                                     ir_loop *loop, unsigned available)
471 {
472         be_next_use_t next_use;
473         loc_t         loc;
474
475         loc.time    = USES_INFINITY;
476         loc.node    = node;
477         loc.spilled = false;
478
479         if (!arch_irn_consider_in_reg_alloc(cls, node)) {
480                 loc.time = USES_INFINITY;
481                 return loc;
482         }
483
484         /* We have to keep non-spillable nodes in the working set */
485         if (arch_irn_get_flags(skip_Proj_const(node)) & arch_irn_flags_dont_spill) {
486                 loc.time = 0;
487                 DB((dbg, DBG_START, "    %+F taken (dontspill node)\n", node));
488                 return loc;
489         }
490
491         next_use = be_get_next_use(uses, first, 0, node, 0);
492         if (USES_IS_INFINITE(next_use.time)) {
493                 /* a node marked as live-in shouldn't be dead, so it must be a phi */
494                 assert(is_Phi(node));
495                 loc.time = USES_INFINITY;
496                 DB((dbg, DBG_START, "    %+F not taken (dead)\n", node));
497                 return loc;
498         }
499
500         loc.time = next_use.time;
501
502         if (improve_known_preds) {
503                 if (available == AVAILABLE_EVERYWHERE) {
504                         DB((dbg, DBG_START, "    %+F taken (%u, live in all preds)\n",
505                             node, loc.time));
506                         return loc;
507                 } else if (available == AVAILABLE_NOWHERE) {
508                         DB((dbg, DBG_START, "    %+F not taken (%u, live in no pred)\n",
509                             node, loc.time));
510                         loc.time = USES_INFINITY;
511                         return loc;
512                 }
513         }
514
515         if (!respectloopdepth || next_use.outermost_loop >= get_loop_depth(loop)) {
516                 DB((dbg, DBG_START, "    %+F taken (%u, loop %d)\n", node, loc.time,
517                     next_use.outermost_loop));
518         } else {
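                    /* the next use lies outside the current loop: mark the value as
                     * delayed; it is only kept live through the loop if register
                     * pressure permits */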
519                 loc.time = USES_PENDING;
520                 DB((dbg, DBG_START, "    %+F delayed (outerdepth %d < loopdepth %d)\n",
521                     node, next_use.outermost_loop, get_loop_depth(loop)));
522         }
523
524         return loc;
525 }
526
527 /**
528  * Computes the start workset for a block with multiple predecessors. We assume
529  * that at least one of the predecessors is a backedge, which means we are at the
530  * beginning of a loop. We try to reload as many values as possible now so they
531  * don't get reloaded inside the loop.
532  */
533 static void decide_start_workset(const ir_node *block)
534 {
535         ir_loop    *loop = get_irn_loop(block);
536         ir_node    *first;
537         ir_node    *node;
538         loc_t       loc;
539         loc_t      *starters;
540         loc_t      *delayed;
541         unsigned    len;
542         unsigned    i;
543         int         in;
544         unsigned    ws_count;
545         int         free_slots, free_pressure_slots;
546         unsigned    pressure;
547         int         arity;
548         workset_t **pred_worksets;
549         bool        all_preds_known;
550
551         /* check predecessors */
552         arity           = get_irn_arity(block);
553         pred_worksets   = ALLOCAN(workset_t*, arity);
554         all_preds_known = true;
555         for (in = 0; in < arity; ++in) {
556                 ir_node      *pred_block = get_Block_cfgpred_block(block, in);
557                 block_info_t *pred_info  = get_block_info(pred_block);
558
559                 if (pred_info == NULL) {
560                         pred_worksets[in] = NULL;
561                         all_preds_known   = false;
562                 } else {
563                         pred_worksets[in] = pred_info->end_workset;
564                 }
565         }
566
567         /* Collect all values living at start of block */
568         starters = NEW_ARR_F(loc_t, 0);
569         delayed  = NEW_ARR_F(loc_t, 0);
570
571         DB((dbg, DBG_START, "Living at start of %+F:\n", block));
572         first = sched_first(block);
573
574         /* check all Phis first */
575         sched_foreach(block, node) {
576                 unsigned available;
577
578                 if (! is_Phi(node))
579                         break;
580                 if (!arch_irn_consider_in_reg_alloc(cls, node))
581                         continue;
582
583                 if (all_preds_known) {
584                         available = available_in_all_preds(pred_worksets, arity, node, true);
585                 } else {
586                         available = AVAILABLE_UNKNOWN;
587                 }
588
589                 loc = to_take_or_not_to_take(first, node, loop, available);
590
591                 if (! USES_IS_INFINITE(loc.time)) {
592                         if (USES_IS_PENDING(loc.time))
593                                 ARR_APP1(loc_t, delayed, loc);
594                         else
595                                 ARR_APP1(loc_t, starters, loc);
596                 } else {
597                         be_spill_phi(senv, node);
598                 }
599         }
600
601         /* check all Live-Ins */
602         be_lv_foreach(lv, block, be_lv_state_in, in) {
603                 ir_node *node = be_lv_get_irn(lv, block, in);
604                 unsigned available;
605
606                 if (all_preds_known) {
607                         available = available_in_all_preds(pred_worksets, arity, node, false);
608                 } else {
609                         available = AVAILABLE_UNKNOWN;
610                 }
611
612                 loc = to_take_or_not_to_take(first, node, loop, available);
613
614                 if (! USES_IS_INFINITE(loc.time)) {
615                         if (USES_IS_PENDING(loc.time))
616                                 ARR_APP1(loc_t, delayed, loc);
617                         else
618                                 ARR_APP1(loc_t, starters, loc);
619                 }
620         }
621
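            /* Determine how many delayed values can be kept live through the loop:
             * limited by the registers left over after the starters and by the room
             * the maximal register pressure inside the loop leaves free. */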
622         pressure            = be_get_loop_pressure(loop_ana, cls, loop);
623         assert(ARR_LEN(delayed) <= (signed)pressure);
624         free_slots          = n_regs - ARR_LEN(starters);
625         free_pressure_slots = n_regs - (pressure - ARR_LEN(delayed));
626         free_slots          = MIN(free_slots, free_pressure_slots);
627
628         /* so far we only put nodes into the starters list that are used inside
629          * the loop. If register pressure in the loop is low then we can take some
630          * values and let them live through the loop */
631         DB((dbg, DBG_START, "Loop pressure %d, taking %d delayed vals\n",
632             pressure, free_slots));
633         if (free_slots > 0) {
634                 int i;
635                 qsort(delayed, ARR_LEN(delayed), sizeof(delayed[0]), loc_compare);
636
637                 for (i = 0; i < ARR_LEN(delayed) && free_slots > 0; ++i) {
638                         int    p, arity;
639                         loc_t *loc = & delayed[i];
640
641                         if (!is_Phi(loc->node)) {
642                                 /* don't use values which are dead in a known predecessor,
643                                  * to avoid inducing unnecessary reloads */
644                                 arity = get_irn_arity(block);
645                                 for (p = 0; p < arity; ++p) {
646                                         ir_node      *pred_block = get_Block_cfgpred_block(block, p);
647                                         block_info_t *pred_info  = get_block_info(pred_block);
648
649                                         if (pred_info == NULL)
650                                                 continue;
651
652                                         if (!workset_contains(pred_info->end_workset, loc->node)) {
653                                                 DB((dbg, DBG_START,
654                                                         "    delayed %+F not live at pred %+F\n", loc->node,
655                                                         pred_block));
656                                                 goto skip_delayed;
657                                         }
658                                 }
659                         }
660
661                         DB((dbg, DBG_START, "    delayed %+F taken\n", loc->node));
662                         ARR_APP1(loc_t, starters, *loc);
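                            /* mark the entry as taken so the delayed-phi spill loop below skips it */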
663                         loc->node = NULL;
664                         --free_slots;
665                 skip_delayed:
666                         ;
667                 }
668         }
669
670         /* spill the delayed phis (the actual phis, not just their values) from this
671          * block that were not taken into the start workset */
672         len = ARR_LEN(delayed);
673         for (i = 0; i < len; ++i) {
674                 ir_node *node = delayed[i].node;
675                 if (node == NULL || !is_Phi(node) || get_nodes_block(node) != block)
676                         continue;
677
678                 DB((dbg, DBG_START, "    spilling delayed phi %+F\n", node));
679                 be_spill_phi(senv, node);
680         }
681         DEL_ARR_F(delayed);
682
683         /* Sort start values by first use */
684         qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);
685
686         /* Copy the best ones from starters to start workset */
687         ws_count = MIN((unsigned) ARR_LEN(starters), n_regs);
688         workset_clear(ws);
689         workset_bulk_fill(ws, ws_count, starters);
690
691         /* spill phis (the actual phis not just their values) that are in this block
692          * but not in the start workset */
693         len = ARR_LEN(starters);
694         for (i = ws_count; i < len; ++i) {
695                 ir_node *node = starters[i].node;
696                 if (! is_Phi(node) || get_nodes_block(node) != block)
697                         continue;
698
699                 DB((dbg, DBG_START, "    spilling phi %+F\n", node));
700                 be_spill_phi(senv, node);
701         }
702
703         DEL_ARR_F(starters);
704
705         /* determine spill status of the values: if there is at least one pred block
706          * (which is not a backedge) where the value is spilled, then we must mark
707          * it as spilled here as well. */
708         for (i = 0; i < ws_count; ++i) {
709                 loc_t   *loc     = &ws->vals[i];
710                 ir_node *value   = loc->node;
711                 bool     spilled;
712                 int      n;
713
714                 /* phis from this block aren't spilled */
715                 if (get_nodes_block(value) == block) {
716                         assert(is_Phi(value));
717                         loc->spilled = false;
718                         continue;
719                 }
720
721                 /* determine if value was spilled on any predecessor */
722                 spilled = false;
723                 for (n = 0; n < arity; ++n) {
724                         workset_t *pred_workset = pred_worksets[n];
725                         int        p_len;
726                         int        p;
727
728                         if (pred_workset == NULL)
729                                 continue;
730
731                         p_len = workset_get_length(pred_workset);
732                         for (p = 0; p < p_len; ++p) {
733                                 loc_t *l = &pred_workset->vals[p];
734
735                                 if (l->node != value)
736                                         continue;
737
738                                 if (l->spilled) {
739                                         spilled = true;
740                                 }
741                                 break;
742                         }
743                 }
744
745                 loc->spilled = spilled;
746         }
747 }
748
749 /**
750  * For the given block @p block, decide for each value
751  * whether it is used from a register or is reloaded
752  * before the use.
753  */
754 static void process_block(ir_node *block)
755 {
756         workset_t    *new_vals;
757         ir_node      *irn;
758         unsigned      iter;
759         block_info_t *block_info;
760         int           arity;
761
762         /* no need to process a block twice */
763         assert(get_block_info(block) == NULL);
764
765         /* construct start workset */
766         arity = get_Block_n_cfgpreds(block);
767         if (arity == 0) {
768                 /* no predecessor -> empty set */
769                 workset_clear(ws);
770         } else if (arity == 1) {
771                 /* one predecessor, copy its end workset */
772                 ir_node      *pred_block = get_Block_cfgpred_block(block, 0);
773                 block_info_t *pred_info  = get_block_info(pred_block);
774
775                 assert(pred_info != NULL);
776                 workset_copy(ws, pred_info->end_workset);
777         } else {
778                 /* multiple predecessors, do more advanced magic :) */
779                 decide_start_workset(block);
780         }
781
782         DB((dbg, DBG_DECIDE, "\n"));
783         DB((dbg, DBG_DECIDE, "Decide for %+F\n", block));
784
785         block_info = new_block_info();
786         set_block_info(block, block_info);
787
788         DB((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
789         workset_foreach(ws, irn, iter) {
790                 DB((dbg, DBG_WSETS, "  %+F (%u)\n", irn,
791                      workset_get_time(ws, iter)));
792         }
793
794         block_info->start_workset = workset_clone(ws);
795
796         /* process the block from start to end */
797         DB((dbg, DBG_WSETS, "Processing...\n"));
798         instr_nr = 0;
799         /* TODO: this leaks (into the obstack)... */
800         new_vals = new_workset();
801
802         sched_foreach(block, irn) {
803                 int i, arity;
804                 ir_node *value;
805                 assert(workset_get_length(ws) <= n_regs);
806
807                 /* Phis are not real instructions (see insert_starters()) */
808                 if (is_Phi(irn)) {
809                         continue;
810                 }
811                 DB((dbg, DBG_DECIDE, "  ...%+F\n", irn));
812
813                 /* remember the current instruction for displace()/get_distance() */
814                 instr = irn;
815
816                 /* allocate all values _used_ by this instruction */
817                 workset_clear(new_vals);
818                 for (i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
819                         ir_node *in = get_irn_n(irn, i);
820                         if (!arch_irn_consider_in_reg_alloc(cls, in))
821                                 continue;
822
823                         /* (note that "spilled" is irrelevant here) */
824                         workset_insert(new_vals, in, false);
825                 }
826                 displace(new_vals, 1);
827
828                 /* allocate all values _defined_ by this instruction */
829                 workset_clear(new_vals);
830                 be_foreach_definition(irn, cls, value,
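                            /* the belady spiller does not support values that span multiple registers */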
831                         assert(req_->width == 1);
832                         workset_insert(new_vals, value, false);
833                 );
834                 displace(new_vals, 0);
835
836                 instr_nr++;
837         }
838
839         /* Remember end-workset for this block */
840         block_info->end_workset = workset_clone(ws);
841         DB((dbg, DBG_WSETS, "End workset for %+F:\n", block));
842         workset_foreach(ws, irn, iter)
843                 DB((dbg, DBG_WSETS, "  %+F (%u)\n", irn, workset_get_time(ws, iter)));
844 }
845
846 /**
847  * 'decide' is block-local and makes assumptions
848  * about the set of live-ins. Thus we must adapt the
849  * live-outs to the live-ins at each block-border.
850  */
851 static void fix_block_borders(ir_node *block, void *data)
852 {
853         workset_t *start_workset;
854         int        arity;
855         int        i;
856         unsigned   iter;
857         (void) data;
858
859         DB((dbg, DBG_FIX, "\n"));
860         DB((dbg, DBG_FIX, "Fixing %+F\n", block));
861
862         arity = get_irn_arity(block);
863         /* can happen for endless loops */
864         if (arity == 0)
865                 return;
866
867         start_workset = get_block_info(block)->start_workset;
868
869         /* process all pred blocks */
870         for (i = 0; i < arity; ++i) {
871                 ir_node   *pred = get_Block_cfgpred_block(block, i);
872                 workset_t *pred_end_workset = get_block_info(pred)->end_workset;
873                 ir_node   *node;
874
875                 DB((dbg, DBG_FIX, "  Pred %+F\n", pred));
876
877                 /* spill all values not used anymore */
878                 workset_foreach(pred_end_workset, node, iter) {
879                         ir_node *n2;
880                         unsigned iter2;
881                         bool     found = false;
882                         workset_foreach(start_workset, n2, iter2) {
883                                 if (n2 == node) {
884                                         found = true;
885                                         break;
886                                 }
887                                 /* note that we do not look at phi inputs, because the values
888                                  * will either be live-end and need no spill, or
889                                  * they have other users, in which case they must be somewhere
890                                  * else in the workset */
891                         }
892
893                         if (found)
894                                 continue;
895
896                         if (move_spills && be_is_live_in(lv, block, node)
897                                         && !pred_end_workset->vals[iter].spilled) {
898                                 ir_node *insert_point;
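                                    /* with several predecessors the spill has to go at the
                                     * end of the specific predecessor block; with a single
                                     * predecessor it can simply be placed in this block */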
899                                 if (arity > 1) {
900                                         insert_point = be_get_end_of_block_insertion_point(pred);
901                                         insert_point = sched_prev(insert_point);
902                                 } else {
903                                         insert_point = block;
904                                 }
905                                 DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
906                                      insert_point));
907                                 be_add_spill(senv, node, insert_point);
908                         }
909                 }
910
911                 /* reload missing values in predecessors, add missing spills */
912                 workset_foreach(start_workset, node, iter) {
913                         const loc_t *l    = &start_workset->vals[iter];
914                         const loc_t *pred_loc;
915
916                         /* if node is a phi of the current block we reload
917                          * the corresponding argument, else node itself */
918                         if (is_Phi(node) && get_nodes_block(node) == block) {
919                                 node = get_irn_n(node, i);
920                                 assert(!l->spilled);
921
922                                 /* we might have unknowns as argument for the phi */
923                                 if (!arch_irn_consider_in_reg_alloc(cls, node))
924                                         continue;
925                         }
926
927                         /* check if node is in a register at end of pred */
928                         pred_loc = workset_contains(pred_end_workset, node);
929                         if (pred_loc != NULL) {
930                                 /* we might have to spill value on this path */
931                                 if (move_spills && !pred_loc->spilled && l->spilled) {
932                                         ir_node *insert_point
933                                                 = be_get_end_of_block_insertion_point(pred);
934                                         insert_point = sched_prev(insert_point);
935                                         DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
936                                             insert_point));
937                                         be_add_spill(senv, node, insert_point);
938                                 }
939                         } else {
940                                 /* node is not in register at the end of pred -> reload it */
941                                 DB((dbg, DBG_FIX, "    reload %+F\n", node));
942                                 DB((dbg, DBG_SPILL, "Reload %+F before %+F,%d\n", node, block, i));
943                                 be_add_reload_on_edge(senv, node, block, i, cls, 1);
944                         }
945                 }
946         }
947 }
948
949 static void be_spill_belady(ir_graph *irg, const arch_register_class_t *rcls)
950 {
951         int i;
952
953         be_liveness_assure_sets(be_assure_liveness(irg));
954
955         stat_ev_tim_push();
956         /* construct control flow loop tree */
957         if (! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
958                 construct_cf_backedges(irg);
959         }
960         stat_ev_tim_pop("belady_time_backedges");
961
962         stat_ev_tim_push();
963         be_clear_links(irg);
964         stat_ev_tim_pop("belady_time_clear_links");
965
966         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
967
968         /* init belady env */
969         stat_ev_tim_push();
970         obstack_init(&obst);
971         cls       = rcls;
972         lv        = be_get_irg_liveness(irg);
973         n_regs    = be_get_n_allocatable_regs(irg, cls);
974         ws        = new_workset();
975         uses      = be_begin_uses(irg, lv);
976         loop_ana  = be_new_loop_pressure(irg, cls);
977         senv      = be_new_spill_env(irg);
978         blocklist = be_get_cfgpostorder(irg);
979         stat_ev_tim_pop("belady_time_init");
980
981         stat_ev_tim_push();
982         /* walk blocks in reverse postorder */
983         for (i = ARR_LEN(blocklist) - 1; i >= 0; --i) {
984                 process_block(blocklist[i]);
985         }
986         DEL_ARR_F(blocklist);
987         stat_ev_tim_pop("belady_time_belady");
988
989         stat_ev_tim_push();
990         /* belady was block-local, fix the global flow by adding reloads on the
991          * edges */
992         irg_block_walk_graph(irg, fix_block_borders, NULL, NULL);
993         stat_ev_tim_pop("belady_time_fix_borders");
994
995         ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
996
997         /* Insert spill/reload nodes into the graph and fix usages */
998         be_insert_spills_reloads(senv);
999
1000         /* clean up */
1001         be_delete_spill_env(senv);
1002         be_end_uses(uses);
1003         be_free_loop_pressure(loop_ana);
1004         obstack_free(&obst, NULL);
1005 }
1006
1007 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady);
1008 void be_init_spillbelady(void)
1009 {
1010         static be_spiller_t belady_spiller = {
1011                 be_spill_belady
1012         };
1013         lc_opt_entry_t *be_grp       = lc_opt_get_grp(firm_opt_get_root(), "be");
1014         lc_opt_entry_t *belady_group = lc_opt_get_grp(be_grp, "belady");
1015         lc_opt_add_table(belady_group, options);
1016
1017         be_register_spiller("belady", &belady_spiller);
1018         FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
1019 }