/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Belady's spill algorithm.
 * @author      Daniel Grund, Matthias Braun
 * @date        20.09.2005
 * @version     $Id$
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdbool.h>

#include "obst.h"
#include "irprintf_t.h"
#include "irgraph.h"
#include "irnode.h"
#include "irmode.h"
#include "irgwalk.h"
#include "irloop.h"
#include "iredges_t.h"
#include "ircons_t.h"
#include "irprintf.h"
#include "irnodeset.h"
#include "xmalloc.h"
#include "pdeq.h"

#include "beutil.h"
#include "bearch_t.h"
#include "beuses.h"
#include "besched_t.h"
#include "beirgmod.h"
#include "belive_t.h"
#include "benode_t.h"
#include "bechordal_t.h"
#include "bespilloptions.h"
#include "beloopana.h"
#include "beirg_t.h"
#include "bespill.h"
#include "bemodule.h"

#define DBG_SPILL     1
#define DBG_WSETS     2
#define DBG_FIX       4
#define DBG_DECIDE    8
#define DBG_START    16
#define DBG_SLOTS    32
#define DBG_TRACE    64
#define DBG_WORKSET 128
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

#define TIME_UNDEFINED 6666

//#define LOOK_AT_LOOPDEPTH

/**
 * An association between a node and a point in time.
 */
typedef struct loc_t {
        ir_node          *node;
        unsigned          time;     /**< A use time (see beuses.h). */
        bool              spilled;  /**< the value was already spilled on this path */
} loc_t;

typedef struct _workset_t {
        int   len;          /**< current length */
        loc_t vals[0];      /**< inlined array of the values/distances in this working set */
} workset_t;

static struct obstack               obst;
static const arch_env_t            *arch_env;
static const arch_register_class_t *cls;
static const be_lv_t               *lv;
static be_loopana_t                *loop_ana;
static int                          n_regs;
static workset_t                   *ws;     /**< the main workset used while
                                                  processing a block. */
static be_uses_t                   *uses;   /**< env for the next-use magic */
static ir_node                     *instr;  /**< current instruction */
static unsigned                     instr_nr; /**< current instruction number
                                                    (relative to block start) */
static spill_env_t                 *senv;   /**< see bespill.h */
static pdeq                        *worklist;

static bool                         move_spills      = true;
static bool                         respectloopdepth = true;
static bool                         improve_known_preds = true;
/* factor to weight the different costs of reloading/rematerializing a node
   (see bespill.h be_get_reload_costs_no_weight) */
static int                          remat_bonus      = 10;

static const lc_opt_table_entry_t options[] = {
        LC_OPT_ENT_BOOL   ("movespills", "try to move spills out of loops", &move_spills),
        LC_OPT_ENT_BOOL   ("respectloopdepth", "experimental (outermost loop cutting)", &respectloopdepth),
        LC_OPT_ENT_BOOL   ("improveknownpreds", "experimental (known preds cutting)", &improve_known_preds),
        LC_OPT_ENT_INT    ("rematbonus", "give bonus to rematerialisable nodes", &remat_bonus),
        LC_OPT_LAST
};
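
/* Note (assumption about the option frontend, not part of this module): since
 * the table is registered in the "belady" group below the "be" group (see
 * be_init_spillbelady() at the end of this file), these entries presumably
 * surface as be.belady.movespills, be.belady.respectloopdepth,
 * be.belady.improveknownpreds and be.belady.rematbonus; the exact command
 * line syntax depends on the lc_opt frontend in use. */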

static int loc_compare(const void *a, const void *b)
{
        const loc_t *p = a;
        const loc_t *q = b;
        return p->time - q->time;
}

void workset_print(const workset_t *w)
{
        int i;

        for(i = 0; i < w->len; ++i) {
                ir_fprintf(stderr, "%+F %d\n", w->vals[i].node, w->vals[i].time);
        }
}

/**
 * Allocates a new workset on the module obstack with room for n_regs entries.
 */
static workset_t *new_workset(void)
{
        workset_t *res;
        size_t     size = sizeof(*res) + n_regs * sizeof(res->vals[0]);

        res  = obstack_alloc(&obst, size);
        memset(res, 0, size);
        return res;
}
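
/* Note: the allocation above relies on the vals[0] flexible-array idiom. As a
 * purely illustrative example, with n_regs == 8 a workset occupies
 * sizeof(workset_t) + 8 * sizeof(loc_t) bytes, all carved out of the module
 * obstack (the number 8 is invented here; n_regs is computed per register
 * class in be_spill_belady()). */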

/**
 * Allocates a new workset on the obstack and initialises it as a copy of @p workset.
 */
static workset_t *workset_clone(workset_t *workset)
{
        workset_t *res;
        size_t size = sizeof(*res) + n_regs * sizeof(res->vals[0]);
        res = obstack_alloc(&obst, size);
        memcpy(res, workset, size);
        return res;
}

/**
 * Copies workset @p src to @p dest.
 */
static void workset_copy(workset_t *dest, const workset_t *src)
{
        size_t size = sizeof(*src) + n_regs * sizeof(src->vals[0]);
        memcpy(dest, src, size);
}

/**
 * Overwrites the content of @p workset with the @p count locations
 * given at memory @p locs and sets the length of @p workset to @p count.
 */
static void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs)
{
        workset->len = count;
        memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
}

/**
 * Inserts the value @p val into the workset if it is not
 * already contained. The workset must not be full.
 */
static void workset_insert(workset_t *workset, ir_node *val, bool spilled)
{
        loc_t *loc;
        int    i;
        /* check for current regclass */
        assert(arch_irn_consider_in_reg_alloc(arch_env, cls, val));

        /* check if val is already contained */
        for (i = 0; i < workset->len; ++i) {
                loc = &workset->vals[i];
                if (loc->node == val) {
                        if (spilled) {
                                loc->spilled = true;
                        }
                        return;
                }
        }

        /* insert val */
        assert(workset->len < n_regs && "Workset already full!");
        loc           = &workset->vals[workset->len];
        loc->node     = val;
        loc->spilled  = spilled;
        loc->time     = TIME_UNDEFINED;
        workset->len++;
}

/**
 * Removes all entries from this workset
 */
static void workset_clear(workset_t *workset)
{
        workset->len = 0;
}

/**
 * Removes the value @p val from the workset if present.
 */
static INLINE void workset_remove(workset_t *workset, ir_node *val)
{
        int i;
        for(i = 0; i < workset->len; ++i) {
                if (workset->vals[i].node == val) {
                        workset->vals[i] = workset->vals[--workset->len];
                        return;
                }
        }
}

static INLINE const loc_t *workset_contains(const workset_t *ws,
                                            const ir_node *val)
{
        int i;

        for (i = 0; i < ws->len; ++i) {
                if (ws->vals[i].node == val)
                        return &ws->vals[i];
        }

        return NULL;
}

/**
 * Iterates over all values in the working set.
 * @p ws The workset to iterate
 * @p v  A variable to put the current value in
 * @p i  An integer for internal use
 */
#define workset_foreach(ws, v, i) \
        for (i = 0; v = (i < ws->len) ? ws->vals[i].node : NULL, i < ws->len; ++i)

#define workset_set_time(ws, i, t) (ws)->vals[i].time=t
#define workset_get_time(ws, i) (ws)->vals[i].time
#define workset_set_length(ws, length) (ws)->len = length
#define workset_get_length(ws) ((ws)->len)
#define workset_get_val(ws, i) ((ws)->vals[i].node)
#define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare)
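
/* Illustrative usage sketch (variable names hypothetical, mirroring the
 * pattern used throughout this file): iterate a workset and inspect the
 * recorded next-use times, e.g.
 *
 *     ir_node *val;
 *     int      i;
 *     workset_foreach(ws, val, i) {
 *         unsigned t = workset_get_time(ws, i);
 *         ...
 *     }
 *
 * Note that workset_foreach sets the iteration variable to NULL once the end
 * of the set is reached. */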

typedef struct _block_info_t
{
        workset_t *start_workset;
        workset_t *end_workset;
} block_info_t;


static void *new_block_info(void)
{
        block_info_t *res = obstack_alloc(&obst, sizeof(res[0]));
        memset(res, 0, sizeof(res[0]));

        return res;
}

#define get_block_info(block)        ((block_info_t *)get_irn_link(block))
#define set_block_info(block, info)  set_irn_link(block, info)

/**
 * @return The distance to the next use or 0 if @p def has the dont_spill flag set
 */
static INLINE unsigned get_distance(ir_node *from, unsigned from_step,
                                    const ir_node *def, int skip_from_uses)
{
        be_next_use_t use;
        int           flags = arch_irn_get_flags(arch_env, def);
        unsigned      costs;
        unsigned      time;

        assert(! (flags & arch_irn_flags_ignore));

        use  = be_get_next_use(uses, from, from_step, def, skip_from_uses);
        time = use.time;
        if (USES_IS_INFINITE(time))
                return USES_INFINITY;

        /* We have to keep nonspillable nodes in the working set */
        if (flags & arch_irn_flags_dont_spill)
                return 0;

        /* give some bonus to rematerialisable nodes */
        if (remat_bonus > 0) {
                costs = be_get_reload_costs_no_weight(senv, def, use.before);
                assert(costs * remat_bonus < 1000);
                time  += 1000 - (costs * remat_bonus);
        }

        return time;
}
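
/* Worked example for the remat bonus (numbers purely illustrative): with the
 * default remat_bonus of 10, a value whose reload costs are 3 units gets
 * time += 1000 - 3 * 10 = 970, while a cheaply rematerialisable value with
 * costs 1 gets time += 990. The cheaper value therefore appears to have a
 * more distant next use and becomes the preferred candidate for displacement,
 * while expensive-to-reload values tend to stay in registers. */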

/**
 * Performs the actions necessary to grant the request that:
 * - new_vals can be held in registers
 * - as few other values as possible are disposed
 * - the values with the farthest next use are the ones disposed
 *
 * @p is_usage indicates that the values in new_vals are used (not defined);
 * in this case reloads must be inserted for values not already in the workset.
 */
static void displace(workset_t *new_vals, int is_usage)
{
        ir_node **to_insert = alloca(n_regs * sizeof(to_insert[0]));
        bool     *spilled   = alloca(n_regs * sizeof(spilled[0]));
        ir_node  *val;
        int       i;
        int       len;
        int       spills_needed;
        int       demand;
        int       iter;

        /* 1. Identify the number of needed slots and the values to reload */
        demand = 0;
        workset_foreach(new_vals, val, iter) {
                bool reloaded = false;

                if (! workset_contains(ws, val)) {
                        DB((dbg, DBG_DECIDE, "    insert %+F\n", val));
                        if (is_usage) {
                                DB((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, instr));
                                be_add_reload(senv, val, instr, cls, 1);
                                reloaded = true;
                        }
                } else {
                        DB((dbg, DBG_DECIDE, "    %+F already in workset\n", val));
                        assert(is_usage);
                        /* remove the value from the current workset so it is not accidentally
                         * spilled */
                        workset_remove(ws, val);
                }
                spilled[demand]   = reloaded;
                to_insert[demand] = val;
                ++demand;
        }

        /* 2. Make room for at least 'demand' slots */
        len           = workset_get_length(ws);
        spills_needed = len + demand - n_regs;
        assert(spills_needed <= len);

        /* Only make more free room if we do not have enough */
        if (spills_needed > 0) {
                ir_node   *curr_bb  = NULL;
                workset_t *ws_start = NULL;

                if (move_spills) {
                        curr_bb  = get_nodes_block(instr);
                        ws_start = get_block_info(curr_bb)->start_workset;
                }

                DB((dbg, DBG_DECIDE, "    disposing %d values\n", spills_needed));

                /* calculate current next-use distance for live values */
                for (i = 0; i < len; ++i) {
                        ir_node  *val  = workset_get_val(ws, i);
                        unsigned  dist = get_distance(instr, instr_nr, val, !is_usage);
                        workset_set_time(ws, i, dist);
                }

                /* sort entries by increasing next-use distance */
                workset_sort(ws);

                for (i = len - spills_needed; i < len; ++i) {
                        ir_node *val = ws->vals[i].node;

                        DB((dbg, DBG_DECIDE, "    disposing node %+F (%u)\n", val,
                             workset_get_time(ws, i)));

                        if (move_spills) {
                                if (!USES_IS_INFINITE(ws->vals[i].time)
                                                && !ws->vals[i].spilled) {
                                        ir_node *after_pos = sched_prev(instr);
                                        DB((dbg, DBG_DECIDE, "Spill %+F after node %+F\n", val,
                                                after_pos));
                                        be_add_spill(senv, val, after_pos);
                                }
                        }
                }

                /* kill the last 'spills_needed' entries in the array */
                workset_set_length(ws, len - spills_needed);
        }

        /* 3. Insert the new values into the workset */
        for (i = 0; i < demand; ++i) {
                ir_node *val = to_insert[i];

                workset_insert(ws, val, spilled[i]);
        }
}
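
/* Illustrative trace of displace() (register counts invented): assume
 * n_regs == 4, the current workset is {a, b, c, d} and an instruction uses
 * {e, f}. Both e and f are missing, so demand == 2 and reloads are added for
 * them; spills_needed == 4 + 2 - 4 == 2. The live values are then ranked by
 * next-use distance and the two with the farthest next use are dropped from
 * the workset (and, with move_spills enabled, spilled before the instruction
 * unless already marked as spilled or dead). Finally e and f are inserted
 * into the freed slots. */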

enum {
        AVAILABLE_EVERYWHERE,
        AVAILABLE_NOWHERE,
        AVAILABLE_PARTLY,
        AVAILABLE_UNKNOWN
};

static unsigned available_in_all_preds(workset_t* const* pred_worksets,
                                       size_t n_pred_worksets,
                                       const ir_node *value, bool is_local_phi)
{
        size_t i;
        bool   avail_everywhere = true;
        bool   avail_nowhere    = true;

        assert(n_pred_worksets > 0);

        /* value available in all preds? */
        for (i = 0; i < n_pred_worksets; ++i) {
                bool             found     = false;
                const workset_t *p_workset = pred_worksets[i];
                int              p_len     = workset_get_length(p_workset);
                int              p_i;
                const ir_node   *l_value;

                if (is_local_phi) {
                        assert(is_Phi(value));
                        l_value = get_irn_n(value, i);
                } else {
                        l_value = value;
                }

                for (p_i = 0; p_i < p_len; ++p_i) {
                        const loc_t *p_l = &p_workset->vals[p_i];
                        if (p_l->node != l_value)
                                continue;

                        found = true;
                        break;
                }

                if (found) {
                        avail_nowhere = false;
                } else {
                        avail_everywhere = false;
                }
        }

        if (avail_everywhere) {
                assert(!avail_nowhere);
                return AVAILABLE_EVERYWHERE;
        } else if (avail_nowhere) {
                return AVAILABLE_NOWHERE;
        } else {
                return AVAILABLE_PARTLY;
        }
}
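
/* Example of the is_local_phi distinction (value names invented): for a Phi
 * in the current block with arguments x from pred 0 and y from pred 1,
 * availability is checked for x in pred 0's end workset and for y in pred 1's
 * end workset. For an ordinary live-in value v, the same v is looked up in
 * every predecessor workset. AVAILABLE_UNKNOWN is never produced by this
 * function; callers use it themselves when not all predecessor worksets are
 * known yet. */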

/** Decides whether a specific node should be in the start workset or not
 *
 * @param first      first instruction of the block (next uses are computed
 *                   relative to it)
 * @param node       the node to test
 * @param loop       the loop of the block
 * @param available  availability of the node in the predecessor worksets
 *                   (one of the AVAILABLE_* values)
 */
static loc_t to_take_or_not_to_take(ir_node* first, ir_node *node,
                                    ir_loop *loop, unsigned available)
{
        be_next_use_t next_use;
        loc_t         loc;

        loc.time    = USES_INFINITY;
        loc.node    = node;
        loc.spilled = false;

        if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
                loc.time = USES_INFINITY;
                return loc;
        }

        /* We have to keep nonspillable nodes in the working set */
        if (arch_irn_get_flags(arch_env, node) & arch_irn_flags_dont_spill) {
                loc.time = 0;
                DB((dbg, DBG_START, "    %+F taken (dontspill node)\n", node));
                return loc;
        }

        next_use = be_get_next_use(uses, first, 0, node, 0);
        if (USES_IS_INFINITE(next_use.time)) {
                /* nodes marked as live-in shouldn't be dead, so this must be a phi */
                assert(is_Phi(node));
                loc.time = USES_INFINITY;
                DB((dbg, DBG_START, "    %+F not taken (dead)\n", node));
                return loc;
        }

        loc.time = next_use.time;

        if (improve_known_preds) {
                if (available == AVAILABLE_EVERYWHERE) {
                        DB((dbg, DBG_START, "    %+F taken (%u, live in all preds)\n",
                            node, loc.time));
                        return loc;
                } else if (available == AVAILABLE_NOWHERE) {
                        DB((dbg, DBG_START, "    %+F not taken (%u, live in no pred)\n",
                            node, loc.time));
                        loc.time = USES_INFINITY;
                        return loc;
                }
        }

        if (!respectloopdepth || next_use.outermost_loop >= get_loop_depth(loop)) {
                DB((dbg, DBG_START, "    %+F taken (%u, loop %d)\n", node, loc.time,
                    next_use.outermost_loop));
        } else {
                loc.time = USES_PENDING;
                DB((dbg, DBG_START, "    %+F delayed (outerdepth %d < loopdepth %d)\n",
                    node, next_use.outermost_loop, get_loop_depth(loop)));
        }

        return loc;
}
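
/* Example of the loop-depth cut (depths invented for illustration): with
 * respectloopdepth enabled, a live-in value whose next use lies in a loop of
 * outermost depth 1 while the current block sits in a loop of depth 2 is only
 * "delayed" (USES_PENDING) rather than taken; decide_start_workset() below
 * admits delayed values only while free register slots remain. */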

/**
 * Computes the start-workset for a block with multiple predecessors. We assume
 * that at least one of the predecessors is a back-edge, which means we're at
 * the beginning of a loop. We try to reload as many values as possible now so
 * they don't get reloaded inside the loop.
 */
static void decide_start_workset(const ir_node *block)
{
        ir_loop    *loop = get_irn_loop(block);
        ir_node    *first;
        ir_node    *node;
        loc_t       loc;
        loc_t      *starters;
        loc_t      *delayed;
        int         i, len, ws_count;
        int         free_slots, free_pressure_slots;
        unsigned    pressure;
        int         arity;
        workset_t **pred_worksets;
        bool        all_preds_known;

        /* check predecessors */
        arity           = get_irn_arity(block);
        pred_worksets   = alloca(sizeof(pred_worksets[0]) * arity);
        all_preds_known = true;
        for(i = 0; i < arity; ++i) {
                ir_node      *pred_block = get_Block_cfgpred_block(block, i);
                block_info_t *pred_info  = get_block_info(pred_block);

                if (pred_info == NULL) {
                        pred_worksets[i] = NULL;
                        all_preds_known  = false;
                } else {
                        pred_worksets[i] = pred_info->end_workset;
                }
        }

        /* Collect all values living at start of block */
        starters = NEW_ARR_F(loc_t, 0);
        delayed  = NEW_ARR_F(loc_t, 0);

        DB((dbg, DBG_START, "Living at start of %+F:\n", block));
        first = sched_first(block);

        /* check all Phis first */
        sched_foreach(block, node) {
                unsigned available;

                if (! is_Phi(node))
                        break;
                if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
                        continue;

                if (all_preds_known) {
                        available = available_in_all_preds(pred_worksets, arity, node, true);
                } else {
                        available = AVAILABLE_UNKNOWN;
                }

                loc = to_take_or_not_to_take(first, node, loop, available);

                if (! USES_IS_INFINITE(loc.time)) {
                        if (USES_IS_PENDING(loc.time))
                                ARR_APP1(loc_t, delayed, loc);
                        else
                                ARR_APP1(loc_t, starters, loc);
                } else {
                        be_spill_phi(senv, node);
                }
        }

        /* check all Live-Ins */
        be_lv_foreach(lv, block, be_lv_state_in, i) {
                ir_node *node = be_lv_get_irn(lv, block, i);
                unsigned available;

                if (all_preds_known) {
                        available = available_in_all_preds(pred_worksets, arity, node, false);
                } else {
                        available = AVAILABLE_UNKNOWN;
                }

                loc = to_take_or_not_to_take(first, node, loop, available);

                if (! USES_IS_INFINITE(loc.time)) {
                        if (USES_IS_PENDING(loc.time))
                                ARR_APP1(loc_t, delayed, loc);
                        else
                                ARR_APP1(loc_t, starters, loc);
                }
        }

        pressure            = be_get_loop_pressure(loop_ana, cls, loop);
        assert(ARR_LEN(delayed) <= (signed)pressure);
        free_slots          = n_regs - ARR_LEN(starters);
        free_pressure_slots = n_regs - (pressure - ARR_LEN(delayed));
        free_slots          = MIN(free_slots, free_pressure_slots);
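        /* Worked example (all numbers invented): with n_regs == 8, 5 starter
         * values, a loop pressure of 9 and 3 delayed values, free_slots is
         * min(8 - 5, 8 - (9 - 3)) = min(3, 2) = 2, so at most two delayed
         * values may additionally be kept live through the loop. */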

        /* so far we only put nodes into the starters list that are used inside
         * the loop. If register pressure in the loop is low then we can take some
         * values and let them live through the loop */
        DB((dbg, DBG_START, "Loop pressure %d, taking %d delayed vals\n",
            pressure, free_slots));
        if (free_slots > 0) {
                qsort(delayed, ARR_LEN(delayed), sizeof(delayed[0]), loc_compare);

                for (i = 0; i < ARR_LEN(delayed) && free_slots > 0; ++i) {
                        int    p, arity;
                        loc_t *loc = & delayed[i];

                        /* don't use values that are dead in a known predecessor,
                         * to avoid inducing unnecessary reloads */
                        arity = get_irn_arity(block);
                        for (p = 0; p < arity; ++p) {
                                ir_node      *pred_block = get_Block_cfgpred_block(block, p);
                                block_info_t *pred_info  = get_block_info(pred_block);

                                if (pred_info == NULL)
                                        continue;

                                if (!workset_contains(pred_info->end_workset, loc->node)) {
                                        DB((dbg, DBG_START,
                                            "    delayed %+F not live at pred %+F\n", loc->node,
                                            pred_block));
                                        goto skip_delayed;
                                }
                        }

                        DB((dbg, DBG_START, "    delayed %+F taken\n", loc->node));
                        ARR_APP1(loc_t, starters, *loc);
                        loc->node = NULL;
                        --free_slots;
                skip_delayed:
                        ;
                }
        }

        /* spill phis (the actual phi nodes, not just their values) that are in
         * this block but did not make it into the start workset */
        for (i = ARR_LEN(delayed) - 1; i >= 0; --i) {
                ir_node *node = delayed[i].node;
                if (node == NULL || !is_Phi(node) || get_nodes_block(node) != block)
                        continue;

                DB((dbg, DBG_START, "    spilling delayed phi %+F\n", node));
                be_spill_phi(senv, node);
        }
        DEL_ARR_F(delayed);

        /* Sort start values by first use */
        qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);

        /* Copy the best ones from starters to start workset */
        ws_count = MIN(ARR_LEN(starters), n_regs);
        workset_clear(ws);
        workset_bulk_fill(ws, ws_count, starters);

        /* spill phis (the actual phi nodes, not just their values) that are in
         * this block but not in the start workset */
        len = ARR_LEN(starters);
        for (i = ws_count; i < len; ++i) {
                ir_node *node = starters[i].node;
                if (! is_Phi(node) || get_nodes_block(node) != block)
                        continue;

                DB((dbg, DBG_START, "    spilling phi %+F\n", node));
                be_spill_phi(senv, node);
        }

        DEL_ARR_F(starters);

        /* determine the spill status of the values: if there is at least one
         * predecessor block (that is not a backedge) where the value is spilled,
         * then it must be marked as spilled here as well */
        for(i = 0; i < ws_count; ++i) {
                loc_t   *loc     = &ws->vals[i];
                ir_node *value   = loc->node;
                bool     spilled;
                int      n;

                /* phis from this block aren't spilled */
                if (get_nodes_block(value) == block) {
                        assert(is_Phi(value));
                        loc->spilled = false;
                        continue;
                }

                /* determine if value was spilled on any predecessor */
                spilled = false;
                for(n = 0; n < arity; ++n) {
                        workset_t *pred_workset = pred_worksets[n];
                        int        p_len;
                        int        p;

                        if (pred_workset == NULL)
                                continue;

                        p_len = workset_get_length(pred_workset);
                        for(p = 0; p < p_len; ++p) {
                                loc_t *l = &pred_workset->vals[p];

                                if (l->node != value)
                                        continue;

                                if (l->spilled) {
                                        spilled = true;
                                }
                                break;
                        }
                }

                loc->spilled = spilled;
        }
}

/**
 * For the given block @p block, decide for each value
 * whether it is used from a register or is reloaded
 * before its use.
 */
static void belady(ir_node *block)
{
        workset_t       *new_vals;
        ir_node         *irn;
        int              iter;
        block_info_t    *block_info;
        int              i, arity;
        int              has_backedges = 0;
        //int              first         = 0;
        const ir_edge_t *edge;

        /* no need to process a block twice */
        if (get_block_info(block) != NULL) {
                return;
        }

        /* check whether all predecessor blocks have been processed yet (for
         * backedges we have to make an exception as we can't process them first) */
        arity = get_Block_n_cfgpreds(block);
        for(i = 0; i < arity; ++i) {
                ir_node      *pred_block = get_Block_cfgpred_block(block, i);
                block_info_t *pred_info  = get_block_info(pred_block);

                if (pred_info == NULL) {
                        /* process predecessor first (it will be in the queue already) */
                        if (!is_backedge(block, i)) {
                                return;
                        }
                        has_backedges = 1;
                }
        }
        (void) has_backedges;
        if (arity == 0) {
                workset_clear(ws);
        } else if (arity == 1) {
                ir_node      *pred_block = get_Block_cfgpred_block(block, 0);
                block_info_t *pred_info  = get_block_info(pred_block);

                assert(pred_info != NULL);
                workset_copy(ws, pred_info->end_workset);
        } else {
                /* we need 2 heuristics here, for the case when all predecessor blocks
                 * are known and when some are backedges (and therefore can't be known
                 * yet) */
                decide_start_workset(block);
        }

        DB((dbg, DBG_DECIDE, "\n"));
        DB((dbg, DBG_DECIDE, "Decide for %+F\n", block));

        block_info = new_block_info();
        set_block_info(block, block_info);

        DB((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
        workset_foreach(ws, irn, iter) {
                DB((dbg, DBG_WSETS, "  %+F (%u)\n", irn,
                     workset_get_time(ws, iter)));
        }

        block_info->start_workset = workset_clone(ws);

        /* process the block from start to end */
        DB((dbg, DBG_WSETS, "Processing...\n"));
        instr_nr = 0;
        /* TODO: this leaks (into the obstack)... */
        new_vals = new_workset();

        sched_foreach(block, irn) {
                int i, arity;
                assert(workset_get_length(ws) <= n_regs);

                /* Phis are not real instructions (see insert_starters()) */
                if (is_Phi(irn)) {
                        continue;
                }
                DB((dbg, DBG_DECIDE, "  ...%+F\n", irn));

                /* remember the current instruction */
                instr = irn;

                /* allocate all values _used_ by this instruction */
                workset_clear(new_vals);
                for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
                        ir_node *in = get_irn_n(irn, i);
                        if (!arch_irn_consider_in_reg_alloc(arch_env, cls, in))
                                continue;

                        /* (note that "spilled" is irrelevant here) */
                        workset_insert(new_vals, in, false);
                }
                displace(new_vals, 1);

                /* allocate all values _defined_ by this instruction */
                workset_clear(new_vals);
                if (get_irn_mode(irn) == mode_T) {
                        const ir_edge_t *edge;

                        foreach_out_edge(irn, edge) {
                                ir_node *proj = get_edge_src_irn(edge);
                                if (!arch_irn_consider_in_reg_alloc(arch_env, cls, proj))
                                        continue;
                                workset_insert(new_vals, proj, false);
                        }
                } else {
                        if (!arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
                                continue;
                        workset_insert(new_vals, irn, false);
                }
                displace(new_vals, 0);

                instr_nr++;
        }

        /* Remember end-workset for this block */
        block_info->end_workset = workset_clone(ws);
        DB((dbg, DBG_WSETS, "End workset for %+F:\n", block));
        workset_foreach(ws, irn, iter)
                DB((dbg, DBG_WSETS, "  %+F (%u)\n", irn,
                     workset_get_time(ws, iter)));

        /* add successor blocks into worklist */
        foreach_block_succ(block, edge) {
                ir_node *succ = get_edge_src_irn(edge);
                pdeq_putr(worklist, succ);
        }
}
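
/* Sketch of the overall processing order (hypothetical CFG): for a diamond
 * start -> A -> {B, C} -> D the FIFO worklist yields start, A, B, C and
 * finally D, since D is only processed once both B and C have end worksets.
 * For a loop header that is also reached via a backedge from an unprocessed
 * block, the backedge is ignored (see the is_backedge() check above) and the
 * header is handled by decide_start_workset() instead of a simple copy of a
 * predecessor workset. */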

/**
 * The decisions made by belady() are block-local and make assumptions about
 * the set of live-ins. Thus we must adapt the live-outs of each predecessor
 * to the live-ins at every block border.
 */
static void fix_block_borders(ir_node *block, void *data)
{
        workset_t    *start_workset;
        int           arity;
        int           i;
        int           iter;
        (void) data;

        DB((dbg, DBG_FIX, "\n"));
        DB((dbg, DBG_FIX, "Fixing %+F\n", block));

        start_workset = get_block_info(block)->start_workset;

        /* process all pred blocks */
        arity = get_irn_arity(block);
        for (i = 0; i < arity; ++i) {
                ir_node   *pred = get_Block_cfgpred_block(block, i);
                workset_t *pred_end_workset = get_block_info(pred)->end_workset;
                ir_node   *node;

                DB((dbg, DBG_FIX, "  Pred %+F\n", pred));

                /* spill all values not used anymore */
                workset_foreach(pred_end_workset, node, iter) {
                        ir_node *n2;
                        int      iter2;
                        bool     found = false;
                        workset_foreach(start_workset, n2, iter2) {
                                if (n2 == node) {
                                        found = true;
                                        break;
                                }
                                /* note that we do not look at phi inputs, because the values
                                 * are either live at the end (and need no spill) or they have
                                 * other users, in which case they must appear somewhere else
                                 * in the workset */
                        }

                        if (found)
                                continue;

                        if (move_spills && be_is_live_in(lv, block, node)
                                        && !pred_end_workset->vals[iter].spilled) {
                                ir_node *insert_point;
                                if (arity > 1) {
                                        insert_point = be_get_end_of_block_insertion_point(pred);
                                        insert_point = sched_prev(insert_point);
                                } else {
                                        insert_point = block;
                                }
                                DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
                                     insert_point));
                                be_add_spill(senv, node, insert_point);
                        }
                }

                /* reload missing values in predecessors, add missing spills */
                workset_foreach(start_workset, node, iter) {
                        const loc_t *l    = &start_workset->vals[iter];
                        const loc_t *pred_loc;

                        /* if node is a phi of the current block we reload
                         * the corresponding argument, else node itself */
                        if (is_Phi(node) && get_nodes_block(node) == block) {
                                node = get_irn_n(node, i);
                                assert(!l->spilled);

                                /* we might have unknowns as argument for the phi */
                                if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
                                        continue;
                        }

                        /* check if node is in a register at end of pred */
                        pred_loc = workset_contains(pred_end_workset, node);
                        if (pred_loc != NULL) {
                                /* we might have to spill value on this path */
                                if (move_spills && !pred_loc->spilled && l->spilled) {
                                        ir_node *insert_point
                                                = be_get_end_of_block_insertion_point(pred);
                                        insert_point = sched_prev(insert_point);
                                        DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
                                            insert_point));
                                        be_add_spill(senv, node, insert_point);
                                }
                        } else {
                                /* node is not in a register at the end of pred -> reload it */
                                DB((dbg, DBG_FIX, "    reload %+F\n", node));
                                DB((dbg, DBG_SPILL, "Reload %+F before %+F,%d\n", node, block, i));
                                be_add_reload_on_edge(senv, node, block, i, cls, 1);
                        }
                }
        }
}
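
/* Border-fixing example (block names invented): if value v is in the start
 * workset of block B but missing from the end workset of predecessor P, a
 * reload of v is added on the edge P -> B; conversely, a value that is live
 * at the end of P but absent from B's start workset may need a spill on that
 * edge (unless it was already spilled on this path). */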

static void be_spill_belady(be_irg_t *birg, const arch_register_class_t *rcls)
{
        ir_graph *irg = be_get_birg_irg(birg);

        be_liveness_assure_sets(be_assure_liveness(birg));

        /* construct control flow loop tree */
        if (! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
                construct_cf_backedges(irg);
        }

        be_clear_links(irg);

        /* init belady env */
        obstack_init(&obst);
        arch_env = birg->main_env->arch_env;
        cls      = rcls;
        lv       = be_get_birg_liveness(birg);
        n_regs   = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
        ws       = new_workset();
        uses     = be_begin_uses(irg, lv);
        loop_ana = be_new_loop_pressure(birg);
        senv     = be_new_spill_env(birg);
        worklist = new_pdeq();

        pdeq_putr(worklist, get_irg_start_block(irg));

        while(!pdeq_empty(worklist)) {
                ir_node *block = pdeq_getl(worklist);
                belady(block);
        }
        /* end block might not be reachable in endless loops */
        belady(get_irg_end_block(irg));

        del_pdeq(worklist);

        /* belady was block-local, fix the global flow by adding reloads on the
         * edges */
        irg_block_walk_graph(irg, fix_block_borders, NULL, NULL);

        /* Insert spill/reload nodes into the graph and fix usages */
        be_insert_spills_reloads(senv);

        /* clean up */
        be_delete_spill_env(senv);
        be_end_uses(uses);
        be_free_loop_pressure(loop_ana);
        obstack_free(&obst, NULL);
}

void be_init_spillbelady(void)
{
        static be_spiller_t belady_spiller = {
                be_spill_belady
        };
        lc_opt_entry_t *be_grp       = lc_opt_get_grp(firm_opt_get_root(), "be");
        lc_opt_entry_t *belady_group = lc_opt_get_grp(be_grp, "belady");
        lc_opt_add_table(belady_group, options);

        be_register_spiller("belady", &belady_spiller);
        FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady);