1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       Belady's spill algorithm.
23  * @author      Daniel Grund, Matthias Braun
24  * @date        20.09.2005
25  */
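
/*
 * Rough structure of this spiller: blocks are visited in reverse postorder
 * (process_block()).  For every block a start workset is computed, then each
 * instruction is processed while keeping at most n_regs values in the
 * workset; when room is needed, the values with the largest next-use
 * distance are dropped (Belady's MIN strategy) and spilled if necessary.
 * Afterwards fix_block_borders() inserts the spills/reloads needed on the
 * control-flow edges between blocks.
 */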
26 #include "config.h"
27
28 #include <stdbool.h>
29
30 #include "obst.h"
31 #include "irprintf_t.h"
32 #include "irgraph.h"
33 #include "irnode.h"
34 #include "irmode.h"
35 #include "irgwalk.h"
36 #include "irloop.h"
37 #include "iredges_t.h"
38 #include "ircons_t.h"
39 #include "irprintf.h"
40 #include "irnodeset.h"
41 #include "irtools.h"
42 #include "statev_t.h"
43 #include "util.h"
44
45 #include "beutil.h"
46 #include "bearch.h"
47 #include "beuses.h"
48 #include "besched.h"
49 #include "beirgmod.h"
50 #include "belive_t.h"
51 #include "benode.h"
52 #include "bechordal_t.h"
53 #include "bespill.h"
54 #include "beloopana.h"
55 #include "beirg.h"
56 #include "bespillutil.h"
57 #include "bemodule.h"
58
59 #define DBG_SPILL     1
60 #define DBG_WSETS     2
61 #define DBG_FIX       4
62 #define DBG_DECIDE    8
63 #define DBG_START    16
64 #define DBG_SLOTS    32
65 #define DBG_TRACE    64
66 #define DBG_WORKSET 128
67 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
68
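/* Marker for a use time that has not been computed yet: workset_insert() uses
 * it as a placeholder; displace() computes the real distances before sorting. */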
69 #define TIME_UNDEFINED 6666
70
71 /**
72  * An association between a node and a point in time.
73  */
74 typedef struct loc_t {
75         ir_node          *node;
76         unsigned          time;     /**< A use time (see beuses.h). */
77         bool              spilled;  /**< value was already spilled on this path */
78 } loc_t;
79
80 typedef struct workset_t {
81         unsigned len;     /**< current length */
82         loc_t    vals[];  /**< array of the values/distances in this working set */
83 } workset_t;
84
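/* Global state for a single run of the spiller; (re)initialised in
 * be_spill_belady(). */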
85 static struct obstack               obst;
86 static const arch_register_class_t *cls;
87 static const be_lv_t               *lv;
88 static be_loopana_t                *loop_ana;
89 static unsigned                     n_regs;
90 static workset_t                   *ws;     /**< the main workset used while
91                                                      processing a block. */
92 static be_uses_t                   *uses;   /**< env for the next-use magic */
93 static ir_node                     *instr;  /**< current instruction */
94 static spill_env_t                 *senv;   /**< see bespill.h */
95 static ir_node                    **blocklist;
96
97 static int                          move_spills      = true;
98 static int                          respectloopdepth = true;
99 static int                          improve_known_preds = true;
100 /* factor to weight the different costs of reloading/rematerializing a node
101    (see bespill.h be_get_reload_costs_no_weight) */
102 static int                          remat_bonus      = 10;
103
104 static const lc_opt_table_entry_t options[] = {
105         LC_OPT_ENT_BOOL   ("movespills", "try to move spills out of loops", &move_spills),
106         LC_OPT_ENT_BOOL   ("respectloopdepth", "outermost loop cutting", &respectloopdepth),
107         LC_OPT_ENT_BOOL   ("improveknownpreds", "known preds cutting", &improve_known_preds),
108         LC_OPT_ENT_INT    ("rematbonus", "give bonus to rematerialisable nodes", &remat_bonus),
109         LC_OPT_LAST
110 };
111
112 /**
113  * Allocate a new workset on the global obstack, with room for n_regs values.
114  */
115 static workset_t *new_workset(void)
116 {
117         return OALLOCFZ(&obst, workset_t, vals, n_regs);
118 }
119
120 /**
121  * Allocate a new workset on the obstack and initialise it as a copy of @p workset.
122  */
123 static workset_t *workset_clone(workset_t *workset)
124 {
125         workset_t *res = OALLOCF(&obst, workset_t, vals, n_regs);
126         memcpy(res, workset, sizeof(*res) + n_regs * sizeof(res->vals[0]));
127         return res;
128 }
129
130 /**
131  * Copy workset @p src to @p dest.
132  */
133 static void workset_copy(workset_t *dest, const workset_t *src)
134 {
135         size_t size = sizeof(*src) + n_regs * sizeof(src->vals[0]);
136         memcpy(dest, src, size);
137 }
138
139 /**
140  * Overwrites the contents of @p workset with the @p count
141  * locations given at @p locs.
142  * Sets the length of @p workset to @p count.
143  */
144 static void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs)
145 {
146         workset->len = count;
147         memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
148 }
149
150 /**
151  * Inserts the value @p val into the workset, iff it is not
152  * already contained. If it has to be inserted, the workset must not be full.
153  */
154 static void workset_insert(workset_t *workset, ir_node *val, bool spilled)
155 {
156         loc_t    *loc;
157         unsigned  i;
158         /* check for current regclass */
159         assert(arch_irn_consider_in_reg_alloc(cls, val));
160
161         /* check if val is already contained */
162         for (i = 0; i < workset->len; ++i) {
163                 loc = &workset->vals[i];
164                 if (loc->node == val) {
165                         if (spilled) {
166                                 loc->spilled = true;
167                         }
168                         return;
169                 }
170         }
171
172         /* insert val */
173         assert(workset->len < n_regs && "Workset already full!");
174         loc           = &workset->vals[workset->len];
175         loc->node     = val;
176         loc->spilled  = spilled;
177         loc->time     = TIME_UNDEFINED;
178         workset->len++;
179 }
180
181 /**
182  * Removes all entries from this workset
183  */
184 static void workset_clear(workset_t *workset)
185 {
186         workset->len = 0;
187 }
188
189 /**
190  * Removes the value @p val from the workset if present.
191  */
192 static void workset_remove(workset_t *workset, ir_node *val)
193 {
194         unsigned i;
195         for (i = 0; i < workset->len; ++i) {
196                 if (workset->vals[i].node == val) {
197                         workset->vals[i] = workset->vals[--workset->len];
198                         return;
199                 }
200         }
201 }
202
203 static const loc_t *workset_contains(const workset_t *ws, const ir_node *val)
204 {
205         unsigned i;
206         for (i = 0; i < ws->len; ++i) {
207                 if (ws->vals[i].node == val)
208                         return &ws->vals[i];
209         }
210
211         return NULL;
212 }
213
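/* Orders locations by ascending next-use time; ties are broken by node number
 * so the resulting order is deterministic. */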
214 static int loc_compare(const void *a, const void *b)
215 {
216         const loc_t   *p  = ((const loc_t*) a);
217         const loc_t   *q  = ((const loc_t*) b);
218         const unsigned pt = p->time;
219         const unsigned qt = q->time;
220
221         if (pt < qt)
222                 return -1;
223         if (pt > qt)
224                 return 1;
225
226         return get_irn_node_nr(p->node) - get_irn_node_nr(q->node);
227 }
228
229 static void workset_sort(workset_t *workset)
230 {
231         qsort(workset->vals, workset->len, sizeof(workset->vals[0]), loc_compare);
232 }
233
234 static inline unsigned workset_get_time(const workset_t *workset, unsigned idx)
235 {
236         return workset->vals[idx].time;
237 }
238
239 static inline void workset_set_time(workset_t *workset, unsigned idx,
240                                     unsigned time)
241 {
242         workset->vals[idx].time = time;
243 }
244
245 static inline unsigned workset_get_length(const workset_t *workset)
246 {
247         return workset->len;
248 }
249
250 static inline void workset_set_length(workset_t *workset, unsigned len)
251 {
252         workset->len = len;
253 }
254
255 static inline ir_node *workset_get_val(const workset_t *workset, unsigned idx)
256 {
257         return workset->vals[idx].node;
258 }
259
260 /**
261  * Iterates over all values in the working set.
262  * @p ws The workset to iterate
263  * @p v  A variable to put the current value in
264  * @p i  An integer for internal use
265  */
266 #define workset_foreach(ws, v, i) \
267         for (i=0; v=(i < ws->len) ? ws->vals[i].node : NULL, i < ws->len; ++i)
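
/* Typical use (sketch):
 *
 *     ir_node  *val;
 *     unsigned  i;
 *     workset_foreach(ws, val, i) {
 *             ... look at val ...
 *     }
 */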
268
269 typedef struct block_info_t {
270         workset_t *start_workset;
271         workset_t *end_workset;
272 } block_info_t;
273
274 static block_info_t *new_block_info(void)
275 {
276         return OALLOCZ(&obst, block_info_t);
277 }
278
279 static inline block_info_t *get_block_info(const ir_node *block)
280 {
281         return (block_info_t*)get_irn_link(block);
282 }
283
284 static inline void set_block_info(ir_node *block, block_info_t *info)
285 {
286         set_irn_link(block, info);
287 }
288
289 /**
290  * @return The distance to the next use of @p def, or 0 if it has the dont_spill flag set
291  */
292 static unsigned get_distance(ir_node *from, const ir_node *def, int skip_from_uses)
293 {
294         be_next_use_t use;
295         unsigned      costs;
296         unsigned      time;
297
298         assert(!arch_irn_is_ignore(def));
299
300         use  = be_get_next_use(uses, from, def, skip_from_uses);
301         time = use.time;
302         if (USES_IS_INFINITE(time))
303                 return USES_INFINITY;
304
305         /* We have to keep non-spillable nodes in the working set */
306         if (arch_get_irn_flags(skip_Proj_const(def)) & arch_irn_flags_dont_spill)
307                 return 0;
308
309         /* give some bonus to rematerialisable nodes */
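	/* (a smaller reload cost yields a larger distance, so cheap-to-remat
	 * values are preferred candidates for eviction) */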
310         if (remat_bonus > 0) {
311                 costs = be_get_reload_costs_no_weight(senv, def, use.before);
312                 assert(costs * remat_bonus < 1000);
313                 time  += 1000 - (costs * remat_bonus);
314         }
315
316         return time;
317 }
318
319 /**
320  * Performs the actions necessary to grant the request that:
321  * - new_vals can be held in registers
322  * - as few other values as possible are disposed of
323  * - the values with the largest next-use distance get disposed of
324  *
325  * @p is_usage indicates that the values in new_vals are used (not defined).
326  * In this case reloads must be performed.
327  */
328 static void displace(workset_t *new_vals, int is_usage)
329 {
330         ir_node **to_insert = ALLOCAN(ir_node*, n_regs);
331         bool     *spilled   = ALLOCAN(bool,     n_regs);
332         ir_node  *val;
333         int       i;
334         int       len;
335         int       spills_needed;
336         int       demand;
337         unsigned  iter;
338
339         /* 1. Identify the number of needed slots and the values to reload */
340         demand = 0;
341         workset_foreach(new_vals, val, iter) {
342                 bool reloaded = false;
343
344                 if (! workset_contains(ws, val)) {
345                         DB((dbg, DBG_DECIDE, "    insert %+F\n", val));
346                         if (is_usage) {
347                                 DB((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, instr));
348                                 be_add_reload(senv, val, instr, cls, 1);
349                                 reloaded = true;
350                         }
351                 } else {
352                         DB((dbg, DBG_DECIDE, "    %+F already in workset\n", val));
353                         assert(is_usage);
354                         /* remove the value from the current workset so it is not accidentally
355                          * spilled */
356                         workset_remove(ws, val);
357                 }
358                 spilled[demand]   = reloaded;
359                 to_insert[demand] = val;
360                 ++demand;
361         }
362
363         /* 2. Make room for at least 'demand' slots */
364         len           = workset_get_length(ws);
365         spills_needed = len + demand - n_regs;
366         assert(spills_needed <= len);
367
368         /* Only make more free room if we do not have enough */
369         if (spills_needed > 0) {
370                 DB((dbg, DBG_DECIDE, "    disposing %d values\n", spills_needed));
371
372                 /* calculate current next-use distance for live values */
373                 for (i = 0; i < len; ++i) {
374                         ir_node  *val  = workset_get_val(ws, i);
375                         unsigned  dist = get_distance(instr, val, !is_usage);
376                         workset_set_time(ws, i, dist);
377                 }
378
379                 /* sort entries by increasing next-use distance */
380                 workset_sort(ws);
381
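		/* after sorting, the first (len - spills_needed) entries (soonest
		 * next use) stay in the workset; the rest are dropped and, if they
		 * are still live and not spilled yet, spilled right before this
		 * instruction (move_spills) */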
382                 for (i = len - spills_needed; i < len; ++i) {
383                         ir_node *val = ws->vals[i].node;
384
385                         DB((dbg, DBG_DECIDE, "    disposing node %+F (%u)\n", val,
386                              workset_get_time(ws, i)));
387
388                         if (move_spills) {
389                                 if (!USES_IS_INFINITE(ws->vals[i].time)
390                                                 && !ws->vals[i].spilled) {
391                                         ir_node *after_pos = sched_prev(instr);
392                                         DB((dbg, DBG_DECIDE, "Spill %+F after node %+F\n", val,
393                                                 after_pos));
394                                         be_add_spill(senv, val, after_pos);
395                                 }
396                         }
397                 }
398
399                 /* kill the last 'spills_needed' entries in the array */
400                 workset_set_length(ws, len - spills_needed);
401         }
402
403         /* 3. Insert the new values into the workset */
404         for (i = 0; i < demand; ++i) {
405                 ir_node *val = to_insert[i];
406
407                 workset_insert(ws, val, spilled[i]);
408         }
409 }
410
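/* Availability of a value in the end worksets of all predecessor blocks, as
 * computed by available_in_all_preds(). AVAILABLE_UNKNOWN is used when not
 * all predecessors have been processed yet. */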
411 enum {
412         AVAILABLE_EVERYWHERE,
413         AVAILABLE_NOWHERE,
414         AVAILABLE_PARTLY,
415         AVAILABLE_UNKNOWN
416 };
417
418 static unsigned available_in_all_preds(workset_t* const* pred_worksets,
419                                        size_t n_pred_worksets,
420                                        const ir_node *value, bool is_local_phi)
421 {
422         size_t i;
423         bool   avail_everywhere = true;
424         bool   avail_nowhere    = true;
425
426         assert(n_pred_worksets > 0);
427
428         /* value available in all preds? */
429         for (i = 0; i < n_pred_worksets; ++i) {
430                 bool             found     = false;
431                 const workset_t *p_workset = pred_worksets[i];
432                 int              p_len     = workset_get_length(p_workset);
433                 int              p_i;
434                 const ir_node   *l_value;
435
436                 if (is_local_phi) {
437                         assert(is_Phi(value));
438                         l_value = get_irn_n(value, i);
439                 } else {
440                         l_value = value;
441                 }
442
443                 for (p_i = 0; p_i < p_len; ++p_i) {
444                         const loc_t *p_l = &p_workset->vals[p_i];
445                         if (p_l->node != l_value)
446                                 continue;
447
448                         found = true;
449                         break;
450                 }
451
452                 if (found) {
453                         avail_nowhere = false;
454                 } else {
455                         avail_everywhere = false;
456                 }
457         }
458
459         if (avail_everywhere) {
460                 assert(!avail_nowhere);
461                 return AVAILABLE_EVERYWHERE;
462         } else if (avail_nowhere) {
463                 return AVAILABLE_NOWHERE;
464         } else {
465                 return AVAILABLE_PARTLY;
466         }
467 }
468
469 /** Decides whether a specific node should be in the start workset or not
470  *
471  * @param first      the first instruction of the block
472  * @param node       the node to test
473  * @param loop       the loop of the block
474  * @param available  availability of the node in the predecessor end worksets
475  */
476 static loc_t to_take_or_not_to_take(ir_node* first, ir_node *node,
477                                     ir_loop *loop, unsigned available)
478 {
479         be_next_use_t next_use;
480         loc_t         loc;
481
482         loc.time    = USES_INFINITY;
483         loc.node    = node;
484         loc.spilled = false;
485
486         if (!arch_irn_consider_in_reg_alloc(cls, node)) {
487                 loc.time = USES_INFINITY;
488                 return loc;
489         }
490
491         /* We have to keep non-spillable nodes in the working set */
492         if (arch_get_irn_flags(skip_Proj_const(node)) & arch_irn_flags_dont_spill) {
493                 loc.time = 0;
494                 DB((dbg, DBG_START, "    %+F taken (dontspill node)\n", node));
495                 return loc;
496         }
497
498         next_use = be_get_next_use(uses, first, node, 0);
499         if (USES_IS_INFINITE(next_use.time)) {
500                 /* nodes marked as live-in shouldn't be dead, so this must be a Phi */
501                 assert(is_Phi(node));
502                 loc.time = USES_INFINITY;
503                 DB((dbg, DBG_START, "    %+F not taken (dead)\n", node));
504                 return loc;
505         }
506
507         loc.time = next_use.time;
508
509         if (improve_known_preds) {
510                 if (available == AVAILABLE_EVERYWHERE) {
511                         DB((dbg, DBG_START, "    %+F taken (%u, live in all preds)\n",
512                             node, loc.time));
513                         return loc;
514                 } else if (available == AVAILABLE_NOWHERE) {
515                         DB((dbg, DBG_START, "    %+F not taken (%u, live in no pred)\n",
516                             node, loc.time));
517                         loc.time = USES_INFINITY;
518                         return loc;
519                 }
520         }
521
522         if (!respectloopdepth || next_use.outermost_loop >= get_loop_depth(loop)) {
523                 DB((dbg, DBG_START, "    %+F taken (%u, loop %d)\n", node, loc.time,
524                     next_use.outermost_loop));
525         } else {
526                 loc.time = USES_PENDING;
527                 DB((dbg, DBG_START, "    %+F delayed (outerdepth %d < loopdepth %d)\n",
528                     node, next_use.outermost_loop, get_loop_depth(loop)));
529         }
530
531         return loc;
532 }
533
534 /**
535  * Computes the start-workset for a block with multiple predecessors. We assume
536  * that at least one of the predecessors is a back-edge, i.e. we are at the
537  * beginning of a loop. We try to reload as many values as possible now so they
538  * don't get reloaded inside the loop.
539  */
540 static void decide_start_workset(const ir_node *block)
541 {
542         ir_loop    *loop = get_irn_loop(block);
543         ir_node    *first;
544         loc_t       loc;
545         loc_t      *starters;
546         loc_t      *delayed;
547         unsigned    len;
548         unsigned    i;
549         unsigned    ws_count;
550         int         free_slots, free_pressure_slots;
551         unsigned    pressure;
552         int         arity;
553         workset_t **pred_worksets;
554         bool        all_preds_known;
555
556         /* check predecessors */
557         arity           = get_irn_arity(block);
558         pred_worksets   = ALLOCAN(workset_t*, arity);
559         all_preds_known = true;
560         for (int in = 0; in < arity; ++in) {
561                 ir_node      *pred_block = get_Block_cfgpred_block(block, in);
562                 block_info_t *pred_info  = get_block_info(pred_block);
563
564                 if (pred_info == NULL) {
565                         pred_worksets[in] = NULL;
566                         all_preds_known   = false;
567                 } else {
568                         pred_worksets[in] = pred_info->end_workset;
569                 }
570         }
571
572         /* Collect all values living at start of block */
573         starters = NEW_ARR_F(loc_t, 0);
574         delayed  = NEW_ARR_F(loc_t, 0);
575
576         DB((dbg, DBG_START, "Living at start of %+F:\n", block));
577         first = sched_first(block);
578
579         /* check all Phis first */
580         sched_foreach(block, node) {
581                 unsigned available;
582
583                 if (! is_Phi(node))
584                         break;
585                 if (!arch_irn_consider_in_reg_alloc(cls, node))
586                         continue;
587
588                 if (all_preds_known) {
589                         available = available_in_all_preds(pred_worksets, arity, node, true);
590                 } else {
591                         available = AVAILABLE_UNKNOWN;
592                 }
593
594                 loc = to_take_or_not_to_take(first, node, loop, available);
595
596                 if (! USES_IS_INFINITE(loc.time)) {
597                         if (USES_IS_PENDING(loc.time))
598                                 ARR_APP1(loc_t, delayed, loc);
599                         else
600                                 ARR_APP1(loc_t, starters, loc);
601                 } else {
602                         be_spill_phi(senv, node);
603                 }
604         }
605
606         /* check all Live-Ins */
607         be_lv_foreach(lv, block, be_lv_state_in, node) {
608                 unsigned available;
609
610                 if (all_preds_known) {
611                         available = available_in_all_preds(pred_worksets, arity, node, false);
612                 } else {
613                         available = AVAILABLE_UNKNOWN;
614                 }
615
616                 loc = to_take_or_not_to_take(first, node, loop, available);
617
618                 if (! USES_IS_INFINITE(loc.time)) {
619                         if (USES_IS_PENDING(loc.time))
620                                 ARR_APP1(loc_t, delayed, loc);
621                         else
622                                 ARR_APP1(loc_t, starters, loc);
623                 }
624         }
625
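	/* Determine how many delayed values may additionally live through the
	 * loop: limited by the registers not already taken by starters and by the
	 * loop's maximal register pressure (the delayed values themselves are part
	 * of that pressure and are therefore subtracted). */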
626         pressure            = be_get_loop_pressure(loop_ana, cls, loop);
627         assert(ARR_LEN(delayed) <= pressure);
628         free_slots          = n_regs - ARR_LEN(starters);
629         free_pressure_slots = n_regs - (pressure - ARR_LEN(delayed));
630         free_slots          = MIN(free_slots, free_pressure_slots);
631
632         /* so far we only put nodes into the starters list that are used inside
633          * the loop. If register pressure in the loop is low then we can take some
634          * values and let them live through the loop */
635         DB((dbg, DBG_START, "Loop pressure %d, taking %d delayed vals\n",
636             pressure, free_slots));
637         if (free_slots > 0) {
638                 size_t i;
639
640                 qsort(delayed, ARR_LEN(delayed), sizeof(delayed[0]), loc_compare);
641
642                 for (i = 0; i < ARR_LEN(delayed) && free_slots > 0; ++i) {
643                         int    p, arity;
644                         loc_t *loc = & delayed[i];
645
646                         if (!is_Phi(loc->node)) {
647                                 /* don't use values which are dead in a known predecessor,
648                                  * to avoid inducing unnecessary reloads */
649                                 arity = get_irn_arity(block);
650                                 for (p = 0; p < arity; ++p) {
651                                         ir_node      *pred_block = get_Block_cfgpred_block(block, p);
652                                         block_info_t *pred_info  = get_block_info(pred_block);
653
654                                         if (pred_info == NULL)
655                                                 continue;
656
657                                         if (!workset_contains(pred_info->end_workset, loc->node)) {
658                                                 DB((dbg, DBG_START,
659                                                         "    delayed %+F not live at pred %+F\n", loc->node,
660                                                         pred_block));
661                                                 goto skip_delayed;
662                                         }
663                                 }
664                         }
665
666                         DB((dbg, DBG_START, "    delayed %+F taken\n", loc->node));
667                         ARR_APP1(loc_t, starters, *loc);
668                         loc->node = NULL;
669                         --free_slots;
670                 skip_delayed:
671                         ;
672                 }
673         }
674
675         /* spill delayed phis (the actual phis, not just their values) that did
676          * not make it into the start workset */
677         len = ARR_LEN(delayed);
678         for (i = 0; i < len; ++i) {
679                 ir_node *node = delayed[i].node;
680                 if (node == NULL || !is_Phi(node) || get_nodes_block(node) != block)
681                         continue;
682
683                 DB((dbg, DBG_START, "    spilling delayed phi %+F\n", node));
684                 be_spill_phi(senv, node);
685         }
686         DEL_ARR_F(delayed);
687
688         /* Sort start values by first use */
689         qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);
690
691         /* Copy the best ones from starters to start workset */
692         ws_count = MIN((unsigned) ARR_LEN(starters), n_regs);
693         workset_clear(ws);
694         workset_bulk_fill(ws, ws_count, starters);
695
696         /* spill phis (the actual phis, not just their values) that are in this
697          * block but not in the start workset */
698         len = ARR_LEN(starters);
699         for (i = ws_count; i < len; ++i) {
700                 ir_node *node = starters[i].node;
701                 if (! is_Phi(node) || get_nodes_block(node) != block)
702                         continue;
703
704                 DB((dbg, DBG_START, "    spilling phi %+F\n", node));
705                 be_spill_phi(senv, node);
706         }
707
708         DEL_ARR_F(starters);
709
710         /* determine spill status of the values: if there is at least one
711          * already-processed pred block (i.e. not reached over a back-edge) where
712          * the value is spilled, then it must be marked as spilled here as well. */
713         for (i = 0; i < ws_count; ++i) {
714                 loc_t   *loc     = &ws->vals[i];
715                 ir_node *value   = loc->node;
716                 bool     spilled;
717                 int      n;
718
719                 /* phis from this block aren't spilled */
720                 if (get_nodes_block(value) == block) {
721                         assert(is_Phi(value));
722                         loc->spilled = false;
723                         continue;
724                 }
725
726                 /* determine if value was spilled on any predecessor */
727                 spilled = false;
728                 for (n = 0; n < arity; ++n) {
729                         workset_t *pred_workset = pred_worksets[n];
730                         int        p_len;
731                         int        p;
732
733                         if (pred_workset == NULL)
734                                 continue;
735
736                         p_len = workset_get_length(pred_workset);
737                         for (p = 0; p < p_len; ++p) {
738                                 loc_t *l = &pred_workset->vals[p];
739
740                                 if (l->node != value)
741                                         continue;
742
743                                 if (l->spilled) {
744                                         spilled = true;
745                                 }
746                                 break;
747                         }
748                 }
749
750                 loc->spilled = spilled;
751         }
752 }
753
754 /**
755  * For the given block @p block, decide for each value
756  * whether it is used from a register or is reloaded
757  * before the use.
758  */
759 static void process_block(ir_node *block)
760 {
761         workset_t    *new_vals;
762         unsigned      iter;
763         block_info_t *block_info;
764         int           arity;
765
766         /* no need to process a block twice */
767         assert(get_block_info(block) == NULL);
768
769         /* construct start workset */
770         arity = get_Block_n_cfgpreds(block);
771         if (arity == 0) {
772                 /* no predecessor -> empty set */
773                 workset_clear(ws);
774         } else if (arity == 1) {
775                 /* one predecessor, copy its end workset */
776                 ir_node      *pred_block = get_Block_cfgpred_block(block, 0);
777                 block_info_t *pred_info  = get_block_info(pred_block);
778
779                 assert(pred_info != NULL);
780                 workset_copy(ws, pred_info->end_workset);
781         } else {
782                 /* multiple predecessors, do more advanced magic :) */
783                 decide_start_workset(block);
784         }
785
786         DB((dbg, DBG_DECIDE, "\n"));
787         DB((dbg, DBG_DECIDE, "Decide for %+F\n", block));
788
789         block_info = new_block_info();
790         set_block_info(block, block_info);
791
792         DB((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
793         {
794                 ir_node *irn;
795                 workset_foreach(ws, irn, iter) {
796                         DB((dbg, DBG_WSETS, "  %+F (%u)\n", irn, workset_get_time(ws, iter)));
797                 }
798         }
799
800         block_info->start_workset = workset_clone(ws);
801
802         /* process the block from start to end */
803         DB((dbg, DBG_WSETS, "Processing...\n"));
804         /* TODO: this leaks (into the obstack)... */
805         new_vals = new_workset();
806
807         sched_foreach(block, irn) {
808                 int i, arity;
809                 assert(workset_get_length(ws) <= n_regs);
810
811                 /* Phis are not real instructions (see insert_starters()) */
812                 if (is_Phi(irn)) {
813                         continue;
814                 }
815                 DB((dbg, DBG_DECIDE, "  ...%+F\n", irn));
816
817                 /* remember the current instruction (used by displace()) */
818                 instr = irn;
819
820                 /* allocate all values _used_ by this instruction */
821                 workset_clear(new_vals);
822                 for (i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
823                         ir_node *in = get_irn_n(irn, i);
824                         if (!arch_irn_consider_in_reg_alloc(cls, in))
825                                 continue;
826
827                         /* (note that "spilled" is irrelevant here) */
828                         workset_insert(new_vals, in, false);
829                 }
830                 displace(new_vals, 1);
831
832                 /* allocate all values _defined_ by this instruction */
833                 workset_clear(new_vals);
834                 be_foreach_definition(irn, cls, value,
835                         assert(req_->width == 1);
836                         workset_insert(new_vals, value, false);
837                 );
838                 displace(new_vals, 0);
839         }
840
841         /* Remember end-workset for this block */
842         block_info->end_workset = workset_clone(ws);
843         DB((dbg, DBG_WSETS, "End workset for %+F:\n", block));
844         {
845                 ir_node *irn;
846                 workset_foreach(ws, irn, iter)
847                         DB((dbg, DBG_WSETS, "  %+F (%u)\n", irn, workset_get_time(ws, iter)));
848         }
849 }
850
851 /**
852  * The spill decisions are block-local and make assumptions
853  * about the set of live-ins. Thus we must adapt the
854  * live-outs to the live-ins at each block border.
855  */
856 static void fix_block_borders(ir_node *block, void *data)
857 {
858         workset_t *start_workset;
859         int        arity;
860         int        i;
861         unsigned   iter;
862         (void) data;
863
864         DB((dbg, DBG_FIX, "\n"));
865         DB((dbg, DBG_FIX, "Fixing %+F\n", block));
866
867         arity = get_irn_arity(block);
868         /* can happen for endless loops */
869         if (arity == 0)
870                 return;
871
872         start_workset = get_block_info(block)->start_workset;
873
874         /* process all pred blocks */
875         for (i = 0; i < arity; ++i) {
876                 ir_node   *pred = get_Block_cfgpred_block(block, i);
877                 workset_t *pred_end_workset = get_block_info(pred)->end_workset;
878                 ir_node   *node;
879
880                 DB((dbg, DBG_FIX, "  Pred %+F\n", pred));
881
882                 /* spill all values not used anymore */
883                 workset_foreach(pred_end_workset, node, iter) {
884                         ir_node *n2;
885                         unsigned iter2;
886                         bool     found = false;
887                         workset_foreach(start_workset, n2, iter2) {
888                                 if (n2 == node) {
889                                         found = true;
890                                         break;
891                                 }
892                                 /* note that we do not look at phi inputs, because the values
893                                  * are either live at the end (and need no spill) or
894                                  * they have other users, which must be somewhere else in the
895                                  * workset */
896                         }
897
898                         if (found)
899                                 continue;
900
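			/* the value is live into this block but not kept in a register
			 * there: when move_spills is enabled, make sure it is spilled on
			 * this path (unless the predecessor already spilled it) so later
			 * uses can be reloaded */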
901                         if (move_spills && be_is_live_in(lv, block, node)
902                                         && !pred_end_workset->vals[iter].spilled) {
903                                 ir_node *insert_point;
904                                 if (arity > 1) {
905                                         insert_point = be_get_end_of_block_insertion_point(pred);
906                                         insert_point = sched_prev(insert_point);
907                                 } else {
908                                         insert_point = block;
909                                 }
910                                 DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
911                                      insert_point));
912                                 be_add_spill(senv, node, insert_point);
913                         }
914                 }
915
916                 /* reload missing values in predecessors, add missing spills */
917                 workset_foreach(start_workset, node, iter) {
918                         const loc_t *l    = &start_workset->vals[iter];
919                         const loc_t *pred_loc;
920
921                         /* if node is a phi of the current block we reload
922                          * the corresponding argument, else node itself */
923                         if (is_Phi(node) && get_nodes_block(node) == block) {
924                                 node = get_irn_n(node, i);
925                                 assert(!l->spilled);
926
927                                 /* we might have unknowns as argument for the phi */
928                                 if (!arch_irn_consider_in_reg_alloc(cls, node))
929                                         continue;
930                         }
931
932                         /* check if node is in a register at end of pred */
933                         pred_loc = workset_contains(pred_end_workset, node);
934                         if (pred_loc != NULL) {
935                                 /* we might have to spill value on this path */
936                                 if (move_spills && !pred_loc->spilled && l->spilled) {
937                                         ir_node *insert_point
938                                                 = be_get_end_of_block_insertion_point(pred);
939                                         insert_point = sched_prev(insert_point);
940                                         DB((dbg, DBG_SPILL, "Spill %+F after %+F\n", node,
941                                             insert_point));
942                                         be_add_spill(senv, node, insert_point);
943                                 }
944                         } else {
945                                 /* node is not in register at the end of pred -> reload it */
946                                 DB((dbg, DBG_FIX, "    reload %+F\n", node));
947                                 DB((dbg, DBG_SPILL, "Reload %+F before %+F,%d\n", node, block, i));
948                                 be_add_reload_on_edge(senv, node, block, i, cls, 1);
949                         }
950                 }
951         }
952 }
953
954 static void be_spill_belady(ir_graph *irg, const arch_register_class_t *rcls)
955 {
956         int i;
957
958         be_assure_live_sets(irg);
959
960         stat_ev_tim_push();
961         assure_loopinfo(irg);
962         stat_ev_tim_pop("belady_time_backedges");
963
964         stat_ev_tim_push();
965         be_clear_links(irg);
966         stat_ev_tim_pop("belady_time_clear_links");
967
968         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
969
970         /* init belady env */
971         stat_ev_tim_push();
972         obstack_init(&obst);
973         cls       = rcls;
974         lv        = be_get_irg_liveness(irg);
975         n_regs    = be_get_n_allocatable_regs(irg, cls);
976         ws        = new_workset();
977         uses      = be_begin_uses(irg, lv);
978         loop_ana  = be_new_loop_pressure(irg, cls);
979         senv      = be_new_spill_env(irg);
980         blocklist = be_get_cfgpostorder(irg);
981         stat_ev_tim_pop("belady_time_init");
982
983         stat_ev_tim_push();
984         /* walk blocks in reverse postorder */
985         for (i = ARR_LEN(blocklist) - 1; i >= 0; --i) {
986                 process_block(blocklist[i]);
987         }
988         DEL_ARR_F(blocklist);
989         stat_ev_tim_pop("belady_time_belady");
990
991         stat_ev_tim_push();
992         /* belady was block-local, fix the global flow by adding reloads on the
993          * edges */
994         irg_block_walk_graph(irg, fix_block_borders, NULL, NULL);
995         stat_ev_tim_pop("belady_time_fix_borders");
996
997         ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
998
999         /* Insert spill/reload nodes into the graph and fix usages */
1000         be_insert_spills_reloads(senv);
1001
1002         /* clean up */
1003         be_delete_spill_env(senv);
1004         be_end_uses(uses);
1005         be_free_loop_pressure(loop_ana);
1006         obstack_free(&obst, NULL);
1007 }
1008
1009 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady)
1010 void be_init_spillbelady(void)
1011 {
1012         static be_spiller_t belady_spiller = {
1013                 be_spill_belady
1014         };
1015         lc_opt_entry_t *be_grp       = lc_opt_get_grp(firm_opt_get_root(), "be");
1016         lc_opt_entry_t *belady_group = lc_opt_get_grp(be_grp, "belady");
1017         lc_opt_add_table(belady_group, options);
1018
1019         be_register_spiller("belady", &belady_spiller);
1020         FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
1021 }