1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       implementation of the spill/reload placement abstraction layer
23  * @author      Daniel Grund, Sebastian Hack, Matthias Braun
24  * @date        29.09.2005
25  * @version     $Id$
26  */
27 #ifdef HAVE_CONFIG_H
28 #include "config.h"
29 #endif
30
31 #include <stdlib.h>
32 #include <stdbool.h>
33
34 #include "pset.h"
35 #include "irnode_t.h"
36 #include "ircons_t.h"
37 #include "iredges_t.h"
38 #include "irbackedge_t.h"
39 #include "irprintf.h"
40 #include "ident_t.h"
41 #include "type_t.h"
42 #include "entity_t.h"
43 #include "debug.h"
44 #include "irgwalk.h"
45 #include "array.h"
46 #include "pdeq.h"
47 #include "execfreq.h"
48 #include "irnodeset.h"
49 #include "error.h"
50
51 #include "bearch_t.h"
52 #include "belive_t.h"
53 #include "besched_t.h"
54 #include "bespill.h"
56 #include "benode_t.h"
57 #include "bechordal_t.h"
58 #include "bespilloptions.h"
59 #include "bestatevent.h"
60 #include "bessaconstr.h"
61 #include "beirg_t.h"
62 #include "beintlive_t.h"
63 #include "bemodule.h"
64 #include "be_t.h"
65
66 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
67
68 #define REMAT_COST_INFINITE  1000
69
70 typedef struct reloader_t reloader_t;
71 struct reloader_t {
72         reloader_t *next;
73         ir_node    *can_spill_after;
74         ir_node    *reloader;
75         ir_node    *rematted_node;
76         int         remat_cost_delta; /**< costs needed for rematerialization,
77                                             compared to placing a reload */
78 };
79
80 typedef struct spill_t spill_t;
81 struct spill_t {
82         spill_t *next;
83         ir_node *after;  /**< spill has to be placed after this node (or earlier) */
84         ir_node *spill;
85 };
86
87 typedef struct spill_info_t spill_info_t;
88 struct spill_info_t {
89         ir_node    *to_spill;  /**< the value that should get spilled */
90         reloader_t *reloaders; /**< list of places where the value should get
91                                     reloaded */
92         spill_t    *spills;    /**< list of latest places where spill must be
93                                     placed */
94         double      spill_costs; /**< costs needed for spilling the value */
95         const arch_register_class_t *reload_cls; /**< the register class in which the
96                                                        reload should be placed */
97 };
98
99 struct spill_env_t {
100         const arch_env_t *arch_env;
101         ir_graph         *irg;
102         struct obstack    obst;
103         be_irg_t         *birg;
104         int               spill_cost;     /**< the cost of a single spill node */
105         int               reload_cost;    /**< the cost of a reload node */
106         set              *spills;         /**< all spill_info_t's, which must be
107                                                placed */
108         ir_nodeset_t      mem_phis;       /**< set of all spilled phis. */
109         ir_exec_freq     *exec_freq;
110
111 #ifdef FIRM_STATISTICS
112         unsigned          spill_count;
113         unsigned          reload_count;
114         unsigned          remat_count;
115         unsigned          spilled_phi_count;
116 #endif
117 };
118
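/*
 * Rough usage sketch of this abstraction layer, as seen from a spiller
 * implementation (details may vary between spillers):
 *
 *   spill_env_t *env = be_new_spill_env(birg);
 *   // ... decide what to spill ...
 *   be_add_reload(env, value, before_node, reload_cls, 1);
 *   be_spill_phi(env, some_phi);
 *   // ...
 *   be_insert_spills_reloads(env);
 *   be_delete_spill_env(env);
 */
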
119 /**
120  * Compare two spill infos.
121  */
122 static int cmp_spillinfo(const void *x, const void *y, size_t size)
123 {
124         const spill_info_t *xx = x;
125         const spill_info_t *yy = y;
126         (void) size;
127
128         return xx->to_spill != yy->to_spill;
129 }
130
131 /**
132  * Returns spill info for a specific value (the value that is to be spilled)
133  */
134 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
135 {
136         spill_info_t info, *res;
137         int hash = hash_irn(value);
138
139         info.to_spill = value;
140         res = set_find(env->spills, &info, sizeof(info), hash);
141
142         if (res == NULL) {
143                 info.reloaders   = NULL;
144                 info.spills      = NULL;
145                 info.spill_costs = -1;
146                 info.reload_cls  = NULL;
147                 res = set_insert(env->spills, &info, sizeof(info), hash);
148         }
149
150         return res;
151 }
152
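/**
 * Creates a new spill environment for the given backend irg. All spill,
 * reload and remat requests are collected in this environment and only
 * materialized later by be_insert_spills_reloads().
 */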
153 spill_env_t *be_new_spill_env(be_irg_t *birg)
154 {
155         const arch_env_t *arch_env = birg->main_env->arch_env;
156
157         spill_env_t *env = XMALLOC(spill_env_t);
158         env->spills         = new_set(cmp_spillinfo, 1024);
159         env->irg            = be_get_birg_irg(birg);
160         env->birg           = birg;
161         env->arch_env       = arch_env;
162         ir_nodeset_init(&env->mem_phis);
163         env->spill_cost     = arch_env->spill_cost;
164         env->reload_cost    = arch_env->reload_cost;
165         env->exec_freq      = be_get_birg_exec_freq(birg);
166         obstack_init(&env->obst);
167
168 #ifdef FIRM_STATISTICS
169         env->spill_count       = 0;
170         env->reload_count      = 0;
171         env->remat_count       = 0;
172         env->spilled_phi_count = 0;
173 #endif
174
175         return env;
176 }
177
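/**
 * Frees a spill environment and all memory allocated for it.
 */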
178 void be_delete_spill_env(spill_env_t *env)
179 {
180         del_set(env->spills);
181         ir_nodeset_destroy(&env->mem_phis);
182         obstack_free(&env->obst, NULL);
183         free(env);
184 }
185
186 /*
187  *  ____  _                  ____      _                 _
188  * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
189  * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
190  * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
191  * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
192  *
193  */
194
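/**
 * Records that @p to_spill has to be spilled (at the latest) after @p after.
 * Spill positions that are dominated by an already recorded one are dropped,
 * and previously recorded positions dominated by the new one are removed, so
 * only a minimal set of spill points is kept per value.
 */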
195 void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
196 {
197         spill_info_t *spill_info = get_spillinfo(env, to_spill);
198         spill_t      *spill;
199         spill_t      *s;
200         spill_t      *last;
201
202         assert(!arch_irn_is(to_spill, dont_spill));
203         DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));
204
205         /* Just for safety make sure that we do not insert the spill in front of a phi */
206         assert(!is_Phi(sched_next(after)));
207
208         /* spills that are dominated by others are not needed */
209         last = NULL;
210         s    = spill_info->spills;
211         for( ; s != NULL; s = s->next) {
212                 /* no need to add this spill if it is dominated by another */
213                 if(value_dominates(s->after, after)) {
214                         DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after));
215                         return;
216                 }
217                 /* remove spills that we dominate */
218                 if(value_dominates(after, s->after)) {
219                         DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after));
220                         if(last != NULL) {
221                                 last->next         = s->next;
222                         } else {
223                                 spill_info->spills = s->next;
224                         }
225                 } else {
226                         last = s;
227                 }
228         }
229
230         spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
231         spill->after  = after;
232         spill->next   = spill_info->spills;
233         spill->spill  = NULL;
234
235         spill_info->spills = spill;
236 }
237
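/**
 * Records that @p rematted_node is a forced rematerialisation of @p to_spill
 * and must be scheduled before @p before. No reload may be created for this
 * use, so the remat cost delta is set to 0.
 */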
238 void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
239                   ir_node *rematted_node)
240 {
241         spill_info_t *spill_info;
242         reloader_t *reloader;
243
244         spill_info = get_spillinfo(env, to_spill);
245
246         /* add the remat information */
247         reloader                   = obstack_alloc(&env->obst, sizeof(reloader[0]));
248         reloader->next             = spill_info->reloaders;
249         reloader->reloader         = before;
250         reloader->rematted_node    = rematted_node;
251         reloader->remat_cost_delta = 0; /* We will never have a cost win over a
252                                            reload since we're not even allowed to
253                                            create a reload */
254
255         spill_info->reloaders  = reloader;
256
257         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
258                 to_spill, before));
259 }
260
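/**
 * Records that @p to_spill must be available (reloaded or rematerialised)
 * before @p before. @p can_spill_after marks the earliest point after which
 * the value may be spilled for this use; @p allow_remat controls whether
 * rematerialisation is permitted.
 */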
261 void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
262                 ir_node *can_spill_after, const arch_register_class_t *reload_cls,
263                 int allow_remat)
264 {
265         spill_info_t *info;
266         reloader_t *rel;
267
268         assert(!arch_irn_is(to_spill, dont_spill));
269
270         info = get_spillinfo(env, to_spill);
271
272         if (is_Phi(to_spill)) {
273                 int i, arity;
274
275                 /* create spillinfos for the phi arguments */
276                 for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
277                         ir_node *arg = get_irn_n(to_spill, i);
278                         get_spillinfo(env, arg);
279                 }
280         }
281
282         assert(!is_Proj(before) && !be_is_Keep(before));
283
284         /* put reload into list */
285         rel                   = obstack_alloc(&env->obst, sizeof(rel[0]));
286         rel->next             = info->reloaders;
287         rel->reloader         = before;
288         rel->rematted_node    = NULL;
289         rel->can_spill_after  = can_spill_after;
290         rel->remat_cost_delta = allow_remat ? 0 : REMAT_COST_INFINITE;
291
292         info->reloaders  = rel;
293         assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
294         info->reload_cls = reload_cls;
295
296         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
297                 to_spill, before, allow_remat ? "" : " not"));
298 }
299
300 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
301                    const arch_register_class_t *reload_cls, int allow_remat)
302 {
303         be_add_reload2(senv, to_spill, before, to_spill, reload_cls, allow_remat);
305 }
306
307 ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
308 {
309         ir_node *last = sched_last(block);
310
311         /* we might have keeps behind the jump... */
312         while (be_is_Keep(last)) {
313                 last = sched_prev(last);
314                 assert(!sched_is_end(last));
315         }
316
317         assert(is_cfop(last));
318
319         /* add the reload before the (cond-)jump */
320         return last;
321 }
322
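/**
 * Advances @p node over any directly following Phi and Keep nodes in the
 * schedule and returns the last of them (or @p node itself). Spills are
 * inserted after this point so they do not end up between a block's Phis or
 * in front of Keeps.
 */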
323 static ir_node *skip_keeps_phis(ir_node *node)
324 {
325         while(true) {
326                 ir_node *next = sched_next(node);
327                 if(!is_Phi(next) && !be_is_Keep(next))
328                         break;
329                 node = next;
330         }
331         return node;
332 }
333
334 /**
335  * Returns the point at which you can insert a node that should be executed
336  * before block @p block when coming from pred @p pos.
337  */
338 static ir_node *get_block_insertion_point(ir_node *block, int pos)
339 {
340         ir_node *predblock;
341
342         /* simply add the reload to the beginning of the block if we only have 1
343          * predecessor. We don't need to check for phis as there can't be any in a
344          * block with only 1 pred. */
345         if(get_Block_n_cfgpreds(block) == 1) {
346                 assert(!is_Phi(sched_first(block)));
347                 return sched_first(block);
348         }
349
350         /* We have to reload the value in pred-block */
351         predblock = get_Block_cfgpred_block(block, pos);
352         return be_get_end_of_block_insertion_point(predblock);
353 }
354
355 void be_add_reload_at_end(spill_env_t *env, ir_node *to_spill,
356                           const ir_node *block,
357                           const arch_register_class_t *reload_cls,
358                           int allow_remat)
359 {
360         ir_node *before = be_get_end_of_block_insertion_point(block);
361         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
362 }
363
364 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
365                            int pos,     const arch_register_class_t *reload_cls,
366                            int allow_remat)
367 {
368         ir_node *before = get_block_insertion_point(block, pos);
369         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
370 }
371
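/**
 * Marks a Phi as spilled to memory: the Phi is added to the mem_phis set and
 * a spill is requested for each argument, either right after the argument's
 * definition or, for unscheduled values, at the end of the corresponding
 * predecessor block.
 */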
372 void be_spill_phi(spill_env_t *env, ir_node *node)
373 {
374         ir_node *block;
375         spill_info_t* spill;
376         int i, arity;
377
378         assert(is_Phi(node));
379
380         ir_nodeset_insert(&env->mem_phis, node);
381
382         /* create spills for the phi arguments */
383         block = get_nodes_block(node);
384         spill = get_spillinfo(env, node);
385         for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
386                 ir_node *arg = get_irn_n(node, i);
387                 ir_node *insert;
388                 //get_spillinfo(env, arg);
389
390                 /* some backends have virtual noreg/unknown nodes that are not scheduled
391                  * and simply always available. */
392                 if(!sched_is_scheduled(arg)) {
393                         ir_node *pred_block = get_Block_cfgpred_block(block, i);
394                         insert = be_get_end_of_block_insertion_point(pred_block);
395                         insert = sched_prev(insert);
396                 } else {
397                         insert = skip_keeps_phis(arg);
398                 }
399
400                 be_add_spill(env, arg, insert);
401         }
402 }
403
404 /*
405  *   ____                _         ____        _ _ _
406  *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
407  * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
408  * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
409  *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
410  *                                      |_|
411  */
412
413 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);
414
415 /**
416  * Creates the spill nodes for a value at the spill positions recorded in
417  * its spill info and stores them back into the spill info.
418  *
419  * @param env        the spill environment
420  * @param spillinfo  the spill info of the value that should be spilled
423  */
424 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
425 {
426         ir_node *to_spill = spillinfo->to_spill;
427         spill_t *spill;
428
429         /* determine_spill_costs must have been run before */
430         assert(spillinfo->spill_costs >= 0);
431
432         /* some backends have virtual noreg/unknown nodes that are not scheduled
433          * and simply always available. */
434         if(!sched_is_scheduled(to_spill)) {
435                 /* override spillinfos or create a new one */
436                 spillinfo->spills->spill = new_NoMem();
437                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
438                 return;
439         }
440
441         DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
442         spill = spillinfo->spills;
443         for( ; spill != NULL; spill = spill->next) {
444                 ir_node *after = spill->after;
445                 ir_node *block = get_block(after);
446
447                 after = skip_keeps_phis(after);
448
449                 spill->spill   = be_spill(env->arch_env, block, to_spill);
450                 sched_add_after(after, spill->spill);
451                 DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
452 #ifdef FIRM_STATISTICS
453                 env->spill_count++;
454 #endif
455         }
456         DBG((dbg, LEVEL_1, "\n"));
457 }
458
459 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
460
461 /**
462  * If the first usage of a Phi result would be out of memory
463  * there is no sense in allocating a register for it.
464  * Thus we spill it and all its operands to the same spill slot.
465  * The data Phi thus becomes a memory Phi (PhiM).
466  *
467  * @param env        the spill environment
468  * @param spillinfo  the spill info of the Phi node that should be spilled
470  */
471 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
472 {
473         ir_graph *irg   = env->irg;
474         ir_node  *phi   = spillinfo->to_spill;
475         ir_node  *block = get_nodes_block(phi);
476         ir_node  *unknown;
477         ir_node **ins;
478         spill_t  *spill;
479         int       i;
480         int       arity;
481
482         assert(is_Phi(phi));
483         assert(!get_opt_cse());
484         DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
485
486         /* build a new PhiM */
487         arity   = get_irn_arity(phi);
488         ins     = alloca(sizeof(ir_node*) * arity);
489         unknown = new_r_Unknown(irg, mode_M);
490         for(i = 0; i < arity; ++i) {
491                 ins[i] = unknown;
492         }
493
494         /* override or replace spills list... */
495         spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
496         spill->after  = skip_keeps_phis(phi);
497         spill->spill  = new_r_Phi(irg, block, arity, ins, mode_M);
498         spill->next   = NULL;
499
500         spillinfo->spills = spill;
501 #ifdef FIRM_STATISTICS
502         env->spilled_phi_count++;
503 #endif
504
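        /* spill each Phi argument and use the resulting spill as the
         * corresponding operand of the new PhiM */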
505         for(i = 0; i < arity; ++i) {
506                 ir_node      *arg      = get_irn_n(phi, i);
507                 spill_info_t *arg_info = get_spillinfo(env, arg);
508
509                 determine_spill_costs(env, arg_info);
510                 spill_node(env, arg_info);
511
512                 set_irn_n(spill->spill, i, arg_info->spills->spill);
513         }
514         DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi,
515              spill->spill));
516 }
517
518 /**
519  * Spill a node.
520  *
521  * @param env        the spill environment
522  * @param spillinfo  the spill info of the node that should be spilled
523  */
524 static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
525 {
526         ir_node *to_spill;
527
528         /* node is already spilled */
529         if(spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
530                 return;
531
532         to_spill = spillinfo->to_spill;
533
534         if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
535                 spill_phi(env, spillinfo);
536         } else {
537                 spill_irn(env, spillinfo);
538         }
539 }
540
541 /*
542  *
543  *  ____                      _            _       _ _
544  * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
545  * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
546  * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
547  * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
548  *
549  */
550
551 /**
552  * Tests whether value @p arg is available before node @p reloader
553  * @returns 1 if value is available, 0 otherwise
554  */
555 static int is_value_available(spill_env_t *env, const ir_node *arg,
556                               const ir_node *reloader)
557 {
558         if(is_Unknown(arg) || arg == new_NoMem())
559                 return 1;
560
561         if(be_is_Spill(arg))
562                 return 1;
563
564         if(arg == get_irg_frame(env->irg))
565                 return 1;
566
567         /* hack for now (happens when the reload/remat should be inserted at the end of a block) */
568         if(is_Block(reloader)) {
569                 return 0;
570         }
571
572         /*
573          * Ignore registers are always available
574          */
575         if (arch_irn_is(arg, ignore)) {
576                 return 1;
577         }
578
579         /* the following test does not work while spilling,
580          * because the liveness info is not adapted yet to the effects of the
581          * additional spills/reloads.
582          */
583 #if 0
584         /* we want to remat before the insn reloader
585          * thus an argument is alive if
586          *   - it interferes with the reloader's result
587          *   - or it is (last-) used by the reloader itself
588          */
589         if (values_interfere(env->birg->lv, reloader, arg)) {
590                 return 1;
591         }
592
593         arity = get_irn_arity(reloader);
594         for (i = 0; i < arity; ++i) {
595                 ir_node *rel_arg = get_irn_n(reloader, i);
596                 if (rel_arg == arg)
597                         return 1;
598         }
599 #endif
600
601         return 0;
602 }
603
604 /**
605  * Checks whether the node can be rematerialized in principle
606  */
607 static int is_remat_node(const ir_node *node)
608 {
609         assert(!be_is_Spill(node));
610
611         if (arch_irn_is(node, rematerializable))
612                 return 1;
613
614         return 0;
615 }
616
617 /**
618  * Check if a node is rematerializable. This tests for the following conditions:
619  *
620  * - The node itself is rematerializable
621  * - All arguments of the node are available or also rematerialisable
622  * - The costs for the rematerialisation operation are less than or equal to a limit
623  *
624  * Returns the costs needed for rematerialisation or something
625  * >= REMAT_COST_INFINITE if remat is not possible.
626  */
627 static int check_remat_conditions_costs(spill_env_t *env,
628                 const ir_node *spilled, const ir_node *reloader, int parentcosts)
629 {
630         int i, arity;
631         int argremats;
632         int costs = 0;
633
634         if (!is_remat_node(spilled))
635                 return REMAT_COST_INFINITE;
636
637         if(be_is_Reload(spilled)) {
638                 costs += 2;
639         } else {
640                 costs += arch_get_op_estimated_cost(spilled);
641         }
642         if(parentcosts + costs >= env->reload_cost + env->spill_cost) {
643                 return REMAT_COST_INFINITE;
644         }
645         if (arch_irn_is(spilled, modify_flags)) {
646                 return REMAT_COST_INFINITE;
647         }
648
649         argremats = 0;
650         for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
651                 ir_node *arg = get_irn_n(spilled, i);
652
653                 if(is_value_available(env, arg, reloader))
654                         continue;
655
656                 /* we have to rematerialize the argument as well */
657                 if(argremats >= 1) {
658                         /* we only support rematerializing 1 argument at the moment,
659                          * so that we don't have to care about register pressure
660                          */
661                         return REMAT_COST_INFINITE;
662                 }
663                 argremats++;
664
665                 costs += check_remat_conditions_costs(env, arg, reloader,
666                                                       parentcosts + costs);
667                 if(parentcosts + costs >= env->reload_cost + env->spill_cost)
668                         return REMAT_COST_INFINITE;
669         }
670
671         return costs;
672 }
673
674 /**
675  * Re-materialize a node.
676  *
677  * @param env       the spill environment
678  * @param spilled   the node that was spilled
679  * @param reloader  an irn that requires a reload
680  */
681 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
682 {
683         int i, arity;
684         ir_node *res;
685         ir_node *bl;
686         ir_node **ins;
687
688         if(is_Block(reloader)) {
689                 bl = reloader;
690         } else {
691                 bl = get_nodes_block(reloader);
692         }
693
694         ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
695         for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
696                 ir_node *arg = get_irn_n(spilled, i);
697
698                 if(is_value_available(env, arg, reloader)) {
699                         ins[i] = arg;
700                 } else {
701                         ins[i] = do_remat(env, arg, reloader);
702 #ifdef FIRM_STATISTICS
703                         /* don't count the recursive call as remat */
704                         env->remat_count--;
705 #endif
706                 }
707         }
708
709         /* create a copy of the node */
710         res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
711                           get_irn_op(spilled), get_irn_mode(spilled),
712                           get_irn_arity(spilled), ins);
713         copy_node_attr(spilled, res);
714         arch_env_mark_remat(env->arch_env, res);
715         new_backedge_info(res);
716
717         DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
718
719         if (! is_Proj(res)) {
720                 /* insert in schedule */
721                 sched_reset(res);
722                 sched_add_before(reloader, res);
723 #ifdef FIRM_STATISTICS
724                 env->remat_count++;
725 #endif
726         }
727
728         return res;
729 }
730
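/**
 * Returns the costs of spilling @p to_spill: the backend's spill cost
 * weighted by the execution frequency of the block containing @p before.
 */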
731 double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
732 {
733         ir_node *block = get_nodes_block(before);
734         double   freq  = get_block_execfreq(env->exec_freq, block);
735         (void) to_spill;
736
737         return env->spill_cost * freq;
738 }
739
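/**
 * Returns the unweighted costs of making @p to_spill available before
 * @p before: the rematerialisation costs if remats are enabled and cheaper,
 * otherwise the plain reload costs.
 */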
740 unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
741                                        const ir_node *before)
742 {
743         if(be_do_remats) {
744                 /* is the node rematerializable? */
745                 unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
746                 if(costs < (unsigned) env->reload_cost)
747                         return costs;
748         }
749
750         return env->reload_cost;
751 }
752
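/**
 * Like be_get_reload_costs_no_weight(), but weighted by the execution
 * frequency of the block containing @p before.
 */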
753 double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
754 {
755         ir_node      *block = get_nodes_block(before);
756         double        freq  = get_block_execfreq(env->exec_freq, block);
757
758         if(be_do_remats) {
759                 /* is the node rematerializable? */
760                 int costs = check_remat_conditions_costs(env, to_spill, before, 0);
761                 if(costs < env->reload_cost)
762                         return costs * freq;
763         }
764
765         return env->reload_cost * freq;
766 }
767
768 int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat,
769                            const ir_node *before)
770 {
771         return check_remat_conditions_costs(env, to_remat, before, 0) < REMAT_COST_INFINITE;
772 }
773
774 double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
775                                    ir_node *block, int pos)
776 {
777         ir_node *before = get_block_insertion_point(block, pos);
778         return be_get_reload_costs(env, to_spill, before);
779 }
780
781 /*
782  *  ___                     _     ____      _                 _
783  * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
784  *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
785  *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
786  * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
787  *
788  */
789
790 /**
791  * Analyzes how to best spill a node and determines the costs for doing so.
792  */
793 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
794 {
795         ir_node *to_spill = spillinfo->to_spill;
796         ir_node *spill_block;
797         spill_t *spill;
798         double   spill_execfreq;
799
800         /* already calculated? */
801         if(spillinfo->spill_costs >= 0)
802                 return;
803
804         assert(!arch_irn_is(to_spill, dont_spill));
805         assert(!be_is_Reload(to_spill));
806
807         /* some backends have virtual noreg/unknown nodes that are not scheduled
808          * and simply always available.
809          * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
810          * predecessor (of a PhiM) but this test might match other things too...
811          */
812         if(!sched_is_scheduled(to_spill)) {
813                 /* override spillinfos or create a new one */
814                 spill_t *spill = obstack_alloc(&env->obst, sizeof(spill[0]));
815                 spill->after = NULL;
816                 spill->next  = NULL;
817                 spill->spill = new_NoMem();
818
819                 spillinfo->spills      = spill;
820                 spillinfo->spill_costs = 0;
821
822                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
823                 return;
824         }
825
826         spill_block    = get_nodes_block(to_spill);
827         spill_execfreq = get_block_execfreq(env->exec_freq, spill_block);
828
829         if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
830                 /* TODO calculate correct costs...
831                  * (though we can't remat this node anyway so no big problem) */
832                 spillinfo->spill_costs = env->spill_cost * spill_execfreq;
833                 return;
834         }
835
836         if(spillinfo->spills != NULL) {
837                 spill_t *s;
838                 double   spills_execfreq;
839
840                 /* calculate sum of execution frequencies of individual spills */
841                 spills_execfreq = 0;
842                 s               = spillinfo->spills;
843                 for( ; s != NULL; s = s->next) {
844                         ir_node *spill_block = get_block(s->after);
845                         double   freq = get_block_execfreq(env->exec_freq, spill_block);
846
847                         spills_execfreq += freq;
848                 }
849
850                 DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
851                     spills_execfreq * env->spill_cost,
852                     spill_execfreq * env->spill_cost));
853
854                 /* multi-/latespill is advantageous -> return */
855                 if(spills_execfreq < spill_execfreq) {
856                         DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
857                         spillinfo->spill_costs = spills_execfreq * env->spill_cost;
858                         return;
859                 }
860         }
861
862         /* override spillinfos or create a new one */
863         spill        = obstack_alloc(&env->obst, sizeof(spill[0]));
864         spill->after = skip_keeps_phis(to_spill);
865         spill->next  = NULL;
866         spill->spill = NULL;
867
868         spillinfo->spills      = spill;
869         spillinfo->spill_costs = spill_execfreq * env->spill_cost;
870         DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
871 }
872
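/**
 * Ensures that every reload recorded for @p irn is dominated by at least one
 * of its recorded spills. Using dominance pre-order numbers, all reload
 * blocks covered by a spill's dominance subtree are cancelled out; if any
 * reload block remains, an additional spill right after the definition of
 * @p irn is requested.
 */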
873 void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
874 {
875         const spill_info_t *si = get_spillinfo(env, irn);
876         ir_node *start_block   = get_irg_start_block(get_irn_irg(irn));
877         int n_blocks           = get_Block_dom_max_subtree_pre_num(start_block);
878         bitset_t *reloads      = bitset_alloca(n_blocks);
879         reloader_t *r;
880         spill_t *s;
881
882         if (si == NULL)
883                 return;
884
885         /* Fill the bitset with the dominance pre-order numbers
886          * of the blocks the reloads are located in. */
887         for (r = si->reloaders; r != NULL; r = r->next) {
888                 ir_node *bl = get_nodes_block(r->reloader);
889                 bitset_set(reloads, get_Block_dom_tree_pre_num(bl));
890         }
891
892         /* Now, cancel out all the blocks that are dominated by each spill.
893          * If the bitset is not empty after that, we have reloads that are
894          * not dominated by any spill. */
895         for (s = si->spills; s != NULL; s = s->next) {
896                 ir_node *bl = get_nodes_block(s->after);
897                 int start   = get_Block_dom_tree_pre_num(bl);
898                 int end     = get_Block_dom_max_subtree_pre_num(bl);
899
900                 bitset_clear_range(reloads, start, end);
901         }
902
903         if (!bitset_is_empty(reloads))
904                 be_add_spill(env, si->to_spill, si->to_spill);
905 }
906
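/**
 * Main driver of the spill phase: materializes all recorded spills, reloads
 * and rematerialisations. For each spilled value it first decides, based on
 * remat costs and execution frequencies, whether rematerialisation should be
 * forced for all uses, then places the reloads/remats and spills, and
 * finally reconstructs SSA form for the value and, if several spills were
 * created, for the spill memory as well.
 */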
907 void be_insert_spills_reloads(spill_env_t *env)
908 {
909         const arch_env_t      *arch_env  = env->arch_env;
910         const ir_exec_freq    *exec_freq = env->exec_freq;
911         spill_info_t          *si;
912         ir_nodeset_iterator_t  iter;
913         ir_node               *node;
914
915         BE_TIMER_PUSH(t_ra_spill_apply);
916
917         /* create all PhiMs first; this is needed so that Phis depending on
918            spilled Phis work correctly */
919         foreach_ir_nodeset(&env->mem_phis, node, iter) {
920                 spill_info_t *info = get_spillinfo(env, node);
921                 spill_node(env, info);
922         }
923
924         /* process each spilled node */
925         for (si = set_first(env->spills); si; si = set_next(env->spills)) {
926                 reloader_t *rld;
927                 ir_node  *to_spill        = si->to_spill;
928                 ir_mode  *mode            = get_irn_mode(to_spill);
929                 ir_node **copies          = NEW_ARR_F(ir_node*, 0);
930                 double    all_remat_costs = 0; /**< costs when we would remat all nodes */
931                 int       force_remat     = 0;
932
933                 DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
934
935                 determine_spill_costs(env, si);
936
937                 /* determine possibility of rematerialisations */
938                 if(be_do_remats) {
939                         /* calculate the cost savings for each individual value if it
940                            were rematted instead of reloaded */
941                         for (rld = si->reloaders; rld != NULL; rld = rld->next) {
942                                 double   freq;
943                                 int      remat_cost;
944                                 int      remat_cost_delta;
945                                 ir_node *block;
946                                 ir_node *reloader = rld->reloader;
947
948                                 if(rld->rematted_node != NULL) {
949                                         DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
950                                              rld->rematted_node, reloader));
951                                         continue;
952                                 }
953                                 if(rld->remat_cost_delta >= REMAT_COST_INFINITE) {
954                                         DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
955                                              reloader));
956                                         all_remat_costs = REMAT_COST_INFINITE;
957                                         continue;
958                                 }
959
960                                 remat_cost  = check_remat_conditions_costs(env, to_spill,
961                                                                            reloader, 0);
962                                 if(remat_cost >= REMAT_COST_INFINITE) {
963                                         DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
964                                              reloader));
965                                         rld->remat_cost_delta = REMAT_COST_INFINITE;
966                                         all_remat_costs       = REMAT_COST_INFINITE;
967                                         continue;
968                                 }
969
970                                 remat_cost_delta      = remat_cost - env->reload_cost;
971                                 rld->remat_cost_delta = remat_cost_delta;
972                                 block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
973                                 freq                  = get_block_execfreq(exec_freq, block);
974                                 all_remat_costs      += remat_cost_delta * freq;
975                                 DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
976                                      "%d (rel %f)\n", reloader, remat_cost_delta,
977                                      remat_cost_delta * freq));
978                         }
979                         if(all_remat_costs < REMAT_COST_INFINITE) {
980                                 /* we don't need the costs for the spill if we can remat
981                                    all reloaders */
982                                 all_remat_costs -= si->spill_costs;
983
984                                 DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
985                                      env->spill_cost, si->spill_costs));
986                         }
987
988                         if(all_remat_costs < 0) {
989                                 DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
990                                      all_remat_costs));
991                                 force_remat = 1;
992                         }
993                 }
994
995                 /* go through all reloads for this spill */
996                 for (rld = si->reloaders; rld != NULL; rld = rld->next) {
997                         ir_node *copy; /* a reload is a "copy" of the original value */
998
999                         if (rld->rematted_node != NULL) {
1000                                 copy = rld->rematted_node;
1001                                 sched_add_before(rld->reloader, copy);
1002                         } else if (be_do_remats &&
1003                                         (force_remat || rld->remat_cost_delta < 0)) {
1004                                 copy = do_remat(env, to_spill, rld->reloader);
1005                         } else {
1006                                 /* make sure we have a spill */
1007                                 spill_node(env, si);
1008
1009                                 /* create a reload; use the first spill for now, the SSA
1010                                  * reconstruction for the spill memory comes below */
1011                                 assert(si->spills != NULL);
1012                                 copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode,
1013                                                  si->spills->spill);
1014 #ifdef FIRM_STATISTICS
1015                                 env->reload_count++;
1016 #endif
1017                         }
1018
1019                         DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
1020                              copy, to_spill, rld->reloader));
1021                         ARR_APP1(ir_node*, copies, copy);
1022                 }
1023
1024                 /* if we had any reloads or remats, then we need to reconstruct the
1025                  * SSA form for the spilled value */
1026                 if (ARR_LEN(copies) > 0) {
1027                         be_ssa_construction_env_t senv;
1028                         /* be_lv_t *lv = be_get_birg_liveness(env->birg); */
1029
1030                         be_ssa_construction_init(&senv, env->birg);
1031                         be_ssa_construction_add_copy(&senv, to_spill);
1032                         be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
1033                         be_ssa_construction_fix_users(&senv, to_spill);
1034
1035 #if 0
1036                         /* no need to enable this as long as we invalidate liveness
1037                            after this function... */
1038                         be_ssa_construction_update_liveness_phis(&senv);
1039                         be_liveness_update(to_spill);
1040                         len = ARR_LEN(copies);
1041                         for(i = 0; i < len; ++i) {
1042                                 be_liveness_update(lv, copies[i]);
1043                         }
1044 #endif
1045                         be_ssa_construction_destroy(&senv);
1046                 }
1047                 /* need to reconstruct SSA form if we had multiple spills */
1048                 if (si->spills != NULL && si->spills->next != NULL) {
1049                         spill_t *spill;
1050                         int      spill_count = 0;
1051
1052                         be_ssa_construction_env_t senv;
1053
1054                         be_ssa_construction_init(&senv, env->birg);
1055                         spill = si->spills;
1056                         for( ; spill != NULL; spill = spill->next) {
1057                                 /* maybe we rematerialized the value and need no spill */
1058                                 if(spill->spill == NULL)
1059                                         continue;
1060                                 be_ssa_construction_add_copy(&senv, spill->spill);
1061                                 spill_count++;
1062                         }
1063                         if(spill_count > 1) {
1064                                 /* all reloads are attached to the first spill, fix them now */
1065                                 be_ssa_construction_fix_users(&senv, si->spills->spill);
1066                         }
1067
1068                         be_ssa_construction_destroy(&senv);
1069                 }
1070
1071                 DEL_ARR_F(copies);
1072                 si->reloaders = NULL;
1073         }
1074
1075         stat_ev_dbl("spill_spills", env->spill_count);
1076         stat_ev_dbl("spill_reloads", env->reload_count);
1077         stat_ev_dbl("spill_remats", env->remat_count);
1078         stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);
1079
1080         /* Matze: In theory be_ssa_construction should take care of the liveness...
1081          * try to disable this again in the future */
1082         be_liveness_invalidate(env->birg->lv);
1083
1084         be_remove_dead_nodes_from_schedule(env->birg);
1085
1086         BE_TIMER_POP(t_ra_spill_apply);
1087 }
1088
1089 void be_init_spill(void)
1090 {
1091         FIRM_DBG_REGISTER(dbg, "firm.be.spill");
1092 }
1093
1094 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill);