1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       implementation of the spill/reload placement abstraction layer
23  * @author      Daniel Grund, Sebastian Hack, Matthias Braun
24  * @date                29.09.2005
25  * @version     $Id$
26  */
27 #ifdef HAVE_CONFIG_H
28 #include "config.h"
29 #endif
30
31 #include <stdlib.h>
32 #include <stdbool.h>
33
34 #include "pset.h"
35 #include "irnode_t.h"
36 #include "ircons_t.h"
37 #include "iredges_t.h"
38 #include "irbackedge_t.h"
39 #include "irprintf.h"
40 #include "ident_t.h"
41 #include "type_t.h"
42 #include "entity_t.h"
43 #include "debug.h"
44 #include "irgwalk.h"
45 #include "array.h"
46 #include "pdeq.h"
47 #include "execfreq.h"
48 #include "irnodeset.h"
49 #include "error.h"
50
51 #include "bearch_t.h"
52 #include "belive_t.h"
53 #include "besched_t.h"
54 #include "bespill.h"
55 #include "belive_t.h"
56 #include "benode_t.h"
57 #include "bechordal_t.h"
58 #include "bejavacoal.h"
59 #include "bespilloptions.h"
60 #include "bestatevent.h"
61 #include "bessaconstr.h"
62 #include "beirg_t.h"
63 #include "beintlive_t.h"
64 #include "bemodule.h"
65 #include "be_t.h"
66
67 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
68
69 #define REMAT_COST_INFINITE  1000
70
71 typedef struct reloader_t reloader_t;
72 struct reloader_t {
73         reloader_t *next;
74         ir_node    *can_spill_after; /**< the spill may be placed after this node */
75         ir_node    *reloader;        /**< node before which the value must be available again */
76         ir_node    *rematted_node;   /**< prebuilt remat node used instead of a reload, or NULL */
77         int         remat_cost_delta; /**< costs needed for rematerialization,
78                                            compared to placing a reload */
79 };
80
81 typedef struct spill_t spill_t;
82 struct spill_t {
83         spill_t *next;
84         ir_node *after;  /**< spill has to be placed after this node (or earlier) */
85         ir_node *spill;  /**< the created spill node (be_Spill, PhiM or NoMem), or NULL until created */
86 };
87
88 typedef struct spill_info_t spill_info_t;
89 struct spill_info_t {
90         ir_node    *to_spill;  /**< the value that should get spilled */
91         reloader_t *reloaders; /**< list of places where the value should get
92                                     reloaded */
93         spill_t    *spills;    /**< list of latest places where spill must be
94                                     placed */
95         double      spill_costs; /**< costs needed for spilling the value */
96         const arch_register_class_t *reload_cls; /**< the register class in which the
97                                                      reload should be placed */
98 };
99
100 struct spill_env_t {
101         const arch_env_t *arch_env;
102         ir_graph         *irg;
103         struct obstack    obst;
104         be_irg_t         *birg;
105         int               spill_cost;     /**< the cost of a single spill node */
106         int               reload_cost;    /**< the cost of a reload node */
107         set              *spills;         /**< all spill_info_t's, which must be
108                                                placed */
109         ir_nodeset_t      mem_phis;       /**< set of all spilled phis. */
110         ir_exec_freq     *exec_freq;
111
112 #ifdef FIRM_STATISTICS
113         unsigned          spill_count;
114         unsigned          reload_count;
115         unsigned          remat_count;
116         unsigned          spilled_phi_count;
117 #endif
118 };
119
120 /**
121  * Compare two spill infos.
122  */
123 static int cmp_spillinfo(const void *x, const void *y, size_t size)
124 {
125         const spill_info_t *xx = x;
126         const spill_info_t *yy = y;
127         (void) size;
128
129         return xx->to_spill != yy->to_spill;
130 }
131
132 /**
133  * Returns spill info for a specific value (the value that is to be spilled)
134  */
135 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
136 {
137         spill_info_t info, *res;
138         int hash = hash_irn(value);
139
140         info.to_spill = value;
141         res = set_find(env->spills, &info, sizeof(info), hash);
142
143         if (res == NULL) {
144                 info.reloaders   = NULL;
145                 info.spills      = NULL;
146                 info.spill_costs = -1;
147                 info.reload_cls  = NULL;
148                 res = set_insert(env->spills, &info, sizeof(info), hash);
149         }
150
151         return res;
152 }
153
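/**
 * Creates a new spill environment for the given backend irg.
 *
 * A rough usage sketch (illustrative only; birg, value, before and cls are
 * assumed to be set up by the caller):
 * @code
 *   spill_env_t *senv = be_new_spill_env(birg);
 *   be_add_reload(senv, value, before, cls, 1);
 *   be_insert_spills_reloads(senv);
 *   be_delete_spill_env(senv);
 * @endcode
 */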
154 spill_env_t *be_new_spill_env(be_irg_t *birg)
155 {
156         const arch_env_t *arch_env = birg->main_env->arch_env;
157
158         spill_env_t *env        = xmalloc(sizeof(env[0]));
159         env->spills                     = new_set(cmp_spillinfo, 1024);
160         env->irg            = be_get_birg_irg(birg);
161         env->birg           = birg;
162         env->arch_env       = arch_env;
163         ir_nodeset_init(&env->mem_phis);
164         env->spill_cost     = arch_env->spill_cost;
165         env->reload_cost    = arch_env->reload_cost;
166         env->exec_freq      = be_get_birg_exec_freq(birg);
167         obstack_init(&env->obst);
168
169 #ifdef FIRM_STATISTICS
170         env->spill_count       = 0;
171         env->reload_count      = 0;
172         env->remat_count       = 0;
173         env->spilled_phi_count = 0;
174 #endif
175
176         return env;
177 }
178
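/**
 * Frees a spill environment and all memory allocated by it.
 */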
179 void be_delete_spill_env(spill_env_t *env)
180 {
181         del_set(env->spills);
182         ir_nodeset_destroy(&env->mem_phis);
183         obstack_free(&env->obst, NULL);
184         free(env);
185 }
186
187 /*
188  *  ____  _                  ____      _                 _
189  * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
190  * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
191  * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
192  * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
193  *
194  */
195
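/**
 * Marks a point after which a spill of @p to_spill may be placed.
 * A position that is dominated by an already recorded spill position is
 * ignored, and recorded positions dominated by the new one are removed,
 * so the list only keeps the latest positions at which spills are needed.
 */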
196 void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
197 {
198         spill_info_t *spill_info = get_spillinfo(env, to_spill);
199         spill_t      *spill;
200         spill_t      *s;
201         spill_t      *last;
202
203         assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
204         DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));
205
206         /* Just for safety make sure that we do not insert the spill in front of a phi */
207         assert(!is_Phi(sched_next(after)));
208
209         /* spills that are dominated by others are not needed */
210         last = NULL;
211         s    = spill_info->spills;
212         for( ; s != NULL; s = s->next) {
213                 /* no need to add this spill if it is dominated by another */
214                 if(value_dominates(s->after, after)) {
215                         DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after));
216                         return;
217                 }
218                 /* remove spills that we dominate */
219                 if(value_dominates(after, s->after)) {
220                         DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after));
221                         if(last != NULL) {
222                                 last->next         = s->next;
223                         } else {
224                                 spill_info->spills = s->next;
225                         }
226                 } else {
227                         last = s;
228                 }
229         }
230
231         spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
232         spill->after  = after;
233         spill->next   = spill_info->spills;
234         spill->spill  = NULL;
235
236         spill_info->spills = spill;
237 }
238
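/**
 * Records that @p rematted_node (an already constructed copy of @p to_spill)
 * has to be scheduled before @p before. No reload is ever created for such a
 * use, so the remat cost delta is simply set to 0.
 */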
239 void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
240                   ir_node *rematted_node)
241 {
242         spill_info_t *spill_info;
243         reloader_t *reloader;
244
245         spill_info = get_spillinfo(env, to_spill);
246
247         /* add the remat information */
248         reloader                   = obstack_alloc(&env->obst, sizeof(reloader[0]));
249         reloader->next             = spill_info->reloaders;
250         reloader->reloader         = before;
251         reloader->rematted_node    = rematted_node;
252         reloader->remat_cost_delta = 0; /* We will never have a cost win over a
253                                            reload since we're not even allowed to
254                                            create a reload */
255
256         spill_info->reloaders  = reloader;
257
258         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
259                 to_spill, before));
260 }
261
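/**
 * Records that @p to_spill has to be available again before @p before.
 * The spill for this use may already be placed after @p can_spill_after
 * (be_add_reload() passes the value itself here). If @p allow_remat is zero,
 * rematerialization is ruled out by assigning it an infinite cost delta.
 */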
262 void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
263                 ir_node *can_spill_after, const arch_register_class_t *reload_cls,
264                 int allow_remat)
265 {
266         spill_info_t *info;
267         reloader_t *rel;
268
269         assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
270
271         info = get_spillinfo(env, to_spill);
272
273         if (is_Phi(to_spill)) {
274                 int i, arity;
275
276                 /* create spillinfos for the phi arguments */
277                 for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
278                         ir_node *arg = get_irn_n(to_spill, i);
279                         get_spillinfo(env, arg);
280                 }
281         }
282
283         assert(!is_Proj(before) && !be_is_Keep(before));
284
285         /* put reload into list */
286         rel                   = obstack_alloc(&env->obst, sizeof(rel[0]));
287         rel->next             = info->reloaders;
288         rel->reloader         = before;
289         rel->rematted_node    = NULL;
290         rel->can_spill_after  = can_spill_after;
291         rel->remat_cost_delta = allow_remat ? 0 : REMAT_COST_INFINITE;
292
293         info->reloaders  = rel;
294         assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
295         info->reload_cls = reload_cls;
296
297         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
298                 to_spill, before, allow_remat ? "" : " not"));
299 }
300
301 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
302                    const arch_register_class_t *reload_cls, int allow_remat)
303 {
304         be_add_reload2(senv, to_spill, before, to_spill, reload_cls, allow_remat);
305
306 }
307
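/**
 * Returns the node before which code that has to execute at the end of
 * @p block must be inserted: the block's control flow node, skipping any
 * Keep nodes scheduled behind it.
 */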
308 ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
309 {
310         ir_node *last = sched_last(block);
311
312         /* we might have keeps behind the jump... */
313         while (be_is_Keep(last)) {
314                 last = sched_prev(last);
315                 assert(!sched_is_end(last));
316         }
317
318         assert(is_cfop(last));
319
320         /* add the reload before the (cond-)jump */
321         return last;
322 }
323
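/**
 * Advances @p node past any Phi and Keep nodes scheduled directly after it
 * and returns the last of them (or @p node itself), so that a spill added
 * after the returned node never ends up in front of a Phi or Keep.
 */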
324 static ir_node *skip_keeps_phis(ir_node *node)
325 {
326         while(true) {
327                 ir_node *next = sched_next(node);
328                 if(!is_Phi(next) && !be_is_Keep(next))
329                         break;
330                 node = next;
331         }
332         return node;
333 }
334
335 /**
336  * Returns the point at which a node can be inserted that has to be executed
337  * before entering block @p block from its predecessor at position @p pos.
338  */
339 static ir_node *get_block_insertion_point(ir_node *block, int pos)
340 {
341         ir_node *predblock;
342
343         /* simply add the reload to the beginning of the block if we only have 1
344          * predecessor. We don't need to check for phis as there can't be any in a
345          * block with only 1 pred. */
346         if(get_Block_n_cfgpreds(block) == 1) {
347                 assert(!is_Phi(sched_first(block)));
348                 return sched_first(block);
349         }
350
351         /* We have to reload the value in pred-block */
352         predblock = get_Block_cfgpred_block(block, pos);
353         return be_get_end_of_block_insertion_point(predblock);
354 }
355
356 void be_add_reload_at_end(spill_env_t *env, ir_node *to_spill,
357                           const ir_node *block,
358                           const arch_register_class_t *reload_cls,
359                           int allow_remat)
360 {
361         ir_node *before = be_get_end_of_block_insertion_point(block);
362         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
363 }
364
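/**
 * Adds a reload of @p to_spill on the control flow edge entering @p block
 * from the predecessor at position @p pos. A typical (illustrative) use is
 * reloading the i-th argument of a Phi:
 * be_add_reload_on_edge(env, arg, get_nodes_block(phi), i, cls, 1).
 */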
365 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
366                            int pos, const arch_register_class_t *reload_cls,
367                            int allow_remat)
368 {
369         ir_node *before = get_block_insertion_point(block, pos);
370         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
371 }
372
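/**
 * Marks Phi @p node as spilled: it is registered as a memory Phi and a spill
 * is requested for each of its arguments, either right after the argument's
 * definition or, for unscheduled virtual values, at the end of the
 * corresponding predecessor block.
 */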
373 void be_spill_phi(spill_env_t *env, ir_node *node)
374 {
375         ir_node *block;
376         spill_info_t* spill;
377         int i, arity;
378
379         assert(is_Phi(node));
380
381         ir_nodeset_insert(&env->mem_phis, node);
382
383         /* create spills for the phi arguments */
384         block = get_nodes_block(node);
385         spill = get_spillinfo(env, node);
386         for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
387                 ir_node *arg = get_irn_n(node, i);
388                 ir_node *insert;
389                 //get_spillinfo(env, arg);
390
391                 /* some backends have virtual noreg/unknown nodes that are not scheduled
392                  * and simply always available. */
393                 if(!sched_is_scheduled(arg)) {
394                         ir_node *pred_block = get_Block_cfgpred_block(block, i);
395                         insert = be_get_end_of_block_insertion_point(pred_block);
396                         insert = sched_prev(insert);
397                 } else {
398                         insert = skip_keeps_phis(arg);
399                 }
400
401                 be_add_spill(env, arg, insert);
402         }
403 }
404
405 /*
406  *   ____                _         ____        _ _ _
407  *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
408  * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
409  * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
410  *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
411  *                                      |_|
412  */
413
414 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);
415
416 /**
417  * Creates the be_Spill nodes for a value at all of its recorded spill
418  * positions (or a NoMem if the value is always available).
419  *
420  * determine_spill_costs() must have been run for this value before.
421  *
422  * @param env        the spill environment
423  * @param spillinfo  the spill info of the value that should be spilled
424  */
425 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
426 {
427         ir_node *to_spill = spillinfo->to_spill;
428         spill_t *spill;
429
430         /* determine_spill_costs must have been run before */
431         assert(spillinfo->spill_costs >= 0);
432
433         /* some backends have virtual noreg/unknown nodes that are not scheduled
434          * and simply always available. */
435         if(!sched_is_scheduled(to_spill)) {
436                 /* the value is always available: use a NoMem instead of a real spill */
437                 spillinfo->spills->spill = new_NoMem();
438                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
439                 return;
440         }
441
442         DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
443         spill = spillinfo->spills;
444         for( ; spill != NULL; spill = spill->next) {
445                 ir_node *after = spill->after;
446                 ir_node *block = get_block(after);
447
448                 after = skip_keeps_phis(after);
449
450                 spill->spill   = be_spill(env->arch_env, block, to_spill);
451                 sched_add_after(after, spill->spill);
452                 DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
453 #ifdef FIRM_STATISTICS
454                 env->spill_count++;
455 #endif
456         }
457         DBG((dbg, LEVEL_1, "\n"));
458 }
459
460 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
461
462 /**
463  * If the first usage of a Phi result would be out of memory,
464  * there is no sense in allocating a register for it.
465  * Thus we spill the Phi and all of its operands to the same spill slot.
466  * Thereby the data Phi becomes a memory Phi (PhiM) and its arguments
467  * are spilled recursively.
468  *
469  * @param env        the spill environment
470  * @param spillinfo  the spill info of the Phi node that should be spilled
471  */
472 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
473 {
474         ir_graph *irg   = env->irg;
475         ir_node  *phi   = spillinfo->to_spill;
476         ir_node  *block = get_nodes_block(phi);
477         ir_node  *unknown;
478         ir_node **ins;
479         spill_t  *spill;
480         int       i;
481         int       arity;
482
483         assert(is_Phi(phi));
484         assert(!get_opt_cse());
485         DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
486
487         /* build a new PhiM */
488         arity   = get_irn_arity(phi);
489         ins     = alloca(sizeof(ir_node*) * arity);
490         unknown = new_r_Unknown(irg, mode_M);
491         for(i = 0; i < arity; ++i) {
492                 ins[i] = unknown;
493         }
494
495         /* override or replace spills list... */
496         spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
497         spill->after  = skip_keeps_phis(phi);
498         spill->spill  = new_r_Phi(irg, block, arity, ins, mode_M);
499         spill->next   = NULL;
500
501         spillinfo->spills = spill;
502 #ifdef FIRM_STATISTICS
503         env->spilled_phi_count++;
504 #endif
505
506         for(i = 0; i < arity; ++i) {
507                 ir_node      *arg      = get_irn_n(phi, i);
508                 spill_info_t *arg_info = get_spillinfo(env, arg);
509
510                 determine_spill_costs(env, arg_info);
511                 spill_node(env, arg_info);
512
513                 set_irn_n(spill->spill, i, arg_info->spills->spill);
514         }
515         DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi,
516              spill->spill));
517 }
518
519 /**
520  * Spills a node, dispatching to spill_phi() for memory Phis and to
521  * spill_irn() for all other values.
522  * @param env        the spill environment
523  * @param spillinfo  the spill info of the node that should be spilled
524  */
525 static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
526 {
527         ir_node *to_spill;
528
529         /* node is already spilled */
530         if(spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
531                 return;
532
533         to_spill = spillinfo->to_spill;
534
535         if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
536                 spill_phi(env, spillinfo);
537         } else {
538                 spill_irn(env, spillinfo);
539         }
540 }
541
542 /*
543  *
544  *  ____                      _            _       _ _
545  * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
546  * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
547  * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
548  * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
549  *
550  */
551
552 /**
553  * Tests whether value @p arg is available before node @p reloader
554  * @returns 1 if value is available, 0 otherwise
555  */
556 static int is_value_available(spill_env_t *env, const ir_node *arg,
557                               const ir_node *reloader)
558 {
559         if(is_Unknown(arg) || arg == new_NoMem())
560                 return 1;
561
562         if(be_is_Spill(arg))
563                 return 1;
564
565         if(arg == get_irg_frame(env->irg))
566                 return 1;
567
568         /* hack for now (happens when command should be inserted at end of block) */
569         if(is_Block(reloader)) {
570                 return 0;
571         }
572
573         /*
574          * Ignore registers are always available
575          */
576         if(arch_irn_is(env->arch_env, arg, ignore)) {
577                 return 1;
578         }
579
580         /* the following test does not work while spilling,
581          * because the liveness info is not adapted yet to the effects of the
582          * additional spills/reloads.
583          */
584 #if 0
585         /* we want to remat before the insn reloader
586          * thus an argument is alive if
587          *   - it interferes with the reloader's result
588          *   - or it is (last-) used by reloader itself
589          */
590         if (values_interfere(env->birg->lv, reloader, arg)) {
591                 return 1;
592         }
593
594         arity = get_irn_arity(reloader);
595         for (i = 0; i < arity; ++i) {
596                 ir_node *rel_arg = get_irn_n(reloader, i);
597                 if (rel_arg == arg)
598                         return 1;
599         }
600 #endif
601
602         return 0;
603 }
604
605 /**
606  * Checks whether the node can be rematerialized in principle
607  */
608 static int is_remat_node(spill_env_t *env, const ir_node *node)
609 {
610         const arch_env_t *arch_env = env->arch_env;
611
612         assert(!be_is_Spill(node));
613
614         if(arch_irn_is(arch_env, node, rematerializable))
615                 return 1;
616
617         return 0;
618 }
619
620 /**
621  * Check if a node is rematerializable. This tests for the following conditions:
622  *
623  * - The node itself is rematerializable
624  * - All arguments of the node are available or also rematerialisable
625  * - The costs for the rematerialisation operation are less than or equal to a limit
626  *
627  * Returns the costs needed for rematerialisation or something
628  * >= REMAT_COST_INFINITE if remat is not possible.
629  */
630 static int check_remat_conditions_costs(spill_env_t *env,
631                 const ir_node *spilled, const ir_node *reloader, int parentcosts)
632 {
633         int i, arity;
634         int argremats;
635         int costs = 0;
636
637         if(!is_remat_node(env, spilled))
638                 return REMAT_COST_INFINITE;
639
640         if(be_is_Reload(spilled)) {
641                 costs += 2;
642         } else {
643                 costs += arch_get_op_estimated_cost(env->arch_env, spilled);
644         }
645         if(parentcosts + costs >= env->reload_cost + env->spill_cost) {
646                 return REMAT_COST_INFINITE;
647         }
648         if(arch_irn_is(env->arch_env, spilled, modify_flags)) {
649                 return REMAT_COST_INFINITE;
650         }
651
652         argremats = 0;
653         for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
654                 ir_node *arg = get_irn_n(spilled, i);
655
656                 if(is_value_available(env, arg, reloader))
657                         continue;
658
659                 /* we have to rematerialize the argument as well */
660                 if(argremats >= 1) {
661                         /* we only support rematerializing 1 argument at the moment,
662                          * so that we don't have to care about register pressure
663                          */
664                         return REMAT_COST_INFINITE;
665                 }
666                 argremats++;
667
668                 costs += check_remat_conditions_costs(env, arg, reloader,
669                                                       parentcosts + costs);
670                 if(parentcosts + costs >= env->reload_cost + env->spill_cost)
671                         return REMAT_COST_INFINITE;
672         }
673
674         return costs;
675 }
676
677 /**
678  * Re-materialize a node.
679  *
680  * @param env       the spill environment
681  * @param spilled   the node that was spilled
682  * @param reloader  an irn that requires a reload
683  */
684 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
685 {
686         int i, arity;
687         ir_node *res;
688         ir_node *bl;
689         ir_node **ins;
690
691         if(is_Block(reloader)) {
692                 bl = reloader;
693         } else {
694                 bl = get_nodes_block(reloader);
695         }
696
697         ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
698         for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
699                 ir_node *arg = get_irn_n(spilled, i);
700
701                 if(is_value_available(env, arg, reloader)) {
702                         ins[i] = arg;
703                 } else {
704                         ins[i] = do_remat(env, arg, reloader);
705 #ifdef FIRM_STATISTICS
706                         /* don't count the recursive call as remat */
707                         env->remat_count--;
708 #endif
709                 }
710         }
711
712         /* create a copy of the node */
713         res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
714                           get_irn_op(spilled), get_irn_mode(spilled),
715                           get_irn_arity(spilled), ins);
716         copy_node_attr(spilled, res);
717         new_backedge_info(res);
718
719         DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
720
721         if (! is_Proj(res)) {
722                 /* insert in schedule */
723                 sched_reset(res);
724                 sched_add_before(reloader, res);
725 #ifdef FIRM_STATISTICS
726                 env->remat_count++;
727 #endif
728         }
729
730         return res;
731 }
732
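/**
 * Returns the estimated costs of spilling @p to_spill for a use at @p before:
 * the backend's spill cost weighted by the execution frequency of the block
 * containing @p before.
 */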
733 double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
734 {
735         ir_node *block = get_nodes_block(before);
736         double   freq  = get_block_execfreq(env->exec_freq, block);
737         (void) to_spill;
738
739         return env->spill_cost * freq;
740 }
741
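/**
 * Returns the costs of making @p to_spill available again before @p before,
 * not weighted by execution frequency: the rematerialization costs if remats
 * are enabled and cheaper, otherwise the plain reload cost.
 */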
742 unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
743                                        const ir_node *before)
744 {
745         if(be_do_remats) {
746                 /* is the node rematerializable? */
747                 unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
748                 if(costs < (unsigned) env->reload_cost)
749                         return costs;
750         }
751
752         return env->reload_cost;
753 }
754
755 double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
756 {
757         ir_node      *block = get_nodes_block(before);
758         double        freq  = get_block_execfreq(env->exec_freq, block);
759
760         if(be_do_remats) {
761                 /* is the node rematerializable? */
762                 int costs = check_remat_conditions_costs(env, to_spill, before, 0);
763                 if(costs < env->reload_cost)
764                         return costs * freq;
765         }
766
767         return env->reload_cost * freq;
768 }
769
770 int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat,
771                            const ir_node *before)
772 {
773         return check_remat_conditions_costs(env, to_remat, before, 0) < REMAT_COST_INFINITE;
774 }
775
776 double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
777                                    ir_node *block, int pos)
778 {
779         ir_node *before = get_block_insertion_point(block, pos);
780         return be_get_reload_costs(env, to_spill, before);
781 }
782
783 /*
784  *  ___                     _     ____      _                 _
785  * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
786  *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
787  *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
788  * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
789  *
790  */
791
792 /**
793  * analyzes how to best spill a node and determine costs for that
794  */
795 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
796 {
797         ir_node *to_spill = spillinfo->to_spill;
798         ir_node *spill_block;
799         spill_t *spill;
800         double   spill_execfreq;
801
802         /* already calculated? */
803         if(spillinfo->spill_costs >= 0)
804                 return;
805
806         assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
807         assert(!be_is_Reload(to_spill));
808
809         /* some backends have virtual noreg/unknown nodes that are not scheduled
810          * and simply always available.
811          * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
812          * predecessor (of a PhiM) but this test might match other things too...
813          */
814         if(!sched_is_scheduled(to_spill)) {
815                 /* the value is always available: record a NoMem pseudo-spill */
816                 spill_t *spill = obstack_alloc(&env->obst, sizeof(spill[0]));
817                 spill->after = NULL;
818                 spill->next  = NULL;
819                 spill->spill = new_NoMem();
820
821                 spillinfo->spills      = spill;
822                 spillinfo->spill_costs = 0;
823
824                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
825                 return;
826         }
827
828         spill_block    = get_nodes_block(to_spill);
829         spill_execfreq = get_block_execfreq(env->exec_freq, spill_block);
830
831         if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
832                 /* TODO calculate correct costs...
833                  * (though we can't remat this node anyway so no big problem) */
834                 spillinfo->spill_costs = env->spill_cost * spill_execfreq;
835                 return;
836         }
837
838         if(spillinfo->spills != NULL) {
839                 spill_t *s;
840                 double   spills_execfreq;
841
842                         /* calculate sum of execution frequencies of individual spills */
843                 spills_execfreq = 0;
844                 s               = spillinfo->spills;
845                 for( ; s != NULL; s = s->next) {
846                         ir_node *spill_block = get_block(s->after);
847                         double   freq = get_block_execfreq(env->exec_freq, spill_block);
848
849                         spills_execfreq += freq;
850                 }
851
852                 DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
853                     spills_execfreq * env->spill_cost,
854                     spill_execfreq * env->spill_cost));
855
856                 /* multi-/latespill is advantageous -> return*/
857                 if(spills_execfreq < spill_execfreq) {
858                         DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
859                         spillinfo->spill_costs = spills_execfreq * env->spill_cost;
860                         return;
861                 }
862         }
863
864         /* default: create a single spill right after the definition */
865         spill        = obstack_alloc(&env->obst, sizeof(spill[0]));
866         spill->after = skip_keeps_phis(to_spill);
867         spill->next  = NULL;
868         spill->spill = NULL;
869
870         spillinfo->spills      = spill;
871         spillinfo->spill_costs = spill_execfreq * env->spill_cost;
872         DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
873 }
874
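/**
 * Ensures that every recorded reload of @p irn is dominated by at least one
 * spill position. The blocks of all reloads are marked with their dominance
 * pre-order numbers, the dominance subtree of every spill position is cleared
 * again, and if any mark survives an additional spill is placed directly
 * after the definition of @p irn.
 */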
875 void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
876 {
877         const spill_info_t *si = get_spillinfo(env, irn);
878         ir_node *start_block   = get_irg_start_block(get_irn_irg(irn));
879         int n_blocks           = get_Block_dom_max_subtree_pre_num(start_block);
880         bitset_t *reloads      = bitset_alloca(n_blocks);
881         reloader_t *r;
882         spill_t *s;
883
884         if (si == NULL)
885                 return;
886
887         /* Fill the bitset with the dominance pre-order numbers
888          * of the blocks the reloads are located in. */
889         for (r = si->reloaders; r != NULL; r = r->next) {
890                 ir_node *bl = get_nodes_block(r->reloader);
891                 bitset_set(reloads, get_Block_dom_tree_pre_num(bl));
892         }
893
894         /* Now, cancel out all the blocks that are dominated by each spill.
895          * If the bitset is not empty after that, we have reloads that are
896          * not dominated by any spill. */
897         for (s = si->spills; s != NULL; s = s->next) {
898                 ir_node *bl = get_nodes_block(s->after);
899                 int start   = get_Block_dom_tree_pre_num(bl);
900                 int end     = get_Block_dom_max_subtree_pre_num(bl);
901
902                 bitset_clear_range(reloads, start, end);
903         }
904
905         if (!bitset_is_empty(reloads))
906                 be_add_spill(env, si->to_spill, si->to_spill);
907 }
908
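/**
 * Materializes all recorded spills and reloads:
 *  - creates the PhiM nodes for spilled Phis first,
 *  - decides per reload whether a rematerialization is cheaper (and whether
 *    rematting every reloader makes the spill itself unnecessary),
 *  - creates and schedules the actual spill, reload and remat nodes,
 *  - and finally reconstructs SSA form for the reloaded values and for
 *    values with multiple spills.
 */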
909 void be_insert_spills_reloads(spill_env_t *env)
910 {
911         const arch_env_t      *arch_env  = env->arch_env;
912         const ir_exec_freq    *exec_freq = env->exec_freq;
913         spill_info_t          *si;
914         ir_nodeset_iterator_t  iter;
915         ir_node               *node;
916
917         BE_TIMER_PUSH(t_ra_spill_apply);
918
919         /* create all PhiMs first; this is needed so that Phis hanging on
920            spilled Phis work correctly */
921         foreach_ir_nodeset(&env->mem_phis, node, iter) {
922                 spill_info_t *info = get_spillinfo(env, node);
923                 spill_node(env, info);
924         }
925
926         /* process each spilled node */
927         for (si = set_first(env->spills); si; si = set_next(env->spills)) {
928                 reloader_t *rld;
929                 ir_node  *to_spill        = si->to_spill;
930                 ir_mode  *mode            = get_irn_mode(to_spill);
931                 ir_node **copies          = NEW_ARR_F(ir_node*, 0);
932                 double    all_remat_costs = 0; /* costs if we rematted all reloaders */
933                 int       force_remat     = 0;
934
935                 DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
936
937                 determine_spill_costs(env, si);
938
939                 /* determine possibility of rematerialisations */
940                 if(be_do_remats) {
941                         /* calculate cost savings for each individual value when it would
942                            be rematted instead of reloaded */
943                         for (rld = si->reloaders; rld != NULL; rld = rld->next) {
944                                 double   freq;
945                                 int      remat_cost;
946                                 int      remat_cost_delta;
947                                 ir_node *block;
948                                 ir_node *reloader = rld->reloader;
949
950                                 if(rld->rematted_node != NULL) {
951                                         DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
952                                              rld->rematted_node, reloader));
953                                         continue;
954                                 }
955                                 if(rld->remat_cost_delta >= REMAT_COST_INFINITE) {
956                                         DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
957                                              reloader));
958                                         all_remat_costs = REMAT_COST_INFINITE;
959                                         continue;
960                                 }
961
962                                 remat_cost  = check_remat_conditions_costs(env, to_spill,
963                                                                            reloader, 0);
964                                 if(remat_cost >= REMAT_COST_INFINITE) {
965                                         DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
966                                              reloader));
967                                         rld->remat_cost_delta = REMAT_COST_INFINITE;
968                                         all_remat_costs       = REMAT_COST_INFINITE;
969                                         continue;
970                                 }
971
972                                 remat_cost_delta      = remat_cost - env->reload_cost;
973                                 rld->remat_cost_delta = remat_cost_delta;
974                                 block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
975                                 freq                  = get_block_execfreq(exec_freq, block);
976                                 all_remat_costs      += remat_cost_delta * freq;
977                                 DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
978                                      "%d (rel %f)\n", reloader, remat_cost_delta,
979                                      remat_cost_delta * freq));
980                         }
981                         if(all_remat_costs < REMAT_COST_INFINITE) {
982                                 /* we don't need the costs for the spill if we can remat
983                                    all reloaders */
984                                 all_remat_costs -= si->spill_costs;
985
986                                 DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
987                                      env->spill_cost, si->spill_costs));
988                         }
989
990                         if(all_remat_costs < 0) {
991                                 DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
992                                      all_remat_costs));
993                                 force_remat = 1;
994                         }
995                 }
996
997                 /* go through all reloads for this spill */
998                 for (rld = si->reloaders; rld != NULL; rld = rld->next) {
999                         ir_node *copy; /* a reload is a "copy" of the original value */
1000
1001                         if (rld->rematted_node != NULL) {
1002                                 copy = rld->rematted_node;
1003                                 sched_add_before(rld->reloader, copy);
1004                         } else if (be_do_remats &&
1005                                         (force_remat || rld->remat_cost_delta < 0)) {
1006                                 copy = do_remat(env, to_spill, rld->reloader);
1007                         } else {
1008                                 /* make sure we have a spill */
1009                                 spill_node(env, si);
1010
1011                                 /* create a reload, using the first spill for now; SSA
1012                                  * reconstruction for the memory values comes below */
1013                                 assert(si->spills != NULL);
1014                                 copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode,
1015                                                  si->spills->spill);
1016 #ifdef FIRM_STATISTICS
1017                                 env->reload_count++;
1018 #endif
1019                         }
1020
1021                         DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
1022                              copy, to_spill, rld->reloader));
1023                         ARR_APP1(ir_node*, copies, copy);
1024                 }
1025
1026                 /* if we had any reloads or remats, then we need to reconstruct the
1027                  * SSA form for the spilled value */
1028                 if (ARR_LEN(copies) > 0) {
1029                         be_ssa_construction_env_t senv;
1030                         /* be_lv_t *lv = be_get_birg_liveness(env->birg); */
1031
1032                         be_ssa_construction_init(&senv, env->birg);
1033                         be_ssa_construction_add_copy(&senv, to_spill);
1034                         be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
1035                         be_ssa_construction_fix_users(&senv, to_spill);
1036
1037 #if 0
1038                         /* no need to enable this as long as we invalidate liveness
1039                            after this function... */
1040                         be_ssa_construction_update_liveness_phis(&senv);
1041                         be_liveness_update(to_spill);
1042                         len = ARR_LEN(copies);
1043                         for(i = 0; i < len; ++i) {
1044                                 be_liveness_update(lv, copies[i]);
1045                         }
1046 #endif
1047                         be_ssa_construction_destroy(&senv);
1048                 }
1049                 /* need to reconstruct SSA form if we had multiple spills */
1050                 if (si->spills != NULL && si->spills->next != NULL) {
1051                         spill_t *spill;
1052                         int      spill_count = 0;
1053
1054                         be_ssa_construction_env_t senv;
1055
1056                         be_ssa_construction_init(&senv, env->birg);
1057                         spill = si->spills;
1058                         for( ; spill != NULL; spill = spill->next) {
1059                                 /* maybe we rematerialized the value and need no spill */
1060                                 if(spill->spill == NULL)
1061                                         continue;
1062                                 be_ssa_construction_add_copy(&senv, spill->spill);
1063                                 spill_count++;
1064                         }
1065                         if(spill_count > 1) {
1066                                 /* all reloads are attached to the first spill, fix them now */
1067                                 be_ssa_construction_fix_users(&senv, si->spills->spill);
1068                         }
1069
1070                         be_ssa_construction_destroy(&senv);
1071                 }
1072
1073                 DEL_ARR_F(copies);
1074                 si->reloaders = NULL;
1075         }
1076
1077         stat_ev_dbl("spill_spills", env->spill_count);
1078         stat_ev_dbl("spill_reloads", env->reload_count);
1079         stat_ev_dbl("spill_remats", env->remat_count);
1080         stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);
1081
1082         /* Matze: In theory be_ssa_construction should take care of the liveness...
1083          * try to disable this again in the future */
1084         be_liveness_invalidate(env->birg->lv);
1085
1086         be_remove_dead_nodes_from_schedule(env->birg);
1087
1088         BE_TIMER_POP(t_ra_spill_apply);
1089 }
1090
1091 void be_init_spill(void)
1092 {
1093         FIRM_DBG_REGISTER(dbg, "firm.be.spill");
1094 }
1095
1096 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill);