ir/be/bespill.c
/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Implementation of the spill/reload placement abstraction layer
 * @author      Daniel Grund, Sebastian Hack, Matthias Braun
 * @date        29.09.2005
 * @version     $Id$
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>

#include "pset.h"
#include "irnode_t.h"
#include "ircons_t.h"
#include "iredges_t.h"
#include "irbackedge_t.h"
#include "irprintf.h"
#include "ident_t.h"
#include "type_t.h"
#include "entity_t.h"
#include "debug.h"
#include "irgwalk.h"
#include "array.h"
#include "pdeq.h"
#include "execfreq.h"
#include "irnodeset.h"
#include "error.h"

#include "bearch_t.h"
#include "belive_t.h"
#include "besched_t.h"
#include "bespill.h"
#include "benode_t.h"
#include "bechordal_t.h"
#include "bejavacoal.h"
#include "bespilloptions.h"
#include "bestatevent.h"
#include "bessaconstr.h"
#include "beirg_t.h"
#include "beintlive_t.h"
#include "bemodule.h"
#include "be_t.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

#define REMAT_COST_INFINITE  1000

typedef struct reloader_t reloader_t;
struct reloader_t {
        reloader_t *next;
        ir_node    *can_spill_after;
        ir_node    *reloader;
        ir_node    *rematted_node;
        int         remat_cost_delta; /**< costs needed for rematerialization,
                                           compared to placing a reload */
};

typedef struct spill_t spill_t;
struct spill_t {
        spill_t *next;
        ir_node *before;   /**< spill has to be placed before this node (or earlier) */
        ir_node *spill;
};

typedef struct spill_info_t spill_info_t;
struct spill_info_t {
        ir_node    *to_spill;  /**< the value that should get spilled */
        reloader_t *reloaders; /**< list of places where the value should get
                                    reloaded */
        spill_t    *spills;    /**< list of latest places where spill must be
                                    placed */
        double      spill_costs; /**< costs needed for spilling the value */
        const arch_register_class_t *reload_cls; /**< the register class in which the
                                                      reload should be placed */
};

struct spill_env_t {
        const arch_env_t *arch_env;
        ir_graph         *irg;
        struct obstack    obst;
        be_irg_t         *birg;
        int               spill_cost;     /**< the cost of a single spill node */
        int               reload_cost;    /**< the cost of a reload node */
        set              *spills;         /**< all spill_info_t's, which must be
                                               placed */
        ir_nodeset_t      mem_phis;       /**< set of all spilled phis. */
        ir_exec_freq     *exec_freq;
        unsigned          new_nodes_idx;  /**< all old nodes have an idx smaller
                                               than this */

#ifdef FIRM_STATISTICS
        unsigned          spill_count;
        unsigned          reload_count;
        unsigned          remat_count;
        unsigned          spilled_phi_count;
#endif
};

/**
 * Compare two spill infos; returns 0 iff they refer to the same value.
 */
static int cmp_spillinfo(const void *x, const void *y, size_t size)
{
        const spill_info_t *xx = x;
        const spill_info_t *yy = y;
        (void) size;

        return xx->to_spill != yy->to_spill;
}

/**
 * Returns spill info for a specific value (the value that is to be spilled)
 */
static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
{
        spill_info_t info, *res;
        int hash = hash_irn(value);

        info.to_spill = value;
        res = set_find(env->spills, &info, sizeof(info), hash);

        if (res == NULL) {
                info.reloaders   = NULL;
                info.spills      = NULL;
                info.spill_costs = -1;
                info.reload_cls  = NULL;
                res = set_insert(env->spills, &info, sizeof(info), hash);
        }

        return res;
}

spill_env_t *be_new_spill_env(be_irg_t *birg)
{
        const arch_env_t *arch_env = birg->main_env->arch_env;

        spill_env_t *env    = xmalloc(sizeof(env[0]));
        env->spills         = new_set(cmp_spillinfo, 1024);
        env->irg            = be_get_birg_irg(birg);
        env->birg           = birg;
        env->arch_env       = arch_env;
        ir_nodeset_init(&env->mem_phis);
        env->spill_cost     = arch_env->isa->spill_cost;
        env->reload_cost    = arch_env->isa->reload_cost;
        env->exec_freq      = be_get_birg_exec_freq(birg);
        obstack_init(&env->obst);

#ifdef FIRM_STATISTICS
        env->spill_count       = 0;
        env->reload_count      = 0;
        env->remat_count       = 0;
        env->spilled_phi_count = 0;
#endif

        return env;
}

void be_delete_spill_env(spill_env_t *env)
{
        del_set(env->spills);
        ir_nodeset_destroy(&env->mem_phis);
        obstack_free(&env->obst, NULL);
        free(env);
}
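
/*
 * Rough usage sketch of this interface (only a sketch: the real drivers are
 * the spill algorithms, e.g. the Belady spiller, and the variables birg,
 * value, before and cls below are purely illustrative):
 *
 *     spill_env_t *env = be_new_spill_env(birg);
 *
 *     // request a reload of a value in front of a node that needs it;
 *     // the necessary spill positions are derived automatically
 *     be_add_reload(env, value, before, cls, 1);
 *
 *     // materialise all spills/reloads/remats and repair SSA form
 *     be_insert_spills_reloads(env);
 *     be_delete_spill_env(env);
 */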

/*
 *  ____  _                  ____      _                 _
 * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
 * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
 * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
 * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
 *
 */

void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
#if 1
        spill_info_t *spill_info = get_spillinfo(env, to_spill);
        spill_t      *spill;
        spill_t      *s;
        spill_t      *last;

        assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
        DB((dbg, LEVEL_1, "Add spill of %+F before %+F\n", to_spill, before));

        /* spills that are dominated by others are not needed */
        last = NULL;
        s    = spill_info->spills;
        for( ; s != NULL; s = s->next) {
                /* no need to add this spill if it is dominated by another */
                if(value_dominates(s->before, before)) {
                        DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->before));
                        return;
                }
                /* remove spills that we dominate */
                if(value_dominates(before, s->before)) {
                        DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->before));
                        if(last != NULL) {
                                last->next         = s->next;
                        } else {
                                spill_info->spills = s->next;
                        }
                } else {
                        last = s;
                }
        }

        spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
        spill->before = before;
        spill->next   = spill_info->spills;
        spill->spill  = NULL;

        spill_info->spills = spill;
#endif
}

void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
                  ir_node *rematted_node)
{
        spill_info_t *spill_info;
        reloader_t *reloader;

        spill_info = get_spillinfo(env, to_spill);

        /* add the remat information */
        reloader                   = obstack_alloc(&env->obst, sizeof(reloader[0]));
        reloader->next             = spill_info->reloaders;
        reloader->reloader         = before;
        reloader->rematted_node    = rematted_node;
        reloader->remat_cost_delta = 0; /* We will never have a cost win over a
                                           reload since we're not even allowed to
                                           create a reload */

        spill_info->reloaders  = reloader;

        DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
                to_spill, before));
}

void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
                ir_node *can_spill_after, const arch_register_class_t *reload_cls,
                int allow_remat)
{
        spill_info_t *info;
        reloader_t *rel;

        assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));

        info = get_spillinfo(env, to_spill);

        if (is_Phi(to_spill)) {
                int i, arity;

                /* create spillinfos for the phi arguments */
                for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
                        ir_node *arg = get_irn_n(to_spill, i);
                        get_spillinfo(env, arg);
                }
        }

        assert(!is_Proj(before) && !be_is_Keep(before));

        /* put reload into list */
        rel                   = obstack_alloc(&env->obst, sizeof(rel[0]));
        rel->next             = info->reloaders;
        rel->reloader         = before;
        rel->rematted_node    = NULL;
        rel->can_spill_after  = can_spill_after;
        rel->remat_cost_delta = allow_remat ? 0 : REMAT_COST_INFINITE;

        info->reloaders  = rel;
        assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
        info->reload_cls = reload_cls;

        DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
                to_spill, before, allow_remat ? "" : " not"));
}

void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
                   const arch_register_class_t *reload_cls, int allow_remat)
{
        be_add_reload2(senv, to_spill, before, to_spill, reload_cls, allow_remat);
}

ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
{
        ir_node *last = sched_last(block);

        /* we might have keeps behind the jump... */
        while(be_is_Keep(last)) {
                last = sched_prev(last);
                assert(!sched_is_end(last));
        }

        assert(is_cfop(last));

        /* add the reload before the (cond-)jump */
        return last;
}

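/**
 * Returns the first node after @p node in the schedule that is neither a Phi
 * nor a (be_)Keep, i.e. the earliest point where a spill may be inserted.
 */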
static ir_node *skip_keeps_phis(ir_node *node)
{
        node = sched_next(node);
        while(is_Phi(node) || be_is_Keep(node)) {
                node = sched_next(node);
        }
        return node;
}

/**
 * Returns the point at which you can insert a node that should be executed
 * before block @p block when coming from pred @p pos.
 */
static ir_node *get_block_insertion_point(ir_node *block, int pos)
{
        ir_node *predblock;

        /* simply add the reload to the beginning of the block if we only have 1
         * predecessor. We don't need to check for phis as there can't be any in a
         * block with only 1 pred. */
        if(get_Block_n_cfgpreds(block) == 1) {
                assert(!is_Phi(sched_first(block)));
                return sched_first(block);
        }

        /* We have to reload the value in pred-block */
        predblock = get_Block_cfgpred_block(block, pos);
        return be_get_end_of_block_insertion_point(predblock);
}

void be_add_reload_at_end(spill_env_t *env, ir_node *to_spill,
                          const ir_node *block,
                          const arch_register_class_t *reload_cls,
                          int allow_remat)
{
        ir_node *before = be_get_end_of_block_insertion_point(block);
        be_add_reload(env, to_spill, before, reload_cls, allow_remat);
}

void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
                           int pos, const arch_register_class_t *reload_cls,
                           int allow_remat)
{
        ir_node *before = get_block_insertion_point(block, pos);
        be_add_reload(env, to_spill, before, reload_cls, allow_remat);
}

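/**
 * Marks a Phi to be spilled to memory: the Phi itself will later be turned
 * into a PhiM, and spills are requested for all of its arguments.
 */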
void be_spill_phi(spill_env_t *env, ir_node *node)
{
        ir_node *block;
        spill_info_t *spill;
        int i, arity;

        assert(is_Phi(node));

        ir_nodeset_insert(&env->mem_phis, node);

        /* create spills for the phi arguments */
        block = get_nodes_block(node);
        spill = get_spillinfo(env, node);
        for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
                ir_node *arg        = get_irn_n(node, i);
                ir_node *insert;
                //get_spillinfo(env, arg);

                /* some backends have virtual noreg/unknown nodes that are not scheduled
                 * and simply always available. */
                if(!sched_is_scheduled(arg)) {
                        ir_node *pred_block = get_Block_cfgpred_block(block, i);
                        insert = be_get_end_of_block_insertion_point(pred_block);
                } else {
                        insert = skip_keeps_phis(arg);
                }

                be_add_spill(env, arg, insert);
        }
}

/*
 *   ____                _         ____        _ _ _
 *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
 * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
 * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
 *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
 *                                      |_|
 */

static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);

/**
 * Creates the spills for a value: a be_Spill node is placed at every point
 * recorded in the spill list of the value.
 *
 * @param env        the spill environment
 * @param spillinfo  the spill info of the value that should be spilled
 */
static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_node *to_spill = spillinfo->to_spill;
        spill_t *spill;

        /* determine_spill_costs must have been run before */
        assert(spillinfo->spill_costs >= 0);

        /* some backends have virtual noreg/unknown nodes that are not scheduled
         * and simply always available. */
        if(!sched_is_scheduled(to_spill)) {
                /* the value is always available, no real spill needed */
                spillinfo->spills->spill = new_NoMem();
                DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
                return;
        }

        DBG((dbg, LEVEL_1, "spilling %+F ... ", to_spill));
        spill = spillinfo->spills;
        for( ; spill != NULL; spill = spill->next) {
                ir_node *block  = get_block(spill->before);
                ir_node *before = spill->before;

                /* place all spills before the reloads (as we can't guarantee the
                 * same order as the be_add_spill and be_add_reload calls) */
                while(get_irn_idx(sched_prev(before)) > env->new_nodes_idx) {
                        before = sched_prev(before);
                }

                spill->spill    = be_spill(env->arch_env, block, to_spill);
                sched_add_before(before, spill->spill);
                DB((dbg, LEVEL_1, "\t%+F before %+F\n", spill->spill, before));
#ifdef FIRM_STATISTICS
                env->spill_count++;
#endif
        }
        DBG((dbg, LEVEL_1, "\n"));
}

static void spill_node(spill_env_t *env, spill_info_t *spillinfo);

/**
 * Spills a Phi: if the first usage of a Phi result would have to be satisfied
 * from memory anyway, there is no sense in allocating a register for it.
 * Thus we spill it and all its operands to the same spill slot; the data Phi
 * thereby becomes a memory Phi (PhiM).
 *
 * @param env        the spill environment
 * @param spillinfo  the spill info of the Phi that should be spilled
 */
static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_graph *irg   = env->irg;
        ir_node  *phi   = spillinfo->to_spill;
        ir_node  *block = get_nodes_block(phi);
        ir_node  *unknown;
        ir_node **ins;
        spill_t  *spill;
        int       i;
        int       arity;

        assert(is_Phi(phi));
        assert(!get_opt_cse());
        DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));

        /* build a new PhiM */
        arity   = get_irn_arity(phi);
        ins     = alloca(sizeof(ir_node*) * arity);
        unknown = new_r_Unknown(irg, mode_M);
        for(i = 0; i < arity; ++i) {
                ins[i] = unknown;
        }

        /* override or replace spills list... */
        spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
        spill->before = skip_keeps_phis(phi);
        spill->spill  = new_r_Phi(irg, block, arity, ins, mode_M);
        spill->next   = NULL;

        spillinfo->spills = spill;
#ifdef FIRM_STATISTICS
        env->spilled_phi_count++;
#endif

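        /* spill each Phi argument and wire the resulting memory values into
         * the new PhiM */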
        for(i = 0; i < arity; ++i) {
                ir_node      *arg      = get_irn_n(phi, i);
                spill_info_t *arg_info = get_spillinfo(env, arg);

                determine_spill_costs(env, arg_info);
                spill_node(env, arg_info);

                set_irn_n(spill->spill, i, arg_info->spills->spill);
        }
        DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi,
             spill->spill));
}

/**
 * Spills a node.
 *
 * @param env        the spill environment
 * @param spillinfo  the spill info of the node that should be spilled
 */
static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_node *to_spill;

        /* node is already spilled */
        if(spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
                return;

        to_spill = spillinfo->to_spill;

        if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
                spill_phi(env, spillinfo);
        } else {
                spill_irn(env, spillinfo);
        }
}

/*
 *
 *  ____                      _            _       _ _
 * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
 * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
 * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
 * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
 *
 */

/**
 * Tests whether value @p arg is available before node @p reloader
 * @returns 1 if value is available, 0 otherwise
 */
static int is_value_available(spill_env_t *env, const ir_node *arg,
                              const ir_node *reloader)
{
        if(is_Unknown(arg) || arg == new_NoMem())
                return 1;

        if(be_is_Spill(arg))
                return 1;

        if(arg == get_irg_frame(env->irg))
                return 1;

        /* hack for now (happens when the reload should be inserted at the end
         * of a block) */
        if(is_Block(reloader)) {
                return 0;
        }

        /*
         * Values in ignore registers are always available
         */
        if(arch_irn_is(env->arch_env, arg, ignore)) {
                return 1;
        }

        /* the following test does not work while spilling,
         * because the liveness info is not adapted yet to the effects of the
         * additional spills/reloads.
         */
#if 0
        /* we want to remat before the insn reloader
         * thus an argument is alive if
         *   - it interferes with the reloader's result
         *   - or it is (last-) used by the reloader itself
         */
        if (values_interfere(env->birg->lv, reloader, arg)) {
                return 1;
        }

        arity = get_irn_arity(reloader);
        for (i = 0; i < arity; ++i) {
                ir_node *rel_arg = get_irn_n(reloader, i);
                if (rel_arg == arg)
                        return 1;
        }
#endif

        return 0;
}

/**
 * Checks whether the node can in principle be rematerialized
 */
static int is_remat_node(spill_env_t *env, const ir_node *node)
{
        const arch_env_t *arch_env = env->arch_env;

        assert(!be_is_Spill(node));

        if(arch_irn_is(arch_env, node, rematerializable))
                return 1;

        return 0;
}

/**
 * Check if a node is rematerializable. This tests for the following conditions:
 *
 * - The node itself is rematerializable
 * - All arguments of the node are available or rematerializable themselves
 * - The costs for the rematerialization operation are less than or equal to a limit
 *
 * Returns the costs needed for rematerialization, or something
 * >= REMAT_COST_INFINITE if remat is not possible.
 */
static int check_remat_conditions_costs(spill_env_t *env,
                const ir_node *spilled, const ir_node *reloader, int parentcosts)
{
        int i, arity;
        int argremats;
        int costs = 0;

        if(!is_remat_node(env, spilled))
                return REMAT_COST_INFINITE;

        if(be_is_Reload(spilled)) {
                costs += 2;
        } else {
                costs += arch_get_op_estimated_cost(env->arch_env, spilled);
        }
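        /* bail out if rematerialization (including the chain built up so far)
         * is already as expensive as a spill plus a reload */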
        if(parentcosts + costs >= env->reload_cost + env->spill_cost) {
                return REMAT_COST_INFINITE;
        }
        if(arch_irn_is(env->arch_env, spilled, modify_flags)) {
                return REMAT_COST_INFINITE;
        }

        argremats = 0;
        for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
                ir_node *arg = get_irn_n(spilled, i);

                if(is_value_available(env, arg, reloader))
                        continue;

                /* we have to rematerialize the argument as well */
                if(argremats >= 1) {
                        /* we only support rematerializing 1 argument at the moment,
                         * so that we don't have to care about register pressure
                         */
                        return REMAT_COST_INFINITE;
                }
                argremats++;

                costs += check_remat_conditions_costs(env, arg, reloader,
                                                      parentcosts + costs);
                if(parentcosts + costs >= env->reload_cost + env->spill_cost)
                        return REMAT_COST_INFINITE;
        }

        return costs;
}

/**
 * Re-materialize a node.
 *
 * @param env       the spill environment
 * @param spilled   the node that was spilled
 * @param reloader  an irn that requires a reload
 */
static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
{
        int i, arity;
        ir_node *res;
        ir_node *bl;
        ir_node **ins;

        if(is_Block(reloader)) {
                bl = reloader;
        } else {
                bl = get_nodes_block(reloader);
        }

        ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
        for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
                ir_node *arg = get_irn_n(spilled, i);

                if(is_value_available(env, arg, reloader)) {
                        ins[i] = arg;
                } else {
                        ins[i] = do_remat(env, arg, reloader);
#ifdef FIRM_STATISTICS
                        /* don't count the recursive call as remat */
                        env->remat_count--;
#endif
                }
        }

        /* create a copy of the node */
        res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
                          get_irn_op(spilled), get_irn_mode(spilled),
                          get_irn_arity(spilled), ins);
        copy_node_attr(spilled, res);
        new_backedge_info(res);

        DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));

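        /* only real operations are inserted into the schedule and counted as
         * remats; Projs are skipped here */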
        if (! is_Proj(res)) {
                /* insert in schedule */
                sched_reset(res);
                sched_add_before(reloader, res);
#ifdef FIRM_STATISTICS
                env->remat_count++;
#endif
        }

        return res;
}

double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
        ir_node *block = get_nodes_block(before);
        double   freq  = get_block_execfreq(env->exec_freq, block);
        (void) to_spill;

        return env->spill_cost * freq;
}

unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
                                       const ir_node *before)
{
        if(be_do_remats) {
                /* is the node rematerializable? */
                unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
                if(costs < (unsigned) env->reload_cost)
                        return costs;
        }

        return env->reload_cost;
}

double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
        ir_node      *block = get_nodes_block(before);
        double        freq  = get_block_execfreq(env->exec_freq, block);

        if(be_do_remats) {
                /* is the node rematerializable? */
                int costs = check_remat_conditions_costs(env, to_spill, before, 0);
                if(costs < env->reload_cost)
                        return costs * freq;
        }

        return env->reload_cost * freq;
}

int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat,
                           const ir_node *before)
{
        return check_remat_conditions_costs(env, to_remat, before, 0) < REMAT_COST_INFINITE;
}

double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
                                   ir_node *block, int pos)
{
        ir_node *before = get_block_insertion_point(block, pos);
        return be_get_reload_costs(env, to_spill, before);
}

/*
 *  ___                     _     ____      _                 _
 * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
 *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
 *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
 * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
 *
 */

/**
 * Analyzes how to best spill a node and determines the costs for that
 */
static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_node *to_spill = spillinfo->to_spill;
        ir_node *spill_block;
        spill_t *spill;
        double   spill_execfreq;

        /* already calculated? */
        if(spillinfo->spill_costs >= 0)
                return;

        assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
        assert(!be_is_Reload(to_spill));

        /* some backends have virtual noreg/unknown nodes that are not scheduled
         * and simply always available.
         * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
         * predecessor (of a PhiM) but this test might match other things too...
         */
        if(!sched_is_scheduled(to_spill)) {
                /* override spillinfos or create a new one */
                spill_t *spill = obstack_alloc(&env->obst, sizeof(spill[0]));
                spill->before  = NULL;
                spill->next    = NULL;
                spill->spill   = new_NoMem();

                spillinfo->spills      = spill;
                spillinfo->spill_costs = 0;

                DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
                return;
        }

        spill_block    = get_nodes_block(to_spill);
        spill_execfreq = get_block_execfreq(env->exec_freq, spill_block);

        if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
                /* TODO calculate correct costs...
                 * (though we can't remat this node anyway so no big problem) */
                spillinfo->spill_costs = env->spill_cost * spill_execfreq;
                return;
        }

        if(spillinfo->spills != NULL) {
                spill_t *s;
                double   spills_execfreq;

                /* calculate sum of execution frequencies of individual spills */
                spills_execfreq = 0;
                s               = spillinfo->spills;
                for( ; s != NULL; s = s->next) {
                        ir_node *spill_block = s->before;
                        double   freq;

                        if(!is_Block(spill_block)) {
                                spill_block = get_nodes_block(spill_block);
                        }
                        freq = get_block_execfreq(env->exec_freq, spill_block);

                        spills_execfreq += freq;
                }

                DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
                    spills_execfreq * env->spill_cost,
                    spill_execfreq * env->spill_cost));

                /* multi-/latespill is advantageous -> return */
                if(spills_execfreq < spill_execfreq) {
                        DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
                        spillinfo->spill_costs = spills_execfreq * env->spill_cost;
                        return;
                }
        }

        /* override spillinfos or create a new one */
        spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
        spill->before = skip_keeps_phis(to_spill);
        spill->next   = NULL;
        spill->spill  = NULL;

        spillinfo->spills      = spill;
        spillinfo->spill_costs = spill_execfreq * env->spill_cost;
        DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
}

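/**
 * Inserts the collected spills, reloads and rematerialisations into the
 * schedule and reconstructs SSA form for the affected values.
 */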
void be_insert_spills_reloads(spill_env_t *env)
{
        ir_graph              *irg       = env->irg;
        const arch_env_t      *arch_env  = env->arch_env;
        const ir_exec_freq    *exec_freq = env->exec_freq;
        spill_info_t          *si;
        ir_nodeset_iterator_t  iter;
        ir_node               *node;

        BE_TIMER_PUSH(t_ra_spill_apply);

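        /* remember up to which index the graph's nodes existed before spilling;
         * newly created spill/reload/remat nodes can be told apart by their index */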
        env->new_nodes_idx = get_irg_last_idx(irg);

        /* create all PhiMs first; this is needed so that Phis hanging on
           spilled Phis are handled correctly */
        foreach_ir_nodeset(&env->mem_phis, node, iter) {
                spill_info_t *info = get_spillinfo(env, node);
                spill_node(env, info);
        }

        /* process each spilled node */
        for (si = set_first(env->spills); si; si = set_next(env->spills)) {
                reloader_t *rld;
                ir_node  *to_spill        = si->to_spill;
                ir_mode  *mode            = get_irn_mode(to_spill);
                ir_node **copies          = NEW_ARR_F(ir_node*, 0);
                double    all_remat_costs = 0; /* costs if we would remat all nodes */
                int       force_remat     = 0;

                DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));

                determine_spill_costs(env, si);

                /* determine possibility of rematerialisations */
                if(be_do_remats) {
                        /* calculate cost savings for each individual value when it would
                           be rematted instead of reloaded */
                        for (rld = si->reloaders; rld != NULL; rld = rld->next) {
                                double   freq;
                                int      remat_cost;
                                int      remat_cost_delta;
                                ir_node *block;
                                ir_node *reloader = rld->reloader;

                                if(rld->rematted_node != NULL) {
                                        DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
                                             rld->rematted_node, reloader));
                                        continue;
                                }
                                if(rld->remat_cost_delta >= REMAT_COST_INFINITE) {
                                        DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
                                             reloader));
                                        all_remat_costs = REMAT_COST_INFINITE;
                                        continue;
                                }

                                remat_cost  = check_remat_conditions_costs(env, to_spill,
                                                                           reloader, 0);
                                if(remat_cost >= REMAT_COST_INFINITE) {
                                        DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
                                             reloader));
                                        rld->remat_cost_delta = REMAT_COST_INFINITE;
                                        all_remat_costs       = REMAT_COST_INFINITE;
                                        continue;
                                }

                                remat_cost_delta      = remat_cost - env->reload_cost;
                                rld->remat_cost_delta = remat_cost_delta;
                                block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
                                freq                  = get_block_execfreq(exec_freq, block);
                                all_remat_costs      += remat_cost_delta * freq;
                                DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
                                     "%d (rel %f)\n", reloader, remat_cost_delta,
                                     remat_cost_delta * freq));
                        }
                        if(all_remat_costs < REMAT_COST_INFINITE) {
                                /* we don't need the costs for the spill if we can remat
                                   all reloaders */
                                all_remat_costs -= si->spill_costs;

                                DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
                                     env->spill_cost, si->spill_costs));
                        }

                        if(all_remat_costs < 0) {
                                DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
                                     all_remat_costs));
                                force_remat = 1;
                        }
                }

                /* go through all reloads for this spill */
                for (rld = si->reloaders; rld != NULL; rld = rld->next) {
                        ir_node *copy; /* a reload is a "copy" of the original value */

                        if (rld->rematted_node != NULL) {
                                copy = rld->rematted_node;
                                sched_add_before(rld->reloader, copy);
                        } else if (be_do_remats &&
                                        (force_remat || rld->remat_cost_delta < 0)) {
                                copy = do_remat(env, to_spill, rld->reloader);
                        } else {
                                /* make sure we have a spill */
                                spill_node(env, si);

                                /* create a reload; use the first spill for now, SSA
                                 * reconstruction for the memory values comes below */
                                assert(si->spills != NULL);
                                copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode,
                                                 si->spills->spill);
#ifdef FIRM_STATISTICS
                                env->reload_count++;
#endif
                        }

                        DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
                             copy, to_spill, rld->reloader));
                        ARR_APP1(ir_node*, copies, copy);
                }

                /* if we had any reloads or remats, then we need to reconstruct the
                 * SSA form for the spilled value */
                if (ARR_LEN(copies) > 0) {
                        be_ssa_construction_env_t senv;
                        /* be_lv_t *lv = be_get_birg_liveness(env->birg); */

                        be_ssa_construction_init(&senv, env->birg);
                        be_ssa_construction_add_copy(&senv, to_spill);
                        be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
                        be_ssa_construction_fix_users(&senv, to_spill);

#if 0
                        /* no need to enable this as long as we invalidate liveness
                           after this function... */
                        be_ssa_construction_update_liveness_phis(&senv);
                        be_liveness_update(to_spill);
                        len = ARR_LEN(copies);
                        for(i = 0; i < len; ++i) {
                                be_liveness_update(lv, copies[i]);
                        }
#endif
                        be_ssa_construction_destroy(&senv);
                }
                /* need to reconstruct SSA form if we had multiple spills */
                if (si->spills != NULL && si->spills->next != NULL) {
                        spill_t *spill;
                        int      spill_count = 0;

                        be_ssa_construction_env_t senv;

                        be_ssa_construction_init(&senv, env->birg);
                        spill = si->spills;
                        for( ; spill != NULL; spill = spill->next) {
                                /* maybe we rematerialized the value and need no spill */
                                if(spill->spill == NULL)
                                        continue;
                                be_ssa_construction_add_copy(&senv, spill->spill);
                                spill_count++;
                        }
                        if(spill_count > 1) {
                                /* all reloads are attached to the first spill, fix them now */
                                be_ssa_construction_fix_users(&senv, si->spills->spill);
                        }

                        be_ssa_construction_destroy(&senv);
                }

                DEL_ARR_F(copies);
                si->reloaders = NULL;
        }

        stat_ev_dbl("spill_spills", env->spill_count);
        stat_ev_dbl("spill_reloads", env->reload_count);
        stat_ev_dbl("spill_remats", env->remat_count);
        stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);

        /* Matze: In theory be_ssa_construction should take care of the liveness...
         * try to disable this again in the future */
        be_liveness_invalidate(env->birg->lv);

        be_remove_dead_nodes_from_schedule(env->birg);

        BE_TIMER_POP(t_ra_spill_apply);
}

void be_init_spill(void)
{
        FIRM_DBG_REGISTER(dbg, "firm.be.spill");
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill);