Add ALLOCAN() and ALLOCANZ().
[libfirm] / ir / be / bespill.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       implementation of the spill/reload placement abstraction layer
23  * @author      Daniel Grund, Sebastian Hack, Matthias Braun
24  * @date        29.09.2005
25  * @version     $Id$
26  */
27 #include "config.h"
28
29 #include <stdlib.h>
30 #include <stdbool.h>
31
32 #include "pset.h"
33 #include "irnode_t.h"
34 #include "ircons_t.h"
35 #include "iredges_t.h"
36 #include "irbackedge_t.h"
37 #include "irprintf.h"
38 #include "ident_t.h"
39 #include "type_t.h"
40 #include "entity_t.h"
41 #include "debug.h"
42 #include "irgwalk.h"
43 #include "array.h"
44 #include "pdeq.h"
45 #include "execfreq.h"
46 #include "irnodeset.h"
47 #include "error.h"
48
49 #include "bearch_t.h"
50 #include "belive_t.h"
51 #include "besched_t.h"
52 #include "bespill.h"
54 #include "benode_t.h"
55 #include "bechordal_t.h"
56 #include "bespilloptions.h"
57 #include "bestatevent.h"
58 #include "bessaconstr.h"
59 #include "beirg_t.h"
60 #include "beintlive_t.h"
61 #include "bemodule.h"
62 #include "be_t.h"
63
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
65
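/** cost value used to mean "rematerialisation is impossible or not allowed" */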
66 #define REMAT_COST_INFINITE  1000
67
68 typedef struct reloader_t reloader_t;
69 struct reloader_t {
70         reloader_t *next;
71         ir_node    *can_spill_after;
72         ir_node    *reloader;
73         ir_node    *rematted_node;
74         int         remat_cost_delta; /**< costs needed for rematerialization,
75                                             compared to placing a reload */
76 };
77
78 typedef struct spill_t spill_t;
79 struct spill_t {
80         spill_t *next;
81         ir_node *after;  /**< spill has to be placed after this node (or earlier) */
82         ir_node *spill;
83 };
84
85 typedef struct spill_info_t spill_info_t;
86 struct spill_info_t {
87         ir_node    *to_spill;  /**< the value that should get spilled */
88         reloader_t *reloaders; /**< list of places where the value should get
89                                     reloaded */
90         spill_t    *spills;    /**< list of latest places where spill must be
91                                     placed */
92         double      spill_costs; /**< costs needed for spilling the value */
93         const arch_register_class_t *reload_cls; /**< the register class in which the
94                                                        reload should be placed */
95 };
96
97 struct spill_env_t {
98         const arch_env_t *arch_env;
99         ir_graph         *irg;
100         struct obstack    obst;
101         be_irg_t         *birg;
102         int               spill_cost;     /**< the cost of a single spill node */
103         int               reload_cost;    /**< the cost of a reload node */
104         set              *spills;         /**< all spill_info_t's, which must be
105                                                placed */
106         ir_nodeset_t      mem_phis;       /**< set of all spilled phis. */
107         ir_exec_freq     *exec_freq;
108
109 #ifdef FIRM_STATISTICS
110         unsigned          spill_count;
111         unsigned          reload_count;
112         unsigned          remat_count;
113         unsigned          spilled_phi_count;
114 #endif
115 };
116
117 /**
118  * Compare two spill infos.
119  */
120 static int cmp_spillinfo(const void *x, const void *y, size_t size)
121 {
122         const spill_info_t *xx = x;
123         const spill_info_t *yy = y;
124         (void) size;
125
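        /* comparison follows memcmp convention: 0 means both infos describe the same value */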
126         return xx->to_spill != yy->to_spill;
127 }
128
129 /**
130  * Returns spill info for a specific value (the value that is to be spilled)
131  */
132 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
133 {
134         spill_info_t info, *res;
135         int hash = hash_irn(value);
136
137         info.to_spill = value;
138         res = set_find(env->spills, &info, sizeof(info), hash);
139
140         if (res == NULL) {
141                 info.reloaders   = NULL;
142                 info.spills      = NULL;
143                 info.spill_costs = -1;
144                 info.reload_cls  = NULL;
145                 res = set_insert(env->spills, &info, sizeof(info), hash);
146         }
147
148         return res;
149 }
150
151 spill_env_t *be_new_spill_env(be_irg_t *birg)
152 {
153         const arch_env_t *arch_env = birg->main_env->arch_env;
154
155         spill_env_t *env = XMALLOC(spill_env_t);
156         env->spills         = new_set(cmp_spillinfo, 1024);
157         env->irg            = be_get_birg_irg(birg);
158         env->birg           = birg;
159         env->arch_env       = arch_env;
160         ir_nodeset_init(&env->mem_phis);
161         env->spill_cost     = arch_env->spill_cost;
162         env->reload_cost    = arch_env->reload_cost;
163         env->exec_freq      = be_get_birg_exec_freq(birg);
164         obstack_init(&env->obst);
165
166 #ifdef FIRM_STATISTICS
167         env->spill_count       = 0;
168         env->reload_count      = 0;
169         env->remat_count       = 0;
170         env->spilled_phi_count = 0;
171 #endif
172
173         return env;
174 }
175
176 void be_delete_spill_env(spill_env_t *env)
177 {
178         del_set(env->spills);
179         ir_nodeset_destroy(&env->mem_phis);
180         obstack_free(&env->obst, NULL);
181         free(env);
182 }
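/* Typical lifecycle (sketch, not part of this file):
 *
 *     spill_env_t *senv = be_new_spill_env(birg);
 *     ... the spilling algorithm calls be_add_spill()/be_add_reload()/
 *         be_spill_phi() to describe its decisions ...
 *     be_insert_spills_reloads(senv);
 *     be_delete_spill_env(senv);
 */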
183
184 /*
185  *  ____  _                  ____      _                 _
186  * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
187  * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
188  * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
189  * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
190  *
191  */
192
193 void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
194 {
195         spill_info_t *spill_info = get_spillinfo(env, to_spill);
196         spill_t      *spill;
197         spill_t      *s;
198         spill_t      *last;
199
200         assert(!arch_irn_is(to_spill, dont_spill));
201         DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));
202
203         /* Just for safety make sure that we do not insert the spill in front of a phi */
204         assert(!is_Phi(sched_next(after)));
205
206         /* spills that are dominated by others are not needed */
207         last = NULL;
208         s    = spill_info->spills;
209         for( ; s != NULL; s = s->next) {
210                 /* no need to add this spill if it is dominated by another */
211                 if(value_dominates(s->after, after)) {
212                         DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after));
213                         return;
214                 }
215                 /* remove spills that we dominate */
216                 if(value_dominates(after, s->after)) {
217                         DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after));
218                         if(last != NULL) {
219                                 last->next         = s->next;
220                         } else {
221                                 spill_info->spills = s->next;
222                         }
223                 } else {
224                         last = s;
225                 }
226         }
227
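        /* record the new spill point at the head of the (dominance-pruned) list */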
228         spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
229         spill->after  = after;
230         spill->next   = spill_info->spills;
231         spill->spill  = NULL;
232
233         spill_info->spills = spill;
234 }
235
236 void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
237                   ir_node *rematted_node)
238 {
239         spill_info_t *spill_info;
240         reloader_t *reloader;
241
242         spill_info = get_spillinfo(env, to_spill);
243
244         /* add the remat information */
245         reloader                   = obstack_alloc(&env->obst, sizeof(reloader[0]));
246         reloader->next             = spill_info->reloaders;
247         reloader->reloader         = before;
248         reloader->rematted_node    = rematted_node;
249         reloader->remat_cost_delta = 0; /* We will never have a cost win over a
250                                            reload since we're not even allowed to
251                                            create a reload */
252
253         spill_info->reloaders  = reloader;
254
255         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
256                 to_spill, before));
257 }
258
259 void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
260                 ir_node *can_spill_after, const arch_register_class_t *reload_cls,
261                 int allow_remat)
262 {
263         spill_info_t *info;
264         reloader_t *rel;
265
266         assert(!arch_irn_is(to_spill, dont_spill));
267
268         info = get_spillinfo(env, to_spill);
269
270         if (is_Phi(to_spill)) {
271                 int i, arity;
272
273                 /* create spillinfos for the phi arguments */
274                 for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
275                         ir_node *arg = get_irn_n(to_spill, i);
276                         get_spillinfo(env, arg);
277                 }
278         }
279
280         assert(!is_Proj(before) && !be_is_Keep(before));
281
282         /* put reload into list */
283         rel                   = obstack_alloc(&env->obst, sizeof(rel[0]));
284         rel->next             = info->reloaders;
285         rel->reloader         = before;
286         rel->rematted_node    = NULL;
287         rel->can_spill_after  = can_spill_after;
288         rel->remat_cost_delta = allow_remat ? 0 : REMAT_COST_INFINITE;
289
290         info->reloaders  = rel;
291         assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
292         info->reload_cls = reload_cls;
293
294         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
295                 to_spill, before, allow_remat ? "" : " not"));
296 }
297
298 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
299                    const arch_register_class_t *reload_cls, int allow_remat)
300 {
301         be_add_reload2(senv, to_spill, before, to_spill, reload_cls, allow_remat);
302
303 }
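/* Illustrative use (sketch, not part of this file): a spiller that decides
 * that `value` cannot stay in a register up to its use `before` would call
 *
 *     be_add_reload(senv, value, before, cls, 1);
 *
 * The actual spill/reload/remat nodes are only materialised later by
 * be_insert_spills_reloads(). */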
304
305 ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
306 {
307         ir_node *last = sched_last(block);
308
309         /* we might have keeps behind the jump... */
310         while (be_is_Keep(last)) {
311                 last = sched_prev(last);
312                 assert(!sched_is_end(last));
313         }
314
315         assert(is_cfop(last));
316
317         /* add the reload before the (cond-)jump */
318         return last;
319 }
320
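/**
 * Skips over any Phi and Keep nodes scheduled directly after @p node, so that
 * insertions after the returned node do not end up in front of them.
 */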
321 static ir_node *skip_keeps_phis(ir_node *node)
322 {
323         while(true) {
324                 ir_node *next = sched_next(node);
325                 if(!is_Phi(next) && !be_is_Keep(next))
326                         break;
327                 node = next;
328         }
329         return node;
330 }
331
332 /**
333  * Returns the point at which you can insert a node that should be executed
334  * before block @p block when coming from pred @p pos.
335  */
336 static ir_node *get_block_insertion_point(ir_node *block, int pos)
337 {
338         ir_node *predblock;
339
340         /* simply add the reload to the beginning of the block if we only have 1
341          * predecessor. We don't need to check for phis as there can't be any in a
342          * block with only 1 pred. */
343         if(get_Block_n_cfgpreds(block) == 1) {
344                 assert(!is_Phi(sched_first(block)));
345                 return sched_first(block);
346         }
347
348         /* We have to reload the value in pred-block */
349         predblock = get_Block_cfgpred_block(block, pos);
350         return be_get_end_of_block_insertion_point(predblock);
351 }
352
353 void be_add_reload_at_end(spill_env_t *env, ir_node *to_spill,
354                           const ir_node *block,
355                           const arch_register_class_t *reload_cls,
356                           int allow_remat)
357 {
358         ir_node *before = be_get_end_of_block_insertion_point(block);
359         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
360 }
361
362 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
363                            int pos, const arch_register_class_t *reload_cls,
364                            int allow_remat)
365 {
366         ir_node *before = get_block_insertion_point(block, pos);
367         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
368 }
369
370 void be_spill_phi(spill_env_t *env, ir_node *node)
371 {
372         ir_node *block;
373         spill_info_t* spill;
374         int i, arity;
375
376         assert(is_Phi(node));
377
378         ir_nodeset_insert(&env->mem_phis, node);
379
380         /* create spills for the phi arguments */
381         block = get_nodes_block(node);
382         spill = get_spillinfo(env, node);
383         for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
384                 ir_node *arg = get_irn_n(node, i);
385                 ir_node *insert;
386                 //get_spillinfo(env, arg);
387
388                 /* some backends have virtual noreg/unknown nodes that are not scheduled
389                  * and simply always available. */
390                 if(!sched_is_scheduled(arg)) {
391                         ir_node *pred_block = get_Block_cfgpred_block(block, i);
392                         insert = be_get_end_of_block_insertion_point(pred_block);
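                        /* be_add_spill() places the spill after this node, so step back before the jump */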
393                         insert = sched_prev(insert);
394                 } else {
395                         insert = skip_keeps_phis(arg);
396                 }
397
398                 be_add_spill(env, arg, insert);
399         }
400 }
401
402 /*
403  *   ____                _         ____        _ _ _
404  *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
405  * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
406  * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
407  *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
408  *                                      |_|
409  */
410
411 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);
412
413 /**
414  * Creates the spill nodes for a value.
415  *
416  * @param env        the spill environment
417  * @param spillinfo  the spill info of the value that should be spilled
421  */
422 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
423 {
424         ir_node *to_spill = spillinfo->to_spill;
425         spill_t *spill;
426
427         /* determine_spill_costs must have been run before */
428         assert(spillinfo->spill_costs >= 0);
429
430         /* some backends have virtual noreg/unknown nodes that are not scheduled
431          * and simply always available. */
432         if(!sched_is_scheduled(to_spill)) {
433                 /* the value is always available, so a NoMem is used as its spill */
434                 spillinfo->spills->spill = new_NoMem();
435                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
436                 return;
437         }
438
439         DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
440         spill = spillinfo->spills;
441         for( ; spill != NULL; spill = spill->next) {
442                 ir_node *after = spill->after;
443                 ir_node *block = get_block(after);
444
445                 after = skip_keeps_phis(after);
446
447                 spill->spill = be_spill(block, to_spill);
448                 sched_add_after(after, spill->spill);
449                 DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
450 #ifdef FIRM_STATISTICS
451                 env->spill_count++;
452 #endif
453         }
454         DBG((dbg, LEVEL_1, "\n"));
455 }
456
457 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
458
459 /**
460  * If the first usage of a Phi result would have to come out of memory
461  * (i.e. be a reload), there is no sense in allocating a register for it.
462  * Thus we spill it and all its operands to the same spill slot:
463  * the data Phi becomes a memory Phi (PhiM).
464  *
465  * @param env        the spill environment
466  * @param spillinfo  the spill info of the Phi node that should be spilled
468  */
469 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
470 {
471         ir_graph *irg   = env->irg;
472         ir_node  *phi   = spillinfo->to_spill;
473         ir_node  *block = get_nodes_block(phi);
474         ir_node  *unknown;
475         ir_node **ins;
476         spill_t  *spill;
477         int       i;
478         int       arity;
479
480         assert(is_Phi(phi));
481         assert(!get_opt_cse());
482         DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
483
484         /* build a new PhiM */
485         arity   = get_irn_arity(phi);
486         ins     = ALLOCAN(ir_node*, arity);
487         unknown = new_r_Unknown(irg, mode_M);
488         for(i = 0; i < arity; ++i) {
489                 ins[i] = unknown;
490         }
491
492         /* override or replace spills list... */
493         spill         = obstack_alloc(&env->obst, sizeof(spill[0]));
494         spill->after  = skip_keeps_phis(phi);
495         spill->spill  = new_r_Phi(irg, block, arity, ins, mode_M);
496         spill->next   = NULL;
497
498         spillinfo->spills = spill;
499 #ifdef FIRM_STATISTICS
500         env->spilled_phi_count++;
501 #endif
502
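        /* spill each Phi argument and route the resulting memory values into the PhiM */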
503         for(i = 0; i < arity; ++i) {
504                 ir_node      *arg      = get_irn_n(phi, i);
505                 spill_info_t *arg_info = get_spillinfo(env, arg);
506
507                 determine_spill_costs(env, arg_info);
508                 spill_node(env, arg_info);
509
510                 set_irn_n(spill->spill, i, arg_info->spills->spill);
511         }
512         DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi,
513              spill->spill));
514 }
515
516 /**
517  * Spill a node.
518  *
519  * @param env        the spill environment
520  * @param spillinfo  the spill info of the node that should be spilled
521  */
522 static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
523 {
524         ir_node *to_spill;
525
526         /* node is already spilled */
527         if(spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
528                 return;
529
530         to_spill = spillinfo->to_spill;
531
532         if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
533                 spill_phi(env, spillinfo);
534         } else {
535                 spill_irn(env, spillinfo);
536         }
537 }
538
539 /*
540  *
541  *  ____                      _            _       _ _
542  * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
543  * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
544  * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
545  * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
546  *
547  */
548
549 /**
550  * Tests whether value @p arg is available before node @p reloader
551  * @returns 1 if value is available, 0 otherwise
552  */
553 static int is_value_available(spill_env_t *env, const ir_node *arg,
554                               const ir_node *reloader)
555 {
556         if(is_Unknown(arg) || arg == new_NoMem())
557                 return 1;
558
559         if(be_is_Spill(arg))
560                 return 1;
561
562         if(arg == get_irg_frame(env->irg))
563                 return 1;
564
565         /* hack for now (happens when command should be inserted at end of block) */
566         if(is_Block(reloader)) {
567                 return 0;
568         }
569
570         /*
571          * Ignore registers are always available
572          */
573         if (arch_irn_is(arg, ignore)) {
574                 return 1;
575         }
576
577         /* the following test does not work while spilling,
578          * because the liveness info is not adapted yet to the effects of the
579          * additional spills/reloads.
580          */
581 #if 0
582         /* we want to remat before the insn reloader
583  * thus an argument is alive if
584          *   - it interferes with the reloaders result
585          *   - or it is (last-) used by reloader itself
586          */
587         if (values_interfere(env->birg->lv, reloader, arg)) {
588                 return 1;
589         }
590
591         arity = get_irn_arity(reloader);
592         for (i = 0; i < arity; ++i) {
593                 ir_node *rel_arg = get_irn_n(reloader, i);
594                 if (rel_arg == arg)
595                         return 1;
596         }
597 #endif
598
599         return 0;
600 }
601
602 /**
603  * Checks whether the node can be rematerialized in principle
604  */
605 static int is_remat_node(const ir_node *node)
606 {
607         assert(!be_is_Spill(node));
608
609         if (arch_irn_is(node, rematerializable))
610                 return 1;
611
612         return 0;
613 }
614
615 /**
616  * Check if a node is rematerializable. This tests for the following conditions:
617  *
618  * - The node itself is rematerializable
619  * - All arguments of the node are available or also rematerialisable
620  * - The costs of the rematerialisation operation are less than or equal to a limit
621  *
622  * Returns the costs needed for rematerialisation, or a value
623  * >= REMAT_COST_INFINITE if remat is not possible.
624  */
625 static int check_remat_conditions_costs(spill_env_t *env,
626                 const ir_node *spilled, const ir_node *reloader, int parentcosts)
627 {
628         int i, arity;
629         int argremats;
630         int costs = 0;
631
632         if (!is_remat_node(spilled))
633                 return REMAT_COST_INFINITE;
634
635         if(be_is_Reload(spilled)) {
636                 costs += 2;
637         } else {
638                 costs += arch_get_op_estimated_cost(spilled);
639         }
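        /* a remat only pays off if it is cheaper than spilling the value once plus reloading it here */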
640         if(parentcosts + costs >= env->reload_cost + env->spill_cost) {
641                 return REMAT_COST_INFINITE;
642         }
643         if (arch_irn_is(spilled, modify_flags)) {
644                 return REMAT_COST_INFINITE;
645         }
646
647         argremats = 0;
648         for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
649                 ir_node *arg = get_irn_n(spilled, i);
650
651                 if(is_value_available(env, arg, reloader))
652                         continue;
653
654                 /* we have to rematerialize the argument as well */
655                 if(argremats >= 1) {
656                         /* we only support rematerializing 1 argument at the moment,
657                          * so that we don't have to care about register pressure
658                          */
659                         return REMAT_COST_INFINITE;
660                 }
661                 argremats++;
662
663                 costs += check_remat_conditions_costs(env, arg, reloader,
664                                                       parentcosts + costs);
665                 if(parentcosts + costs >= env->reload_cost + env->spill_cost)
666                         return REMAT_COST_INFINITE;
667         }
668
669         return costs;
670 }
671
672 /**
673  * Re-materialize a node.
674  *
675  * @param env       the spill environment
676  * @param spilled   the node that was spilled
677  * @param reloader  an irn that requires a reload
678  */
679 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
680 {
681         int i, arity;
682         ir_node *res;
683         ir_node *bl;
684         ir_node **ins;
685
686         if(is_Block(reloader)) {
687                 bl = reloader;
688         } else {
689                 bl = get_nodes_block(reloader);
690         }
691
692         ins = ALLOCAN(ir_node*, get_irn_arity(spilled));
693         for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
694                 ir_node *arg = get_irn_n(spilled, i);
695
696                 if(is_value_available(env, arg, reloader)) {
697                         ins[i] = arg;
698                 } else {
699                         ins[i] = do_remat(env, arg, reloader);
700 #ifdef FIRM_STATISTICS
701                         /* don't count the recursive call as remat */
702                         env->remat_count--;
703 #endif
704                 }
705         }
706
707         /* create a copy of the node */
708         res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
709                           get_irn_op(spilled), get_irn_mode(spilled),
710                           get_irn_arity(spilled), ins);
711         copy_node_attr(spilled, res);
712         arch_env_mark_remat(env->arch_env, res);
713         new_backedge_info(res);
714
715         DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
716
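        /* Projs are not scheduled themselves; only real operations enter the schedule */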
717         if (! is_Proj(res)) {
718                 /* insert in schedule */
719                 sched_reset(res);
720                 sched_add_before(reloader, res);
721 #ifdef FIRM_STATISTICS
722                 env->remat_count++;
723 #endif
724         }
725
726         return res;
727 }
728
729 double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
730 {
731         ir_node *block = get_nodes_block(before);
732         double   freq  = get_block_execfreq(env->exec_freq, block);
733         (void) to_spill;
734
735         return env->spill_cost * freq;
736 }
737
738 unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
739                                        const ir_node *before)
740 {
741         if(be_do_remats) {
742                 /* is the node rematerializable? */
743                 unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
744                 if(costs < (unsigned) env->reload_cost)
745                         return costs;
746         }
747
748         return env->reload_cost;
749 }
750
751 double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
752 {
753         ir_node      *block = get_nodes_block(before);
754         double        freq  = get_block_execfreq(env->exec_freq, block);
755
756         if(be_do_remats) {
757                 /* is the node rematerializable? */
758                 int costs = check_remat_conditions_costs(env, to_spill, before, 0);
759                 if(costs < env->reload_cost)
760                         return costs * freq;
761         }
762
763         return env->reload_cost * freq;
764 }
765
766 int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat,
767                            const ir_node *before)
768 {
769         return check_remat_conditions_costs(env, to_remat, before, 0) < REMAT_COST_INFINITE;
770 }
771
772 double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
773                                    ir_node *block, int pos)
774 {
775         ir_node *before = get_block_insertion_point(block, pos);
776         return be_get_reload_costs(env, to_spill, before);
777 }
778
779 /*
780  *  ___                     _     ____      _                 _
781  * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
782  *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
783  *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
784  * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
785  *
786  */
787
788 /**
789  * Analyzes how to best spill a node and determines the costs for that.
790  */
791 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
792 {
793         ir_node *to_spill = spillinfo->to_spill;
794         ir_node *spill_block;
795         spill_t *spill;
796         double   spill_execfreq;
797
798         /* already calculated? */
799         if(spillinfo->spill_costs >= 0)
800                 return;
801
802         assert(!arch_irn_is(to_spill, dont_spill));
803         assert(!be_is_Reload(to_spill));
804
805         /* some backends have virtual noreg/unknown nodes that are not scheduled
806          * and simply always available.
807          * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
808          * predecessor (of a PhiM) but this test might match other things too...
809          */
810         if(!sched_is_scheduled(to_spill)) {
811                 /* override spillinfos or create a new one */
812                 spill_t *spill = obstack_alloc(&env->obst, sizeof(spill[0]));
813                 spill->after = NULL;
814                 spill->next  = NULL;
815                 spill->spill = new_NoMem();
816
817                 spillinfo->spills      = spill;
818                 spillinfo->spill_costs = 0;
819
820                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
821                 return;
822         }
823
824         spill_block    = get_nodes_block(to_spill);
825         spill_execfreq = get_block_execfreq(env->exec_freq, spill_block);
826
827         if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
828                 /* TODO calculate correct costs...
829                  * (though we can't remat this node anyway so no big problem) */
830                 spillinfo->spill_costs = env->spill_cost * spill_execfreq;
831                 return;
832         }
833
834         if(spillinfo->spills != NULL) {
835                 spill_t *s;
836                 double   spills_execfreq;
837
838                 /* calculate sum of execution frequencies of individual spills */
839                 spills_execfreq = 0;
840                 s               = spillinfo->spills;
841                 for( ; s != NULL; s = s->next) {
842                         ir_node *spill_block = get_block(s->after);
843                         double   freq = get_block_execfreq(env->exec_freq, spill_block);
844
845                         spills_execfreq += freq;
846                 }
847
848                 DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
849                     spills_execfreq * env->spill_cost,
850                     spill_execfreq * env->spill_cost));
851
852                 /* multi-/latespill is advantageous -> return */
853                 if(spills_execfreq < spill_execfreq) {
854                         DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
855                         spillinfo->spill_costs = spills_execfreq * env->spill_cost;
856                         return;
857                 }
858         }
859
860         /* override spillinfos or create a new one */
861         spill        = obstack_alloc(&env->obst, sizeof(spill[0]));
862         spill->after = skip_keeps_phis(to_spill);
863         spill->next  = NULL;
864         spill->spill = NULL;
865
866         spillinfo->spills      = spill;
867         spillinfo->spill_costs = spill_execfreq * env->spill_cost;
868         DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
869 }
870
871 void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
872 {
873         const spill_info_t *si = get_spillinfo(env, irn);
874         ir_node *start_block   = get_irg_start_block(get_irn_irg(irn));
875         int n_blocks           = get_Block_dom_max_subtree_pre_num(start_block);
876         bitset_t *reloads      = bitset_alloca(n_blocks);
877         reloader_t *r;
878         spill_t *s;
879
880         if (si == NULL)
881                 return;
882
883         /* Fill the bitset with the dominance pre-order numbers
884          * of the blocks the reloads are located in. */
885         for (r = si->reloaders; r != NULL; r = r->next) {
886                 ir_node *bl = get_nodes_block(r->reloader);
887                 bitset_set(reloads, get_Block_dom_tree_pre_num(bl));
888         }
889
890         /* Now, cancel out all the blocks that are dominated by each spill.
891          * If the bitset is not empty after that, we have reloads that are
892          * not dominated by any spill. */
893         for (s = si->spills; s != NULL; s = s->next) {
894                 ir_node *bl = get_nodes_block(s->after);
895                 int start   = get_Block_dom_tree_pre_num(bl);
896                 int end     = get_Block_dom_max_subtree_pre_num(bl);
897
898                 bitset_clear_range(reloads, start, end);
899         }
900
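        /* some reload is not dominated by any spill: add a spill right after the value's definition */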
901         if (!bitset_is_empty(reloads))
902                 be_add_spill(env, si->to_spill, si->to_spill);
903 }
904
905 void be_insert_spills_reloads(spill_env_t *env)
906 {
907         const ir_exec_freq    *exec_freq = env->exec_freq;
908         spill_info_t          *si;
909         ir_nodeset_iterator_t  iter;
910         ir_node               *node;
911
912         BE_TIMER_PUSH(t_ra_spill_apply);
913
914         /* create all PhiMs first; this is needed so that Phis hanging on
915            spilled Phis work correctly */
916         foreach_ir_nodeset(&env->mem_phis, node, iter) {
917                 spill_info_t *info = get_spillinfo(env, node);
918                 spill_node(env, info);
919         }
920
921         /* process each spilled node */
922         for (si = set_first(env->spills); si; si = set_next(env->spills)) {
923                 reloader_t *rld;
924                 ir_node  *to_spill        = si->to_spill;
925                 ir_mode  *mode            = get_irn_mode(to_spill);
926                 ir_node **copies          = NEW_ARR_F(ir_node*, 0);
927                 double    all_remat_costs = 0; /* costs when we would remat all reloaders */
928                 int       force_remat     = 0;
929
930                 DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
931
932                 determine_spill_costs(env, si);
933
934                 /* determine possibility of rematerialisations */
935                 if(be_do_remats) {
936                         /* calculate cost savings for each individual value when it would
937                            be rematted instead of reloaded */
938                         for (rld = si->reloaders; rld != NULL; rld = rld->next) {
939                                 double   freq;
940                                 int      remat_cost;
941                                 int      remat_cost_delta;
942                                 ir_node *block;
943                                 ir_node *reloader = rld->reloader;
944
945                                 if(rld->rematted_node != NULL) {
946                                         DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
947                                              rld->rematted_node, reloader));
948                                         continue;
949                                 }
950                                 if(rld->remat_cost_delta >= REMAT_COST_INFINITE) {
951                                         DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
952                                              reloader));
953                                         all_remat_costs = REMAT_COST_INFINITE;
954                                         continue;
955                                 }
956
957                                 remat_cost  = check_remat_conditions_costs(env, to_spill,
958                                                                            reloader, 0);
959                                 if(remat_cost >= REMAT_COST_INFINITE) {
960                                         DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
961                                              reloader));
962                                         rld->remat_cost_delta = REMAT_COST_INFINITE;
963                                         all_remat_costs       = REMAT_COST_INFINITE;
964                                         continue;
965                                 }
966
967                                 remat_cost_delta      = remat_cost - env->reload_cost;
968                                 rld->remat_cost_delta = remat_cost_delta;
969                                 block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
970                                 freq                  = get_block_execfreq(exec_freq, block);
971                                 all_remat_costs      += remat_cost_delta * freq;
972                                 DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
973                                      "%d (rel %f)\n", reloader, remat_cost_delta,
974                                      remat_cost_delta * freq));
975                         }
976                         if(all_remat_costs < REMAT_COST_INFINITE) {
977                                 /* we don't need the costs for the spill if we can remat
978                                    all reloaders */
979                                 all_remat_costs -= si->spill_costs;
980
981                                 DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
982                                      env->spill_cost, si->spill_costs));
983                         }
984
985                         if(all_remat_costs < 0) {
986                                 DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
987                                      all_remat_costs));
988                                 force_remat = 1;
989                         }
990                 }
991
992                 /* go through all reloads for this spill */
993                 for (rld = si->reloaders; rld != NULL; rld = rld->next) {
994                         ir_node *copy; /* a reload is a "copy" of the original value */
995
996                         if (rld->rematted_node != NULL) {
997                                 copy = rld->rematted_node;
998                                 sched_add_before(rld->reloader, copy);
999                         } else if (be_do_remats &&
1000                                         (force_remat || rld->remat_cost_delta < 0)) {
1001                                 copy = do_remat(env, to_spill, rld->reloader);
1002                         } else {
1003                                 /* make sure we have a spill */
1004                                 spill_node(env, si);
1005
1006                                 /* create a reload; use the first spill for now, the SSA
1007                                  * reconstruction for memory comes below */
1008                                 assert(si->spills != NULL);
1009                                 copy = be_reload(si->reload_cls, rld->reloader, mode,
1010                                                  si->spills->spill);
1011 #ifdef FIRM_STATISTICS
1012                                 env->reload_count++;
1013 #endif
1014                         }
1015
1016                         DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
1017                              copy, to_spill, rld->reloader));
1018                         ARR_APP1(ir_node*, copies, copy);
1019                 }
1020
1021                 /* if we had any reloads or remats, then we need to reconstruct the
1022                  * SSA form for the spilled value */
1023                 if (ARR_LEN(copies) > 0) {
1024                         be_ssa_construction_env_t senv;
1025                         /* be_lv_t *lv = be_get_birg_liveness(env->birg); */
1026
1027                         be_ssa_construction_init(&senv, env->birg);
1028                         be_ssa_construction_add_copy(&senv, to_spill);
1029                         be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
1030                         be_ssa_construction_fix_users(&senv, to_spill);
1031
1032 #if 0
1033                         /* no need to enable this as long as we invalidate liveness
1034                            after this function... */
1035                         be_ssa_construction_update_liveness_phis(&senv);
1036                         be_liveness_update(to_spill);
1037                         len = ARR_LEN(copies);
1038                         for(i = 0; i < len; ++i) {
1039                                 be_liveness_update(lv, copies[i]);
1040                         }
1041 #endif
1042                         be_ssa_construction_destroy(&senv);
1043                 }
1044                 /* need to reconstruct SSA form if we had multiple spills */
1045                 if (si->spills != NULL && si->spills->next != NULL) {
1046                         spill_t *spill;
1047                         int      spill_count = 0;
1048
1049                         be_ssa_construction_env_t senv;
1050
1051                         be_ssa_construction_init(&senv, env->birg);
1052                         spill = si->spills;
1053                         for( ; spill != NULL; spill = spill->next) {
1054                                 /* maybe we rematerialized the value and need no spill */
1055                                 if(spill->spill == NULL)
1056                                         continue;
1057                                 be_ssa_construction_add_copy(&senv, spill->spill);
1058                                 spill_count++;
1059                         }
1060                         if(spill_count > 1) {
1061                                 /* all reloads are attached to the first spill, fix them now */
1062                                 be_ssa_construction_fix_users(&senv, si->spills->spill);
1063                         }
1064
1065                         be_ssa_construction_destroy(&senv);
1066                 }
1067
1068                 DEL_ARR_F(copies);
1069                 si->reloaders = NULL;
1070         }
1071
1072         stat_ev_dbl("spill_spills", env->spill_count);
1073         stat_ev_dbl("spill_reloads", env->reload_count);
1074         stat_ev_dbl("spill_remats", env->remat_count);
1075         stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);
1076
1077         /* Matze: In theory be_ssa_construction should take care of the liveness...
1078          * try to disable this again in the future */
1079         be_liveness_invalidate(env->birg->lv);
1080
1081         be_remove_dead_nodes_from_schedule(env->birg);
1082
1083         BE_TIMER_POP(t_ra_spill_apply);
1084 }
1085
1086 void be_init_spill(void)
1087 {
1088         FIRM_DBG_REGISTER(dbg, "firm.be.spill");
1089 }
1090
1091 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill);