/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file        bespillutil.c
 * @brief       implementation of the spill/reload placement abstraction layer
 * @author      Daniel Grund, Sebastian Hack, Matthias Braun
 * @date        29.09.2005
 * @version     $Id$
 */
#include "config.h"

#include <stdlib.h>
#include <stdbool.h>

#include "pset.h"
#include "irnode_t.h"
#include "ircons_t.h"
#include "iredges_t.h"
#include "irbackedge_t.h"
#include "irprintf.h"
#include "ident_t.h"
#include "type_t.h"
#include "entity_t.h"
#include "debug.h"
#include "irgwalk.h"
#include "array.h"
#include "pdeq.h"
#include "execfreq.h"
#include "irnodeset.h"
#include "error.h"

#include "bearch.h"
#include "belive_t.h"
#include "besched.h"
#include "bespill.h"
#include "bespillutil.h"
#include "benode.h"
#include "bechordal_t.h"
#include "bestatevent.h"
#include "bessaconstr.h"
#include "beirg.h"
#include "beirgmod.h"
#include "beintlive_t.h"
#include "bemodule.h"
#include "be_t.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

#define REMAT_COST_INFINITE  1000

typedef struct reloader_t reloader_t;
struct reloader_t {
        reloader_t *next;
        ir_node    *can_spill_after;
        ir_node    *reloader;
        ir_node    *rematted_node;
        int         remat_cost_delta; /**< costs needed for rematerialization,
                                           compared to placing a reload */
};

typedef struct spill_t spill_t;
struct spill_t {
        spill_t *next;
        ir_node *after;  /**< spill has to be placed after this node (or earlier) */
        ir_node *spill;
};

typedef struct spill_info_t spill_info_t;
struct spill_info_t {
        ir_node    *to_spill;  /**< the value that should get spilled */
        reloader_t *reloaders; /**< list of places where the value should get
                                    reloaded */
        spill_t    *spills;    /**< list of latest places where spill must be
                                    placed */
        double      spill_costs; /**< costs needed for spilling the value */
        const arch_register_class_t *reload_cls; /**< the register class in which the
                                                       reload should be placed */
};

struct spill_env_t {
        const arch_env_t *arch_env;
        ir_graph         *irg;
        struct obstack    obst;
        int               spill_cost;     /**< the cost of a single spill node */
        int               reload_cost;    /**< the cost of a reload node */
        set              *spills;         /**< all spill_info_t's, which must be
                                               placed */
        ir_nodeset_t      mem_phis;       /**< set of all spilled phis. */
        ir_exec_freq     *exec_freq;

#ifdef FIRM_STATISTICS
        unsigned          spill_count;
        unsigned          reload_count;
        unsigned          remat_count;
        unsigned          spilled_phi_count;
#endif
};

/**
 * Compare two spill infos.
 */
static int cmp_spillinfo(const void *x, const void *y, size_t size)
{
        const spill_info_t *xx = (const spill_info_t*)x;
        const spill_info_t *yy = (const spill_info_t*)y;
        (void) size;

        return xx->to_spill != yy->to_spill;
}

/**
 * Returns spill info for a specific value (the value that is to be spilled)
 */
static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
{
        spill_info_t info, *res;
        int hash = hash_irn(value);

        info.to_spill = value;
        res = (spill_info_t*)set_find(env->spills, &info, sizeof(info), hash);

        if (res == NULL) {
                info.reloaders   = NULL;
                info.spills      = NULL;
                info.spill_costs = -1;
                info.reload_cls  = NULL;
                res = (spill_info_t*)set_insert(env->spills, &info, sizeof(info), hash);
        }

        return res;
}

spill_env_t *be_new_spill_env(ir_graph *irg)
{
        const arch_env_t *arch_env = be_get_irg_arch_env(irg);

        spill_env_t *env = XMALLOC(spill_env_t);
        env->spills         = new_set(cmp_spillinfo, 1024);
        env->irg            = irg;
        env->arch_env       = arch_env;
        ir_nodeset_init(&env->mem_phis);
        env->spill_cost     = arch_env->spill_cost;
        env->reload_cost    = arch_env->reload_cost;
        env->exec_freq      = be_get_irg_exec_freq(irg);
        obstack_init(&env->obst);

#ifdef FIRM_STATISTICS
        env->spill_count       = 0;
        env->reload_count      = 0;
        env->remat_count       = 0;
        env->spilled_phi_count = 0;
#endif

        return env;
}

void be_delete_spill_env(spill_env_t *env)
{
        del_set(env->spills);
        ir_nodeset_destroy(&env->mem_phis);
        obstack_free(&env->obst, NULL);
        free(env);
}
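
/* Typical usage sketch (illustrative only; the real driver code lives in the
 * spill algorithms, e.g. bespillbelady.c, and may differ in detail):
 *
 *     spill_env_t *senv = be_new_spill_env(irg);
 *     // the spill algorithm decides which values need a reload where:
 *     be_add_reload(senv, value, before, cls, 1);
 *     // optionally request explicit spill positions / spilled Phis:
 *     be_add_spill(senv, value, after);
 *     be_spill_phi(senv, some_phi);
 *     // finally materialize all spill/reload/remat nodes at once:
 *     be_insert_spills_reloads(senv);
 *     be_delete_spill_env(senv);
 *
 * Here `irg`, `value`, `before`, `after`, `cls` and `some_phi` are assumed to
 * be provided by the register allocator. */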

/*
 *  ____  _                  ____      _                 _
 * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
 * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
 * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
 * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
 *
 */

void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
{
        spill_info_t  *spill_info = get_spillinfo(env, to_spill);
        spill_t       *spill;
        spill_t       *s;
        spill_t       *last;

        assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
        DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));

        /* Just for safety make sure that we do not insert the spill in front of a phi */
        assert(!is_Phi(sched_next(after)));

        /* spills that are dominated by others are not needed */
        last = NULL;
        s    = spill_info->spills;
        for ( ; s != NULL; s = s->next) {
                /* no need to add this spill if it is dominated by another */
                if (value_dominates(s->after, after)) {
                        DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after));
                        return;
                }
                /* remove spills that we dominate */
                if (value_dominates(after, s->after)) {
                        DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after));
                        if (last != NULL) {
                                last->next         = s->next;
                        } else {
                                spill_info->spills = s->next;
                        }
                } else {
                        last = s;
                }
        }

        spill         = OALLOC(&env->obst, spill_t);
        spill->after  = after;
        spill->next   = spill_info->spills;
        spill->spill  = NULL;

        spill_info->spills = spill;
}
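
/* Example of the pruning above (illustrative): assume a spill of `v` is
 * already recorded after node A. If A dominates the new position B, every
 * path reaching B has already passed A, so the value is in memory at B and
 * the new request is dropped. If instead B dominates A, the spill at B
 * subsumes the one at A, which is unlinked from the list. Spill positions on
 * incomparable paths are kept side by side. */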

void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
                ir_node *can_spill_after, const arch_register_class_t *reload_cls,
                int allow_remat)
{
        spill_info_t  *info;
        reloader_t    *rel;

        assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));

        info = get_spillinfo(env, to_spill);

        if (is_Phi(to_spill)) {
                int i, arity;

                /* create spillinfos for the phi arguments */
                for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
                        ir_node *arg = get_irn_n(to_spill, i);
                        get_spillinfo(env, arg);
                }
        }

        assert(!is_Proj(before) && !be_is_Keep(before));

        /* put reload into list */
        rel                   = OALLOC(&env->obst, reloader_t);
        rel->next             = info->reloaders;
        rel->reloader         = before;
        rel->rematted_node    = NULL;
        rel->can_spill_after  = can_spill_after;
        rel->remat_cost_delta = allow_remat ? 0 : REMAT_COST_INFINITE;

        info->reloaders  = rel;
        assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
        info->reload_cls = reload_cls;

        DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
                to_spill, before, allow_remat ? "" : " not"));
}

void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
                   const arch_register_class_t *reload_cls, int allow_remat)
{
        be_add_reload2(senv, to_spill, before, to_spill, reload_cls, allow_remat);
}

ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
{
        ir_node *last = sched_last(block);

        /* we might have keeps behind the jump... */
        while (be_is_Keep(last)) {
                last = sched_prev(last);
                assert(!sched_is_end(last));
        }

        assert(is_cfop(last));

        /* add the reload before the (cond-)jump */
        return last;
}

/**
 * Determine the final spill position: it must be after all Phi, Keep and
 * CopyKeep nodes following the definition.
 */
static ir_node *determine_spill_point(ir_node *node)
{
        node = skip_Proj(node);
        while (true) {
                ir_node *next = sched_next(node);
                if (!is_Phi(next) && !be_is_Keep(next) && !be_is_CopyKeep(next))
                        break;
                node = next;
        }
        return node;
}

/**
 * Returns the point at which you can insert a node that should be executed
 * before block @p block when coming from pred @p pos.
 */
static ir_node *get_block_insertion_point(ir_node *block, int pos)
{
        ir_node *predblock;

        /* simply add the reload to the beginning of the block if we only have 1
         * predecessor. We don't need to check for phis as there can't be any in a
         * block with only 1 pred. */
        if (get_Block_n_cfgpreds(block) == 1) {
                assert(!is_Phi(sched_first(block)));
                return sched_first(block);
        }

        /* We have to reload the value in pred-block */
        predblock = get_Block_cfgpred_block(block, pos);
        return be_get_end_of_block_insertion_point(predblock);
}

void be_add_reload_at_end(spill_env_t *env, ir_node *to_spill,
                          const ir_node *block,
                          const arch_register_class_t *reload_cls,
                          int allow_remat)
{
        ir_node *before = be_get_end_of_block_insertion_point(block);
        be_add_reload(env, to_spill, before, reload_cls, allow_remat);
}

void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
                           int pos, const arch_register_class_t *reload_cls,
                           int allow_remat)
{
        ir_node *before = get_block_insertion_point(block, pos);
        be_add_reload(env, to_spill, before, reload_cls, allow_remat);
}

void be_spill_phi(spill_env_t *env, ir_node *node)
{
        ir_node *block;
        int i, arity;

        assert(is_Phi(node));

        ir_nodeset_insert(&env->mem_phis, node);

        /* create spills for the phi arguments */
        block = get_nodes_block(node);
        for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
                ir_node *arg = get_irn_n(node, i);
                ir_node *insert;

                /* some backends have virtual noreg/unknown nodes that are not scheduled
                 * and simply always available. */
                if (!sched_is_scheduled(arg)) {
                        ir_node *pred_block = get_Block_cfgpred_block(block, i);
                        insert = be_get_end_of_block_insertion_point(pred_block);
                        insert = sched_prev(insert);
                } else {
                        insert = determine_spill_point(arg);
                }

                be_add_spill(env, arg, insert);
        }
}

/*
 *   ____                _         ____        _ _ _
 *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
 * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
 * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
 *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
 *                                      |_|
 */

static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);

/**
 * Creates the spill node(s) for a value.
 *
 * @param env        the spill environment
 * @param spillinfo  the spill info of the value that should be spilled
 */
static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_node       *to_spill = spillinfo->to_spill;
        const ir_node *insn     = skip_Proj_const(to_spill);
        spill_t *spill;

        /* determine_spill_costs must have been run before */
        assert(spillinfo->spill_costs >= 0);

        /* some backends have virtual noreg/unknown nodes that are not scheduled
         * and simply always available. */
        if (!sched_is_scheduled(insn)) {
                /* override spillinfos or create a new one */
                ir_graph *irg = get_irn_irg(to_spill);
                spillinfo->spills->spill = new_r_NoMem(irg);
                DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
                return;
        }

        DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
        spill = spillinfo->spills;
        for ( ; spill != NULL; spill = spill->next) {
                ir_node *after = spill->after;
                ir_node *block = get_block(after);

                after = determine_spill_point(after);

                spill->spill = be_spill(block, to_spill);
                sched_add_after(skip_Proj(after), spill->spill);
                DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
#ifdef FIRM_STATISTICS
                env->spill_count++;
#endif
        }
        DBG((dbg, LEVEL_1, "\n"));
}

static void spill_node(spill_env_t *env, spill_info_t *spillinfo);

/**
 * If the first usage of a Phi result would be out of memory,
 * there is no sense in allocating a register for it.
 * Thus we spill it and all its operands to the same spill slot.
 * Thereby the data Phi becomes a memory Phi (PhiM).
 *
 * @param env        the spill environment
 * @param spillinfo  the spill info of the Phi node that should be spilled
 */
static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_graph *irg   = env->irg;
        ir_node  *phi   = spillinfo->to_spill;
        ir_node  *block = get_nodes_block(phi);
        ir_node  *unknown;
        ir_node **ins;
        spill_t  *spill;
        int       i;
        int       arity;

        assert(is_Phi(phi));
        assert(!get_opt_cse());
        DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));

        /* build a new PhiM */
        arity   = get_irn_arity(phi);
        ins     = ALLOCAN(ir_node*, arity);
        unknown = new_r_Unknown(irg, mode_M);
        for (i = 0; i < arity; ++i) {
                ins[i] = unknown;
        }

        /* override or replace spills list... */
        spill         = OALLOC(&env->obst, spill_t);
        spill->after  = determine_spill_point(phi);
        spill->spill  = be_new_Phi(block, arity, ins, mode_M, NULL);
        spill->next   = NULL;
        sched_add_after(block, spill->spill);

        spillinfo->spills = spill;
#ifdef FIRM_STATISTICS
        env->spilled_phi_count++;
#endif

        for (i = 0; i < arity; ++i) {
                ir_node      *arg      = get_irn_n(phi, i);
                spill_info_t *arg_info = get_spillinfo(env, arg);

                determine_spill_costs(env, arg_info);
                spill_node(env, arg_info);

                set_irn_n(spill->spill, i, arg_info->spills->spill);
        }
        DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi,
             spill->spill));
}

/**
 * Spill a node.
 *
 * @param env        the spill environment
 * @param spillinfo  the spill info of the node that should be spilled
 */
static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_node *to_spill;

        /* node is already spilled */
        if (spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
                return;

        to_spill = spillinfo->to_spill;

        if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
                spill_phi(env, spillinfo);
        } else {
                spill_irn(env, spillinfo);
        }
}

/*
 *
 *  ____                      _            _       _ _
 * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
 * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
 * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
 * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
 *
 */

/**
 * Tests whether value @p arg is available before node @p reloader
 * @returns 1 if value is available, 0 otherwise
 */
static int is_value_available(spill_env_t *env, const ir_node *arg,
                              const ir_node *reloader)
{
        if (is_Unknown(arg) || is_NoMem(arg))
                return 1;

        if (be_is_Spill(skip_Proj_const(arg)))
                return 1;

        if (arg == get_irg_frame(env->irg))
                return 1;

        (void)reloader;

        if (get_irn_mode(arg) == mode_T)
                return 0;

        /*
         * Ignore registers are always available
         */
        if (arch_irn_is_ignore(arg))
                return 1;

        return 0;
}

/**
 * Check if a node is rematerializable. This tests for the following conditions:
 *
 * - The node itself is rematerializable
 * - All arguments of the node are available or rematerializable themselves
 * - The costs for the rematerialization operation are less than or equal to a
 *   limit
 *
 * Returns the costs needed for rematerialization or something
 * >= REMAT_COST_INFINITE if remat is not possible.
 */
static int check_remat_conditions_costs(spill_env_t *env,
                const ir_node *spilled, const ir_node *reloader, int parentcosts)
{
        int i, arity;
        int argremats;
        int costs = 0;
        const ir_node *insn = skip_Proj_const(spilled);

        assert(!be_is_Spill(insn));
        if (!arch_irn_is(insn, rematerializable))
                return REMAT_COST_INFINITE;

        if (be_is_Reload(insn)) {
                costs += 2;
        } else {
                costs += arch_get_op_estimated_cost(insn);
        }
        if (parentcosts + costs >= env->reload_cost + env->spill_cost) {
                return REMAT_COST_INFINITE;
        }
        /* never rematerialize a node which modifies the flags.
         * (would be better to test whether the flags are actually live at point
         * reloader...)
         */
        if (arch_irn_is(insn, modify_flags)) {
                return REMAT_COST_INFINITE;
        }

        argremats = 0;
        for (i = 0, arity = get_irn_arity(insn); i < arity; ++i) {
                ir_node *arg = get_irn_n(insn, i);

                if (is_value_available(env, arg, reloader))
                        continue;

                /* we have to rematerialize the argument as well */
                ++argremats;
                if (argremats > 1) {
                        /* we only support rematerializing 1 argument at the moment,
                         * as multiple arguments could increase register pressure */
                        return REMAT_COST_INFINITE;
                }

                costs += check_remat_conditions_costs(env, arg, reloader,
                                                      parentcosts + costs);
                if (parentcosts + costs >= env->reload_cost + env->spill_cost)
                        return REMAT_COST_INFINITE;
        }

        return costs;
}
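
/* Worked example for the cost cut-off above (the numbers are hypothetical and
 * depend on the target's arch_env settings): with spill_cost = 7 and
 * reload_cost = 5 the budget is 12. Rematerializing an add with estimated
 * cost 1 whose single unavailable operand is itself a remat of cost 1
 * accumulates 1 + 1 = 2 < 12 and is accepted; a chain whose accumulated cost
 * reaches 12 or more returns REMAT_COST_INFINITE, i.e. spilling and reloading
 * is preferred. */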

/**
 * Re-materialize a node.
 *
 * @param env       the spill environment
 * @param spilled   the node that was spilled
 * @param reloader  an irn that requires a reload
 */
static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
{
        int i, arity;
        ir_node *res;
        ir_node *bl;
        ir_node **ins;

        if (is_Block(reloader)) {
                bl = reloader;
        } else {
                bl = get_nodes_block(reloader);
        }

        ins = ALLOCAN(ir_node*, get_irn_arity(spilled));
        for (i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
                ir_node *arg = get_irn_n(spilled, i);

                if (is_value_available(env, arg, reloader)) {
                        ins[i] = arg;
                } else {
                        ins[i] = do_remat(env, arg, reloader);
#ifdef FIRM_STATISTICS
                        /* don't count the recursive call as remat */
                        env->remat_count--;
#endif
                }
        }

        /* create a copy of the node */
        res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
                          get_irn_op(spilled), get_irn_mode(spilled),
                          get_irn_arity(spilled), ins);
        copy_node_attr(env->irg, spilled, res);
        arch_env_mark_remat(env->arch_env, res);

        DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));

        if (! is_Proj(res)) {
                /* insert in schedule */
                sched_reset(res);
                sched_add_before(reloader, res);
#ifdef FIRM_STATISTICS
                env->remat_count++;
#endif
        }

        return res;
}

double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
        ir_node *block = get_nodes_block(before);
        double   freq  = get_block_execfreq(env->exec_freq, block);
        (void) to_spill;

        return env->spill_cost * freq;
}

unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
                                       const ir_node *before)
{
        if (be_do_remats) {
                /* is the node rematerializable? */
                unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
                if (costs < (unsigned) env->reload_cost)
                        return costs;
        }

        return env->reload_cost;
}

double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
        ir_node      *block = get_nodes_block(before);
        double        freq  = get_block_execfreq(env->exec_freq, block);

        if (be_do_remats) {
                /* is the node rematerializable? */
                int costs = check_remat_conditions_costs(env, to_spill, before, 0);
                if (costs < env->reload_cost)
                        return costs * freq;
        }

        return env->reload_cost * freq;
}

int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat,
                           const ir_node *before)
{
        return check_remat_conditions_costs(env, to_remat, before, 0) < REMAT_COST_INFINITE;
}

double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
                                   ir_node *block, int pos)
{
        ir_node *before = get_block_insertion_point(block, pos);
        return be_get_reload_costs(env, to_spill, before);
}
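
/* Example of the frequency weighting used by the queries above (the execution
 * frequencies are hypothetical): a reload with reload_cost = 5 placed in a
 * loop block with execution frequency 10.0 costs 50.0, while the same reload
 * in a block with frequency 1.0 costs 5.0. If a remat is possible and its
 * unweighted cost (say 1) is below reload_cost, be_get_reload_costs returns
 * 1 * freq instead of 5 * freq. */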

/*
 *  ___                     _     ____      _                 _
 * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
 *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
 *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
 * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
 *
 */

/**
 * Analyzes how to best spill a node and determines the costs for that.
 */
static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
{
        ir_node       *to_spill = spillinfo->to_spill;
        const ir_node *insn     = skip_Proj_const(to_spill);
        ir_node       *spill_block;
        spill_t       *spill;
        double         spill_execfreq;

        /* already calculated? */
        if (spillinfo->spill_costs >= 0)
                return;

        assert(!arch_irn_is(insn, dont_spill));
        assert(!be_is_Reload(insn));

        /* some backends have virtual noreg/unknown nodes that are not scheduled
         * and simply always available.
         * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
         * predecessor (of a PhiM) but this test might match other things too...
         */
        if (!sched_is_scheduled(insn)) {
                ir_graph *irg = get_irn_irg(to_spill);
                /* override spillinfos or create a new one */
                spill_t *spill = OALLOC(&env->obst, spill_t);
                spill->after = NULL;
                spill->next  = NULL;
                spill->spill = new_r_NoMem(irg);

                spillinfo->spills      = spill;
                spillinfo->spill_costs = 0;

                DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
                return;
        }

        spill_block    = get_nodes_block(insn);
        spill_execfreq = get_block_execfreq(env->exec_freq, spill_block);

        if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
                /* TODO calculate correct costs...
                 * (though we can't remat this node anyway so no big problem) */
                spillinfo->spill_costs = env->spill_cost * spill_execfreq;
                return;
        }

        if (spillinfo->spills != NULL) {
                spill_t *s;
                double   spills_execfreq;

                /* calculate sum of execution frequencies of individual spills */
                spills_execfreq = 0;
                s               = spillinfo->spills;
                for ( ; s != NULL; s = s->next) {
                        ir_node *spill_block = get_block(s->after);
                        double   freq = get_block_execfreq(env->exec_freq, spill_block);

                        spills_execfreq += freq;
                }

                DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
                    spills_execfreq * env->spill_cost,
                    spill_execfreq * env->spill_cost));

                /* multi-/latespill is advantageous -> return */
                if (spills_execfreq < spill_execfreq) {
                        DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
                        spillinfo->spill_costs = spills_execfreq * env->spill_cost;
                        return;
                }
        }

        /* override spillinfos or create a new one */
        spill        = OALLOC(&env->obst, spill_t);
        spill->after = determine_spill_point(to_spill);
        spill->next  = NULL;
        spill->spill = NULL;

        spillinfo->spills      = spill;
        spillinfo->spill_costs = spill_execfreq * env->spill_cost;
        DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
}
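
/* Worked example for the late-spill decision above (the frequencies are
 * hypothetical): a value defined in a loop body with execution frequency
 * 100.0, whose explicit spill positions (added via be_add_spill) all lie in
 * blocks with a summed frequency of 2.0. Spilling right after the definition
 * would cost 100.0 * spill_cost, the late spills only 2.0 * spill_cost, so
 * the existing spill list is kept. If the comparison goes the other way, the
 * list is replaced by a single spill directly after the definition
 * (determine_spill_point). */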

void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
{
        const spill_info_t *si = get_spillinfo(env, irn);
        ir_node *start_block   = get_irg_start_block(get_irn_irg(irn));
        int n_blocks           = get_Block_dom_max_subtree_pre_num(start_block);
        bitset_t *reloads      = bitset_alloca(n_blocks);
        reloader_t *r;
        spill_t *s;

        if (si == NULL)
                return;

        /* Fill the bitset with the dominance pre-order numbers
         * of the blocks the reloads are located in. */
        for (r = si->reloaders; r != NULL; r = r->next) {
                ir_node *bl = get_nodes_block(r->reloader);
                bitset_set(reloads, get_Block_dom_tree_pre_num(bl));
        }

        /* Now, cancel out all the blocks that are dominated by each spill.
         * If the bitset is not empty after that, we have reloads that are
         * not dominated by any spill. */
        for (s = si->spills; s != NULL; s = s->next) {
                ir_node *bl = get_nodes_block(s->after);
                int start   = get_Block_dom_tree_pre_num(bl);
                int end     = get_Block_dom_max_subtree_pre_num(bl);

                bitset_clear_range(reloads, start, end);
        }

        if (!bitset_is_empty(reloads))
                be_add_spill(env, si->to_spill, si->to_spill);
}
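
/* Note on the bitset trick above: in a dominator-tree pre-order numbering the
 * blocks dominated by a block B form one contiguous range, starting at B's
 * own number and ending at the maximum number in B's subtree. Clearing that
 * range therefore removes exactly the reload blocks dominated by the spill in
 * B; any bit still set afterwards marks a reload no spill dominates, so a
 * spill directly after the definition (be_add_spill with the value itself as
 * insertion point) is requested as a safe fallback. */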

void be_insert_spills_reloads(spill_env_t *env)
{
        const ir_exec_freq    *exec_freq = env->exec_freq;
        spill_info_t          *si;
        ir_nodeset_iterator_t  iter;
        ir_node               *node;

        be_timer_push(T_RA_SPILL_APPLY);

        /* create all PhiMs first; this is needed so that Phis hanging on
           spilled Phis work correctly */
        foreach_ir_nodeset(&env->mem_phis, node, iter) {
                spill_info_t *info = get_spillinfo(env, node);
                spill_node(env, info);
        }

        /* process each spilled node */
        foreach_set(env->spills, spill_info_t*, si) {
                reloader_t *rld;
                ir_node  *to_spill        = si->to_spill;
                ir_mode  *mode            = get_irn_mode(to_spill);
                ir_node **copies          = NEW_ARR_F(ir_node*, 0);
                double    all_remat_costs = 0; /* costs when we would remat all nodes */
                int       force_remat     = 0;

                DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));

                determine_spill_costs(env, si);

                /* determine the possibility of rematerializations */
                if (be_do_remats) {
                        /* calculate cost savings for each individual value when it would
                           be rematted instead of reloaded */
                        for (rld = si->reloaders; rld != NULL; rld = rld->next) {
                                double   freq;
                                int      remat_cost;
                                int      remat_cost_delta;
                                ir_node *block;
                                ir_node *reloader = rld->reloader;

                                if (rld->rematted_node != NULL) {
                                        DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
                                             rld->rematted_node, reloader));
                                        continue;
                                }
                                if (rld->remat_cost_delta >= REMAT_COST_INFINITE) {
                                        DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
                                             reloader));
                                        all_remat_costs = REMAT_COST_INFINITE;
                                        continue;
                                }

                                remat_cost  = check_remat_conditions_costs(env, to_spill,
                                                                           reloader, 0);
                                if (remat_cost >= REMAT_COST_INFINITE) {
                                        DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
                                             reloader));
                                        rld->remat_cost_delta = REMAT_COST_INFINITE;
                                        all_remat_costs       = REMAT_COST_INFINITE;
                                        continue;
                                }

                                remat_cost_delta      = remat_cost - env->reload_cost;
                                rld->remat_cost_delta = remat_cost_delta;
                                block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
                                freq                  = get_block_execfreq(exec_freq, block);
                                all_remat_costs      += remat_cost_delta * freq;
                                DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
                                     "%d (rel %f)\n", reloader, remat_cost_delta,
                                     remat_cost_delta * freq));
                        }
                        if (all_remat_costs < REMAT_COST_INFINITE) {
                                /* we don't need the costs for the spill if we can remat
                                   all reloaders */
                                all_remat_costs -= si->spill_costs;

                                DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
                                     env->spill_cost, si->spill_costs));
                        }

                        if (all_remat_costs < 0) {
                                DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
                                     all_remat_costs));
                                force_remat = 1;
                        }
                }

                /* go through all reloads for this spill */
                for (rld = si->reloaders; rld != NULL; rld = rld->next) {
                        ir_node *copy; /* a reload is a "copy" of the original value */

                        if (rld->rematted_node != NULL) {
                                copy = rld->rematted_node;
                                sched_add_before(rld->reloader, copy);
                        } else if (be_do_remats &&
                                        (force_remat || rld->remat_cost_delta < 0)) {
                                copy = do_remat(env, to_spill, rld->reloader);
                        } else {
                                /* make sure we have a spill */
                                spill_node(env, si);

                                /* create a reload; use the first spill for now, the SSA
                                 * reconstruction for memory comes below */
                                assert(si->spills != NULL);
                                copy = be_reload(si->reload_cls, rld->reloader, mode,
                                                 si->spills->spill);
#ifdef FIRM_STATISTICS
                                env->reload_count++;
#endif
                        }

                        DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
                             copy, to_spill, rld->reloader));
                        ARR_APP1(ir_node*, copies, copy);
                }

                /* if we had any reloads or remats, then we need to reconstruct the
                 * SSA form for the spilled value */
                if (ARR_LEN(copies) > 0) {
                        be_ssa_construction_env_t senv;
                        /* be_lv_t *lv = be_get_irg_liveness(env->irg); */

                        be_ssa_construction_init(&senv, env->irg);
                        be_ssa_construction_add_copy(&senv, to_spill);
                        be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
                        be_ssa_construction_fix_users(&senv, to_spill);

#if 0
                        /* no need to enable this as long as we invalidate liveness
                           after this function... */
                        be_ssa_construction_update_liveness_phis(&senv);
                        be_liveness_update(to_spill);
                        len = ARR_LEN(copies);
                        for (i = 0; i < len; ++i) {
                                be_liveness_update(lv, copies[i]);
                        }
#endif
                        be_ssa_construction_destroy(&senv);
                }
                /* need to reconstruct SSA form if we had multiple spills */
                if (si->spills != NULL && si->spills->next != NULL) {
                        spill_t *spill;
                        int      spill_count = 0;

                        be_ssa_construction_env_t senv;

                        be_ssa_construction_init(&senv, env->irg);
                        spill = si->spills;
                        for ( ; spill != NULL; spill = spill->next) {
                                /* maybe we rematerialized the value and need no spill */
                                if (spill->spill == NULL)
                                        continue;
                                be_ssa_construction_add_copy(&senv, spill->spill);
                                spill_count++;
                        }
                        if (spill_count > 1) {
                                /* all reloads are attached to the first spill, fix them now */
                                be_ssa_construction_fix_users(&senv, si->spills->spill);
                        }

                        be_ssa_construction_destroy(&senv);
                }

                DEL_ARR_F(copies);
                si->reloaders = NULL;
        }

#ifdef FIRM_STATISTICS
        stat_ev_dbl("spill_spills", env->spill_count);
        stat_ev_dbl("spill_reloads", env->reload_count);
        stat_ev_dbl("spill_remats", env->remat_count);
        stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);
#endif

        /* Matze: In theory be_ssa_construction should take care of the liveness...
         * try to disable this again in the future */
        be_liveness_invalidate(be_get_irg_liveness(env->irg));

        be_remove_dead_nodes_from_schedule(env->irg);

        be_timer_pop(T_RA_SPILL_APPLY);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill);
void be_init_spill(void)
{
        FIRM_DBG_REGISTER(dbg, "firm.be.spill");
}