bespillutil: Remove the write-only attribute can_spill_after from struct reloader_t.
[libfirm] / ir / be / bespillutil.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       implementation of the spill/reload placement abstraction layer
23  * @author      Daniel Grund, Sebastian Hack, Matthias Braun
24  * @date        29.09.2005
25  */
26 #include "config.h"
27
28 #include <stdlib.h>
29 #include <stdbool.h>
30
31 #include "pset.h"
32 #include "irnode_t.h"
33 #include "ircons_t.h"
34 #include "iredges_t.h"
35 #include "irbackedge_t.h"
36 #include "irprintf.h"
37 #include "ident_t.h"
38 #include "type_t.h"
39 #include "entity_t.h"
40 #include "debug.h"
41 #include "irgwalk.h"
42 #include "array.h"
43 #include "pdeq.h"
44 #include "execfreq.h"
45 #include "irnodeset.h"
46 #include "error.h"
47
48 #include "bearch.h"
49 #include "belive_t.h"
50 #include "besched.h"
51 #include "bespill.h"
52 #include "bespillutil.h"
53 #include "belive_t.h"
54 #include "benode.h"
55 #include "bechordal_t.h"
56 #include "statev_t.h"
57 #include "bessaconstr.h"
58 #include "beirg.h"
59 #include "beirgmod.h"
60 #include "beintlive_t.h"
61 #include "bemodule.h"
62 #include "be_t.h"
63
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
65
66 #define REMAT_COST_INFINITE  1000
67
68 typedef struct reloader_t reloader_t;
69 struct reloader_t {
70         reloader_t *next;
71         ir_node    *reloader;
72         ir_node    *rematted_node;
73         int         remat_cost_delta; /**< costs needed for rematerialization,
74                                             compared to placing a reload */
75 };
76
77 typedef struct spill_t spill_t;
78 struct spill_t {
79         spill_t *next;
80         ir_node *after;  /**< spill has to be placed after this node (or earlier) */
81         ir_node *spill;
82 };
83
84 typedef struct spill_info_t spill_info_t;
85 struct spill_info_t {
86         ir_node    *to_spill;  /**< the value that should get spilled */
87         reloader_t *reloaders; /**< list of places where the value should get
88                                     reloaded */
89         spill_t    *spills;    /**< list of latest places where spill must be
90                                     placed */
91         double      spill_costs; /**< costs needed for spilling the value */
92         const arch_register_class_t *reload_cls; /**< the register class in which the
93                                                        reload should be placed */
94         bool        spilled_phi; /**< true when the whole Phi has been spilled and
95                                        will be replaced with a PhiM; false if only the
96                                        value of the Phi gets spilled */
97 };
98
99 struct spill_env_t {
100         const arch_env_t *arch_env;
101         ir_graph         *irg;
102         struct obstack    obst;
103         int               spill_cost;     /**< the cost of a single spill node */
104         int               reload_cost;    /**< the cost of a reload node */
105         set              *spills;         /**< all spill_info_t's, which must be
106                                                placed */
107         spill_info_t    **mem_phis;       /**< set of all spilled phis. */
108
109         unsigned          spill_count;
110         unsigned          reload_count;
111         unsigned          remat_count;
112         unsigned          spilled_phi_count;
113 };
114
115 /**
116  * Compare two spill infos.
117  */
118 static int cmp_spillinfo(const void *x, const void *y, size_t size)
119 {
120         const spill_info_t *xx = (const spill_info_t*)x;
121         const spill_info_t *yy = (const spill_info_t*)y;
122         (void) size;
123
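        /* the set interface only distinguishes 0 ("equal") from non-zero, so
         * comparing the to_spill pointers is sufficient */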
124         return xx->to_spill != yy->to_spill;
125 }
126
127 /**
128  * Returns spill info for a specific value (the value that is to be spilled)
129  */
130 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
131 {
132         spill_info_t info, *res;
133         int hash = hash_irn(value);
134
135         info.to_spill = value;
136         res = set_find(spill_info_t, env->spills, &info, sizeof(info), hash);
137
138         if (res == NULL) {
139                 info.reloaders   = NULL;
140                 info.spills      = NULL;
141                 info.spill_costs = -1;
142                 info.reload_cls  = NULL;
143                 info.spilled_phi = false;
144                 res = set_insert(spill_info_t, env->spills, &info, sizeof(info), hash);
145         }
146
147         return res;
148 }
149
150 spill_env_t *be_new_spill_env(ir_graph *irg)
151 {
152         const arch_env_t *arch_env = be_get_irg_arch_env(irg);
153
154         spill_env_t *env = XMALLOC(spill_env_t);
155         env->spills         = new_set(cmp_spillinfo, 1024);
156         env->irg            = irg;
157         env->arch_env       = arch_env;
158         env->mem_phis       = NEW_ARR_F(spill_info_t*, 0);
159         env->spill_cost     = arch_env->spill_cost;
160         env->reload_cost    = arch_env->reload_cost;
161         obstack_init(&env->obst);
162
163         env->spill_count       = 0;
164         env->reload_count      = 0;
165         env->remat_count       = 0;
166         env->spilled_phi_count = 0;
167
168         return env;
169 }
170
171 void be_delete_spill_env(spill_env_t *env)
172 {
173         del_set(env->spills);
174         DEL_ARR_F(env->mem_phis);
175         obstack_free(&env->obst, NULL);
176         free(env);
177 }
178
179 /*
180  *  ____  _                  ____      _                 _
181  * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
182  * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
183  * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
184  * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
185  *
186  */
187
188 void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
189 {
190         spill_info_t  *spill_info = get_spillinfo(env, to_spill);
191         spill_t       *spill;
192         spill_t       *s;
193         spill_t       *last;
194
195         assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
196         DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));
197
198         /* Just for safety make sure that we do not insert the spill in front of a phi */
199         assert(!is_Phi(sched_next(after)));
200
201         /* spills that are dominated by others are not needed */
202         last = NULL;
203         s    = spill_info->spills;
204         for ( ; s != NULL; s = s->next) {
205                 /* no need to add this spill if it is dominated by another */
206                 if (value_dominates(s->after, after)) {
207                         DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after));
208                         return;
209                 }
210                 /* remove spills that we dominate */
211                 if (value_dominates(after, s->after)) {
212                         DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after));
213                         if (last != NULL) {
214                                 last->next         = s->next;
215                         } else {
216                                 spill_info->spills = s->next;
217                         }
218                 } else {
219                         last = s;
220                 }
221         }
222
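        /* record a new spill position; the actual be_Spill node is created
         * later in spill_irn() */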
223         spill         = OALLOC(&env->obst, spill_t);
224         spill->after  = after;
225         spill->next   = spill_info->spills;
226         spill->spill  = NULL;
227
228         spill_info->spills = spill;
229 }
230
231 void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
232                 const arch_register_class_t *reload_cls, int allow_remat)
233 {
234         spill_info_t  *info;
235         reloader_t    *rel;
236
237         assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
238
239         info = get_spillinfo(env, to_spill);
240
241         if (is_Phi(to_spill)) {
242                 int i, arity;
243
244                 /* create spillinfos for the phi arguments */
245                 for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
246                         ir_node *arg = get_irn_n(to_spill, i);
247                         get_spillinfo(env, arg);
248                 }
249         }
250
251         assert(!be_is_Keep(before));
252
253         /* put reload into list */
254         rel                   = OALLOC(&env->obst, reloader_t);
255         rel->next             = info->reloaders;
256         rel->reloader         = before;
257         rel->rematted_node    = NULL;
258         rel->remat_cost_delta = allow_remat ? 0 : REMAT_COST_INFINITE;
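        /* REMAT_COST_INFINITE marks this reload as not rematerializable; the
         * costs are evaluated in be_insert_spills_reloads() */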
259
260         info->reloaders  = rel;
261         assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
262         info->reload_cls = reload_cls;
263
264         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
265                 to_spill, before, allow_remat ? "" : " not"));
266 }
267
268 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
269                    const arch_register_class_t *reload_cls, int allow_remat)
270 {
271         be_add_reload2(senv, to_spill, before, reload_cls, allow_remat);
272
273 }
274
275 ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
276 {
277         ir_node *last = sched_last(block);
278
279         /* we might have keeps behind the jump... */
280         while (be_is_Keep(last)) {
281                 last = sched_prev(last);
282                 assert(!sched_is_end(last));
283         }
284
285         assert(is_cfop(last));
286
287         /* add the reload before the (cond-)jump */
288         return last;
289 }
290
291 /**
292  * determine the final spill position: it should come after all Phi, Keep and
293  * CopyKeep nodes scheduled directly after the definition
294  */
295 static ir_node *determine_spill_point(ir_node *node)
296 {
297         node = skip_Proj(node);
298         while (true) {
299                 ir_node *next = sched_next(node);
300                 if (!is_Phi(next) && !be_is_Keep(next) && !be_is_CopyKeep(next))
301                         break;
302                 node = next;
303         }
304         return node;
305 }
306
307 /**
308  * Returns the point at which you can insert a node that should be executed
309  * before block @p block when coming from pred @p pos.
310  */
311 static ir_node *get_block_insertion_point(ir_node *block, int pos)
312 {
313         ir_node *predblock;
314
315         /* simply add the reload to the beginning of the block if we only have 1
316          * predecessor. We don't need to check for phis as there can't be any in a
317          * block with only 1 pred. */
318         if (get_Block_n_cfgpreds(block) == 1) {
319                 assert(!is_Phi(sched_first(block)));
320                 return sched_first(block);
321         }
322
323         /* We have to reload the value in pred-block */
324         predblock = get_Block_cfgpred_block(block, pos);
325         return be_get_end_of_block_insertion_point(predblock);
326 }
327
328 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
329                            int pos, const arch_register_class_t *reload_cls,
330                            int allow_remat)
331 {
332         ir_node *before = get_block_insertion_point(block, pos);
333         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
334 }
335
336 void be_spill_phi(spill_env_t *env, ir_node *node)
337 {
338         ir_node *block;
339         int i, arity;
340         spill_info_t *info;
341
342         assert(is_Phi(node));
343
344         info              = get_spillinfo(env, node);
345         info->spilled_phi = true;
346         ARR_APP1(spill_info_t*, env->mem_phis, info);
347
348         /* create spills for the phi arguments */
349         block = get_nodes_block(node);
350         for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
351                 ir_node *arg = get_irn_n(node, i);
352                 ir_node *insert;
353
354                 /* some backends have virtual noreg/unknown nodes that are not scheduled
355                  * and simply always available. */
356                 if (!sched_is_scheduled(arg)) {
357                         ir_node *pred_block = get_Block_cfgpred_block(block, i);
358                         insert = be_get_end_of_block_insertion_point(pred_block);
359                         insert = sched_prev(insert);
360                 } else {
361                         insert = determine_spill_point(arg);
362                 }
363
364                 be_add_spill(env, arg, insert);
365         }
366 }
367
368 /*
369  *   ____                _         ____        _ _ _
370  *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
371  * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
372  * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
373  *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
374  *                                      |_|
375  */
376
377 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);
378
379 /**
380  * Creates the spill nodes for a value at all recorded spill positions.
381  *
382  * @param env        the spill environment
383  * @param spillinfo  the spill info of the value that should be spilled
384  *
385  * The created be_Spill nodes (or NoMem for always-available values) are
386  * stored in the spill list of @p spillinfo.
387  */
388 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
389 {
390         ir_node       *to_spill = spillinfo->to_spill;
391         const ir_node *insn     = skip_Proj_const(to_spill);
392         spill_t *spill;
393
394         /* determine_spill_costs must have been run before */
395         assert(spillinfo->spill_costs >= 0);
396
397         /* some backends have virtual noreg/unknown nodes that are not scheduled
398          * and simply always available. */
399         if (!sched_is_scheduled(insn)) {
400                 /* the value is always available, so use NoMem as its spill */
401                 ir_graph *irg = get_irn_irg(to_spill);
402                 spillinfo->spills->spill = get_irg_no_mem(irg);
403                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
404                 return;
405         }
406
407         DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
408         spill = spillinfo->spills;
409         for ( ; spill != NULL; spill = spill->next) {
410                 ir_node *after = spill->after;
411                 after = determine_spill_point(after);
412
413                 spill->spill = arch_env_new_spill(env->arch_env, to_spill, after);
414                 DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
415                 env->spill_count++;
416         }
417         DBG((dbg, LEVEL_1, "\n"));
418 }
419
420 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
421
422 /**
423  * If the first usage of a Phi result would be a reload from memory,
424  * there is no sense in allocating a register for it.
425  * Thus we spill the Phi and all its operands to the same spill slot.
426  * The data Phi thereby becomes a memory Phi (PhiM).
427  *
428  * @param env        the spill environment
429  * @param spillinfo  the spill info of the Phi node that should be spilled
430  *                   (its spilled_phi flag must be set)
431  */
432 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
433 {
434         ir_graph *irg   = env->irg;
435         ir_node  *phi   = spillinfo->to_spill;
436         ir_node  *block = get_nodes_block(phi);
437         ir_node  *unknown;
438         ir_node **ins;
439         spill_t  *spill;
440         int       i;
441         int       arity;
442
443         assert(is_Phi(phi));
444         assert(!get_opt_cse());
445         DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
446
447         /* build a new PhiM */
448         arity   = get_irn_arity(phi);
449         ins     = ALLOCAN(ir_node*, arity);
450         unknown = new_r_Unknown(irg, mode_M);
451         for (i = 0; i < arity; ++i) {
452                 ins[i] = unknown;
453         }
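        /* the Unknown inputs are placeholders; they are replaced by the spills
         * of the Phi arguments further down */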
454
455         /* the PhiM replaces any previously recorded spill positions */
456         spill         = OALLOC(&env->obst, spill_t);
457         spill->after  = determine_spill_point(phi);
458         spill->spill  = be_new_Phi(block, arity, ins, mode_M, arch_no_register_req);
459         spill->next   = NULL;
460         sched_add_after(block, spill->spill);
461
462         spillinfo->spills = spill;
463         env->spilled_phi_count++;
464
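        /* spill each Phi argument and use its spill as the corresponding
         * input of the PhiM */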
465         for (i = 0; i < arity; ++i) {
466                 ir_node      *arg      = get_irn_n(phi, i);
467                 spill_info_t *arg_info = get_spillinfo(env, arg);
468
469                 determine_spill_costs(env, arg_info);
470                 spill_node(env, arg_info);
471
472                 set_irn_n(spill->spill, i, arg_info->spills->spill);
473         }
474         DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi,
475              spill->spill));
476 }
477
478 /**
479  * Spill a node.
480  *
481  * @param env        the spill environment
482  * @param spillinfo  the spill info of the node that should be spilled
483  */
484 static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
485 {
486         /* node is already spilled */
487         if (spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
488                 return;
489
490         if (spillinfo->spilled_phi) {
491                 spill_phi(env, spillinfo);
492         } else {
493                 spill_irn(env, spillinfo);
494         }
495 }
496
497 /*
498  *
499  *  ____                      _            _       _ _
500  * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
501  * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
502  * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
503  * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
504  *
505  */
506
507 /**
508  * Tests whether value @p arg is available before node @p reloader
509  * @returns 1 if value is available, 0 otherwise
510  */
511 static int is_value_available(spill_env_t *env, const ir_node *arg,
512                               const ir_node *reloader)
513 {
514         if (is_Unknown(arg) || is_NoMem(arg))
515                 return 1;
516
517         if (be_is_Spill(skip_Proj_const(arg)))
518                 return 1;
519
520         if (arg == get_irg_frame(env->irg))
521                 return 1;
522
523         (void)reloader;
524
525         if (get_irn_mode(arg) == mode_T)
526                 return 0;
527
528         /*
529          * Ignore registers are always available
530          */
531         if (arch_irn_is_ignore(arg))
532                 return 1;
533
534         return 0;
535 }
536
537 /**
538  * Check if a node is rematerializable. This tests for the following conditions:
539  *
540  * - The node itself is rematerializable
541  * - All arguments of the node are available or also rematerialisable
542  * - The costs for the rematerialisation operation are less than or equal to a limit
543  *
544  * Returns the costs needed for rematerialisation or something
545  * >= REMAT_COST_INFINITE if remat is not possible.
546  */
547 static int check_remat_conditions_costs(spill_env_t *env,
548                 const ir_node *spilled, const ir_node *reloader, int parentcosts)
549 {
550         int i, arity;
551         int argremats;
552         int costs = 0;
553         const ir_node *insn = skip_Proj_const(spilled);
554
555         assert(!be_is_Spill(insn));
556         if (!arch_irn_is(insn, rematerializable))
557                 return REMAT_COST_INFINITE;
558
559         if (be_is_Reload(insn)) {
560                 costs += 2;
561         } else {
562                 costs += arch_get_op_estimated_cost(insn);
563         }
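        /* rematerialization only pays off while it stays cheaper than the
         * spill/reload pair it would replace */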
564         if (parentcosts + costs >= env->reload_cost + env->spill_cost) {
565                 return REMAT_COST_INFINITE;
566         }
567         /* never rematerialize a node which modifies the flags.
568          * (it would be better to test whether the flags are actually live
569          * at the reload point...)
570          */
571         if (arch_irn_is(insn, modify_flags)) {
572                 return REMAT_COST_INFINITE;
573         }
574
575         argremats = 0;
576         for (i = 0, arity = get_irn_arity(insn); i < arity; ++i) {
577                 ir_node *arg = get_irn_n(insn, i);
578
579                 if (is_value_available(env, arg, reloader))
580                         continue;
581
582                 /* we have to rematerialize the argument as well */
583                 ++argremats;
584                 if (argremats > 1) {
585                         /* we only support rematerializing 1 argument at the moment,
586                          * as multiple arguments could increase register pressure */
587                         return REMAT_COST_INFINITE;
588                 }
589
590                 costs += check_remat_conditions_costs(env, arg, reloader,
591                                                       parentcosts + costs);
592                 if (parentcosts + costs >= env->reload_cost + env->spill_cost)
593                         return REMAT_COST_INFINITE;
594         }
595
596         return costs;
597 }
598
599 /**
600  * Re-materialize a node.
601  *
602  * @param env       the spill environment
603  * @param spilled   the node that was spilled
604  * @param reloader  an irn that requires a reload
605  */
606 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
607 {
608         int i, arity;
609         ir_node *res;
610         ir_node *bl;
611         ir_node **ins;
612
613         if (is_Block(reloader)) {
614                 bl = reloader;
615         } else {
616                 bl = get_nodes_block(reloader);
617         }
618
619         ins = ALLOCAN(ir_node*, get_irn_arity(spilled));
620         for (i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
621                 ir_node *arg = get_irn_n(spilled, i);
622
623                 if (is_value_available(env, arg, reloader)) {
624                         ins[i] = arg;
625                 } else {
626                         ins[i] = do_remat(env, arg, reloader);
627                         /* don't count the argument rematerialization as an extra remat */
628                         --env->remat_count;
629                 }
630         }
631
632         /* create a copy of the node */
633         res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
634                           get_irn_op(spilled), get_irn_mode(spilled),
635                           get_irn_arity(spilled), ins);
636         copy_node_attr(env->irg, spilled, res);
637         arch_env_mark_remat(env->arch_env, res);
638
639         DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
640
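        /* Proj nodes are not scheduled themselves; only insert and count the
         * remat if we copied a real instruction */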
641         if (! is_Proj(res)) {
642                 /* insert in schedule */
643                 sched_reset(res);
644                 sched_add_before(reloader, res);
645                 ++env->remat_count;
646         }
647
648         return res;
649 }
650
651 double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
652 {
653         ir_node *block = get_nodes_block(before);
654         double   freq  = get_block_execfreq(block);
655         (void) to_spill;
656
657         return env->spill_cost * freq;
658 }
659
660 unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
661                                        const ir_node *before)
662 {
663         if (be_do_remats) {
664                 /* is the node rematerializable? */
665                 unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
666                 if (costs < (unsigned) env->reload_cost)
667                         return costs;
668         }
669
670         return env->reload_cost;
671 }
672
673 double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
674 {
675         ir_node *block = get_nodes_block(before);
676         double   freq  = get_block_execfreq(block);
677
678         if (be_do_remats) {
679                 /* is the node rematerializable? */
680                 int costs = check_remat_conditions_costs(env, to_spill, before, 0);
681                 if (costs < env->reload_cost)
682                         return costs * freq;
683         }
684
685         return env->reload_cost * freq;
686 }
687
688 int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat,
689                            const ir_node *before)
690 {
691         return check_remat_conditions_costs(env, to_remat, before, 0) < REMAT_COST_INFINITE;
692 }
693
694 double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
695                                    ir_node *block, int pos)
696 {
697         ir_node *before = get_block_insertion_point(block, pos);
698         return be_get_reload_costs(env, to_spill, before);
699 }
700
701 ir_node *be_new_spill(ir_node *value, ir_node *after)
702 {
703         ir_graph                    *irg       = get_irn_irg(value);
704         ir_node                     *frame     = get_irg_frame(irg);
705         const arch_register_class_t *cls       = arch_get_irn_reg_class(value);
706         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
707         ir_node                     *block     = get_block(after);
708         ir_node                     *spill
709                 = be_new_Spill(cls, cls_frame, block, frame, value);
710
711         sched_add_after(after, spill);
712         return spill;
713 }
714
715 ir_node *be_new_reload(ir_node *value, ir_node *spill, ir_node *before)
716 {
717         ir_graph *irg   = get_irn_irg(value);
718         ir_node  *frame = get_irg_frame(irg);
719         ir_node  *block = get_block(before);
720         const arch_register_class_t *cls       = arch_get_irn_reg_class(value);
721         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
722         ir_mode                     *mode      = get_irn_mode(value);
723         ir_node  *reload;
724
725         assert(be_is_Spill(spill) || is_Phi(spill));
726         assert(get_irn_mode(spill) == mode_M);
727
728         reload = be_new_Reload(cls, cls_frame, block, frame, spill, mode);
729         sched_add_before(before, reload);
730
731         return reload;
732 }
733
734 /*
735  *  ___                     _     ____      _                 _
736  * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
737  *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
738  *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
739  * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
740  *
741  */
742
743 /**
744  * analyzes how to best spill a node and determines the costs for that
745  */
746 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
747 {
748         ir_node       *to_spill = spillinfo->to_spill;
749         const ir_node *insn     = skip_Proj_const(to_spill);
750         ir_node       *spill_block;
751         spill_t       *spill;
752         double         spill_execfreq;
753
754         /* already calculated? */
755         if (spillinfo->spill_costs >= 0)
756                 return;
757
758         assert(!arch_irn_is(insn, dont_spill));
759         assert(!be_is_Reload(insn));
760
761         /* some backends have virtual noreg/unknown nodes that are not scheduled
762          * and simply always available.
763          * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
764          * predecessor (of a PhiM) but this test might match other things too...
765          */
766         if (!sched_is_scheduled(insn)) {
767                 ir_graph *irg = get_irn_irg(to_spill);
768                 /* override spillinfos or create a new one */
769                 spill_t *spill = OALLOC(&env->obst, spill_t);
770                 spill->after = NULL;
771                 spill->next  = NULL;
772                 spill->spill = get_irg_no_mem(irg);
773
774                 spillinfo->spills      = spill;
775                 spillinfo->spill_costs = 0;
776
777                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
778                 return;
779         }
780
781         spill_block    = get_nodes_block(insn);
782         spill_execfreq = get_block_execfreq(spill_block);
783
784         if (spillinfo->spilled_phi) {
785                 /* TODO calculate correct costs...
786                  * (though we can't remat this node anyway so no big problem) */
787                 spillinfo->spill_costs = env->spill_cost * spill_execfreq;
788                 return;
789         }
790
791         if (spillinfo->spills != NULL) {
792                 spill_t *s;
793                 double   spills_execfreq;
794
795                 /* calculate sum of execution frequencies of individual spills */
796                 spills_execfreq = 0;
797                 s               = spillinfo->spills;
798                 for ( ; s != NULL; s = s->next) {
799                         ir_node *spill_block = get_block(s->after);
800                         double   freq = get_block_execfreq(spill_block);
801
802                         spills_execfreq += freq;
803                 }
804
805                 DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
806                     spills_execfreq * env->spill_cost,
807                     spill_execfreq * env->spill_cost));
808
809                 /* multi-/latespill is advantageous -> return */
810                 if (spills_execfreq < spill_execfreq) {
811                         DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
812                         spillinfo->spill_costs = spills_execfreq * env->spill_cost;
813                         return;
814                 }
815         }
816
817         /* override spillinfos or create a new one */
818         spill        = OALLOC(&env->obst, spill_t);
819         spill->after = determine_spill_point(to_spill);
820         spill->next  = NULL;
821         spill->spill = NULL;
822
823         spillinfo->spills      = spill;
824         spillinfo->spill_costs = spill_execfreq * env->spill_cost;
825         DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
826 }
827
828 void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
829 {
830         const spill_info_t *si = get_spillinfo(env, irn);
831         ir_node *start_block   = get_irg_start_block(get_irn_irg(irn));
832         int n_blocks           = get_Block_dom_max_subtree_pre_num(start_block);
833         bitset_t *reloads      = bitset_alloca(n_blocks);
834         reloader_t *r;
835         spill_t *s;
836
837         if (si == NULL)
838                 return;
839
840         /* Fill the bitset with the dominance pre-order numbers
841          * of the blocks the reloads are located in. */
842         for (r = si->reloaders; r != NULL; r = r->next) {
843                 ir_node *bl = get_nodes_block(r->reloader);
844                 bitset_set(reloads, get_Block_dom_tree_pre_num(bl));
845         }
846
847         /* Now, cancel out all the blocks that are dominated by each spill.
848          * If the bitset is not empty after that, we have reloads that are
849          * not dominated by any spill. */
850         for (s = si->spills; s != NULL; s = s->next) {
851                 ir_node *bl = get_nodes_block(s->after);
852                 int start   = get_Block_dom_tree_pre_num(bl);
853                 int end     = get_Block_dom_max_subtree_pre_num(bl);
854
855                 bitset_clear_range(reloads, start, end);
856         }
857
858         if (!bitset_is_empty(reloads))
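        /* some reload is not dominated by any spill: add a spill right after
         * the definition of the value, which dominates all its uses */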
859                 be_add_spill(env, si->to_spill, si->to_spill);
860 }
861
862 void be_insert_spills_reloads(spill_env_t *env)
863 {
864         size_t n_mem_phis = ARR_LEN(env->mem_phis);
865         size_t i;
866
867         be_timer_push(T_RA_SPILL_APPLY);
868
869         /* create all PhiMs first; this is needed so that Phis hanging on
870            spilled Phis work correctly */
871         for (i = 0; i < n_mem_phis; ++i) {
872                 spill_info_t *info = env->mem_phis[i];
873                 spill_node(env, info);
874         }
875
876         /* process each spilled node */
877         foreach_set(env->spills, spill_info_t, si) {
878                 ir_node  *to_spill        = si->to_spill;
879                 ir_node **copies          = NEW_ARR_F(ir_node*, 0);
880                 double    all_remat_costs = 0; /* costs if we would remat all reloaders */
881                 bool      force_remat     = false;
882                 reloader_t *rld;
883
884                 DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
885
886                 determine_spill_costs(env, si);
887
888                 /* determine possibility of rematerialisations */
889                 if (be_do_remats) {
890                         /* calculate the cost savings for each individual reload if it
891                            were rematerialized instead of reloaded */
892                         for (rld = si->reloaders; rld != NULL; rld = rld->next) {
893                                 double   freq;
894                                 int      remat_cost;
895                                 int      remat_cost_delta;
896                                 ir_node *block;
897                                 ir_node *reloader = rld->reloader;
898
899                                 if (rld->rematted_node != NULL) {
900                                         DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
901                                              rld->rematted_node, reloader));
902                                         continue;
903                                 }
904                                 if (rld->remat_cost_delta >= REMAT_COST_INFINITE) {
905                                         DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
906                                              reloader));
907                                         all_remat_costs = REMAT_COST_INFINITE;
908                                         continue;
909                                 }
910
911                                 remat_cost  = check_remat_conditions_costs(env, to_spill,
912                                                                            reloader, 0);
913                                 if (remat_cost >= REMAT_COST_INFINITE) {
914                                         DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
915                                              reloader));
916                                         rld->remat_cost_delta = REMAT_COST_INFINITE;
917                                         all_remat_costs       = REMAT_COST_INFINITE;
918                                         continue;
919                                 }
920
921                                 remat_cost_delta      = remat_cost - env->reload_cost;
922                                 rld->remat_cost_delta = remat_cost_delta;
923                                 block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
924                                 freq                  = get_block_execfreq(block);
925                                 all_remat_costs      += remat_cost_delta * freq;
926                                 DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
927                                      "%d (rel %f)\n", reloader, remat_cost_delta,
928                                      remat_cost_delta * freq));
929                         }
930                         if (all_remat_costs < REMAT_COST_INFINITE) {
931                                 /* we don't need the costs for the spill if we can remat
932                                    all reloaders */
933                                 all_remat_costs -= si->spill_costs;
934
935                                 DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
936                                      env->spill_cost, si->spill_costs));
937                         }
938
939                         if (all_remat_costs < 0) {
940                                 DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
941                                      all_remat_costs));
942                                 force_remat = true;
943                         }
944                 }
945
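                /* force_remat means rematerializing every reloader is cheaper overall
                 * than spilling; a negative remat_cost_delta forces a remat for an
                 * individual reload */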
946                 /* go through all reloads for this spill */
947                 for (rld = si->reloaders; rld != NULL; rld = rld->next) {
948                         ir_node *copy; /* a reload is a "copy" of the original value */
949
950                         if (rld->rematted_node != NULL) {
951                                 copy = rld->rematted_node;
952                                 sched_add_before(rld->reloader, copy);
953                         } else if (be_do_remats &&
954                                         (force_remat || rld->remat_cost_delta < 0)) {
955                                 copy = do_remat(env, to_spill, rld->reloader);
956                         } else {
957                                 /* make sure we have a spill */
958                                 spill_node(env, si);
959
960                                 /* create a reload, using the first spill for now;
961                                  * SSA reconstruction for memory comes below */
962                                 assert(si->spills != NULL);
963                                 copy = arch_env_new_reload(env->arch_env, si->to_spill,
964                                                            si->spills->spill, rld->reloader);
965                                 env->reload_count++;
966                         }
967
968                         DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
969                              copy, to_spill, rld->reloader));
970                         ARR_APP1(ir_node*, copies, copy);
971                 }
972
973                 /* if we had any reloads or remats, then we need to reconstruct the
974                  * SSA form for the spilled value */
975                 if (ARR_LEN(copies) > 0) {
976                         be_ssa_construction_env_t senv;
977                         /* be_lv_t *lv = be_get_irg_liveness(env->irg); */
978
979                         be_ssa_construction_init(&senv, env->irg);
980                         be_ssa_construction_add_copy(&senv, to_spill);
981                         be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
982                         be_ssa_construction_fix_users(&senv, to_spill);
983
984 #if 0
985                         /* no need to enable this as long as we invalidate liveness
986                            after this function... */
987                         be_ssa_construction_update_liveness_phis(&senv);
988                         be_liveness_update(to_spill);
989                         len = ARR_LEN(copies);
990                         for (i = 0; i < len; ++i) {
991                                 be_liveness_update(lv, copies[i]);
992                         }
993 #endif
994                         be_ssa_construction_destroy(&senv);
995                 }
996                 /* need to reconstruct SSA form if we had multiple spills */
997                 if (si->spills != NULL && si->spills->next != NULL) {
998                         spill_t *spill;
999                         int      spill_count = 0;
1000
1001                         be_ssa_construction_env_t senv;
1002
1003                         be_ssa_construction_init(&senv, env->irg);
1004                         spill = si->spills;
1005                         for ( ; spill != NULL; spill = spill->next) {
1006                                 /* maybe we rematerialized the value and need no spill */
1007                                 if (spill->spill == NULL)
1008                                         continue;
1009                                 be_ssa_construction_add_copy(&senv, spill->spill);
1010                                 spill_count++;
1011                         }
1012                         if (spill_count > 1) {
1013                                 /* all reloads are attached to the first spill, fix them now */
1014                                 be_ssa_construction_fix_users(&senv, si->spills->spill);
1015                         }
1016
1017                         be_ssa_construction_destroy(&senv);
1018                 }
1019
1020                 DEL_ARR_F(copies);
1021                 si->reloaders = NULL;
1022         }
1023
1024         stat_ev_dbl("spill_spills", env->spill_count);
1025         stat_ev_dbl("spill_reloads", env->reload_count);
1026         stat_ev_dbl("spill_remats", env->remat_count);
1027         stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);
1028
1029         /* Matze: In theory be_ssa_construction should take care of the liveness...
1030          * try to disable this again in the future */
1031         be_invalidate_live_sets(env->irg);
1032
1033         be_remove_dead_nodes_from_schedule(env->irg);
1034
1035         be_timer_pop(T_RA_SPILL_APPLY);
1036 }
1037
1038 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill)
1039 void be_init_spill(void)
1040 {
1041         FIRM_DBG_REGISTER(dbg, "firm.be.spill");
1042 }