libfirm: ir/be/bespillutil.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       implementation of the spill/reload placement abstraction layer
23  * @author      Daniel Grund, Sebastian Hack, Matthias Braun
24  * @date        29.09.2005
25  */
26 #include "config.h"
27
28 #include <stdlib.h>
29 #include <stdbool.h>
30
31 #include "pset.h"
32 #include "irnode_t.h"
33 #include "ircons_t.h"
34 #include "iredges_t.h"
35 #include "irbackedge_t.h"
36 #include "irprintf.h"
37 #include "ident_t.h"
38 #include "type_t.h"
39 #include "entity_t.h"
40 #include "debug.h"
41 #include "irgwalk.h"
42 #include "array.h"
43 #include "pdeq.h"
44 #include "execfreq.h"
45 #include "irnodeset.h"
46 #include "error.h"
47
48 #include "bearch.h"
49 #include "belive_t.h"
50 #include "besched.h"
51 #include "bespill.h"
52 #include "bespillutil.h"
54 #include "benode.h"
55 #include "bechordal_t.h"
56 #include "bestatevent.h"
57 #include "bessaconstr.h"
58 #include "beirg.h"
59 #include "beirgmod.h"
60 #include "beintlive_t.h"
61 #include "bemodule.h"
62 #include "be_t.h"
63
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
65
66 #define REMAT_COST_INFINITE  1000
67
68 typedef struct reloader_t reloader_t;
69 struct reloader_t {
70         reloader_t *next;
71         ir_node    *can_spill_after;
72         ir_node    *reloader;
73         ir_node    *rematted_node;
74         int         remat_cost_delta; /**< costs needed for rematerialization,
75                                             compared to placing a reload */
76 };
77
78 typedef struct spill_t spill_t;
79 struct spill_t {
80         spill_t *next;
81         ir_node *after;  /**< spill has to be placed after this node (or earlier) */
82         ir_node *spill;
83 };
84
85 typedef struct spill_info_t spill_info_t;
86 struct spill_info_t {
87         ir_node    *to_spill;  /**< the value that should get spilled */
88         reloader_t *reloaders; /**< list of places where the value should get
89                                     reloaded */
90         spill_t    *spills;    /**< list of latest places where spill must be
91                                     placed */
92         double      spill_costs; /**< costs needed for spilling the value */
93         const arch_register_class_t *reload_cls; /**< the register class in which
94                                                        the reload should be placed */
95         bool        spilled_phi; /**< true if the whole Phi has been spilled and
96                                       will be replaced with a PhiM, false if only
97                                       the value of the Phi gets spilled */
98 };
99
100 struct spill_env_t {
101         const arch_env_t *arch_env;
102         ir_graph         *irg;
103         struct obstack    obst;
104         int               spill_cost;     /**< the cost of a single spill node */
105         int               reload_cost;    /**< the cost of a reload node */
106         set              *spills;         /**< all spill_info_t's, which must be
107                                                placed */
108         spill_info_t    **mem_phis;       /**< array of all spilled Phis. */
109         ir_exec_freq     *exec_freq;
110
111         unsigned          spill_count;
112         unsigned          reload_count;
113         unsigned          remat_count;
114         unsigned          spilled_phi_count;
115 };
116
117 /**
118  * Compare two spill infos.
119  */
120 static int cmp_spillinfo(const void *x, const void *y, size_t size)
121 {
122         const spill_info_t *xx = (const spill_info_t*)x;
123         const spill_info_t *yy = (const spill_info_t*)y;
124         (void) size;
125
126         return xx->to_spill != yy->to_spill;
127 }
128
129 /**
130  * Returns spill info for a specific value (the value that is to be spilled)
131  */
132 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
133 {
134         spill_info_t info, *res;
135         int hash = hash_irn(value);
136
137         info.to_spill = value;
138         res = (spill_info_t*)set_find(env->spills, &info, sizeof(info), hash);
139
140         if (res == NULL) {
141                 info.reloaders   = NULL;
142                 info.spills      = NULL;
143                 info.spill_costs = -1;
144                 info.reload_cls  = NULL;
145                 info.spilled_phi = false;
146                 res = (spill_info_t*)set_insert(env->spills, &info, sizeof(info), hash);
147         }
148
149         return res;
150 }
151
152 spill_env_t *be_new_spill_env(ir_graph *irg)
153 {
154         const arch_env_t *arch_env = be_get_irg_arch_env(irg);
155
156         spill_env_t *env = XMALLOC(spill_env_t);
157         env->spills         = new_set(cmp_spillinfo, 1024);
158         env->irg            = irg;
159         env->arch_env       = arch_env;
160         env->mem_phis       = NEW_ARR_F(spill_info_t*, 0);
161         env->spill_cost     = arch_env->spill_cost;
162         env->reload_cost    = arch_env->reload_cost;
163         env->exec_freq      = be_get_irg_exec_freq(irg);
164         obstack_init(&env->obst);
165
166         env->spill_count       = 0;
167         env->reload_count      = 0;
168         env->remat_count       = 0;
169         env->spilled_phi_count = 0;
170
171         return env;
172 }
173
174 void be_delete_spill_env(spill_env_t *env)
175 {
176         del_set(env->spills);
177         DEL_ARR_F(env->mem_phis);
178         obstack_free(&env->obst, NULL);
179         free(env);
180 }
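
/*
 * A minimal usage sketch of the spill environment lifecycle, as a spilling
 * algorithm might drive it. The variables irg, value, before and reload_cls
 * are assumed to be provided by the caller; they are not defined in this
 * file.
 *
 *   spill_env_t *env = be_new_spill_env(irg);
 *
 *   // the value must be available (again) right before this node;
 *   // rematerialization is allowed as an alternative to a reload
 *   be_add_reload(env, value, before, reload_cls, 1);
 *
 *   // materialize all recorded spills/reloads and rebuild SSA form
 *   be_insert_spills_reloads(env);
 *   be_delete_spill_env(env);
 */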
181
182 /*
183  *  ____  _                  ____      _                 _
184  * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
185  * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
186  * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
187  * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
188  *
189  */
190
191 void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
192 {
193         spill_info_t  *spill_info = get_spillinfo(env, to_spill);
194         spill_t       *spill;
195         spill_t       *s;
196         spill_t       *last;
197
198         assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
199         DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));
200
201         /* Just for safety make sure that we do not insert the spill in front of a phi */
202         assert(!is_Phi(sched_next(after)));
203
204         /* spills that are dominated by others are not needed */
205         last = NULL;
206         s    = spill_info->spills;
207         for ( ; s != NULL; s = s->next) {
208                 /* no need to add this spill if it is dominated by another */
209                 if (value_dominates(s->after, after)) {
210                         DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", s->after));
211                         return;
212                 }
213                 /* remove spills that we dominate */
214                 if (value_dominates(after, s->after)) {
215                         DB((dbg, LEVEL_1, "...remove old spill at %+F\n", s->after));
216                         if (last != NULL) {
217                                 last->next         = s->next;
218                         } else {
219                                 spill_info->spills = s->next;
220                         }
221                 } else {
222                         last = s;
223                 }
224         }
225
226         spill         = OALLOC(&env->obst, spill_t);
227         spill->after  = after;
228         spill->next   = spill_info->spills;
229         spill->spill  = NULL;
230
231         spill_info->spills = spill;
232 }
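
/*
 * Example of the pruning above (hypothetical dominance relations): if a
 * spill after node A is already recorded and A dominates the new position B,
 * the new request is dropped; conversely, if B dominates A, the entry for A
 * is removed and only the spill after B is kept. The spill list therefore
 * only contains positions that do not dominate each other.
 */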
233
234 void be_add_reload2(spill_env_t *env, ir_node *to_spill, ir_node *before,
235                 ir_node *can_spill_after, const arch_register_class_t *reload_cls,
236                 int allow_remat)
237 {
238         spill_info_t  *info;
239         reloader_t    *rel;
240
241         assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
242
243         info = get_spillinfo(env, to_spill);
244
245         if (is_Phi(to_spill)) {
246                 int i, arity;
247
248                 /* create spillinfos for the phi arguments */
249                 for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
250                         ir_node *arg = get_irn_n(to_spill, i);
251                         get_spillinfo(env, arg);
252                 }
253         }
254
255         assert(!is_Proj(before) && !be_is_Keep(before));
256
257         /* put reload into list */
258         rel                   = OALLOC(&env->obst, reloader_t);
259         rel->next             = info->reloaders;
260         rel->reloader         = before;
261         rel->rematted_node    = NULL;
262         rel->can_spill_after  = can_spill_after;
263         rel->remat_cost_delta = allow_remat ? 0 : REMAT_COST_INFINITE;
264
265         info->reloaders  = rel;
266         assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
267         info->reload_cls = reload_cls;
268
269         DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
270                 to_spill, before, allow_remat ? "" : " not"));
271 }
272
273 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
274                    const arch_register_class_t *reload_cls, int allow_remat)
275 {
276         be_add_reload2(senv, to_spill, before, to_spill, reload_cls, allow_remat);
278 }
279
280 ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
281 {
282         ir_node *last = sched_last(block);
283
284         /* we might have keeps behind the jump... */
285         while (be_is_Keep(last)) {
286                 last = sched_prev(last);
287                 assert(!sched_is_end(last));
288         }
289
290         assert(is_cfop(last));
291
292         /* add the reload before the (cond-)jump */
293         return last;
294 }
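
/*
 * Illustration (hypothetical schedule, not taken from a real graph): if a
 * block's schedule ends in
 *
 *   ..., Cmp, CondJmp, Keep
 *
 * the Keep is skipped and the CondJmp is returned, so a reload added "at
 * the end" of the block is still executed before control leaves the block.
 */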
295
296 /**
297  * Determine the final spill position: it has to be after all Phi, Keep and
298  * CopyKeep nodes that immediately follow the definition in the schedule.
299  */
300 static ir_node *determine_spill_point(ir_node *node)
301 {
302         node = skip_Proj(node);
303         while (true) {
304                 ir_node *next = sched_next(node);
305                 if (!is_Phi(next) && !be_is_Keep(next) && !be_is_CopyKeep(next))
306                         break;
307                 node = next;
308         }
309         return node;
310 }
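
/*
 * Illustration (hypothetical schedule): if the definition of a value is
 * directly followed by Keep/Phi/CopyKeep nodes, e.g.
 *
 *   Def, Keep, Add, ...
 *
 * the returned spill point is the Keep, so the spill is not scheduled
 * between the definition and the nodes attached to it.
 */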
311
312 /**
313  * Returns the point at which you can insert a node that should be executed
314  * before block @p block when coming from pred @p pos.
315  */
316 static ir_node *get_block_insertion_point(ir_node *block, int pos)
317 {
318         ir_node *predblock;
319
320         /* simply add the reload to the beginning of the block if we only have 1
321          * predecessor. We don't need to check for phis as there can't be any in a
322          * block with only 1 pred. */
323         if (get_Block_n_cfgpreds(block) == 1) {
324                 assert(!is_Phi(sched_first(block)));
325                 return sched_first(block);
326         }
327
328         /* We have to reload the value in pred-block */
329         predblock = get_Block_cfgpred_block(block, pos);
330         return be_get_end_of_block_insertion_point(predblock);
331 }
332
333 void be_add_reload_at_end(spill_env_t *env, ir_node *to_spill,
334                           const ir_node *block,
335                           const arch_register_class_t *reload_cls,
336                           int allow_remat)
337 {
338         ir_node *before = be_get_end_of_block_insertion_point(block);
339         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
340 }
341
342 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
343                            int pos, const arch_register_class_t *reload_cls,
344                            int allow_remat)
345 {
346         ir_node *before = get_block_insertion_point(block, pos);
347         be_add_reload(env, to_spill, before, reload_cls, allow_remat);
348 }
349
350 void be_spill_phi(spill_env_t *env, ir_node *node)
351 {
352         ir_node *block;
353         int i, arity;
354         spill_info_t *info;
355
356         assert(is_Phi(node));
357
358         info              = get_spillinfo(env, node);
359         info->spilled_phi = true;
360         ARR_APP1(spill_info_t*, env->mem_phis, info);
361
362         /* create spills for the phi arguments */
363         block = get_nodes_block(node);
364         for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
365                 ir_node *arg = get_irn_n(node, i);
366                 ir_node *insert;
367
368                 /* some backends have virtual noreg/unknown nodes that are not scheduled
369                  * and simply always available. */
370                 if (!sched_is_scheduled(arg)) {
371                         ir_node *pred_block = get_Block_cfgpred_block(block, i);
372                         insert = be_get_end_of_block_insertion_point(pred_block);
373                         insert = sched_prev(insert);
374                 } else {
375                         insert = determine_spill_point(arg);
376                 }
377
378                 be_add_spill(env, arg, insert);
379         }
380 }
381
382 /*
383  *   ____                _         ____        _ _ _
384  *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
385  * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
386  * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
387  *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
388  *                                      |_|
389  */
390
391 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);
392
393 /**
394  * Creates the spill nodes for a value.
395  *
396  * @param env        the spill environment
397  * @param spillinfo  spill info of the value that should be spilled
398  *
399  * The created spill nodes are stored in the spill list of
400  * @p spillinfo.
401  */
402 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
403 {
404         ir_node       *to_spill = spillinfo->to_spill;
405         const ir_node *insn     = skip_Proj_const(to_spill);
406         spill_t *spill;
407
408         /* determine_spill_costs must have been run before */
409         assert(spillinfo->spill_costs >= 0);
410
411         /* some backends have virtual noreg/unknown nodes that are not scheduled
412          * and simply always available. */
413         if (!sched_is_scheduled(insn)) {
414                 /* the value is always available, use NoMem as its spill */
415                 ir_graph *irg = get_irn_irg(to_spill);
416                 spillinfo->spills->spill = get_irg_no_mem(irg);
417                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
418                 return;
419         }
420
421         DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
422         spill = spillinfo->spills;
423         for ( ; spill != NULL; spill = spill->next) {
424                 ir_node *after = spill->after;
425                 after = determine_spill_point(after);
426
427                 spill->spill = arch_env_new_spill(env->arch_env, to_spill, after);
428                 DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
429                 env->spill_count++;
430         }
431         DBG((dbg, LEVEL_1, "\n"));
432 }
433
434 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
435
436 /**
437  * If the first usage of a Phi result would be a reload from memory,
438  * there is no point in allocating a register for it.
439  * Thus we spill the Phi and all its operands to the same spill slot.
440  * The data Phi thereby becomes a memory Phi (PhiM).
441  *
442  * @param env        the spill environment
443  * @param spillinfo  spill info of the Phi node that should be spilled
444  *                   (its to_spill value must be a Phi)
445  */
446 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
447 {
448         ir_graph *irg   = env->irg;
449         ir_node  *phi   = spillinfo->to_spill;
450         ir_node  *block = get_nodes_block(phi);
451         ir_node  *unknown;
452         ir_node **ins;
453         spill_t  *spill;
454         int       i;
455         int       arity;
456
457         assert(is_Phi(phi));
458         assert(!get_opt_cse());
459         DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
460
461         /* build a new PhiM */
462         arity   = get_irn_arity(phi);
463         ins     = ALLOCAN(ir_node*, arity);
464         unknown = new_r_Unknown(irg, mode_M);
465         for (i = 0; i < arity; ++i) {
466                 ins[i] = unknown;
467         }
468
469         /* replace the spill list with the new PhiM... */
470         spill         = OALLOC(&env->obst, spill_t);
471         spill->after  = determine_spill_point(phi);
472         spill->spill  = be_new_Phi(block, arity, ins, mode_M, arch_no_register_req);
473         spill->next   = NULL;
474         sched_add_after(block, spill->spill);
475
476         spillinfo->spills = spill;
477         env->spilled_phi_count++;
478
479         for (i = 0; i < arity; ++i) {
480                 ir_node      *arg      = get_irn_n(phi, i);
481                 spill_info_t *arg_info = get_spillinfo(env, arg);
482
483                 determine_spill_costs(env, arg_info);
484                 spill_node(env, arg_info);
485
486                 set_irn_n(spill->spill, i, arg_info->spills->spill);
487         }
488         DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi,
489              spill->spill));
490 }
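
/*
 * Sketch of the transformation performed above (schematic, operand order
 * preserved): a spilled data Phi
 *
 *   Phi(a, b)
 *
 * becomes a memory Phi whose operands are the spills of its arguments:
 *
 *   PhiM(Spill(a), Spill(b))
 *
 * so no register has to be allocated for the Phi itself.
 */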
491
492 /**
493  * Spill a node.
494  *
495  * @param env        the spill environment
496  * @param spillinfo  spill info of the node that should be spilled
497  */
498 static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
499 {
500         /* node is already spilled */
501         if (spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
502                 return;
503
504         if (spillinfo->spilled_phi) {
505                 spill_phi(env, spillinfo);
506         } else {
507                 spill_irn(env, spillinfo);
508         }
509 }
510
511 /*
512  *
513  *  ____                      _            _       _ _
514  * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
515  * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
516  * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
517  * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
518  *
519  */
520
521 /**
522  * Tests whether value @p arg is available before node @p reloader
523  * @returns 1 if value is available, 0 otherwise
524  */
525 static int is_value_available(spill_env_t *env, const ir_node *arg,
526                               const ir_node *reloader)
527 {
528         if (is_Unknown(arg) || is_NoMem(arg))
529                 return 1;
530
531         if (be_is_Spill(skip_Proj_const(arg)))
532                 return 1;
533
534         if (arg == get_irg_frame(env->irg))
535                 return 1;
536
537         (void)reloader;
538
539         if (get_irn_mode(arg) == mode_T)
540                 return 0;
541
542         /*
543          * Ignore registers are always available
544          */
545         if (arch_irn_is_ignore(arg))
546                 return 1;
547
548         return 0;
549 }
550
551 /**
552  * Check if a node is rematerializable. This tests the following conditions:
553  *
554  * - The node itself is rematerializable
555  * - All arguments of the node are available or rematerializable themselves
556  * - The costs of the rematerialization stay below reload_cost + spill_cost
557  *
558  * Returns the costs needed for rematerialization, or a value
559  * >= REMAT_COST_INFINITE if rematerialization is not possible.
560  */
561 static int check_remat_conditions_costs(spill_env_t *env,
562                 const ir_node *spilled, const ir_node *reloader, int parentcosts)
563 {
564         int i, arity;
565         int argremats;
566         int costs = 0;
567         const ir_node *insn = skip_Proj_const(spilled);
568
569         assert(!be_is_Spill(insn));
570         if (!arch_irn_is(insn, rematerializable))
571                 return REMAT_COST_INFINITE;
572
573         if (be_is_Reload(insn)) {
574                 costs += 2;
575         } else {
576                 costs += arch_get_op_estimated_cost(insn);
577         }
578         if (parentcosts + costs >= env->reload_cost + env->spill_cost) {
579                 return REMAT_COST_INFINITE;
580         }
581         /* never rematerialize a node which modifies the flags.
582          * (it would be better to test whether the flags are actually live
583          * at the reload point...)
584          */
585         if (arch_irn_is(insn, modify_flags)) {
586                 return REMAT_COST_INFINITE;
587         }
588
589         argremats = 0;
590         for (i = 0, arity = get_irn_arity(insn); i < arity; ++i) {
591                 ir_node *arg = get_irn_n(insn, i);
592
593                 if (is_value_available(env, arg, reloader))
594                         continue;
595
596                 /* we have to rematerialize the argument as well */
597                 ++argremats;
598                 if (argremats > 1) {
599                         /* we only support rematerializing 1 argument at the moment,
600                          * as multiple arguments could increase register pressure */
601                         return REMAT_COST_INFINITE;
602                 }
603
604                 costs += check_remat_conditions_costs(env, arg, reloader,
605                                                       parentcosts + costs);
606                 if (parentcosts + costs >= env->reload_cost + env->spill_cost)
607                         return REMAT_COST_INFINITE;
608         }
609
610         return costs;
611 }
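
/*
 * Worked example with hypothetical costs (the real values come from the
 * arch_env): assume spill_cost = 4 and reload_cost = 3, so rematerialization
 * is abandoned once the accumulated costs reach 4 + 3 = 7. A node with an
 * estimated cost of 1 whose single unavailable argument also costs 1 (and
 * where neither node modifies the flags) yields remat costs of 2; since
 * 2 - reload_cost = -1 < 0, be_insert_spills_reloads() will prefer the
 * rematerialization over a reload.
 */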
612
613 /**
614  * Re-materialize a node.
615  *
616  * @param env       the spill environment
617  * @param spilled   the node that was spilled
618  * @param reloader  an irn that requires a reload
619  */
620 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
621 {
622         int i, arity;
623         ir_node *res;
624         ir_node *bl;
625         ir_node **ins;
626
627         if (is_Block(reloader)) {
628                 bl = reloader;
629         } else {
630                 bl = get_nodes_block(reloader);
631         }
632
633         ins = ALLOCAN(ir_node*, get_irn_arity(spilled));
634         for (i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
635                 ir_node *arg = get_irn_n(spilled, i);
636
637                 if (is_value_available(env, arg, reloader)) {
638                         ins[i] = arg;
639                 } else {
640                         ins[i] = do_remat(env, arg, reloader);
641                         /* don't count the argument rematerialization as an extra remat */
642                         --env->remat_count;
643                 }
644         }
645
646         /* create a copy of the node */
647         res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
648                           get_irn_op(spilled), get_irn_mode(spilled),
649                           get_irn_arity(spilled), ins);
650         copy_node_attr(env->irg, spilled, res);
651         arch_env_mark_remat(env->arch_env, res);
652
653         DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
654
655         if (! is_Proj(res)) {
656                 /* insert in schedule */
657                 sched_reset(res);
658                 sched_add_before(reloader, res);
659                 ++env->remat_count;
660         }
661
662         return res;
663 }
664
665 double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
666 {
667         ir_node *block = get_nodes_block(before);
668         double   freq  = get_block_execfreq(env->exec_freq, block);
669         (void) to_spill;
670
671         return env->spill_cost * freq;
672 }
673
674 unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
675                                        const ir_node *before)
676 {
677         if (be_do_remats) {
678                 /* is the node rematerializable? */
679                 unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
680                 if (costs < (unsigned) env->reload_cost)
681                         return costs;
682         }
683
684         return env->reload_cost;
685 }
686
687 double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
688 {
689         ir_node      *block = get_nodes_block(before);
690         double        freq  = get_block_execfreq(env->exec_freq, block);
691
692         if (be_do_remats) {
693                 /* is the node rematerializable? */
694                 int costs = check_remat_conditions_costs(env, to_spill, before, 0);
695                 if (costs < env->reload_cost)
696                         return costs * freq;
697         }
698
699         return env->reload_cost * freq;
700 }
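
/*
 * In short, the estimated reload costs are
 *
 *   min(remat_costs, reload_cost) * execfreq(block(before))
 *
 * when rematerialization is enabled, and reload_cost * execfreq otherwise.
 * E.g. with reload_cost = 3 and a block frequency of 2.0, a value that can
 * be rematted for 1 is accounted with 2.0, a non-remattable one with 6.0
 * (the concrete numbers are only illustrative).
 */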
701
702 int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat,
703                            const ir_node *before)
704 {
705         return check_remat_conditions_costs(env, to_remat, before, 0) < REMAT_COST_INFINITE;
706 }
707
708 double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
709                                    ir_node *block, int pos)
710 {
711         ir_node *before = get_block_insertion_point(block, pos);
712         return be_get_reload_costs(env, to_spill, before);
713 }
714
715 ir_node *be_new_spill(ir_node *value, ir_node *after)
716 {
717         ir_graph                    *irg       = get_irn_irg(value);
718         ir_node                     *frame     = get_irg_frame(irg);
719         const arch_register_class_t *cls       = arch_get_irn_reg_class(value);
720         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
721         ir_node                     *block     = get_block(after);
722         ir_node                     *spill
723                 = be_new_Spill(cls, cls_frame, block, frame, value);
724
725         sched_add_after(after, spill);
726         return spill;
727 }
728
729 ir_node *be_new_reload(ir_node *value, ir_node *spill, ir_node *before)
730 {
731         ir_graph *irg   = get_irn_irg(value);
732         ir_node  *frame = get_irg_frame(irg);
733         ir_node  *block = get_block(before);
734         const arch_register_class_t *cls       = arch_get_irn_reg_class(value);
735         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
736         ir_mode                     *mode      = get_irn_mode(value);
737         ir_node  *reload;
738
739         assert(be_is_Spill(spill) || is_Phi(spill));
740         assert(get_irn_mode(spill) == mode_M);
741
742         reload = be_new_Reload(cls, cls_frame, block, frame, spill, mode);
743         sched_add_before(before, reload);
744
745         return reload;
746 }
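
/*
 * A minimal sketch of pairing the two helpers above; value, after and
 * before are assumed to be provided by the caller and to satisfy the usual
 * dominance requirement (the spill must dominate the reload).
 *
 *   ir_node *spill  = be_new_spill(value, after);          // after "after"
 *   ir_node *reload = be_new_reload(value, spill, before); // before "before"
 */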
747
748 /*
749  *  ___                     _     ____      _                 _
750  * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
751  *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
752  *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
753  * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
754  *
755  */
756
757 /**
758  * Analyzes how to best spill a node and determines the costs for that.
759  */
760 static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
761 {
762         ir_node       *to_spill = spillinfo->to_spill;
763         const ir_node *insn     = skip_Proj_const(to_spill);
764         ir_node       *spill_block;
765         spill_t       *spill;
766         double         spill_execfreq;
767
768         /* already calculated? */
769         if (spillinfo->spill_costs >= 0)
770                 return;
771
772         assert(!arch_irn_is(insn, dont_spill));
773         assert(!be_is_Reload(insn));
774
775         /* some backends have virtual noreg/unknown nodes that are not scheduled
776          * and simply always available.
777          * TODO: this is kinda hairy, the NoMem is correct for an Unknown as Phi
778          * predecessor (of a PhiM) but this test might match other things too...
779          */
780         if (!sched_is_scheduled(insn)) {
781                 ir_graph *irg = get_irn_irg(to_spill);
782                 /* override spillinfos or create a new one */
783                 spill_t *spill = OALLOC(&env->obst, spill_t);
784                 spill->after = NULL;
785                 spill->next  = NULL;
786                 spill->spill = get_irg_no_mem(irg);
787
788                 spillinfo->spills      = spill;
789                 spillinfo->spill_costs = 0;
790
791                 DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
792                 return;
793         }
794
795         spill_block    = get_nodes_block(insn);
796         spill_execfreq = get_block_execfreq(env->exec_freq, spill_block);
797
798         if (spillinfo->spilled_phi) {
799                 /* TODO calculate correct costs...
800                  * (though we can't remat this node anyway so no big problem) */
801                 spillinfo->spill_costs = env->spill_cost * spill_execfreq;
802                 return;
803         }
804
805         if (spillinfo->spills != NULL) {
806                 spill_t *s;
807                 double   spills_execfreq;
808
809                 /* calculate sum of execution frequencies of individual spills */
810                 spills_execfreq = 0;
811                 s               = spillinfo->spills;
812                 for ( ; s != NULL; s = s->next) {
813                         ir_node *spill_block = get_block(s->after);
814                         double   freq = get_block_execfreq(env->exec_freq, spill_block);
815
816                         spills_execfreq += freq;
817                 }
818
819                 DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
820                     spills_execfreq * env->spill_cost,
821                     spill_execfreq * env->spill_cost));
822
823                 /* multiple/late spills are advantageous -> return */
824                 if (spills_execfreq < spill_execfreq) {
825                         DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
826                         spillinfo->spill_costs = spills_execfreq * env->spill_cost;
827                         return;
828                 }
829         }
830
831         /* override spillinfos or create a new one */
832         spill        = OALLOC(&env->obst, spill_t);
833         spill->after = determine_spill_point(to_spill);
834         spill->next  = NULL;
835         spill->spill = NULL;
836
837         spillinfo->spills      = spill;
838         spillinfo->spill_costs = spill_execfreq * env->spill_cost;
839         DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
840 }
841
842 void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
843 {
844         const spill_info_t *si = get_spillinfo(env, irn);
845         ir_node *start_block   = get_irg_start_block(get_irn_irg(irn));
846         int n_blocks           = get_Block_dom_max_subtree_pre_num(start_block);
847         bitset_t *reloads      = bitset_alloca(n_blocks);
848         reloader_t *r;
849         spill_t *s;
850
851         if (si == NULL)
852                 return;
853
854         /* Fill the bitset with the dominance pre-order numbers
855          * of the blocks the reloads are located in. */
856         for (r = si->reloaders; r != NULL; r = r->next) {
857                 ir_node *bl = get_nodes_block(r->reloader);
858                 bitset_set(reloads, get_Block_dom_tree_pre_num(bl));
859         }
860
861         /* Now, cancel out all the blocks that are dominated by each spill.
862          * If the bitset is not empty after that, we have reloads that are
863          * not dominated by any spill. */
864         for (s = si->spills; s != NULL; s = s->next) {
865                 ir_node *bl = get_nodes_block(s->after);
866                 int start   = get_Block_dom_tree_pre_num(bl);
867                 int end     = get_Block_dom_max_subtree_pre_num(bl);
868
869                 bitset_clear_range(reloads, start, end);
870         }
871
872         if (!bitset_is_empty(reloads))
873                 be_add_spill(env, si->to_spill, si->to_spill);
874 }
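
/*
 * Illustrative run (hypothetical dominance pre-order numbers): suppose the
 * reloads sit in blocks with pre-order numbers {5, 9} and the only spill
 * sits in a block whose dominance subtree covers the range [4, 7]. Clearing
 * [4, 7] leaves bit 9 set, i.e. one reload is not dominated by any spill,
 * so an additional spill is requested right after the definition of the
 * value (be_add_spill() with the value itself as the "after" position).
 */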
875
876 void be_insert_spills_reloads(spill_env_t *env)
877 {
878         const ir_exec_freq    *exec_freq  = env->exec_freq;
879         size_t                 n_mem_phis = ARR_LEN(env->mem_phis);
880         spill_info_t          *si;
881         size_t                 i;
882
883         be_timer_push(T_RA_SPILL_APPLY);
884
885         /* create all PhiMs first; this is needed so that Phis depending on
886            spilled Phis work correctly */
887         for (i = 0; i < n_mem_phis; ++i) {
888                 spill_info_t *info = env->mem_phis[i];
889                 spill_node(env, info);
890         }
891
892         /* process each spilled node */
893         foreach_set(env->spills, spill_info_t*, si) {
894                 ir_node  *to_spill        = si->to_spill;
895                 ir_node **copies          = NEW_ARR_F(ir_node*, 0);
896                 double    all_remat_costs = 0; /* costs if we would remat all reloaders */
897                 bool      force_remat     = false;
898                 reloader_t *rld;
899
900                 DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
901
902                 determine_spill_costs(env, si);
903
904                 /* determine possibility of rematerialisations */
905                 if (be_do_remats) {
906                         /* calculate the cost savings for each individual value if it
907                            were rematted instead of reloaded */
908                         for (rld = si->reloaders; rld != NULL; rld = rld->next) {
909                                 double   freq;
910                                 int      remat_cost;
911                                 int      remat_cost_delta;
912                                 ir_node *block;
913                                 ir_node *reloader = rld->reloader;
914
915                                 if (rld->rematted_node != NULL) {
916                                         DBG((dbg, LEVEL_2, "\tforced remat %+F before %+F\n",
917                                              rld->rematted_node, reloader));
918                                         continue;
919                                 }
920                                 if (rld->remat_cost_delta >= REMAT_COST_INFINITE) {
921                                         DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
922                                              reloader));
923                                         all_remat_costs = REMAT_COST_INFINITE;
924                                         continue;
925                                 }
926
927                                 remat_cost  = check_remat_conditions_costs(env, to_spill,
928                                                                            reloader, 0);
929                                 if (remat_cost >= REMAT_COST_INFINITE) {
930                                         DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
931                                              reloader));
932                                         rld->remat_cost_delta = REMAT_COST_INFINITE;
933                                         all_remat_costs       = REMAT_COST_INFINITE;
934                                         continue;
935                                 }
936
937                                 remat_cost_delta      = remat_cost - env->reload_cost;
938                                 rld->remat_cost_delta = remat_cost_delta;
939                                 block                 = is_Block(reloader) ? reloader : get_nodes_block(reloader);
940                                 freq                  = get_block_execfreq(exec_freq, block);
941                                 all_remat_costs      += remat_cost_delta * freq;
942                                 DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
943                                      "%d (rel %f)\n", reloader, remat_cost_delta,
944                                      remat_cost_delta * freq));
945                         }
946                         if (all_remat_costs < REMAT_COST_INFINITE) {
947                                 /* we don't need the costs for the spill if we can remat
948                                    all reloaders */
949                                 all_remat_costs -= si->spill_costs;
950
951                                 DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
952                                      env->spill_cost, si->spill_costs));
953                         }
954
955                         if (all_remat_costs < 0) {
956                                 DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
957                                      all_remat_costs));
958                                 force_remat = true;
959                         }
960                 }
961
962                 /* go through all reloads for this spill */
963                 for (rld = si->reloaders; rld != NULL; rld = rld->next) {
964                         ir_node *copy; /* a reload is a "copy" of the original value */
965
966                         if (rld->rematted_node != NULL) {
967                                 copy = rld->rematted_node;
968                                 sched_add_before(rld->reloader, copy);
969                         } else if (be_do_remats &&
970                                         (force_remat || rld->remat_cost_delta < 0)) {
971                                 copy = do_remat(env, to_spill, rld->reloader);
972                         } else {
973                                 /* make sure we have a spill */
974                                 spill_node(env, si);
975
976                                 /* create a reload; use the first spill for now, SSA
977                                  * reconstruction for memory comes below */
978                                 assert(si->spills != NULL);
979                                 copy = arch_env_new_reload(env->arch_env, si->to_spill,
980                                                            si->spills->spill, rld->reloader);
981                                 env->reload_count++;
982                         }
983
984                         DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
985                              copy, to_spill, rld->reloader));
986                         ARR_APP1(ir_node*, copies, copy);
987                 }
988
989                 /* if we had any reloads or remats, then we need to reconstruct the
990                  * SSA form for the spilled value */
991                 if (ARR_LEN(copies) > 0) {
992                         be_ssa_construction_env_t senv;
993                         /* be_lv_t *lv = be_get_irg_liveness(env->irg); */
994
995                         be_ssa_construction_init(&senv, env->irg);
996                         be_ssa_construction_add_copy(&senv, to_spill);
997                         be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
998                         be_ssa_construction_fix_users(&senv, to_spill);
999
1000 #if 0
1001                         /* no need to enable this as long as we invalidate liveness
1002                            after this function... */
1003                         be_ssa_construction_update_liveness_phis(&senv);
1004                         be_liveness_update(to_spill);
1005                         len = ARR_LEN(copies);
1006                         for (i = 0; i < len; ++i) {
1007                                 be_liveness_update(lv, copies[i]);
1008                         }
1009 #endif
1010                         be_ssa_construction_destroy(&senv);
1011                 }
1012                 /* need to reconstruct SSA form if we had multiple spills */
1013                 if (si->spills != NULL && si->spills->next != NULL) {
1014                         spill_t *spill;
1015                         int      spill_count = 0;
1016
1017                         be_ssa_construction_env_t senv;
1018
1019                         be_ssa_construction_init(&senv, env->irg);
1020                         spill = si->spills;
1021                         for ( ; spill != NULL; spill = spill->next) {
1022                                 /* maybe we rematerialized the value and need no spill */
1023                                 if (spill->spill == NULL)
1024                                         continue;
1025                                 be_ssa_construction_add_copy(&senv, spill->spill);
1026                                 spill_count++;
1027                         }
1028                         if (spill_count > 1) {
1029                                 /* all reloads are attached to the first spill, fix them now */
1030                                 be_ssa_construction_fix_users(&senv, si->spills->spill);
1031                         }
1032
1033                         be_ssa_construction_destroy(&senv);
1034                 }
1035
1036                 DEL_ARR_F(copies);
1037                 si->reloaders = NULL;
1038         }
1039
1040         stat_ev_dbl("spill_spills", env->spill_count);
1041         stat_ev_dbl("spill_reloads", env->reload_count);
1042         stat_ev_dbl("spill_remats", env->remat_count);
1043         stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);
1044
1045         /* Matze: In theory be_ssa_construction should take care of the liveness...
1046          * try to disable this again in the future */
1047         be_invalidate_live_sets(env->irg);
1048
1049         be_remove_dead_nodes_from_schedule(env->irg);
1050
1051         be_timer_pop(T_RA_SPILL_APPLY);
1052 }
1053
1054 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill)
1055 void be_init_spill(void)
1056 {
1057         FIRM_DBG_REGISTER(dbg, "firm.be.spill");
1058 }