2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Main spill driver.
23 * @author Daniel Grund, Sebastian Hack, Matthias Braun
36 #include "iredges_t.h"
37 #include "irbackedge_t.h"
47 #include "irnodeset.h"
50 #include "besched_t.h"
54 #include "bechordal_t.h"
55 #include "bejavacoal.h"
56 #include "benodesets.h"
57 #include "bespilloptions.h"
58 #include "bestatevent.h"
59 #include "bessaconstr.h"
61 #include "beintlive_t.h"
63 /* only rematerialise when costs are less than REMAT_COST_LIMIT */
64 /* TODO determine a good value here... */
65 #define REMAT_COST_LIMIT 10
67 typedef struct _reloader_t reloader_t;
/* A reloader_t records one place where a spilled value must be made available
 * again. NOTE(review): the struct's opening and its `next`/`reloader` fields
 * are elided in this extract — they are assigned in be_add_remat/be_add_reload
 * below; confirm against the full file. */
72 	ir_node *rematted_node;	/**< pre-built remat node scheduled instead of a reload (set by be_add_remat, NULL for plain reloads) */
73 	int allow_remat;     /**< the node may be rematted instead of reloaded if global remat option is on */
76 typedef struct _spill_info_t {
	/* Per-value spill bookkeeping: the value, the reload points collected for
	 * it, and the register class reloads must use.
	 * NOTE(review): the declarations of the `spill` and `old_spill` fields are
	 * elided in this extract (both are referenced by the code below). */
77 	/** the value that should get spilled */
78 	ir_node *spilled_node;
79 	/** list of places where the value should get reloaded */
80 	reloader_t *reloaders;
82 	/** the spill node, or a PhiM node */
84 	/** if we had the value of a phi spilled before but not the phi itself then
85 	 * this field contains the spill for the phi value */
88 	/** the register class in which the reload should be placed */
89 	const arch_register_class_t *reload_cls;
	/* Fields of the spill environment. NOTE(review): the struct opening and
	 * the irg/birg/obst members are elided in this extract (they are used
	 * throughout the functions below). */
93 	const arch_env_t *arch_env;
97 	int spill_cost;     /**< the cost of a single spill node */
98 	int reload_cost;    /**< the cost of a reload node */
99 	set *spills;        /**< all spill_info_t's, which must be placed */
100 	ir_nodeset_t mem_phis;     /**< set of all special spilled phis. allocated and freed separately */
102 	DEBUG_ONLY(firm_dbg_module_t *dbg;)
106 * Compare two spill infos.
108 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
109 const spill_info_t *xx = x;
110 const spill_info_t *yy = y;
111 return xx->spilled_node != yy->spilled_node;
115 * Returns spill info for a specific value (returns NULL if the info doesn't
118 static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) {
120 int hash = nodeset_hash(value);
122 info.spilled_node = value;
124 return set_find(env->spills, &info, sizeof(info), hash);
128 * Returns spill info for a specific value (the value that is to be spilled)
130 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
131 spill_info_t info, *res;
132 int hash = nodeset_hash(value);
134 info.spilled_node = value;
135 res = set_find(env->spills, &info, sizeof(info), hash);
138 info.reloaders = NULL;
140 info.old_spill = NULL;
141 info.reload_cls = NULL;
142 res = set_insert(env->spills, &info, sizeof(info), hash);
149 /* Sets the debug module of a spill environment. */
150 void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
155 /* Creates a new spill environment. */
156 spill_env_t *be_new_spill_env(be_irg_t *birg) {
157 	spill_env_t *env = xmalloc(sizeof(env[0]));
158 	env->spills = new_set(cmp_spillinfo, 1024);
159 	env->irg    = be_get_birg_irg(birg);
161 	env->arch_env = birg->main_env->arch_env;
162 	ir_nodeset_init(&env->mem_phis);
	/* fixed default cost model for now, see TODO below */
163 	// TODO, ask backend about costs..., or even ask backend whether we should
166 	env->reload_cost = 5;
167 	obstack_init(&env->obst);
	/* NOTE(review): the birg assignment, the spill_cost initialisation and
	 * the trailing `return env;` are elided in this extract — confirm against
	 * the full file. */
171 /* Deletes a spill environment. */
172 void be_delete_spill_env(spill_env_t *env) {
	/* release all per-environment containers */
173 	del_set(env->spills);
174 	ir_nodeset_destroy(&env->mem_phis);
175 	obstack_free(&env->obst, NULL);
	/* NOTE(review): env itself is xmalloc'ed in be_new_spill_env but no
	 * free(env) is visible in this extract — confirm the full file frees it,
	 * otherwise this leaks. */
181 * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___
182 * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
183 * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \
184 * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
188 void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before, ir_node *rematted_node) {
189 spill_info_t *spill_info;
190 reloader_t *reloader;
192 spill_info = get_spillinfo(env, to_spill);
194 /* add the remat information */
195 reloader = obstack_alloc(&env->obst, sizeof(reloader[0]));
196 reloader->next = spill_info->reloaders;
197 reloader->reloader = before;
198 reloader->rematted_node = rematted_node;
199 reloader->allow_remat = 1;
201 spill_info->reloaders = reloader;
203 DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
/**
 * Records that value @p to_spill must be available again directly before node
 * @p before, reloaded into register class @p reload_cls; @p allow_remat
 * permits rematerialisation instead of an actual reload.
 * NOTE(review): the declarations of `info`, `rel` and the loop indices are
 * elided in this extract, as are several closing braces.
 */
207 void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before,
208 		const arch_register_class_t *reload_cls, int allow_remat)
213 	info = get_spillinfo(env, to_spill);
215 	if (is_Phi(to_spill)) {
218 		/* create spillinfos for the phi arguments */
219 		for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
220 			ir_node *arg = get_irn_n(to_spill, i);
221 			get_spillinfo(env, arg);
225 		// hackery... sometimes the morgan algo spilled the value of a phi,
226 		// the belady algo decides later to spill the whole phi, then sees the
227 		// spill node and adds a reload for that spill node, problem is the
228 		// reload gets attach to that same spill (and is totally unnecessary)
229 		if (info->old_spill != NULL &&
230 			(before == info->old_spill || value_dominates(before, info->old_spill)))
232 			printf("spilledphi hack was needed...\n");
233 			before = sched_next(info->old_spill);
	/* append the reload request to the value's reloader list */
238 	/* put reload into list */
239 	rel = obstack_alloc(&env->obst, sizeof(rel[0]));
240 	rel->next = info->reloaders;
241 	rel->reloader = before;
242 	rel->rematted_node = NULL;
243 	rel->allow_remat = allow_remat;
245 	info->reloaders = rel;
	/* all reloads of one value must agree on the register class */
246 	assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
247 	info->reload_cls = reload_cls;
249 	DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
250 		to_spill, before, allow_remat ? "" : " not"));
254 * Returns the point at which you can insert a node that should be executed
255 * before block @p block when coming from pred @p pos.
257 static ir_node *get_block_insertion_point(ir_node *block, int pos) {
258 	ir_node *predblock, *last;
260 	/* simply add the reload to the beginning of the block if we only have 1
261 	 * predecessor. We don't need to check for phis as there can't be any in a
262 	 * block with only 1 pred. */
263 	if(get_Block_n_cfgpreds(block) == 1) {
264 		assert(!is_Phi(sched_first(block)));
265 		return sched_first(block);
268 	/* We have to reload the value in pred-block */
269 	predblock = get_Block_cfgpred_block(block, pos);
270 	last = sched_last(predblock);
272 	/* we might have projs and keepanys behind the jump... */
273 	while(is_Proj(last) || be_is_Keep(last)) {
274 		last = sched_prev(last);
275 		assert(!sched_is_end(last));
	/* NOTE(review): the lines between the loop above and the statement below
	 * are elided in this extract — presumably a check whether `last` is a
	 * control-flow op; confirm against the full file. */
279 		last = sched_next(last);
280 		// last node must be a cfop, only exception is the start block
281 		assert(last == get_irg_start_block(get_irn_irg(block)));
	/* the final `return last;` is elided in this extract */
284 	// add the reload before the (cond-)jump
288 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos,
289 const arch_register_class_t *reload_cls, int allow_remat)
291 ir_node *before = get_block_insertion_point(block, pos);
292 be_add_reload(env, to_spill, before, reload_cls, allow_remat);
/**
 * Marks a Phi for being spilled as a memory phi (PhiM): the phi is entered
 * into the mem_phis set and spill infos are created for all its arguments.
 * NOTE(review): the declarations of `spill` and the loop indices, plus the
 * tail of the function (which apparently unschedules an existing spill of the
 * phi value), are elided in this extract.
 */
295 void be_spill_phi(spill_env_t *env, ir_node *node) {
299 	assert(is_Phi(node));
301 	ir_nodeset_insert(&env->mem_phis, node);
303 	// create spillinfos for the phi arguments
304 	spill = get_spillinfo(env, node);
305 	for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
306 		ir_node *arg = get_irn_n(node, i);
307 		get_spillinfo(env, arg);
310 	// if we had a spill for the phi value before, then remove this spill from
311 	// schedule, as we will remove it in the insert spill/reload phase
312 	if(spill->spill != NULL && !is_Phi(spill->spill)) {
313 		assert(spill->old_spill == NULL);
314 		spill->old_spill = spill->spill;
321 * / ___|_ __ ___ __ _| |_ ___ / ___| _ __ (_) | |___
322 * | | | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
323 * | |___| | | __/ (_| | || __/ ___) | |_) | | | \__ \
324 * \____|_| \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
329 * Schedules a node after an instruction. That is the place after all projs and
330 * phis that are scheduled after the instruction. This function also skips phi
331 * nodes at the beginning of a block
333 static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
334 	ir_node *next = sched_next(sched_after);
	/* skip over Projs and Phis: the new node must come after them */
335 	while(is_Proj(next) || is_Phi(next)) {
336 		next = sched_next(next);
338 	assert(next != NULL);
	/* NOTE(review): the `} else {` between the two branches below is elided
	 * in this extract. */
340 	if(sched_is_end(next)) {
341 		sched_add_after(sched_last(get_nodes_block(sched_after)), node);
343 		sched_add_before(next, node);
350 * @param senv      the spill environment
351 * @param irn       the node that should be spilled
352 * @param ctx_irn   an user of the spilled node
354 * @return a be_Spill node
356 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) {
357 	optimization_state_t opt;
358 	ir_node *to_spill = spillinfo->spilled_node;
360 	DBG((env->dbg, LEVEL_1, "spilling %+F ... ", to_spill));
362 	/* Trying to spill an already spilled value, no need for a new spill
363 	 * node then, we can simply connect to the same one for this reload
365 	 * Normally reloads get simply rematerialized instead of spilled again; this
366 	 * can happen annyway when the reload is the pred of a phi to spill)
	/* a Reload's memory input is the spill that produced the value: reuse it */
368 	if (be_is_Reload(to_spill)) {
369 		spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem);
370 		DB((env->dbg, LEVEL_1, "skip reload, using existing spill %+F\n", spillinfo->spill));
	/* NOTE(review): an early return after the branch above is elided in this
	 * extract. */
374 	assert(!(arch_irn_is(env->arch_env, to_spill, dont_spill)
375 				&& "Attempt to spill a node marked 'dont_spill'"));
377 	/* some backends have virtual noreg/unknown nodes that are not scheduled */
378 	if(!sched_is_scheduled(to_spill)) {
379 		spillinfo->spill = new_NoMem();
385 		We switch on optimizations here to get CSE. This is needed as some
386 		backends have some extra spill phases and we want to make use of those
387 		spills instead of creating new ones.
389 	save_optimization_state(&opt);
391 	spillinfo->spill = be_spill(env->arch_env, to_spill);
392 	restore_optimization_state(&opt);
	/* if CSE handed back an existing spill it is already scheduled */
393 	if (! sched_is_scheduled(spillinfo->spill)) {
394 		DB((env->dbg, LEVEL_1, "add spill %+F after %+F\n", spillinfo->spill, to_spill));
395 		sched_add_after_insn(to_spill, spillinfo->spill);
397 		DB((env->dbg, LEVEL_1, "re-using spill %+F after %+F\n", spillinfo->spill, to_spill));
/* forward declaration: spill_phi below recursively spills phi arguments */
401 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
404 * If the first usage of a Phi result would be out of memory
405 * there is no sense in allocating a register for it.
406 * Thus we spill it and all its operands to the same spill slot.
407 * Therefore the phi/dataB becomes a phi/Memory
409 * @param senv      the spill environment
410 * @param phi       the Phi node that should be spilled
411 * @param ctx_irn   an user of the spilled node
413 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) {
414 	ir_node *phi = spillinfo->spilled_node;
416 	int arity = get_irn_arity(phi);
417 	ir_node *block = get_nodes_block(phi);
422 	DBG((env->dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
	/* start with Bad inputs; they are replaced by the argument spills below */
423 	/* build a new PhiM */
424 	ins = alloca(sizeof(ir_node*) * arity);
425 	for(i = 0; i < arity; ++i) {
426 		ins[i] = get_irg_bad(env->irg);
428 	spillinfo->spill = new_r_Phi(env->irg, block, arity, ins, mode_M);
	/* spill every argument and wire its spill as the PhiM's input */
430 	for(i = 0; i < arity; ++i) {
431 		ir_node *arg = get_irn_n(phi, i);
432 		spill_info_t *arg_info = get_spillinfo(env, arg);
434 		spill_node(env, arg_info);
436 		set_irn_n(spillinfo->spill, i, arg_info->spill);
438 	DBG((env->dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi, spillinfo->spill));
	/* if the phi VALUE had been spilled earlier (see be_add_reload hack),
	 * redirect all users of that obsolete spill to the new PhiM */
440 	// rewire reloads from old_spill to phi
441 	if (spillinfo->old_spill != NULL) {
442 		const ir_edge_t *edge, *next;
443 		ir_node *old_spill = spillinfo->old_spill;
445 		DBG((env->dbg, LEVEL_1, "old spill found, rewiring reloads:\n"));
447 		foreach_out_edge_safe(old_spill, edge, next) {
448 			ir_node *reload = get_edge_src_irn(edge);
449 			int pos = get_edge_src_pos(edge);
451 			DBG((env->dbg, LEVEL_1, "\tset input %d of %+F to %+F\n", pos, reload, spillinfo->spill));
453 			assert(be_is_Reload(reload) || is_Phi(reload));
454 			set_irn_n(reload, pos, spillinfo->spill);
456 		DBG((env->dbg, LEVEL_1, "\tset input of %+F to BAD\n", old_spill));
457 		set_irn_n(old_spill, be_pos_Spill_val, new_Bad());
458 		//sched_remove(old_spill);
459 		spillinfo->old_spill = NULL;
466 * @param senv      the spill environment
467 * @param to_spill  the node that should be spilled
469 static void spill_node(spill_env_t *env, spill_info_t *spillinfo) {
	/* idempotent: a value is spilled at most once */
472 	// the node should be tagged for spilling already...
473 	if(spillinfo->spill != NULL)
476 	to_spill = spillinfo->spilled_node;
	/* memory phis get the PhiM treatment, everything else a plain spill */
478 	if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
479 		spill_phi(env, spillinfo);
481 		spill_irn(env, spillinfo);
488 * | _ \ ___ _ __ ___ __ _| |_ ___ _ __(_) __ _| (_)_______
489 * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_ / _ \
490 * | _ < __/ | | | | | (_| | || __/ | | | (_| | | |/ / __/
491 * |_| \_\___|_| |_| |_|\__,_|\__\___|_| |_|\__,_|_|_/___\___|
496 * Tests whether value @p arg is available before node @p reloader
497 * @returns 1 if value is available, 0 otherwise
499 static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader) {
	/* NOTE(review): the return statements of the individual branches below
	 * are elided in this extract. */
500 	if(is_Unknown(arg) || arg == new_NoMem())
506 	if(arg == get_irg_frame(env->irg))
509 	// hack for now (happens when command should be inserted at end of block)
510 	if(is_Block(reloader)) {
515 	 * Ignore registers are always available
517 	if(arch_irn_is(env->arch_env, arg, ignore)) {
522 	/* the following test does not work while spilling,
523 	 * because the liveness info is not adapted yet to the effects of the
524 	 * additional spills/reloads.
527 	/* we want to remat before the insn reloader
528 	 * thus an arguments is alive if
529 	 *   - it interferes with the reloaders result
530 	 *   - or it is (last-) used by reloader itself
532 	if (values_interfere(env->birg->lv, reloader, arg)) {
536 	arity = get_irn_arity(reloader);
537 	for (i = 0; i < arity; ++i) {
538 		ir_node *rel_arg = get_irn_n(reloader, i);
548 * Checks whether the node can principally be rematerialized
550 static int is_remat_node(spill_env_t *env, ir_node *node) {
551 	const arch_env_t *arch_env = env->arch_env;
	/* spills themselves are never remat candidates */
553 	assert(!be_is_Spill(node));
	/* NOTE(review): the return statements of the branches below are elided in
	 * this extract. */
555 	if(arch_irn_is(arch_env, node, rematerializable)) {
559 	if(be_is_StackParam(node))
566 * Check if a node is rematerializable. This tests for the following conditions:
568 *  - The node itself is rematerializable
569 *  - All arguments of the node are available or also rematerialisable
570 *  - The costs for the rematerialisation operation is less or equal a limit
572 * Returns the costs needed for rematerialisation or something
573 * > REMAT_COST_LIMIT if remat is not possible.
575 static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_node *reloader, int parentcosts) {
580 	if(!is_remat_node(env, spilled))
581 		return REMAT_COST_LIMIT;
	/* NOTE(review): the branch body for Reloads and the initialisation of
	 * `costs` are elided in this extract. */
583 	if(be_is_Reload(spilled)) {
586 	costs += arch_get_op_estimated_cost(env->arch_env, spilled);
	/* bail out early once the accumulated cost exceeds the budget */
588 	if(parentcosts + costs >= REMAT_COST_LIMIT) {
589 		return REMAT_COST_LIMIT;
	/* recursively account for arguments that are not directly available */
593 	for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
594 		ir_node *arg = get_irn_n(spilled, i);
596 		if(is_value_available(env, arg, reloader))
599 		// we have to rematerialize the argument as well...
601 		/* we only support rematerializing 1 argument at the moment,
602 		 * so that we don't have to care about register pressure
604 			return REMAT_COST_LIMIT;
608 		costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs);
609 		if(parentcosts + costs >= REMAT_COST_LIMIT)
610 			return REMAT_COST_LIMIT;
616 static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
617 int costs = check_remat_conditions_costs(env, spilled, reloader, 0);
619 return costs < REMAT_COST_LIMIT;
623 * Re-materialize a node.
625 * @param senv      the spill environment
626 * @param spilled   the node that was spilled
627 * @param reloader  a irn that requires a reload
629 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
	/* NOTE(review): the declarations of `res`, `bl`, `ins` and loop indices,
	 * the block-reloader branch body, and the final `return res;` are elided
	 * in this extract. */
635 	if(is_Block(reloader)) {
638 		bl = get_nodes_block(reloader);
	/* collect inputs: available values are used directly, the (single)
	 * unavailable argument is itself rematerialised recursively */
641 	ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
642 	for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
643 		ir_node *arg = get_irn_n(spilled, i);
645 		if(is_value_available(env, arg, reloader)) {
648 			ins[i] = do_remat(env, arg, reloader);
652 	/* create a copy of the node */
653 	res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
655 					  get_irn_mode(spilled),
656 					  get_irn_arity(spilled),
658 	copy_node_attr(spilled, res);
659 	new_backedge_info(res);
662 	DBG((env->dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
664 	/* insert in schedule */
665 	sched_add_before(reloader, res);
/**
 * Estimates the cost of making @p to_spill available before @p before:
 * the remat cost if rematerialisation is possible, only the reload cost if a
 * spill already exists, otherwise spill + reload cost.
 * NOTE(review): the `return costs;` after the remat check is elided in this
 * extract.
 */
670 int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) {
671 	spill_info_t *spill_info;
674 	// is the node rematerializable?
675 	int costs = check_remat_conditions_costs(env, to_spill, before, 0);
676 	if(costs < REMAT_COST_LIMIT)
680 	// do we already have a spill?
681 	spill_info = find_spillinfo(env, to_spill);
682 	if(spill_info != NULL && spill_info->spill != NULL)
683 		return env->reload_cost;
685 	return env->spill_cost + env->reload_cost;
688 int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
689 ir_node *before = get_block_insertion_point(block, pos);
690 return be_get_reload_costs(env, to_spill, before);
695 * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
696 * | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
697 * | || | | \__ \ __/ | | |_ | _ < __/ | (_) | (_| | (_| \__ \
698 * |___|_| |_|___/\___|_| \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
/**
 * Main driver: materialises all recorded spill/reload/remat requests into the
 * graph and schedule, then reconstructs SSA form for each spilled value.
 * NOTE(review): several declarations (si, rld, node, len, i and the
 * spills/reloads/remats statistics counters) and a number of closing braces
 * are elided in this extract.
 */
702 void be_insert_spills_reloads(spill_env_t *env) {
703 	const arch_env_t *arch_env = env->arch_env;
708 	ir_nodeset_iterator_t iter;
711 	/* create all phi-ms first, this is needed so, that phis, hanging on
712 	   spilled phis work correctly */
713 	foreach_ir_nodeset(&env->mem_phis, node, iter) {
714 		spill_info_t *info = get_spillinfo(env, node);
715 		spill_node(env, info);
718 	/* process each spilled node */
719 	for (si = set_first(env->spills); si; si = set_next(env->spills)) {
721 		ir_node *spilled_node = si->spilled_node;
722 		ir_mode *mode = get_irn_mode(spilled_node);
723 		ir_node **copies = NEW_ARR_F(ir_node*, 0);
725 		DBG((env->dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", spilled_node));
727 		/* go through all reloads for this spill */
728 		for (rld = si->reloaders; rld; rld = rld->next) {
729 			ir_node *copy; /* a reload is a "copy" of the original value */
	/* three ways to restore a value: pre-built remat node, fresh remat,
	 * or an actual reload from the spill slot */
731 			if (rld->rematted_node != NULL) {
732 				copy = rld->rematted_node;
734 				sched_add_before(rld->reloader, copy);
735 			} else if (be_do_remats && rld->allow_remat &&
736 					check_remat_conditions(env, spilled_node, rld->reloader)) {
737 				copy = do_remat(env, spilled_node, rld->reloader);
740 				/* make sure we have a spill */
741 				if (si->spill == NULL) {
746 				/* create a reload */
747 				copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode, si->spill);
751 			DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n",
752 				 copy, spilled_node, rld->reloader));
753 			ARR_APP1(ir_node*, copies, copy);
756 		/* if we had any reloads or remats, then we need to reconstruct the
757 		 * SSA form for the spilled value */
758 		if (ARR_LEN(copies) > 0) {
759 			be_ssa_construction_env_t senv;
760 			/* be_lv_t *lv = be_get_birg_liveness(env->birg); */
762 			be_ssa_construction_init(&senv, env->birg);
763 			be_ssa_construction_add_copy(&senv, spilled_node);
764 			be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
765 			be_ssa_construction_fix_users(&senv, spilled_node);
768 			/* no need to enable this as long as we invalidate liveness
769 			   after this function... */
770 			be_ssa_construction_update_liveness_phis(&senv);
771 			be_liveness_update(spilled_node);
772 			len = ARR_LEN(copies);
773 			for(i = 0; i < len; ++i) {
774 				be_liveness_update(lv, copies[i]);
777 			be_ssa_construction_destroy(&senv);
	/* requests are consumed; clear the list for this value */
781 		si->reloaders = NULL;
784 #ifdef FIRM_STATISTICS
785 	if (be_stat_ev_is_active()) {
786 		be_stat_ev("spill_spills", spills);
787 		be_stat_ev("spill_reloads", reloads);
788 		be_stat_ev("spill_remats", remats);
790 #endif /* FIRM_STATISTICS */
792 	be_remove_dead_nodes_from_schedule(env->irg);
793 	/* Matze: In theory be_ssa_construction should take care of the liveness...
794 	 * try to disable this again in the future */
795 	be_invalidate_liveness(env->birg);