/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Spill module: collects spill/reload requests and materialises them.
 * @author  Daniel Grund, Sebastian Hack, Matthias Braun
 *
 * Copyright: (c) Universitaet Karlsruhe
 * Licence:   This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
37 #include "iredges_t.h"
38 #include "irbackedge_t.h"
48 #include "irnodeset.h"
51 #include "besched_t.h"
55 #include "bechordal_t.h"
56 #include "bejavacoal.h"
57 #include "benodesets.h"
58 #include "bespilloptions.h"
59 #include "bestatevent.h"
60 #include "bessaconstr.h"
/* Only rematerialise a value when the (recursively accumulated) costs are
 * less than REMAT_COST_LIMIT.
 * TODO: determine a good value here... */
#define REMAT_COST_LIMIT 10
66 typedef struct _reloader_t reloader_t;
71 ir_node *rematted_node;
72 int allow_remat; /**< the node may be rematted instead of reloaded if global remat option is on */
75 typedef struct _spill_info_t {
76 /** the value that should get spilled */
77 ir_node *spilled_node;
78 /** list of places where the value should get reloaded */
79 reloader_t *reloaders;
81 /** the spill node, or a PhiM node */
83 /** if we had the value of a phi spilled before but not the phi itself then
84 * this field contains the spill for the phi value */
87 /** the register class in which the reload should be placed */
88 const arch_register_class_t *reload_cls;
92 const arch_env_t *arch_env;
96 int spill_cost; /**< the cost of a single spill node */
97 int reload_cost; /**< the cost of a reload node */
98 set *spills; /**< all spill_info_t's, which must be placed */
99 ir_nodeset_t mem_phis; /**< set of all special spilled phis. allocated and freed separately */
101 DEBUG_ONLY(firm_dbg_module_t *dbg;)
105 * Compare two spill infos.
107 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
108 const spill_info_t *xx = x;
109 const spill_info_t *yy = y;
110 return xx->spilled_node != yy->spilled_node;
114 * Returns spill info for a specific value (returns NULL if the info doesn't
117 static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) {
119 int hash = nodeset_hash(value);
121 info.spilled_node = value;
123 return set_find(env->spills, &info, sizeof(info), hash);
127 * Returns spill info for a specific value (the value that is to be spilled)
129 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
130 spill_info_t info, *res;
131 int hash = nodeset_hash(value);
133 info.spilled_node = value;
134 res = set_find(env->spills, &info, sizeof(info), hash);
137 info.reloaders = NULL;
139 info.old_spill = NULL;
140 info.reload_cls = NULL;
141 res = set_insert(env->spills, &info, sizeof(info), hash);
148 /* Sets the debug module of a spill environment. */
149 void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
154 /* Creates a new spill environment. */
155 spill_env_t *be_new_spill_env(be_irg_t *birg) {
156 spill_env_t *env = xmalloc(sizeof(env[0]));
157 env->spills = new_set(cmp_spillinfo, 1024);
158 env->irg = be_get_birg_irg(birg);
160 env->arch_env = birg->main_env->arch_env;
161 ir_nodeset_init(&env->mem_phis);
162 // TODO, ask backend about costs..., or even ask backend whether we should
165 env->reload_cost = 5;
166 obstack_init(&env->obst);
170 /* Deletes a spill environment. */
171 void be_delete_spill_env(spill_env_t *env) {
172 del_set(env->spills);
173 ir_nodeset_destroy(&env->mem_phis);
174 obstack_free(&env->obst, NULL);
/*
 *  ____  _                  ____      _                 _
 * |  _ \| | __ _  ___ ___  |  _ \ ___| | ___   __ _  __| |___
 * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
 * |  __/| | (_| | (_|  __/ |  _ <  __/ | (_) | (_| | (_| \__ \
 * |_|   |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
 *
 * Place Reloads
 */
187 void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before, ir_node *rematted_node) {
188 spill_info_t *spill_info;
189 reloader_t *reloader;
191 spill_info = get_spillinfo(env, to_spill);
193 /* add the remat information */
194 reloader = obstack_alloc(&env->obst, sizeof(reloader[0]));
195 reloader->next = spill_info->reloaders;
196 reloader->reloader = before;
197 reloader->rematted_node = rematted_node;
198 reloader->allow_remat = 1;
200 spill_info->reloaders = reloader;
202 DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
206 void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before,
207 const arch_register_class_t *reload_cls, int allow_remat)
212 info = get_spillinfo(env, to_spill);
214 if (is_Phi(to_spill)) {
217 /* create spillinfos for the phi arguments */
218 for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
219 ir_node *arg = get_irn_n(to_spill, i);
220 get_spillinfo(env, arg);
224 // hackery... sometimes the morgan algo spilled the value of a phi,
225 // the belady algo decides later to spill the whole phi, then sees the
226 // spill node and adds a reload for that spill node, problem is the
227 // reload gets attach to that same spill (and is totally unnecessary)
228 if (info->old_spill != NULL &&
229 (before == info->old_spill || value_dominates(before, info->old_spill)))
231 printf("spilledphi hack was needed...\n");
232 before = sched_next(info->old_spill);
237 /* put reload into list */
238 rel = obstack_alloc(&env->obst, sizeof(rel[0]));
239 rel->next = info->reloaders;
240 rel->reloader = before;
241 rel->rematted_node = NULL;
242 rel->allow_remat = allow_remat;
244 info->reloaders = rel;
245 assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
246 info->reload_cls = reload_cls;
248 DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
249 to_spill, before, allow_remat ? "" : " not"));
253 * Returns the point at which you can insert a node that should be executed
254 * before block @p block when coming from pred @p pos.
256 static ir_node *get_block_insertion_point(ir_node *block, int pos) {
257 ir_node *predblock, *last;
259 /* simply add the reload to the beginning of the block if we only have 1
260 * predecessor. We don't need to check for phis as there can't be any in a
261 * block with only 1 pred. */
262 if(get_Block_n_cfgpreds(block) == 1) {
263 assert(!is_Phi(sched_first(block)));
264 return sched_first(block);
267 /* We have to reload the value in pred-block */
268 predblock = get_Block_cfgpred_block(block, pos);
269 last = sched_last(predblock);
271 /* we might have projs and keepanys behind the jump... */
272 while(is_Proj(last) || be_is_Keep(last)) {
273 last = sched_prev(last);
274 assert(!sched_is_end(last));
278 last = sched_next(last);
279 // last node must be a cfop, only exception is the start block
280 assert(last == get_irg_start_block(get_irn_irg(block)));
283 // add the reload before the (cond-)jump
287 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos,
288 const arch_register_class_t *reload_cls, int allow_remat)
290 ir_node *before = get_block_insertion_point(block, pos);
291 be_add_reload(env, to_spill, before, reload_cls, allow_remat);
294 void be_spill_phi(spill_env_t *env, ir_node *node) {
298 assert(is_Phi(node));
300 ir_nodeset_insert(&env->mem_phis, node);
302 // create spillinfos for the phi arguments
303 spill = get_spillinfo(env, node);
304 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
305 ir_node *arg = get_irn_n(node, i);
306 get_spillinfo(env, arg);
309 // if we had a spill for the phi value before, then remove this spill from
310 // schedule, as we will remove it in the insert spill/reload phase
311 if(spill->spill != NULL && !is_Phi(spill->spill)) {
312 assert(spill->old_spill == NULL);
313 spill->old_spill = spill->spill;
/*
 *   ____                _         ____        _ _ _
 *  / ___|_ __ ___  __ _| |_ ___  / ___| _ __ (_) | |___
 * | |   | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
 * | |___| | |  __/ (_| | ||  __/  ___) | |_) | | | \__ \
 *  \____|_|  \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
 *                                      |_|
 */
328 * Schedules a node after an instruction. That is the place after all projs and
329 * phis that are scheduled after the instruction. This function also skips phi
330 * nodes at the beginning of a block
332 static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
333 ir_node *next = sched_next(sched_after);
334 while(is_Proj(next) || is_Phi(next)) {
335 next = sched_next(next);
337 assert(next != NULL);
339 if(sched_is_end(next)) {
340 sched_add_after(sched_last(get_nodes_block(sched_after)), node);
342 sched_add_before(next, node);
349 * @param senv the spill environment
350 * @param irn the node that should be spilled
351 * @param ctx_irn an user of the spilled node
353 * @return a be_Spill node
355 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) {
356 optimization_state_t opt;
357 ir_node *to_spill = spillinfo->spilled_node;
359 DBG((env->dbg, LEVEL_1, "spilling %+F ... ", to_spill));
361 /* Trying to spill an already spilled value, no need for a new spill
362 * node then, we can simply connect to the same one for this reload
364 * Normally reloads get simply rematerialized instead of spilled again; this
365 * can happen annyway when the reload is the pred of a phi to spill)
367 if (be_is_Reload(to_spill)) {
368 spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem);
369 DB((env->dbg, LEVEL_1, "skip reload, using existing spill %+F\n", spillinfo->spill));
373 assert(!(arch_irn_is(env->arch_env, to_spill, dont_spill)
374 && "Attempt to spill a node marked 'dont_spill'"));
376 /* some backends have virtual noreg/unknown nodes that are not scheduled */
377 if(!sched_is_scheduled(to_spill)) {
378 spillinfo->spill = new_NoMem();
384 We switch on optimizations here to get CSE. This is needed as some
385 backends have some extra spill phases and we want to make use of those
386 spills instead of creating new ones.
388 save_optimization_state(&opt);
390 spillinfo->spill = be_spill(env->arch_env, to_spill);
391 restore_optimization_state(&opt);
392 if (! sched_is_scheduled(spillinfo->spill)) {
393 DB((env->dbg, LEVEL_1, "add spill %+F after %+F\n", spillinfo->spill, to_spill));
394 sched_add_after_insn(to_spill, spillinfo->spill);
396 DB((env->dbg, LEVEL_1, "re-using spill %+F after %+F\n", spillinfo->spill, to_spill));
400 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
403 * If the first usage of a Phi result would be out of memory
404 * there is no sense in allocating a register for it.
405 * Thus we spill it and all its operands to the same spill slot.
406 * Therefore the phi/dataB becomes a phi/Memory
408 * @param senv the spill environment
409 * @param phi the Phi node that should be spilled
410 * @param ctx_irn an user of the spilled node
412 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) {
413 ir_node *phi = spillinfo->spilled_node;
415 int arity = get_irn_arity(phi);
416 ir_node *block = get_nodes_block(phi);
421 DBG((env->dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
422 /* build a new PhiM */
423 ins = alloca(sizeof(ir_node*) * arity);
424 for(i = 0; i < arity; ++i) {
425 ins[i] = get_irg_bad(env->irg);
427 spillinfo->spill = new_r_Phi(env->irg, block, arity, ins, mode_M);
429 for(i = 0; i < arity; ++i) {
430 ir_node *arg = get_irn_n(phi, i);
431 spill_info_t *arg_info = get_spillinfo(env, arg);
433 spill_node(env, arg_info);
435 set_irn_n(spillinfo->spill, i, arg_info->spill);
437 DBG((env->dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi, spillinfo->spill));
439 // rewire reloads from old_spill to phi
440 if (spillinfo->old_spill != NULL) {
441 const ir_edge_t *edge, *next;
442 ir_node *old_spill = spillinfo->old_spill;
444 DBG((env->dbg, LEVEL_1, "old spill found, rewiring reloads:\n"));
446 foreach_out_edge_safe(old_spill, edge, next) {
447 ir_node *reload = get_edge_src_irn(edge);
448 int pos = get_edge_src_pos(edge);
450 DBG((env->dbg, LEVEL_1, "\tset input %d of %+F to %+F\n", pos, reload, spillinfo->spill));
452 assert(be_is_Reload(reload) || is_Phi(reload));
453 set_irn_n(reload, pos, spillinfo->spill);
455 DBG((env->dbg, LEVEL_1, "\tset input of %+F to BAD\n", old_spill));
456 set_irn_n(old_spill, be_pos_Spill_val, new_Bad());
457 //sched_remove(old_spill);
458 spillinfo->old_spill = NULL;
465 * @param senv the spill environment
466 * @param to_spill the node that should be spilled
468 static void spill_node(spill_env_t *env, spill_info_t *spillinfo) {
471 // the node should be tagged for spilling already...
472 if(spillinfo->spill != NULL)
475 to_spill = spillinfo->spilled_node;
477 if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
478 spill_phi(env, spillinfo);
480 spill_irn(env, spillinfo);
/*
 *  ____                      _            _       _ _
 * |  _ \ ___ _ __ ___   __ _| |_ ___ _ __(_) __ _| (_)_______
 * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_  / _ \
 * |  _ <  __/ | | | | | (_| | ||  __/ |  | | (_| | | |/ /  __/
 * |_| \_\___|_| |_| |_|\__,_|\__\___|_|  |_|\__,_|_|_/___\___|
 */
495 * Tests whether value @p arg is available before node @p reloader
496 * @returns 1 if value is available, 0 otherwise
498 static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader) {
499 if(is_Unknown(arg) || arg == new_NoMem())
505 if(arg == get_irg_frame(env->irg))
508 // hack for now (happens when command should be inserted at end of block)
509 if(is_Block(reloader)) {
514 * Ignore registers are always available
516 if(arch_irn_is(env->arch_env, arg, ignore)) {
521 /* the following test does not work while spilling,
522 * because the liveness info is not adapted yet to the effects of the
523 * additional spills/reloads.
526 /* we want to remat before the insn reloader
527 * thus an arguments is alive if
528 * - it interferes with the reloaders result
529 * - or it is (last-) used by reloader itself
531 if (values_interfere(env->birg->lv, reloader, arg)) {
535 arity = get_irn_arity(reloader);
536 for (i = 0; i < arity; ++i) {
537 ir_node *rel_arg = get_irn_n(reloader, i);
547 * Checks whether the node can principally be rematerialized
549 static int is_remat_node(spill_env_t *env, ir_node *node) {
550 const arch_env_t *arch_env = env->arch_env;
552 assert(!be_is_Spill(node));
554 if(arch_irn_is(arch_env, node, rematerializable)) {
558 if(be_is_StackParam(node))
565 * Check if a node is rematerializable. This tests for the following conditions:
567 * - The node itself is rematerializable
568 * - All arguments of the node are available or also rematerialisable
569 * - The costs for the rematerialisation operation is less or equal a limit
571 * Returns the costs needed for rematerialisation or something
572 * > REMAT_COST_LIMIT if remat is not possible.
574 static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_node *reloader, int parentcosts) {
579 if(!is_remat_node(env, spilled))
580 return REMAT_COST_LIMIT;
582 if(be_is_Reload(spilled)) {
585 costs += arch_get_op_estimated_cost(env->arch_env, spilled);
587 if(parentcosts + costs >= REMAT_COST_LIMIT) {
588 return REMAT_COST_LIMIT;
592 for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
593 ir_node *arg = get_irn_n(spilled, i);
595 if(is_value_available(env, arg, reloader))
598 // we have to rematerialize the argument as well...
600 /* we only support rematerializing 1 argument at the moment,
601 * so that we don't have to care about register pressure
603 return REMAT_COST_LIMIT;
607 costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs);
608 if(parentcosts + costs >= REMAT_COST_LIMIT)
609 return REMAT_COST_LIMIT;
615 static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
616 int costs = check_remat_conditions_costs(env, spilled, reloader, 0);
618 return costs < REMAT_COST_LIMIT;
622 * Re-materialize a node.
624 * @param senv the spill environment
625 * @param spilled the node that was spilled
626 * @param reloader a irn that requires a reload
628 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
634 if(is_Block(reloader)) {
637 bl = get_nodes_block(reloader);
640 ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
641 for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
642 ir_node *arg = get_irn_n(spilled, i);
644 if(is_value_available(env, arg, reloader)) {
647 ins[i] = do_remat(env, arg, reloader);
651 /* create a copy of the node */
652 res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
654 get_irn_mode(spilled),
655 get_irn_arity(spilled),
657 copy_node_attr(spilled, res);
658 new_backedge_info(res);
661 DBG((env->dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
663 /* insert in schedule */
664 sched_add_before(reloader, res);
669 int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) {
670 spill_info_t *spill_info;
673 // is the node rematerializable?
674 int costs = check_remat_conditions_costs(env, to_spill, before, 0);
675 if(costs < REMAT_COST_LIMIT)
679 // do we already have a spill?
680 spill_info = find_spillinfo(env, to_spill);
681 if(spill_info != NULL && spill_info->spill != NULL)
682 return env->reload_cost;
684 return env->spill_cost + env->reload_cost;
687 int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
688 ir_node *before = get_block_insertion_point(block, pos);
689 return be_get_reload_costs(env, to_spill, before);
/*
 *  ___                     _     ____      _                 _
 * |_ _|_ __  ___  ___ _ __| |_  |  _ \ ___| | ___   __ _  __| |___
 *  | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
 *  | || | | \__ \  __/ |  | |_  |  _ <  __/ | (_) | (_| | (_| \__ \
 * |___|_| |_|___/\___|_|   \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
 */
701 void be_insert_spills_reloads(spill_env_t *env) {
702 const arch_env_t *arch_env = env->arch_env;
707 ir_nodeset_iterator_t iter;
710 /* create all phi-ms first, this is needed so, that phis, hanging on
711 spilled phis work correctly */
712 foreach_ir_nodeset(&env->mem_phis, node, iter) {
713 spill_info_t *info = get_spillinfo(env, node);
714 spill_node(env, info);
717 /* process each spilled node */
718 for (si = set_first(env->spills); si; si = set_next(env->spills)) {
720 ir_node *spilled_node = si->spilled_node;
721 ir_mode *mode = get_irn_mode(spilled_node);
722 ir_node **copies = NEW_ARR_F(ir_node*, 0);
724 DBG((env->dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", spilled_node));
726 /* go through all reloads for this spill */
727 for (rld = si->reloaders; rld; rld = rld->next) {
728 ir_node *copy; /* a reaload is a "copy" of the original value */
730 if (rld->rematted_node != NULL) {
731 copy = rld->rematted_node;
733 sched_add_before(rld->reloader, copy);
734 } else if (be_do_remats && rld->allow_remat &&
735 check_remat_conditions(env, spilled_node, rld->reloader)) {
736 copy = do_remat(env, spilled_node, rld->reloader);
739 /* make sure we have a spill */
740 if (si->spill == NULL) {
745 /* create a reload */
746 copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode, si->spill);
750 DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n",
751 copy, spilled_node, rld->reloader));
752 ARR_APP1(ir_node*, copies, copy);
755 /* if we had any reloads or remats, then we need to reconstruct the
756 * SSA form for the spilled value */
757 if (ARR_LEN(copies) > 0) {
758 be_ssa_construction_env_t senv;
759 /* be_lv_t *lv = be_get_birg_liveness(env->birg); */
761 be_ssa_construction_init(&senv, env->birg);
762 be_ssa_construction_add_copy(&senv, spilled_node);
763 be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
764 be_ssa_construction_fix_users(&senv, spilled_node);
767 /* no need to enable this as long as we invalidate liveness
768 after this function... */
769 be_ssa_construction_update_liveness_phis(&senv);
770 be_liveness_update(spilled_node);
771 len = ARR_LEN(copies);
772 for(i = 0; i < len; ++i) {
773 be_liveness_update(lv, copies[i]);
776 be_ssa_construction_destroy(&senv);
780 si->reloaders = NULL;
783 #ifdef FIRM_STATISTICS
784 if (be_stat_ev_is_active()) {
785 be_stat_ev("spill_spills", spills);
786 be_stat_ev("spill_reloads", reloads);
787 be_stat_ev("spill_remats", remats);
789 #endif /* FIRM_STATISTICS */
791 be_remove_dead_nodes_from_schedule(env->irg);
792 /* Matze: In theory be_ssa_construction should take care of the livenes...
793 * try to disable this again in the future */
794 be_invalidate_liveness(env->birg);