3 * @author Daniel Grund, Sebastian Hack, Matthias Braun
6 * Copyright: (c) Universitaet Karlsruhe
7 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 #include "iredges_t.h"
19 #include "irbackedge_t.h"
29 #include "irnodeset.h"
32 #include "besched_t.h"
36 #include "bechordal_t.h"
37 #include "bejavacoal.h"
38 #include "benodesets.h"
39 #include "bespilloptions.h"
40 #include "bestatevent.h"
41 #include "bessaconstr.h"
43 // only rematerialise when costs are less than REMAT_COST_LIMIT
44 // TODO determine a good value here...
45 #define REMAT_COST_LIMIT 10
47 typedef struct _reloader_t reloader_t;
/* NOTE(review): the fields below belong to struct _reloader_t; the struct's
 * opening brace and its remaining fields (e.g. the 'next' and 'reloader'
 * members used elsewhere in this file) are not visible in this excerpt. */
52 	ir_node *rematted_node;
53 	int allow_remat;         /**< the node may be rematted instead of reloaded if global remat option is on */
/**
 * Bookkeeping for one value that should be spilled: the value itself, the
 * places it must be reloaded at, and the register class of the reloads.
 * NOTE(review): some field declarations ('spill', 'old_spill') are not
 * visible in this excerpt, only their doc comments.
 */
56 typedef struct _spill_info_t {
57 	/** the value that should get spilled */
58 	ir_node *spilled_node;
59 	/** list of places where the value should get reloaded */
60 	reloader_t *reloaders;
62 	/** the spill node, or a PhiM node */
64 	/** if we had the value of a phi spilled before but not the phi itself then
65 	 * this field contains the spill for the phi value */
68 	/** the register class in which the reload should be placed */
69 	const arch_register_class_t *reload_cls;
/* NOTE(review): fields of the spill environment (struct spill_env_t); the
 * struct's opening line and further members ('irg', 'birg', 'obst') are not
 * visible in this excerpt. */
73 	const arch_env_t *arch_env;
77 	int spill_cost;     /**< the cost of a single spill node */
78 	int reload_cost;    /**< the cost of a reload node */
79 	set *spills;        /**< all spill_info_t's, which must be placed */
80 	ir_nodeset_t mem_phis;     /**< set of all special spilled phis. allocated and freed separately */
82 	DEBUG_ONLY(firm_dbg_module_t *dbg;)
 86  * Compare two spill infos.
/**
 * set-compare callback: returns 0 iff both infos describe the same spilled
 * node. Only equality is meaningful here; no ordering is defined. The size
 * parameter is required by the set API but unused.
 */
 88 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
 89 	const spill_info_t *xx = x;
 90 	const spill_info_t *yy = y;
 91 	return xx->spilled_node != yy->spilled_node;
 95  * Returns spill info for a specific value (returns NULL if the info doesn't
/**
 * Looks up the spill info for @p value in env->spills without creating one.
 * Returns NULL if no info exists yet (hash lookup keyed on the node).
 */
 98 static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) {
100 	int hash = nodeset_hash(value);
102 	info.spilled_node = value;
104 	return set_find(env->spills, &info, sizeof(info), hash);
108  * Returns spill info for a specific value (the value that is to be spilled)
/**
 * Like find_spillinfo(), but creates and inserts a fresh, zero-initialised
 * spill_info_t if none exists yet for @p value (reloaders/old_spill/reload_cls
 * all start out NULL).
 */
110 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
111 	spill_info_t info, *res;
112 	int hash = nodeset_hash(value);
114 	info.spilled_node = value;
115 	res = set_find(env->spills, &info, sizeof(info), hash);
	/* not found: initialise a new entry and insert it into the set */
118 		info.reloaders = NULL;
120 		info.old_spill = NULL;
121 		info.reload_cls = NULL;
122 		res = set_insert(env->spills, &info, sizeof(info), hash);
129 /* Sets the debug module of a spill environment. */
/* NOTE(review): function body not visible in this excerpt; presumably just
 * assigns env->dbg under DEBUG_ONLY — confirm against the full source. */
130 void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
135 /* Creates a new spill environment. */
/**
 * Allocates and initialises a spill environment for @p birg: the spill-info
 * set, the mem-phi node set and the internal obstack. reload_cost is
 * hard-coded to 5 for now (see TODO below).
 */
136 spill_env_t *be_new_spill_env(be_irg_t *birg) {
137 	spill_env_t *env	= xmalloc(sizeof(env[0]));
138 	env->spills			= new_set(cmp_spillinfo, 1024);
139 	env->irg            = be_get_birg_irg(birg);
141 	env->arch_env       = birg->main_env->arch_env;
142 	ir_nodeset_init(&env->mem_phis);
143 	// TODO, ask backend about costs..., or even ask backend whether we should
146 	env->reload_cost    = 5;
147 	obstack_init(&env->obst);
151 /* Deletes a spill environment. */
/* Frees all resources owned by @p env: the spill set, the mem-phi set and the
 * obstack (which owns every reloader_t allocated by be_add_reload/remat). */
152 void be_delete_spill_env(spill_env_t *env) {
153 	del_set(env->spills);
154 	ir_nodeset_destroy(&env->mem_phis);
155 	obstack_free(&env->obst, NULL);
161 * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___
162 * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
163 * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \
164 * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
/**
 * Records that @p rematted_node (an already-built rematerialisation of
 * @p to_spill) must be scheduled before node @p before. The request is stored
 * as a reloader_t entry with allow_remat forced to 1; it is processed later
 * in be_insert_spills_reloads().
 */
168 void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before, ir_node *rematted_node) {
169 	spill_info_t *spill_info;
170 	reloader_t *reloader;
172 	spill_info = get_spillinfo(env, to_spill);
174 	/* add the remat information */
175 	reloader                = obstack_alloc(&env->obst, sizeof(reloader[0]));
176 	reloader->next          = spill_info->reloaders;
177 	reloader->reloader      = before;
178 	reloader->rematted_node = rematted_node;
179 	reloader->allow_remat   = 1;
181 	spill_info->reloaders  = reloader;
183 	DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
/**
 * Records that value @p to_spill must be available (reloaded or, if
 * @p allow_remat is set and the global option permits, rematerialised) before
 * node @p before. For Phi values, spill infos are created for all arguments
 * as well. All reload requests of one value must agree on @p reload_cls.
 */
187 void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before,
188                    const arch_register_class_t *reload_cls, int allow_remat)
193 	info = get_spillinfo(env, to_spill);
195 	if (is_Phi(to_spill)) {
198 		/* create spillinfos for the phi arguments */
199 		for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
200 			ir_node *arg = get_irn_n(to_spill, i);
201 			get_spillinfo(env, arg);
205 		// hackery... sometimes the morgan algo spilled the value of a phi,
206 		// the belady algo decides later to spill the whole phi, then sees the
207 		// spill node and adds a reload for that spill node, problem is the
208 		// reload gets attached to that same spill (and is totally unnecessary)
		// workaround: move the insertion point past the old spill node
209 		if (info->old_spill != NULL &&
210 			(before == info->old_spill || value_dominates(before, info->old_spill)))
212 			printf("spilledphi hack was needed...\n");
213 			before = sched_next(info->old_spill);
218 	/* put reload into list */
219 	rel                = obstack_alloc(&env->obst, sizeof(rel[0]));
220 	rel->next          = info->reloaders;
221 	rel->reloader      = before;
222 	rel->rematted_node = NULL;
223 	rel->allow_remat   = allow_remat;
225 	info->reloaders  = rel;
	/* every reload of the same value must use the same register class */
226 	assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
227 	info->reload_cls = reload_cls;
229 	DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
230 	     to_spill, before, allow_remat ? "" : " not"));
234  * Returns the point at which you can insert a node that should be executed
235  * before block @p block when coming from pred @p pos.
/**
 * For a block with a single predecessor the insertion point is simply the
 * first scheduled node of the block itself; otherwise the node has to go into
 * the predecessor block, before its control-flow operation (skipping Projs
 * and Keeps that are scheduled behind the jump).
 */
237 static ir_node *get_block_insertion_point(ir_node *block, int pos) {
238 	ir_node *predblock, *last;
240 	/* simply add the reload to the beginning of the block if we only have 1
241 	 * predecessor. We don't need to check for phis as there can't be any in a
242 	 * block with only 1 pred. */
243 	if(get_Block_n_cfgpreds(block) == 1) {
244 		assert(!is_Phi(sched_first(block)));
245 		return sched_first(block);
248 	/* We have to reload the value in pred-block */
249 	predblock = get_Block_cfgpred_block(block, pos);
250 	last = sched_last(predblock);
252 	/* we might have projs and keepanys behind the jump... */
253 	while(is_Proj(last) || be_is_Keep(last)) {
254 		last = sched_prev(last);
255 		assert(!sched_is_end(last));
259 		last = sched_next(last);
260 		// last node must be a cfop, only exception is the start block
261 		assert(last == get_irg_start_block(get_irn_irg(block)));
264 	// add the reload before the (cond-)jump
/**
 * Convenience wrapper: requests a reload of @p to_spill on the control-flow
 * edge entering @p block from predecessor @p pos, using
 * get_block_insertion_point() to find the right scheduling position.
 */
268 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos,
269                            const arch_register_class_t *reload_cls, int allow_remat)
271 	ir_node *before = get_block_insertion_point(block, pos);
272 	be_add_reload(env, to_spill, before, reload_cls, allow_remat);
/**
 * Marks Phi node @p node as a memory phi (to be turned into a PhiM later):
 * it is added to env->mem_phis and spill infos are created for the phi and
 * all of its arguments. If the phi's value was spilled before, that old spill
 * is remembered in old_spill so its reloads can be rewired in spill_phi().
 */
275 void be_spill_phi(spill_env_t *env, ir_node *node) {
279 	assert(is_Phi(node));
281 	ir_nodeset_insert(&env->mem_phis, node);
283 	// create spillinfos for the phi arguments
284 	spill = get_spillinfo(env, node);
285 	for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
286 		ir_node *arg = get_irn_n(node, i);
287 		get_spillinfo(env, arg);
290 	// if we had a spill for the phi value before, then remove this spill from
291 	// schedule, as we will remove it in the insert spill/reload phase
292 	if(spill->spill != NULL && !is_Phi(spill->spill)) {
293 		assert(spill->old_spill == NULL);
294 		spill->old_spill = spill->spill;
301 * / ___|_ __ ___ __ _| |_ ___ / ___| _ __ (_) | |___
302 * | | | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
303 * | |___| | | __/ (_| | || __/ ___) | |_) | | | \__ \
304 * \____|_| \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
309  * Schedules a node after an instruction. That is the place after all projs and
310  * phis that are scheduled after the instruction. This function also skips phi
311  * nodes at the beginning of a block
313 static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
314 	ir_node *next = sched_next(sched_after);
	/* skip past Projs/Phis so the node lands after the whole instruction */
315 	while(is_Proj(next) || is_Phi(next)) {
316 		next = sched_next(next);
318 	assert(next != NULL);
	/* reached end of schedule: append at the very end of the block instead */
320 	if(sched_is_end(next)) {
321 		sched_add_after(sched_last(get_nodes_block(sched_after)), node);
323 		sched_add_before(next, node);
/**
 * Creates (or reuses) a spill node for one value.
 *
330  * @param senv      the spill environment
331  * @param irn       the node that should be spilled
332  * @param ctx_irn   an user of the spilled node
334  * @return a be_Spill node
 *
 * NOTE(review): the actual signature takes (env, spillinfo) and returns void;
 * the doc lines above are the original, stale parameter list.
 */
336 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) {
337 	optimization_state_t  opt;
338 	ir_node              *to_spill = spillinfo->spilled_node;
340 	DBG((env->dbg, LEVEL_1, "spilling %+F ... ", to_spill));
342 	/* Trying to spill an already spilled value, no need for a new spill
343 	 * node then, we can simply connect to the same one for this reload
345 	 * Normally reloads get simply rematerialized instead of spilled again; this
346 	 * can happen anyway when the reload is the pred of a phi to spill)
348 	if (be_is_Reload(to_spill)) {
		/* reuse the memory value the reload reads from as the spill */
349 		spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem);
350 		DB((env->dbg, LEVEL_1, "skip reload, using existing spill %+F\n", spillinfo->spill));
354 	assert(!(arch_irn_is(env->arch_env, to_spill, dont_spill)
355 				&& "Attempt to spill a node marked 'dont_spill'"));
357 	/* some backends have virtual noreg/unknown nodes that are not scheduled */
358 	if(!sched_is_scheduled(to_spill)) {
359 		spillinfo->spill = new_NoMem();
365 		We switch on optimizations here to get CSE. This is needed as some
366 		backends have some extra spill phases and we want to make use of those
367 		spills instead of creating new ones.
369 	save_optimization_state(&opt);
371 	spillinfo->spill = be_spill(env->arch_env, to_spill);
372 	restore_optimization_state(&opt);
	/* CSE may have handed us an existing, already-scheduled spill node */
373 	if (! sched_is_scheduled(spillinfo->spill)) {
374 		DB((env->dbg, LEVEL_1, "add spill %+F after %+F\n", spillinfo->spill, to_spill));
375 		sched_add_after_insn(to_spill, spillinfo->spill);
377 		DB((env->dbg, LEVEL_1, "re-using spill %+F after %+F\n", spillinfo->spill, to_spill));
381 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
384  * If the first usage of a Phi result would be out of memory
385  * there is no sense in allocating a register for it.
386  * Thus we spill it and all its operands to the same spill slot.
387  * Therefore the phi/dataB becomes a phi/Memory
389  * @param senv      the spill environment
390  * @param phi       the Phi node that should be spilled
391  * @param ctx_irn   an user of the spilled node
 *
 * NOTE(review): the actual signature takes (env, spillinfo); the doc lines
 * above are the original, stale parameter list.
393 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) {
394 	ir_node *phi = spillinfo->spilled_node;
396 	int arity = get_irn_arity(phi);
397 	ir_node *block = get_nodes_block(phi);
402 	DBG((env->dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
403 	/* build a new PhiM with Bad placeholders first; real memory inputs are
	 * wired in below after each argument has been spilled */
404 	ins = alloca(sizeof(ir_node*) * arity);
405 	for(i = 0; i < arity; ++i) {
406 		ins[i] = get_irg_bad(env->irg);
408 	spillinfo->spill = new_r_Phi(env->irg, block, arity, ins, mode_M);
	/* spill every argument and use its spill as the PhiM operand */
410 	for(i = 0; i < arity; ++i) {
411 		ir_node *arg = get_irn_n(phi, i);
412 		spill_info_t *arg_info = get_spillinfo(env, arg);
414 		spill_node(env, arg_info);
416 		set_irn_n(spillinfo->spill, i, arg_info->spill);
418 	DBG((env->dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi, spillinfo->spill));
420 	// rewire reloads from old_spill to phi
421 	if (spillinfo->old_spill != NULL) {
422 		const ir_edge_t *edge, *next;
423 		ir_node *old_spill = spillinfo->old_spill;
425 		DBG((env->dbg, LEVEL_1, "old spill found, rewiring reloads:\n"));
427 		foreach_out_edge_safe(old_spill, edge, next) {
428 			ir_node *reload = get_edge_src_irn(edge);
429 			int pos = get_edge_src_pos(edge);
431 			DBG((env->dbg, LEVEL_1, "\tset input %d of %+F to %+F\n", pos, reload, spillinfo->spill));
433 			assert(be_is_Reload(reload) || is_Phi(reload));
434 			set_irn_n(reload, pos, spillinfo->spill);
		/* detach the old spill from its value; dead-node elimination will
		 * clean it up later (it is deliberately not removed from the
		 * schedule here, see the commented-out line below) */
436 		DBG((env->dbg, LEVEL_1, "\tset input of %+F to BAD\n", old_spill));
437 		set_irn_n(old_spill, be_pos_Spill_val, new_Bad());
438 		//sched_remove(old_spill);
439 		spillinfo->old_spill = NULL;
/**
 * Spills one value: dispatches to spill_phi() for memory phis and to
 * spill_irn() for everything else. Idempotent — returns early if the value
 * already has a spill node.
 *
446  * @param senv      the spill environment
447  * @param to_spill  the node that should be spilled
 */
449 static void spill_node(spill_env_t *env, spill_info_t *spillinfo) {
452 	// the node should be tagged for spilling already...
453 	if(spillinfo->spill != NULL)
456 	to_spill = spillinfo->spilled_node;
458 	if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
459 		spill_phi(env, spillinfo);
461 		spill_irn(env, spillinfo);
468 * | _ \ ___ _ __ ___ __ _| |_ ___ _ __(_) __ _| (_)_______
469 * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_ / _ \
470 * | _ < __/ | | | | | (_| | || __/ | | | (_| | | |/ / __/
471 * |_| \_\___|_| |_| |_|\__,_|\__\___|_| |_|\__,_|_|_/___\___|
476  * Tests whether value @p arg is available before node @p reloader
477  * @returns 1 if value is available, 0 otherwise
479 static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader) {
	/* Unknown and NoMem pseudo-values are trivially available everywhere */
480 	if(is_Unknown(arg) || arg == new_NoMem())
	/* the frame pointer is always live */
486 	if(arg == get_irg_frame(env->irg))
489 	// hack for now (happens when command should be inserted at end of block)
490 	if(is_Block(reloader)) {
495 	 * Ignore registers are always available
497 	if(arch_irn_is(env->arch_env, arg, ignore)) {
502 	/* the following test does not work while spilling,
503 	 * because the liveness info is not adapted yet to the effects of the
504 	 * additional spills/reloads.
507 	/* we want to remat before the insn reloader
508 	 * thus an arguments is alive if
509 	 *   - it interferes with the reloaders result
510 	 *   - or it is (last-) used by reloader itself
	 * NOTE(review): per the comment above, this liveness-based test is
	 * presumably disabled (e.g. inside #if 0) in the full source — confirm. */
512 	if (values_interfere(env->birg->lv, reloader, arg)) {
516 	arity = get_irn_arity(reloader);
517 	for (i = 0; i < arity; ++i) {
518 		ir_node *rel_arg = get_irn_n(reloader, i);
528 * Checks whether the node can principally be rematerialized
530 static int is_remat_node(spill_env_t *env, ir_node *node) {
531 const arch_env_t *arch_env = env->arch_env;
533 assert(!be_is_Spill(node));
535 if(arch_irn_is(arch_env, node, rematerializable)) {
539 if(be_is_StackParam(node))
546  * Check if a node is rematerializable. This tests for the following conditions:
548  *  - The node itself is rematerializable
549  *  - All arguments of the node are available or also rematerialisable
550  *  - The costs for the rematerialisation operation is less or equal a limit
552  * Returns the costs needed for rematerialisation or something
553  * > REMAT_COST_LIMIT if remat is not possible.
 *
 * Recursive: @p parentcosts accumulates the cost of the remat chain built so
 * far, so the total (parent + own + argument remats) is bounded by
 * REMAT_COST_LIMIT.
555 static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_node *reloader, int parentcosts) {
560 	if(!is_remat_node(env, spilled))
561 		return REMAT_COST_LIMIT;
563 	if(be_is_Reload(spilled)) {
566 		costs += arch_get_op_estimated_cost(env->arch_env, spilled);
	/* already over budget including the parents' costs: give up early */
568 	if(parentcosts + costs >= REMAT_COST_LIMIT) {
569 		return REMAT_COST_LIMIT;
	/* check that each argument is either available at the reload point or can
	 * itself be rematerialised within the remaining budget */
573 	for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
574 		ir_node *arg = get_irn_n(spilled, i);
576 		if(is_value_available(env, arg, reloader))
579 		// we have to rematerialize the argument as well...
581 			/* we only support rematerializing 1 argument at the moment,
582 			 * so that we don't have to care about register pressure
584 			return REMAT_COST_LIMIT;
588 		costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs);
589 		if(parentcosts + costs >= REMAT_COST_LIMIT)
590 			return REMAT_COST_LIMIT;
/**
 * Boolean wrapper around check_remat_conditions_costs(): returns nonzero iff
 * rematerialising @p spilled before @p reloader fits within REMAT_COST_LIMIT.
 */
596 static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
597 	int costs = check_remat_conditions_costs(env, spilled, reloader, 0);
599 	return costs < REMAT_COST_LIMIT;
603  * Re-materialize a node.
605  * @param senv      the spill environment
606  * @param spilled   the node that was spilled
607  * @param reloader  a irn that requires a reload
 *
 * Builds a copy of @p spilled (recursively rematerialising any unavailable
 * arguments) and schedules it directly before @p reloader. Returns the copy.
609 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
615 	if(is_Block(reloader)) {
618 		bl = get_nodes_block(reloader);
	/* collect operands; arguments not available at the reload point are
	 * rematerialised recursively (cost check already done by the caller) */
621 	ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
622 	for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
623 		ir_node *arg = get_irn_n(spilled, i);
625 		if(is_value_available(env, arg, reloader)) {
628 			ins[i] = do_remat(env, arg, reloader);
632 	/* create a copy of the node */
633 	res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
635 	                  get_irn_mode(spilled),
636 	                  get_irn_arity(spilled),
638 	copy_node_attr(spilled, res);
639 	new_backedge_info(res);
642 	DBG((env->dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
644 	/* insert in schedule */
645 	sched_add_before(reloader, res);
/**
 * Estimates the cost of making @p to_spill available before @p before:
 * the remat cost if rematerialisation is possible, just the reload cost if a
 * spill already exists, otherwise spill + reload cost.
 */
650 int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) {
651 	spill_info_t *spill_info;
654 		// is the node rematerializable?
655 		int costs = check_remat_conditions_costs(env, to_spill, before, 0);
656 		if(costs < REMAT_COST_LIMIT)
660 	// do we already have a spill?
661 	spill_info = find_spillinfo(env, to_spill);
662 	if(spill_info != NULL && spill_info->spill != NULL)
663 		return env->reload_cost;
665 	return env->spill_cost + env->reload_cost;
/**
 * Convenience wrapper: reload cost of @p to_spill on the control-flow edge
 * entering @p block from predecessor @p pos.
 */
668 int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
669 	ir_node *before = get_block_insertion_point(block, pos);
670 	return be_get_reload_costs(env, to_spill, before);
675 * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
676 * | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
677 * | || | | \__ \ __/ | | |_ | _ < __/ | (_) | (_| | (_| \__ \
678 * |___|_| |_|___/\___|_| \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
/**
 * Main driver: materialises all recorded spill/reload/remat requests.
 * First all memory phis are spilled (so phis depending on spilled phis work),
 * then for every spilled value each reload request is satisfied by either the
 * pre-built remat node, a fresh remat, or a be_Reload; finally SSA form is
 * reconstructed for the new copies and dead nodes are removed.
 */
682 void be_insert_spills_reloads(spill_env_t *env) {
683 	const arch_env_t      *arch_env  = env->arch_env;
688 	ir_nodeset_iterator_t  iter;
691 	/* create all phi-ms first, this is needed so, that phis, hanging on
692 	   spilled phis work correctly */
693 	foreach_ir_nodeset(&env->mem_phis, node, iter) {
694 		spill_info_t *info = get_spillinfo(env, node);
695 		spill_node(env, info);
698 	/* process each spilled node */
699 	for (si = set_first(env->spills); si; si = set_next(env->spills)) {
701 		ir_node  *spilled_node = si->spilled_node;
702 		ir_mode  *mode         = get_irn_mode(spilled_node);
703 		ir_node **copies       = NEW_ARR_F(ir_node*, 0);
705 		DBG((env->dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", spilled_node));
707 		/* go through all reloads for this spill */
708 		for (rld = si->reloaders; rld; rld = rld->next) {
709 			ir_node *copy; /* a reload is a "copy" of the original value */
			/* case 1: a rematted node was supplied via be_add_remat() */
711 			if (rld->rematted_node != NULL) {
712 				copy = rld->rematted_node;
714 				sched_add_before(rld->reloader, copy);
			/* case 2: rematerialise on the fly if allowed and cheap enough */
715 			} else if (be_do_remats && rld->allow_remat &&
716 					check_remat_conditions(env, spilled_node, rld->reloader)) {
717 				copy = do_remat(env, spilled_node, rld->reloader);
			/* case 3: fall back to a real reload from the spill slot */
720 				/* make sure we have a spill */
721 				if (si->spill == NULL) {
726 				/* create a reload */
727 				copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode, si->spill);
731 			DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n",
732 			     copy, spilled_node, rld->reloader));
733 			ARR_APP1(ir_node*, copies, copy);
736 		/* if we had any reloads or remats, then we need to reconstruct the
737 		 * SSA form for the spilled value */
738 		if (ARR_LEN(copies) > 0) {
739 			be_ssa_construction_env_t senv;
740 			/* be_lv_t *lv = be_get_birg_liveness(env->birg); */
742 			be_ssa_construction_init(&senv, env->birg);
743 			be_ssa_construction_add_copy(&senv, spilled_node);
744 			be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
745 			be_ssa_construction_fix_users(&senv, spilled_node);
748 			/* no need to enable this as long as we invalidate liveness
749 			   after this function...
			   NOTE(review): the liveness-update calls below are presumably
			   inside a disabled (#if 0) region in the full source — confirm;
			   be_liveness_update is called with differing arities here. */
750 			be_ssa_construction_update_liveness_phis(&senv);
751 			be_liveness_update(spilled_node);
752 			len = ARR_LEN(copies);
753 			for(i = 0; i < len; ++i) {
754 				be_liveness_update(lv, copies[i]);
757 			be_ssa_construction_destroy(&senv);
761 		si->reloaders = NULL;
764 #ifdef FIRM_STATISTICS
765 	if (be_stat_ev_is_active()) {
766 		be_stat_ev("spill_spills", spills);
767 		be_stat_ev("spill_reloads", reloads);
768 		be_stat_ev("spill_remats", remats);
770 #endif /* FIRM_STATISTICS */
772 	be_remove_dead_nodes_from_schedule(env->irg);
773 	/* Matze: In theory be_ssa_construction should take care of the livenes...
774 	 * try to disable this again in the future */
775 	be_invalidate_liveness(env->birg);