3 * @author Daniel Grund, Sebastian Hack, Matthias Braun
6 * Copyright: (c) Universitaet Karlsruhe
7 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 #include "iredges_t.h"
19 #include "irbackedge_t.h"
29 #include "irnodeset.h"
32 #include "besched_t.h"
36 #include "bechordal_t.h"
37 #include "bejavacoal.h"
38 #include "benodesets.h"
39 #include "bespilloptions.h"
40 #include "bestatevent.h"
43 // only rematerialise when costs are less than REMAT_COST_LIMIT
44 // TODO determine a good value here...
45 #define REMAT_COST_LIMIT 10
47 typedef struct _reloader_t reloader_t;
52 ir_node *rematted_node;
53 int allow_remat; /**< the node may be rematted instead of reloaded if global remat option is on */
56 typedef struct _spill_info_t {
57 /** the value that should get spilled */
58 ir_node *spilled_node;
59 /** list of places where the value should get reloaded */
60 reloader_t *reloaders;
62 /** the spill node, or a PhiM node */
64 /** if we had the value of a phi spilled before but not the phi itself then
65 * this field contains the spill for the phi value */
68 /** the register class in which the reload should be placed */
69 const arch_register_class_t *reload_cls;
73 const arch_env_t *arch_env;
77 int spill_cost; /**< the cost of a single spill node */
78 int reload_cost; /**< the cost of a reload node */
79 set *spills; /**< all spill_info_t's, which must be placed */
80 ir_nodeset_t mem_phis; /**< set of all special spilled phis. allocated and freed separately */
82 DEBUG_ONLY(firm_dbg_module_t *dbg;)
86 * Compare two spill infos.
88 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
89 const spill_info_t *xx = x;
90 const spill_info_t *yy = y;
91 return xx->spilled_node != yy->spilled_node;
95 * Returns spill info for a specific value (returns NULL if the info doesn't
98 static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) {
100 int hash = nodeset_hash(value);
102 info.spilled_node = value;
104 return set_find(env->spills, &info, sizeof(info), hash);
108 * Returns spill info for a specific value (the value that is to be spilled)
110 static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
111 spill_info_t info, *res;
112 int hash = nodeset_hash(value);
114 info.spilled_node = value;
115 res = set_find(env->spills, &info, sizeof(info), hash);
118 info.reloaders = NULL;
120 info.old_spill = NULL;
121 info.reload_cls = NULL;
122 res = set_insert(env->spills, &info, sizeof(info), hash);
129 /* Sets the debug module of a spill environment. */
130 void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
135 /* Creates a new spill environment. */
136 spill_env_t *be_new_spill_env(be_irg_t *birg) {
137 spill_env_t *env = xmalloc(sizeof(env[0]));
138 env->spills = new_set(cmp_spillinfo, 1024);
139 env->irg = be_get_birg_irg(birg);
141 env->arch_env = birg->main_env->arch_env;
142 ir_nodeset_init(&env->mem_phis);
143 // TODO, ask backend about costs..., or even ask backend whether we should
146 env->reload_cost = 5;
147 obstack_init(&env->obst);
151 /* Deletes a spill environment. */
152 void be_delete_spill_env(spill_env_t *env) {
153 del_set(env->spills);
154 ir_nodeset_destroy(&env->mem_phis);
155 obstack_free(&env->obst, NULL);
161 * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___
162 * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
163 * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \
164 * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
168 void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before, ir_node *rematted_node) {
169 spill_info_t *spill_info;
170 reloader_t *reloader;
172 spill_info = get_spillinfo(env, to_spill);
174 /* add the remat information */
175 reloader = obstack_alloc(&env->obst, sizeof(reloader[0]));
176 reloader->next = spill_info->reloaders;
177 reloader->reloader = before;
178 reloader->rematted_node = rematted_node;
179 reloader->allow_remat = 1;
181 spill_info->reloaders = reloader;
183 DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
187 void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before,
188 const arch_register_class_t *reload_cls, int allow_remat)
193 info = get_spillinfo(env, to_spill);
195 if (is_Phi(to_spill)) {
198 /* create spillinfos for the phi arguments */
199 for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
200 ir_node *arg = get_irn_n(to_spill, i);
201 get_spillinfo(env, arg);
205 // hackery... sometimes the morgan algo spilled the value of a phi,
206 // the belady algo decides later to spill the whole phi, then sees the
207 // spill node and adds a reload for that spill node, problem is the
208 // reload gets attach to that same spill (and is totally unnecessary)
209 if (info->old_spill != NULL &&
210 (before == info->old_spill || value_dominates(before, info->old_spill)))
212 printf("spilledphi hack was needed...\n");
213 before = sched_next(info->old_spill);
218 /* put reload into list */
219 rel = obstack_alloc(&env->obst, sizeof(rel[0]));
220 rel->next = info->reloaders;
221 rel->reloader = before;
222 rel->rematted_node = NULL;
223 rel->allow_remat = allow_remat;
225 info->reloaders = rel;
226 assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
227 info->reload_cls = reload_cls;
229 DBG((env->dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
230 to_spill, before, allow_remat ? "" : " not"));
233 static ir_node *get_reload_insertion_point(ir_node *block, int pos) {
234 ir_node *predblock, *last;
236 /* simply add the reload to the beginning of the block if we only have 1 predecessor
237 * (we don't need to check for phis as there can't be any in a block with only 1 pred)
239 if(get_Block_n_cfgpreds(block) == 1) {
240 assert(!is_Phi(sched_first(block)));
241 return sched_first(block);
244 /* We have to reload the value in pred-block */
245 predblock = get_Block_cfgpred_block(block, pos);
246 last = sched_last(predblock);
248 /* we might have projs and keepanys behind the jump... */
249 while(is_Proj(last) || be_is_Keep(last)) {
250 last = sched_prev(last);
251 assert(!sched_is_end(last));
255 last = sched_next(last);
256 // last node must be a cfop, only exception is the start block
257 assert(last == get_irg_start_block(get_irn_irg(block)));
260 // add the reload before the (cond-)jump
264 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos,
265 const arch_register_class_t *reload_cls, int allow_remat)
267 ir_node *before = get_reload_insertion_point(block, pos);
268 be_add_reload(env, to_spill, before, reload_cls, allow_remat);
271 void be_spill_phi(spill_env_t *env, ir_node *node) {
275 assert(is_Phi(node));
277 ir_nodeset_insert(&env->mem_phis, node);
279 // create spillinfos for the phi arguments
280 spill = get_spillinfo(env, node);
281 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
282 ir_node *arg = get_irn_n(node, i);
283 get_spillinfo(env, arg);
286 // if we had a spill for the phi value before, then remove this spill from
287 // schedule, as we will remove it in the insert spill/reload phase
288 if(spill->spill != NULL && !is_Phi(spill->spill)) {
289 assert(spill->old_spill == NULL);
290 spill->old_spill = spill->spill;
297 * / ___|_ __ ___ __ _| |_ ___ / ___| _ __ (_) | |___
298 * | | | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
299 * | |___| | | __/ (_| | || __/ ___) | |_) | | | \__ \
300 * \____|_| \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
305 * Schedules a node after an instruction. (That is the place after all projs and phis
306 * that are scheduled after the instruction)
307 * This function also skips phi nodes at the beginning of a block
309 static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
310 ir_node *next = sched_next(sched_after);
311 while(is_Proj(next) || is_Phi(next)) {
312 next = sched_next(next);
314 assert(next != NULL);
316 if(sched_is_end(next)) {
317 sched_add_after(sched_last(get_nodes_block(sched_after)), node);
319 sched_add_before(next, node);
326 * @param senv the spill environment
327 * @param irn the node that should be spilled
328 * @param ctx_irn an user of the spilled node
330 * @return a be_Spill node
332 static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) {
333 optimization_state_t opt;
334 ir_node *to_spill = spillinfo->spilled_node;
336 DBG((env->dbg, LEVEL_1, "spilling %+F ... ", to_spill));
338 /* Trying to spill an already spilled value, no need for a new spill
339 * node then, we can simply connect to the same one for this reload
341 * (although rematerialization code should handle most of these cases
342 * this can still happen when spilling Phis)
344 if (be_is_Reload(to_spill)) {
345 spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem);
346 DB((env->dbg, LEVEL_1, "skip reload, using existing spill %+F\n", spillinfo->spill));
350 if (arch_irn_is(env->arch_env, to_spill, dont_spill)) {
351 assert(0 && "Attempt to spill a node marked 'dont_spill'");
355 We switch on optimizations here to get CSE. This is needed as some backends
356 have some extra spill phases and we want to make use of those spills instead
357 of creating new ones.
359 save_optimization_state(&opt);
361 spillinfo->spill = be_spill(env->arch_env, to_spill);
362 restore_optimization_state(&opt);
363 if (! sched_is_scheduled(spillinfo->spill)) {
364 DB((env->dbg, LEVEL_1, "add spill %+F after %+F\n", spillinfo->spill, to_spill));
365 sched_add_after_insn(to_spill, spillinfo->spill);
368 DB((env->dbg, LEVEL_1, "re-using spill %+F after %+F\n", spillinfo->spill, to_spill));
372 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
375 * If the first usage of a Phi result would be out of memory
376 * there is no sense in allocating a register for it.
377 * Thus we spill it and all its operands to the same spill slot.
378 * Therefore the phi/dataB becomes a phi/Memory
380 * @param senv the spill environment
381 * @param phi the Phi node that should be spilled
382 * @param ctx_irn an user of the spilled node
384 static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) {
385 ir_node *phi = spillinfo->spilled_node;
387 int arity = get_irn_arity(phi);
388 ir_node *block = get_nodes_block(phi);
393 DBG((env->dbg, LEVEL_1, "spilling Phi %+F:\n", phi));
394 /* build a new PhiM */
395 ins = alloca(sizeof(ir_node*) * arity);
396 for(i = 0; i < arity; ++i) {
397 ins[i] = get_irg_bad(env->irg);
399 spillinfo->spill = new_r_Phi(env->irg, block, arity, ins, mode_M);
401 for(i = 0; i < arity; ++i) {
402 ir_node *arg = get_irn_n(phi, i);
403 spill_info_t *arg_info = get_spillinfo(env, arg);
405 spill_node(env, arg_info);
407 set_irn_n(spillinfo->spill, i, arg_info->spill);
409 DBG((env->dbg, LEVEL_1, "... done spilling Phi %+F\n", phi));
411 // rewire reloads from old_spill to phi
412 if (spillinfo->old_spill != NULL) {
413 const ir_edge_t *edge, *next;
414 ir_node *old_spill = spillinfo->old_spill;
416 DBG((env->dbg, LEVEL_1, "old spill found, rewiring reloads:\n"));
418 foreach_out_edge_safe(old_spill, edge, next) {
419 ir_node *reload = get_edge_src_irn(edge);
420 int pos = get_edge_src_pos(edge);
422 DBG((env->dbg, LEVEL_1, "\tset input %d of %+F to %+F\n", pos, reload, spillinfo->spill));
424 assert(be_is_Reload(reload) || is_Phi(reload));
425 set_irn_n(reload, pos, spillinfo->spill);
427 DBG((env->dbg, LEVEL_1, "\tset input of %+F to BAD\n", old_spill));
428 set_irn_n(old_spill, be_pos_Spill_val, new_Bad());
429 //sched_remove(old_spill);
430 spillinfo->old_spill = NULL;
437 * @param senv the spill environment
438 * @param to_spill the node that should be spilled
440 static void spill_node(spill_env_t *env, spill_info_t *spillinfo) {
443 // the node should be tagged for spilling already...
444 if(spillinfo->spill != NULL)
447 to_spill = spillinfo->spilled_node;
448 assert(sched_is_scheduled(to_spill) && "Node to be spilled must be scheduled!");
449 if (is_Phi(to_spill) && ir_nodeset_contains(&env->mem_phis, to_spill)) {
450 spill_phi(env, spillinfo);
452 spill_irn(env, spillinfo);
459 * | _ \ ___ _ __ ___ __ _| |_ ___ _ __(_) __ _| (_)_______
460 * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_ / _ \
461 * | _ < __/ | | | | | (_| | || __/ | | | (_| | | |/ / __/
462 * |_| \_\___|_| |_| |_|\__,_|\__\___|_| |_|\__,_|_|_/___\___|
467 * Tests whether value @p arg is available before node @p reloader
468 * @returns 1 if value is available, 0 otherwise
470 static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader) {
471 if(is_Unknown(arg) || arg == new_NoMem())
477 if(arg == get_irg_frame(env->irg))
480 // hack for now (happens when command should be inserted at end of block)
481 if(is_Block(reloader)) {
486 * Ignore registers are always available
488 if(arch_irn_is(env->arch_env, arg, ignore)) {
493 /* the following test does not work while spilling,
494 * because the liveness info is not adapted yet to the effects of the
495 * additional spills/reloads.
498 /* we want to remat before the insn reloader
499 * thus an arguments is alive if
500 * - it interferes with the reloaders result
501 * - or it is (last-) used by reloader itself
503 if (values_interfere(env->birg->lv, reloader, arg)) {
507 arity = get_irn_arity(reloader);
508 for (i = 0; i < arity; ++i) {
509 ir_node *rel_arg = get_irn_n(reloader, i);
519 * Checks whether the node can principally be rematerialized
521 static int is_remat_node(spill_env_t *env, ir_node *node) {
522 const arch_env_t *arch_env = env->arch_env;
524 assert(!be_is_Spill(node));
526 if(arch_irn_is(arch_env, node, rematerializable)) {
530 if(be_is_StackParam(node))
537 * Check if a node is rematerializable. This tests for the following conditions:
539 * - The node itself is rematerializable
540 * - All arguments of the node are available or also rematerialisable
541 * - The costs for the rematerialisation operation is less or equal a limit
543 * Returns the costs needed for rematerialisation or something
544 * > REMAT_COST_LIMIT if remat is not possible.
546 static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_node *reloader, int parentcosts) {
551 if(!is_remat_node(env, spilled))
552 return REMAT_COST_LIMIT;
554 if(be_is_Reload(spilled)) {
557 costs += arch_get_op_estimated_cost(env->arch_env, spilled);
559 if(parentcosts + costs >= REMAT_COST_LIMIT) {
560 return REMAT_COST_LIMIT;
564 for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
565 ir_node *arg = get_irn_n(spilled, i);
567 if(is_value_available(env, arg, reloader))
570 // we have to rematerialize the argument as well...
572 /* we only support rematerializing 1 argument at the moment,
573 * so that we don't have to care about register pressure
575 return REMAT_COST_LIMIT;
579 costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs);
580 if(parentcosts + costs >= REMAT_COST_LIMIT)
581 return REMAT_COST_LIMIT;
587 static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
588 int costs = check_remat_conditions_costs(env, spilled, reloader, 0);
590 return costs < REMAT_COST_LIMIT;
594 * Re-materialize a node.
596 * @param senv the spill environment
597 * @param spilled the node that was spilled
598 * @param reloader a irn that requires a reload
600 static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
606 if(is_Block(reloader)) {
609 bl = get_nodes_block(reloader);
612 ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
613 for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
614 ir_node *arg = get_irn_n(spilled, i);
616 if(is_value_available(env, arg, reloader)) {
619 ins[i] = do_remat(env, arg, reloader);
623 /* create a copy of the node */
624 res = new_ir_node(get_irn_dbg_info(spilled), env->irg, bl,
626 get_irn_mode(spilled),
627 get_irn_arity(spilled),
629 copy_node_attr(spilled, res);
630 new_backedge_info(res);
633 DBG((env->dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
635 /* insert in schedule */
636 sched_add_before(reloader, res);
641 int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) {
642 spill_info_t *spill_info;
645 // is the node rematerializable?
646 int costs = check_remat_conditions_costs(env, to_spill, before, 0);
647 if(costs < REMAT_COST_LIMIT)
651 // do we already have a spill?
652 spill_info = find_spillinfo(env, to_spill);
653 if(spill_info != NULL && spill_info->spill != NULL)
654 return env->reload_cost;
656 return env->spill_cost + env->reload_cost;
659 int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
660 ir_node *before = get_reload_insertion_point(block, pos);
661 return be_get_reload_costs(env, to_spill, before);
666 * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
667 * | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
668 * | || | | \__ \ __/ | | |_ | _ < __/ | (_) | (_| | (_| \__ \
669 * |___|_| |_|___/\___|_| \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
673 void be_insert_spills_reloads(spill_env_t *env) {
674 const arch_env_t *arch_env = env->arch_env;
680 /* process each spilled node */
681 for (si = set_first(env->spills); si; si = set_next(env->spills)) {
683 ir_node *spilled_node = si->spilled_node;
684 ir_mode *mode = get_irn_mode(spilled_node);
685 ir_node **copies = NEW_ARR_F(ir_node*, 0);
687 DBG((env->dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", spilled_node));
689 /* go through all reloads for this spill */
690 for (rld = si->reloaders; rld; rld = rld->next) {
691 ir_node *copy; /* a reaload is a "copy" of the original value */
693 if (rld->rematted_node != NULL) {
694 copy = rld->rematted_node;
696 sched_add_before(rld->reloader, copy);
697 } else if (be_do_remats && rld->allow_remat &&
698 check_remat_conditions(env, spilled_node, rld->reloader)) {
699 copy = do_remat(env, spilled_node, rld->reloader);
702 /* make sure we have a spill */
703 if (si->spill == NULL) {
708 /* create a reload */
709 copy = be_reload(arch_env, si->reload_cls, rld->reloader, mode, si->spill);
713 DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n",
714 copy, spilled_node, rld->reloader));
715 ARR_APP1(ir_node*, copies, copy);
718 /* if we had any reloads or remats, then we need to reconstruct the
719 * SSA form for the spilled value */
720 if (ARR_LEN(copies) > 0) {
721 /* Matze: used mem_phis as ignore uses in the past, I don't see how
722 one of the mem_phis can be a use of the spilled value...
723 so I changed this to NULL now */
725 be_get_birg_dom_front(env->birg),
726 be_get_birg_liveness(env->birg),
727 spilled_node, ARR_LEN(copies), copies,
732 si->reloaders = NULL;
735 #ifdef FIRM_STATISTICS
736 if (be_stat_ev_is_active()) {
737 be_stat_ev("spill_spills", spills);
738 be_stat_ev("spill_reloads", reloads);
739 be_stat_ev("spill_remats", remats);
741 #endif /* FIRM_STATISTICS */
743 be_remove_dead_nodes_from_schedule(env->irg);
744 /* Matze: In theory be_ssa_construction should take care of the livenes...
745 * try to disable this again in the future */
746 be_invalidate_liveness(env->birg);