2 * Author: Daniel Grund, Sebastian Hack, Matthias Braun
4 * Copyright: (c) Universitaet Karlsruhe
 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
16 #include "iredges_t.h"
17 #include "irbackedge_t.h"
26 #include "unionfind.h"
30 #include "besched_t.h"
34 #include "bechordal_t.h"
35 #include "bejavacoal.h"
36 #include "benodesets.h"
38 // only rematerialise when costs are less than REMAT_COST_LIMIT
39 // TODO determine a good value here...
40 #define REMAT_COST_LIMIT 10
/* Forward declaration: a reloader_t records one schedule position where a
 * spilled value must be reloaded (full definition not visible in this chunk). */
typedef struct _reloader_t reloader_t;

/**
 * Per-value spill bookkeeping: one entry per value that gets spilled.
 * NOTE(review): several field declarations (e.g. the 'spill' field referenced
 * throughout this file) and the closing of this struct are missing from this
 * chunk of the file.
 */
typedef struct _spill_info_t {
	/** the value that should get spilled */
	ir_node *spilled_node;
	/** list of places where the value should get reloaded */
	reloader_t *reloaders;

	/** the spill node, or a PhiM node */
	/** if we had the value of a phi spilled before but not the phi itself then
	 * this field contains the spill for the phi value */

	/* NOTE(review): the fields below belong to the spill environment struct
	 * (spill_env_t); its opening declaration is not visible in this chunk. */
	const arch_register_class_t *cls;
	const arch_env_t *arch_env;
	const be_chordal_env_t *chordal_env;
	int spill_cost;     /**< the cost of a single spill node */
	int reload_cost;    /**< the cost of a reload node */
	set *spills;        /**< all spill_info_t's, which must be placed */
	pset *mem_phis;     /**< set of all special spilled phis. allocated and freed separately */
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
76 * Compare two spill infos.
78 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
79 const spill_info_t *xx = x;
80 const spill_info_t *yy = y;
81 return xx->spilled_node != yy->spilled_node;
/**
 * Returns spill info for a specific value (returns NULL if the info doesn't
 * exist yet). Pure lookup: never creates an entry (contrast get_spillinfo()).
 * NOTE(review): the declaration of the local 'info' (spill_info_t) and the
 * closing brace are missing from this chunk of the file.
 */
static spill_info_t *find_spillinfo(const spill_env_t *env, ir_node *value) {
	int hash = nodeset_hash(value);
	/* only the key field matters for the set lookup (see cmp_spillinfo) */
	info.spilled_node = value;
	return set_find(env->spills, &info, sizeof(info), hash);
/**
 * Returns spill info for a specific value (the value that is to be spilled),
 * creating and inserting a freshly initialised entry if none exists yet.
 * NOTE(review): the 'res == NULL' check guarding the initialisation, part of
 * the field initialisation, and the final 'return res;' are missing from this
 * chunk of the file.
 */
static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
	spill_info_t info, *res;
	int hash = nodeset_hash(value);
	info.spilled_node = value;
	res = set_find(env->spills, &info, sizeof(info), hash);
	/* not found: initialise a new entry and insert it into the set */
	info.reloaders = NULL;
	info.old_spill = NULL;
	res = set_insert(env->spills, &info, sizeof(info), hash);
/* Sets the debug module of a spill environment. */
void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
	/* NOTE(review): the function body is not visible in this chunk --
	 * presumably it stores 'dbg' into env->dbg; confirm against the original. */
/* Creates a new spill environment. Caller owns the result and must release it
 * with be_delete_spill_env(). */
spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env) {
	spill_env_t *env = xmalloc(sizeof(env[0]));
	env->spills = new_set(cmp_spillinfo, 1024);
	env->cls = chordal_env->cls;
	env->chordal_env = chordal_env;
	env->arch_env = env->chordal_env->birg->main_env->arch_env;
	env->mem_phis = pset_new_ptr_default();
	// TODO, ask backend about costs...
	/* NOTE(review): the spill_cost initialisation and the final 'return env;'
	 * are missing from this chunk of the file. */
	env->reload_cost = 5;
	obstack_init(&env->obst);
/* Deletes a spill environment: releases the spill-info set, the memory-phi
 * set and the obstack holding the reloader_t entries. */
void be_delete_spill_env(spill_env_t *env) {
	del_set(env->spills);
	del_pset(env->mem_phis);
	obstack_free(&env->obst, NULL);
	/* NOTE(review): the tail of this function (presumably free(env) and the
	 * closing brace) is missing from this chunk of the file. */
149 * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___
150 * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
151 * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \
152 * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
/**
 * Records that value 'to_spill' must be available directly before node
 * 'before'; the actual spill/reload nodes are created later in
 * be_insert_spills_reloads(). For Phis, spill infos for all arguments are
 * created up-front as well.
 * NOTE(review): the local declarations ('info', 'rel', 'i', 'arity') and
 * several closing braces are missing from this chunk of the file.
 */
void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) {
	assert(sched_is_scheduled(before));
	assert(arch_irn_consider_in_reg_alloc(env->arch_env, env->cls, to_spill));

	info = get_spillinfo(env, to_spill);

	if(is_Phi(to_spill)) {
		// create spillinfos for the phi arguments
		for(i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
			ir_node *arg = get_irn_n(to_spill, i);
			get_spillinfo(env, arg);

	/* prepend a new reloader entry to this value's reload list
	 * (entries live on the environment's obstack) */
	rel = obstack_alloc(&env->obst, sizeof(rel[0]));
	rel->reloader = before;
	rel->next = info->reloaders;
	info->reloaders = rel;
/**
 * Determines the schedule position before which a reload for the control-flow
 * edge (block, pos) has to be inserted: the first node of 'block' if it has a
 * single predecessor, otherwise the control-flow op at the end of the
 * predecessor block.
 * NOTE(review): the 'return last;' and closing braces are missing from this
 * chunk of the file.
 */
static ir_node *get_reload_insertion_point(ir_node *block, int pos) {
	ir_node *predblock, *last;

	/* simply add the reload to the beginning of the block if we only have 1 predecessor
	 * (we don't need to check for phis as there can't be any in a block with only 1 pred)
	 */
	if(get_Block_n_cfgpreds(block) == 1) {
		assert(!is_Phi(sched_first(block)));
		return sched_first(block);

	/* We have to reload the value in pred-block */
	predblock = get_Block_cfgpred_block(block, pos);
	last = sched_last(predblock);

	/* we might have projs and keepanys behind the jump... */
	while(is_Proj(last) || be_is_Keep(last)) {
		last = sched_prev(last);
		assert(!sched_is_end(last));

	/* the block must end in a control-flow op we can insert before */
	assert(is_cfop(last));

	// add the reload before the (cond-)jump
206 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
207 ir_node *before = get_reload_insertion_point(block, pos);
208 be_add_reload(env, to_spill, before);
/**
 * Marks a Phi for "memory spilling": the Phi is registered in env->mem_phis
 * and spill infos are created for it and all of its arguments, so spill_phi()
 * can later turn it into a PhiM over the argument spills.
 * NOTE(review): the local declarations ('spill', 'i', 'arity'), several
 * closing braces and the tail of this function (removal of the old spill from
 * the schedule) are missing from this chunk of the file.
 */
void be_spill_phi(spill_env_t *env, ir_node *node) {
	assert(is_Phi(node));

	pset_insert_ptr(env->mem_phis, node);

	// create spillinfos for the phi arguments
	spill = get_spillinfo(env, node);
	for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		ir_node *arg = get_irn_n(node, i);
		get_spillinfo(env, arg);

	// if we had a spill for the phi value before, then remove this spill from
	// schedule, as we will remove it in the insert spill/reload phase
	if(spill->spill != NULL && !is_Phi(spill->spill)) {
		spill->old_spill = spill->spill;
236 * / ___|_ __ ___ __ _| |_ ___ / ___| _ __ (_) | |___
237 * | | | '__/ _ \/ _` | __/ _ \ \___ \| '_ \| | | / __|
238 * | |___| | | __/ (_| | || __/ ___) | |_) | | | \__ \
239 * \____|_| \___|\__,_|\__\___| |____/| .__/|_|_|_|___/
/**
 * Schedules a node after an instruction. (That is the place after all projs and phis
 * that are scheduled after the instruction)
 * This function also skips phi nodes at the beginning of a block
 * NOTE(review): the loop's closing brace and the 'else' between the two
 * branches below are missing from this chunk of the file.
 */
static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
	ir_node *next = sched_next(sched_after);
	/* skip Projs and Phis that belong to the preceding instruction / block head */
	while(is_Proj(next) || is_Phi(next)) {
		next = sched_next(next);
	assert(next != NULL);

	if(sched_is_end(next)) {
		/* reached the schedule end: append at the very end of the block */
		sched_add_after(sched_last(get_nodes_block(sched_after)), node);
		sched_add_before(next, node);
/**
 * Spills a non-phi node: creates a be_Spill node and schedules it directly
 * after the definition of the spilled value.
 *
 * @param env        the spill environment
 * @param spillinfo  spill info of the node that should be spilled
 *
 * NOTE(review): several closing braces and an early 'return' (after reusing a
 * Reload's memory input) are missing from this chunk of the file.
 */
static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) {
	ir_node *to_spill = spillinfo->spilled_node;

	DBG((env->dbg, LEVEL_1, "%+F\n", to_spill));

	/* Trying to spill an already spilled value, no need for a new spill
	 * node then, we can simply connect to the same one for this reload
	 *
	 * (although rematerialization code should handle most of these cases
	 * this can still happen when spilling Phis)
	 */
	if(be_is_Reload(to_spill)) {
		/* reuse the memory value the Reload was fed from */
		spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem);

	/* honour the backend's 'dont_spill' flag according to the verify options */
	if (arch_irn_is(env->arch_env, to_spill, dont_spill)) {
		if (env->chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN)
			ir_fprintf(stderr, "Verify warning: spilling 'dont_spill' node %+F\n", to_spill);
		else if (env->chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT)
			assert(0 && "Attempt to spill a node marked 'dont_spill'");

	spillinfo->spill = be_spill(env->arch_env, to_spill);
	sched_add_after_insn(to_spill, spillinfo->spill);
298 static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
/**
 * If the first usage of a Phi result would be out of memory
 * there is no sense in allocating a register for it.
 * Thus we spill it and all its operands to the same spill slot.
 * Therefore the phi/dataB becomes a phi/Memory
 *
 * @param env        the spill environment
 * @param spillinfo  spill info of the Phi node that should be spilled
 *
 * NOTE(review): the local declarations ('ins', 'i') and several closing
 * braces are missing from this chunk of the file.
 */
static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) {
	ir_node *phi = spillinfo->spilled_node;
	int arity = get_irn_arity(phi);
	ir_node *block = get_nodes_block(phi);

	/* build a new PhiM */
	/* start with Bad predecessors; they are replaced by the argument spills below */
	ins = alloca(sizeof(ir_node*) * arity);
	for(i = 0; i < arity; ++i) {
		ins[i] = get_irg_bad(env->chordal_env->irg);
	spillinfo->spill = new_r_Phi(env->chordal_env->irg, block, arity, ins, mode_M);

	/* spill every argument and wire its spill in as the matching PhiM pred */
	for(i = 0; i < arity; ++i) {
		ir_node *arg = get_irn_n(phi, i);
		spill_info_t *arg_info = get_spillinfo(env, arg);

		spill_node(env, arg_info);

		set_irn_n(spillinfo->spill, i, arg_info->spill);

	// rewire reloads from old_spill to phi
	if(spillinfo->old_spill != NULL) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(spillinfo->old_spill, edge, next) {
			ir_node* reload = get_edge_src_irn(edge);
			assert(be_is_Reload(reload) || is_Phi(reload));
			set_irn_n(reload, get_edge_src_pos(edge), spillinfo->spill);
		spillinfo->old_spill = NULL;
/**
 * Spills a node: dispatches to spill_phi() for registered memory-phis and to
 * spill_irn() otherwise; does nothing if a spill already exists.
 *
 * @param env        the spill environment
 * @param spillinfo  spill info of the node that should be spilled
 *
 * NOTE(review): the declaration of 'to_spill', the early 'return;' after the
 * NULL check, and the 'else' between the two calls are missing from this
 * chunk of the file.
 */
static void spill_node(spill_env_t *env, spill_info_t *spillinfo) {
	// the node should be tagged for spilling already...
	if(spillinfo->spill != NULL)

	to_spill = spillinfo->spilled_node;
	if (is_Phi(to_spill) && pset_find_ptr(env->mem_phis, spillinfo->spilled_node)) {
		spill_phi(env, spillinfo);
		spill_irn(env, spillinfo);
371 * | _ \ ___ _ __ ___ __ _| |_ ___ _ __(_) __ _| (_)_______
372 * | |_) / _ \ '_ ` _ \ / _` | __/ _ \ '__| |/ _` | | |_ / _ \
373 * | _ < __/ | | | | | (_| | || __/ | | | (_| | | |/ / __/
374 * |_| \_\___|_| |_| |_|\__,_|\__\___|_| |_|\__,_|_|_/___\___|
/**
 * Tests whether value @p arg is available before node @p reloader
 * @returns 1 if value is available, 0 otherwise
 * NOTE(review): the return statements of the individual tests and the end of
 * this function (the loop body and final return) are missing from this chunk
 * of the file.
 */
static int is_value_available(spill_env_t *env, ir_node *arg, ir_node *reloader) {
	/* Unknown and NoMem need no register, they are trivially available */
	if(is_Unknown(arg) || arg == new_NoMem())

	/* the frame pointer is always available */
	if(arg == get_irg_frame(env->chordal_env->irg))

	/* the following test does not work while spilling,
	 * because the liveness info is not adapted yet to the effects of the
	 * additional spills/reloads.
	 *
	 * So we can only do this test for ignore registers (of our register class)
	 */
	if(arch_get_irn_reg_class(env->arch_env, arg, -1) == env->chordal_env->cls
	   && arch_irn_is(env->arch_env, arg, ignore)) {

	/* we want to remat before the insn reloader
	 * thus an arguments is alive if
	 *   - it interferes with the reloaders result
	 *   - or it is (last-) used by reloader itself
	 */
	if (values_interfere(env->chordal_env->lv, reloader, arg)) {

	/* check whether reloader itself uses arg */
	arity = get_irn_arity(reloader);
	for (i = 0; i < arity; ++i) {
		ir_node *rel_arg = get_irn_n(reloader, i);
/**
 * Checks whether the node can principally be rematerialized: the backend must
 * have flagged it 'rematerializable' and it must not be a StackParam.
 * NOTE(review): the return statements of both branches and the function end
 * are missing from this chunk of the file.
 */
static int is_remat_node(spill_env_t *env, ir_node *node) {
	const arch_env_t *arch_env = env->arch_env;

	assert(!be_is_Spill(node));

	if(arch_irn_is(arch_env, node, rematerializable)) {

	if(be_is_StackParam(node))
/**
 * Check if a node is rematerializable. This tests for the following conditions:
 *
 * - The node itself is rematerializable
 * - All arguments of the node are available or also rematerialisable
 * - The costs for the rematerialisation operation is less or equal a limit
 *
 * Returns the costs needed for rematerialisation or something
 * > REMAT_COST_LIMIT if remat is not possible.
 *
 * NOTE(review): local declarations (e.g. 'costs', 'i', 'arity'), the
 * Reload branch body, a 'continue;' after the availability test and the
 * guard limiting remat to a single unavailable argument are missing from
 * this chunk of the file.
 */
static int check_remat_conditions_costs(spill_env_t *env, ir_node *spilled, ir_node *reloader, int parentcosts) {
	if(!is_remat_node(env, spilled))
		return REMAT_COST_LIMIT;

	if(be_is_Reload(spilled)) {

	/* accumulate the backend's cost estimate and bail out early at the limit */
	costs += arch_get_op_estimated_cost(env->arch_env, spilled);
	if(parentcosts + costs >= REMAT_COST_LIMIT) {
		return REMAT_COST_LIMIT;

	for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
		ir_node *arg = get_irn_n(spilled, i);

		if(is_value_available(env, arg, reloader))

		// we have to rematerialize the argument as well...
		/* we only support rematerializing 1 argument at the moment,
		 * so that we don't have to care about register pressure
		 */
		return REMAT_COST_LIMIT;

		/* recurse: remat the argument too, still respecting the limit */
		costs += check_remat_conditions_costs(env, arg, reloader, parentcosts + costs);
		if(parentcosts + costs >= REMAT_COST_LIMIT)
			return REMAT_COST_LIMIT;
491 static int check_remat_conditions(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
492 int costs = check_remat_conditions_costs(env, spilled, reloader, 0);
494 return costs < REMAT_COST_LIMIT;
/**
 * Re-materialize a node: copies 'spilled' (recursively rematerialising any
 * unavailable arguments) and schedules the copy directly before 'reloader'.
 *
 * @param env      the spill environment
 * @param spilled  the node that was spilled
 * @param reloader a irn that requires a reload
 *
 * NOTE(review): local declarations ('res', 'ins', 'i', 'arity'), the
 * 'ins[i] = arg;' branch for available values, part of the new_ir_node()
 * argument list and the final 'return res;' are missing from this chunk of
 * the file.
 */
static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader) {
	ir_node *bl = get_nodes_block(reloader);

	/* collect the (possibly rematerialised) arguments */
	ins = alloca(get_irn_arity(spilled) * sizeof(ins[0]));
	for(i = 0, arity = get_irn_arity(spilled); i < arity; ++i) {
		ir_node *arg = get_irn_n(spilled, i);

		if(is_value_available(env, arg, reloader)) {
			ins[i] = do_remat(env, arg, reloader);

	/* create a copy of the node */
	res = new_ir_node(get_irn_dbg_info(spilled), env->chordal_env->irg, bl,
		get_irn_mode(spilled),
		get_irn_arity(spilled),
	copy_node_attr(spilled, res);
	new_backedge_info(res);

	DBG((env->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader));

	/* insert in schedule */
	assert(!is_Block(reloader));
	sched_add_before(reloader, res);
/**
 * Estimates the cost of making 'to_spill' available before 'before': the
 * remat cost if rematerialisation is possible, otherwise the reload cost
 * (plus the spill cost when no spill exists yet).
 * NOTE(review): the 'return costs;' for the remat case and the closing brace
 * are missing from this chunk of the file.
 */
int be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before) {
	spill_info_t *spill_info;

	// is the node rematerializable?
	int costs = check_remat_conditions_costs(env, to_spill, before, 0);
	if(costs < REMAT_COST_LIMIT)

	// do we already have a spill?
	spill_info = find_spillinfo(env, to_spill);
	if(spill_info != NULL && spill_info->spill != NULL)
		return env->reload_cost;

	return env->spill_cost + env->reload_cost;
555 int be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
556 ir_node *before = get_reload_insertion_point(block, pos);
557 return be_get_reload_costs(env, to_spill, before);
562 * |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
563 * | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ |/ _ \ / _` |/ _` / __|
564 * | || | | \__ \ __/ | | |_ | _ < __/ | (_) | (_| | (_| \__ \
565 * |___|_| |_|___/\___|_| \__| |_| \_\___|_|\___/ \__,_|\__,_|___/
/**
 * Main driver: for every recorded spill info, create a reload or
 * rematerialisation before each recorded reloader (spilling the value first
 * where needed), rebuild SSA form for the multiple new definitions, then
 * remove dead nodes from the schedule and recompute liveness.
 * NOTE(review): local declarations ('si', 'rld', 'new_val'), the spill_node()
 * call in the "make sure we have a spill" branch, several closing braces and
 * the 'else' between the remat and reload paths are missing from this chunk
 * of the file.
 */
void be_insert_spills_reloads(spill_env_t *env) {
	const arch_env_t *arch_env = env->arch_env;

	/* process each spilled node */
	for(si = set_first(env->spills); si; si = set_next(env->spills)) {
		ir_mode *mode = get_irn_mode(si->spilled_node);
		pset *values = pset_new_ptr(16);

		/* go through all reloads for this spill */
		for(rld = si->reloaders; rld; rld = rld->next) {
			/* cheap enough to recompute? then remat instead of reloading */
			if (check_remat_conditions(env, si->spilled_node, rld->reloader)) {
				new_val = do_remat(env, si->spilled_node, rld->reloader);

				/* make sure we have a spill */
				new_val = be_reload(arch_env, env->cls, rld->reloader, mode, si->spill);

			DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
			pset_insert_ptr(values, new_val);

		if(pset_count(values) > 0) {
			/* introduce copies, rewire the uses */
			pset_insert_ptr(values, si->spilled_node);
			be_ssa_constr_set_ignore(env->chordal_env->dom_front, env->chordal_env->lv, values, env->mem_phis);

		si->reloaders = NULL;

	be_remove_dead_nodes_from_schedule(env->chordal_env->irg);
	be_liveness_recompute(env->chordal_env->lv);