4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irprintf_t.h"
28 #include "iredges_t.h"
34 #include "bespillbelady.h"
36 #include "besched_t.h"
40 #include "bechordal_t.h"
49 #define DEBUG_LVL 0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
50 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Classic MIN macro: arguments are parenthesized, but each is evaluated
 * twice — do not pass expressions with side effects. */
52 #define MIN(a,b) (((a)<(b))?(a):(b))
54 typedef struct _workset_t workset_t;

/**
 * Environment carried through the whole belady spilling pass:
 * the register class being processed, the current working set,
 * and the bookkeeping for spills, reloads and phi-copies.
 */
56 typedef struct _belady_env_t {
58 const arch_env_t *arch;
59 const arch_register_class_t *cls;
60 int n_regs; /**< number of regs in this reg-class */
62 workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
63 be_uses_t *uses; /**< env for the next-use magic */
64 ir_node *instr; /**< current instruction */
65 unsigned instr_nr; /**< current instruction number (relative to block start) */
66 pset *used; /**< holds the values used (so far) in the current BB */
67 pset *copies; /**< holds all copies placed due to phi-spilling */
69 spill_env_t *senv; /* see bespill.h */
70 pset *reloads; /**< all reload nodes placed */

/* Tail of workset_t: a length-counted array of (value, next-use time)
 * pairs. vals[1] is the old-style "inline array" idiom; the struct is
 * over-allocated so vals[] can actually hold up to n_regs entries. */
75 int len; /**< current length */
76 loc_t vals[1]; /**< inlined array of the values/distances in this working set */
/**
 * Debug helper: print every value of the workset together with its
 * recorded next-use time, one entry per line.
 */
79 void workset_print(const workset_t *w)
83 for(i = 0; i < w->len; ++i) {
84 ir_printf("%+F %d\n", w->vals[i].irn, w->vals[i].time);
89 * Alloc a new workset on obstack @p ob with maximum size @p max
91 static INLINE workset_t *new_workset(struct obstack *ob, belady_env_t *bel) {
/* Over-allocate so the inlined vals[1] array can hold n_regs entries
 * (hence the "-1": one entry is already part of sizeof(*res)). */
93 size_t size = sizeof(*res) + (bel->n_regs-1)*sizeof(res->vals[0]);
94 res = obstack_alloc(ob, size);
101 * Alloc a new instance on obstack and make it equal to @param ws
103 static INLINE workset_t *workset_clone(struct obstack *ob, workset_t *ws) {
105 size_t size = sizeof(*res) + (ws->bel->n_regs-1)*sizeof(res->vals[0]);
106 res = obstack_alloc(ob, size);
/* Flat copy is sufficient: loc_t entries hold plain pointers/times. */
107 memcpy(res, ws, size);
112 * Do NOT alloc anything. Make @param tgt equal to @param src.
113 * returns @param tgt for convenience
115 static INLINE workset_t *workset_copy(workset_t *tgt, workset_t *src) {
/* Caller must guarantee tgt was allocated for at least
 * src->bel->n_regs entries (see new_workset). */
116 size_t size = sizeof(*src) + (src->bel->n_regs-1)*sizeof(src->vals[0]);
117 memcpy(tgt, src, size);
122 * Overwrites the current content array of @param ws with the
123 * @param count locations given at memory @param locs.
124 * Set the length of @param ws to count.
/* Note: locs appears twice in the expansion, but the second occurrence
 * is inside sizeof and therefore not evaluated at run time. */
126 #define workset_bulk_fill(ws, count, locs) memcpy(&(ws)->vals[0], locs, ((ws)->len=count)*sizeof(locs[0]));
130 * Inserts the value @p val into the workset, iff it is not
131 * already contained. The workset must not be full.
133 static INLINE void workset_insert(workset_t *ws, ir_node *val) {
135 /* check for current regclass */
/* values of a foreign register class are silently ignored */
136 if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
137 DBG((dbg, DBG_DECIDE, "Dropped %+F\n", val));
141 /* check if val is already contained */
142 for(i=0; i<ws->len; ++i)
143 if (ws->vals[i].irn == val)
/* append val; the .time field is NOT set here — callers that need it
 * must fill it via workset_set_time() */
147 assert(ws->len < ws->bel->n_regs && "Workset already full!");
148 ws->vals[ws->len++].irn = val;
152 * Inserts all values in array @p vals of length @p cnt
153 * into the workset. There must be enough space for the
156 static INLINE void workset_bulk_insert(workset_t *ws, int cnt, ir_node **vals) {
159 for(o=0; o<cnt; ++o) {
160 ir_node *val = vals[o];
161 DBG((dbg, DBG_TRACE, "Bulk insert %+F\n", val));
162 /* check for current regclass */
/* same filtering as workset_insert: wrong class and duplicates are
 * skipped, everything else is appended */
163 if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
164 DBG((dbg, DBG_TRACE, "Wrong reg class\n"));
168 /* check if val is already contained */
169 for(i=0; i<ws->len; ++i)
170 if (ws->vals[i].irn == val) {
171 DBG((dbg, DBG_TRACE, "Already contained\n"));
176 assert(ws->len < ws->bel->n_regs && "Workset does not have enough room!");
177 ws->vals[ws->len++].irn = val;
178 DBG((dbg, DBG_TRACE, "Inserted\n"));
/* label target of the skip-gotos above needs a statement */
181 /*epsilon statement :)*/;
186 * Removes all entries from this workset
188 #define workset_clear(ws) (ws)->len = 0;
191 * Removes the value @p val from the workset if present.
193 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
195 for(i=0; i<ws->len; ++i)
196 if (ws->vals[i].irn == val) {
/* order is irrelevant, so fill the hole with the last entry (O(1)) */
197 ws->vals[i] = ws->vals[--ws->len];
/**
 * Linear membership test: returns non-zero iff @p val is currently
 * held in the workset.
 */
202 static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
204 for(i=0; i<ws->len; ++i)
205 if (ws->vals[i].irn == val)
211 * Iterates over all values in the working set.
212 * @p ws The workset to iterate
213 * @p v A variable to put the current value in
214 * @p i An integer for internal use
216 #define workset_foreach(ws, v, i) for(i=0; \
217 v=(i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \

/* Small accessors over the inline vals[] array. workset_sort orders
 * entries by loc_compare (increasing next-use distance). */
220 #define workset_set_time(ws, i, t) (ws)->vals[i].time=t
221 #define workset_set_length(ws, length) (ws)->len = length
222 #define workset_get_length(ws) ((ws)->len)
223 #define workset_get_val(ws, i) ((ws)->vals[i].irn)
224 #define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
/**
 * Per-block result of the belady pass: the workset at block entry and
 * at block exit. Stored in the block's link field.
 */
226 typedef struct _block_info_t {
227 workset_t *ws_start, *ws_end;

/* Allocate a fresh block_info on the obstack, worksets start out unset. */
231 static INLINE void *new_block_info(struct obstack *ob) {
232 block_info_t *res = obstack_alloc(ob, sizeof(*res));
233 res->ws_start = NULL;

/* block_info_t is attached to blocks via the generic irn link pointer */
239 #define get_block_info(blk) ((block_info_t *)get_irn_link(blk))
240 #define set_block_info(blk, info) set_irn_link(blk, info)
/**
 * Spill-env callback: a Phi is a "memory phi" (i.e. it lives in memory,
 * not in a register) iff it is NOT part of its block's start workset.
 */
242 static int is_mem_phi(const ir_node *irn, void *data) {
244 ir_node *blk = get_nodes_block(irn);
246 DBG((dbg, DBG_SPILL, "Is %+F a mem-phi?\n", irn));
247 sws = get_block_info(blk)->ws_start;
248 DBG((dbg, DBG_SPILL, " %d\n", !workset_contains(sws, irn)));
249 return !workset_contains(sws, irn);
253 * @return The distance to the next use
254 * Or 0 if irn is an ignore node
257 static INLINE unsigned get_distance(belady_env_t *bel, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
259 arch_irn_flags_t fl = arch_irn_get_flags(bel->arch, def);
260 unsigned dist = be_get_next_use(bel->uses, from, from_step, def, skip_from_uses);
/* ignore/dont_spill values must never be displaced: give them the best
 * (smallest) possible distance so they always stay in the workset */
262 if(!USES_IS_INIFINITE(dist) && (fl & (arch_irn_flags_ignore | arch_irn_flags_dont_spill)) != 0)
269 * Performs the actions necessary to grant the request that:
270 * - new_vals can be held in registers
271 * - as few as possible other values are disposed
272 * - the worst values get disposed
274 * @p is_usage indicates that the values in new_vals are used (not defined)
275 * In this case reloads must be performed
277 static void displace(belady_env_t *bel, workset_t *new_vals, int is_usage) {
279 int i, len, max_allowed, demand, iter;
280 workset_t *ws = bel->ws;
281 ir_node **to_insert = alloca(bel->n_regs * sizeof(*to_insert));
284 * 1. Identify the number of needed slots and the values to reload
287 workset_foreach(new_vals, val, iter) {
288 /* mark value as used */
290 pset_insert_ptr(bel->used, val);
292 if (!workset_contains(ws, val)) {
293 DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
294 to_insert[demand++] = val;
/* a used value not in a register must be reloaded before bel->instr */
296 be_add_reload(bel->senv, val, bel->instr);
298 DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
300 DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
304 * 2. Make room for at least 'demand' slots
306 len = workset_get_length(ws);
307 max_allowed = bel->n_regs - demand;
309 /* Only make more free room if we do not have enough */
310 if (len > max_allowed) {
311 /* get current next-use distance */
312 for (i=0; i<ws->len; ++i)
313 workset_set_time(ws, i, get_distance(bel, bel->instr, bel->instr_nr, workset_get_val(ws, i), !is_usage))
315 /* sort entries by increasing nextuse-distance*/
318 /* Logic for not needed live-ins: If a value is disposed
319 before its first usage, remove it from start workset */
320 for (i=max_allowed; i<ws->len; ++i) {
321 ir_node *irn = ws->vals[i].irn;
/* never used in this block so far -> it was a pointless live-in;
 * drop it from the block's start workset as well */
322 if (!pset_find_ptr(bel->used, irn)) {
323 ir_node *curr_bb = get_nodes_block(bel->instr);
324 workset_t *ws_start = get_block_info(curr_bb)->ws_start;
325 workset_remove(ws_start, irn);
327 DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
329 DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
332 /* kill the last 'demand' entries in the array */
333 workset_set_length(ws, max_allowed);
337 * 3. Insert the new values into the workset
339 workset_bulk_insert(bel->ws, demand, to_insert);
342 static void belady(ir_node *blk, void *env);
345 * Collects all values live-in at block @p blk and all phi results in this block.
346 * Then it adds the best values (at most n_regs) to the blocks start_workset.
347 * The phis among the remaining values get spilled: Introduce psudo-copies of
348 * their args to break interference and make it possible to spill them to the
351 static block_info_t *compute_block_start_info(ir_node *blk, void *env) {
352 belady_env_t *bel = env;
353 ir_node *irn, *first;
355 int i, count, ws_count;
356 loc_t loc, *starters;
357 ir_graph *irg = get_irn_irg(blk);
359 block_info_t *res = get_block_info(blk);
361 /* Have we seen this block before? */
365 /* Create the block info for this block. */
366 res = new_block_info(&bel->ob);
367 set_block_info(blk, res);
370 /* Get all values living at the block start sorted by next use*/
373 DBG((dbg, DBG_START, "Living at start of %+F:\n", blk));
374 first = sched_first(blk);

/* candidate set 1: the block's own Phis of the current register class */
376 sched_foreach(blk, irn)
377 if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, -1) == bel->cls) {
379 loc.time = get_distance(bel, first, 0, irn, 0);
380 obstack_grow(&ob, &loc, sizeof(loc));
381 DBG((dbg, DBG_START, " %+F:\n", irn));

/* candidate set 2: the live-ins of the current register class */
386 live_foreach(blk, li)
387 if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, -1) == bel->cls) {
388 loc.irn = (ir_node *)li->irn;
389 loc.time = get_distance(bel, first, 0, li->irn, 0);
390 obstack_grow(&ob, &loc, sizeof(loc));
391 DBG((dbg, DBG_START, " %+F:\n", irn));

/* best candidates (smallest next-use distance) come first */
395 starters = obstack_finish(&ob);
396 qsort(starters, count, sizeof(starters[0]), loc_compare);
399 /* If we have only one predecessor, we want the start_set of blk to be the end_set of pred */
400 if (get_Block_n_cfgpreds(blk) == 1 && blk != get_irg_start_block(get_irn_irg(blk))) {
401 ir_node *pred_blk = get_Block_cfgpred_block(blk, 0);
402 block_info_t *pred_info = get_block_info(pred_blk);
404 /* if pred block has not been processed yet, do it now */
406 belady(pred_blk, bel);
407 pred_info = get_block_info(pred_blk);
410 /* now we have an end_set of pred */
411 assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
412 res->ws_start = workset_clone(&bel->ob, pred_info->ws_end);
416 /* Else we want the start_set to be the values used 'the closest' */
418 /* Copy the best ones from starters to start workset */
419 ws_count = MIN(count, bel->n_regs);
420 res->ws_start = new_workset(&bel->ob, bel);
421 workset_bulk_fill(res->ws_start, ws_count, starters);
425 /* The phis of this block which are not in the start set have to be spilled later.
426 * Therefore we add temporary copies in the pred_blocks so the spills can spill
427 * into the same spill slot.
428 * After spilling these copies get deleted. */
429 for (i=workset_get_length(res->ws_start); i<count; ++i) {
432 irn = starters[i].irn;
/* only Phis of this block need the copy treatment */
433 if (!is_Phi(irn) || get_nodes_block(irn) != blk)
436 DBG((dbg, DBG_START, "For %+F:\n", irn));
438 for (max=get_irn_arity(irn), o=0; o<max; ++o) {
439 ir_node *arg = get_irn_n(irn, o);
440 ir_node *pred_block = get_Block_cfgpred_block(get_nodes_block(irn), o);
441 ir_node *cpy = be_new_Copy(bel->cls, irg, pred_block, arg);
442 pset_insert_ptr(bel->copies, cpy);
443 DBG((dbg, DBG_START, " place a %+F of %+F in %+F\n", cpy, arg, pred_block));
/* NOTE(review): sched_add_before is called with a block as first
 * argument — verify this schedules cpy at the end of pred_block as
 * intended (cf. sched API) */
444 sched_add_before(pred_block, cpy);
445 set_irn_n(irn, o, cpy);
449 obstack_free(&ob, NULL);
455 * For the given block @p blk, decide for each values
456 * whether it is used from a register or is reloaded
459 static void belady(ir_node *blk, void *env) {
460 belady_env_t *bel = env;
464 block_info_t *blk_info;
466 /* Don't do a block twice */
467 if (get_block_info(blk))
470 /* get the starting workset for this block */
471 blk_info = compute_block_start_info(blk, bel);
473 DBG((dbg, DBG_DECIDE, "\n"));
474 DBG((dbg, DBG_DECIDE, "Decide for %+F\n", blk));
476 workset_copy(bel->ws, blk_info->ws_start);
477 DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", blk));
478 workset_foreach(bel->ws, irn, iter)
479 DBG((dbg, DBG_WSETS, " %+F\n", irn));
481 /* process the block from start to end */
482 DBG((dbg, DBG_WSETS, "Processing...\n"));
/* 'used' tracks which values were actually referenced in this block
 * (consulted by displace() to drop pointless live-ins) */
483 bel->used = pset_new_ptr(32);
485 new_vals = new_workset(&bel->ob, bel);
486 sched_foreach(blk, irn) {
487 assert(workset_get_length(bel->ws) <= bel->n_regs && "Too much values in workset!");
490 /* projs are handled with the tuple value.
491 * Phis are no real instr (see insert_starters())
492 * instr_nr does not increase */
493 if (is_Proj(irn) || is_Phi(irn)) {
494 DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
497 DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
499 /* set instruction in the workset */
502 /* allocate all values _used_ by this instruction */
503 workset_clear(new_vals);
/* get_irn_in() is 1-based with the block at index 0, hence arity+1 */
504 workset_bulk_insert(new_vals, get_irn_arity(irn)+1, get_irn_in(irn));
505 displace(bel, new_vals, 1);
507 /* allocate all values _defined_ by this instruction */
508 workset_clear(new_vals);
509 if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
/* a mode_T node defines its values through the Projs scheduled
 * directly after it */
511 for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
512 workset_insert(new_vals, proj);
514 workset_insert(new_vals, irn);
516 displace(bel, new_vals, 0);
522 /* Remember end-workset for this block */
523 blk_info->ws_end = workset_clone(&bel->ob, bel->ws);
524 DBG((dbg, DBG_WSETS, "End workset for %+F:\n", blk));
525 workset_foreach(blk_info->ws_end, irn, iter)
526 DBG((dbg, DBG_WSETS, " %+F\n", irn));
530 * 'decide' is block-local and makes assumptions
531 * about the set of live-ins. Thus we must adapt the
532 * live-outs to the live-ins at each block-border.
534 static void fix_block_borders(ir_node *blk, void *env) {
536 belady_env_t *bel = env;
537 int i, max, iter, iter2;
539 DBG((dbg, DBG_FIX, "\n"));
540 DBG((dbg, DBG_FIX, "Fixing %+F\n", blk));
542 wsb = get_block_info(blk)->ws_start;
544 /* process all pred blocks */
545 for (i=0, max=get_irn_arity(blk); i<max; ++i) {
546 ir_node *irnb, *irnp, *pred = get_Block_cfgpred_block(blk, i);
547 workset_t *wsp = get_block_info(pred)->ws_end;
549 DBG((dbg, DBG_FIX, " Pred %+F\n", pred));
551 workset_foreach(wsb, irnb, iter) {
552 /* if irnb is a phi of the current block we reload
553 * the corresponding argument, else irnb itself */
554 if(is_Phi(irnb) && blk == get_nodes_block(irnb))
555 irnb = get_irn_n(irnb, i);
557 /* Unknowns are available everywhere */
558 if(get_irn_opcode(irnb) == iro_Unknown)
561 /* check if irnb is in a register at end of pred */
562 workset_foreach(wsp, irnp, iter2)
566 /* irnb is in memory at the end of pred, so we have to reload it */
567 DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
568 be_add_reload_on_edge(bel->senv, irnb, blk, i);
/* label target of the skip-gotos above needs a statement */
571 /*epsilon statement :)*/;
577 * Removes all used reloads from bel->reloads.
578 * The remaining nodes in bel->reloads will be removed from the graph.
580 static void rescue_used_reloads(ir_node *irn, void *env) {
581 pset *rlds = (pset *)env;
/* graph-walker callback: any node reached by the walk is still in use,
 * so drop it from the "unused reloads" candidate set */
582 if (pset_find_ptr(rlds, irn))
583 pset_remove_ptr(rlds, irn);
587 * Removes all copies introduced for phi-spills
589 static void remove_copies(belady_env_t *bel) {
592 for (irn = pset_first(bel->copies); irn; irn = pset_next(bel->copies)) {
595 assert(be_is_Copy(irn));
596 assert(get_irn_n_edges(irn) == 1 && "This is not a copy introduced in 'compute_block_start_info()'. Who created it?");
/* reroute the single user of the copy to the copied value, which
 * short-circuits the copy out of the graph */
598 user = get_irn_edge(get_irn_irg(irn), irn, 0)->src;
600 src = get_irn_n(irn, be_pos_Copy_orig);
/* NOTE(review): input index 0 of 'user' is assumed to be the edge that
 * pointed at the copy — holds for the Phi arguments rewritten in
 * compute_block_start_info, but verify for other user kinds */
601 set_irn_n(user, 0, src);
606 * Finds all unused reloads and remove them from the schedule
607 * Also removes spills if they are not used anymore after removing reloads
609 static void remove_unused_reloads(ir_graph *irg, belady_env_t *bel) {
/* after this walk, bel->reloads contains only reloads unreachable from
 * the graph's end, i.e. dead ones */
612 irg_walk_graph(irg, rescue_used_reloads, NULL, bel->reloads);
613 for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads)) {
615 DBG((dbg, DBG_SPILL, "Removing %+F before %+F in %+F\n", irn, sched_next(irn), get_nodes_block(irn)));
617 if (be_is_Reload(irn))
618 spill = get_irn_n(irn, be_pos_Reload_mem);
/* disconnect the dead reload by badding its input */
621 set_irn_n(irn, 0, new_Bad());
624 if (be_is_Reload(irn)) {
625 /* if spill not used anymore, remove it too
626 * test of regclass is necessary since spill may be a phi-M */
627 if (get_irn_n_edges(spill) == 0 && bel->cls == arch_get_irn_reg_class(bel->arch, spill, -1)) {
628 set_irn_n(spill, 0, new_Bad());
/**
 * Main entry point: run belady-style spilling for the register class of
 * @p chordal_env over its graph. Computes per-block worksets, fixes the
 * block borders, materializes spills/reloads and cleans up afterwards.
 */
635 void be_spill_belady(const be_chordal_env_t *chordal_env) {
638 FIRM_DBG_REGISTER(dbg, "ir.be.spillbelady");
640 /* init belady env */
641 obstack_init(&bel.ob);
642 bel.arch = chordal_env->birg->main_env->arch_env;
643 bel.cls = chordal_env->cls;
644 bel.n_regs = arch_register_class_n_regs(bel.cls);
645 bel.ws = new_workset(&bel.ob, &bel);
646 bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
647 bel.senv = be_new_spill_env(chordal_env, is_mem_phi, NULL);
648 DEBUG_ONLY(be_set_spill_env_dbg_module(bel.senv, dbg);)
649 bel.reloads = pset_new_ptr_default();
650 bel.copies = pset_new_ptr_default();
652 DBG((dbg, LEVEL_1, "running on register class: %s\n", bel.cls->name));
/* block links must be clear: they will carry the block_info_t records */
655 be_clear_links(chordal_env->irg);
656 irg_block_walk_graph(chordal_env->irg, NULL, belady, &bel);
657 irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &bel);
658 be_insert_spills_reloads(bel.senv, bel.reloads);
659 remove_unused_reloads(chordal_env->irg, &bel);
/* tear down: psets, uses env, spill env and the pass obstack */
663 del_pset(bel.reloads);
664 be_delete_spill_env(bel.senv);
665 be_end_uses(bel.uses);
666 obstack_free(&bel.ob, NULL);