 * Copyright: (c) Universitaet Karlsruhe
 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
 * NOTE: Comments may be (partially) wrong, since there was a major bug
 * (spilling of phis, prespill) whose fixing changed a lot.
29 #include "iredges_t.h"
34 #include "bespillbelady.h"
36 #include "besched_t.h"
40 #include "bechordal_t.h"
49 #define DEBUG_LVL 0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
50 static firm_dbg_module_t *dbg = NULL;
/* Minimum of two values. NOTE: macro evaluates each argument twice —
 * do not pass expressions with side effects (e.g. MIN(i++, j)). */
#define MIN(a,b) (((a)<(b))?(a):(b))

/* When defined, belady() treats Projs in the start block specially. */
#undef SINGLE_START_PROJS
55 typedef struct _workset_t workset_t;
57 typedef struct _belady_env_t {
59 const arch_env_t *arch;
60 const arch_register_class_t *cls;
61 int n_regs; /** number of regs in this reg-class */
63 workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
64 be_uses_t *uses; /**< env for the next-use magic */
65 ir_node *instr; /**< current instruction */
66 unsigned instr_nr; /**< current instruction number (relative to block start) */
67 pset *used; /**< holds the values used (so far) in the current BB */
69 spill_env_t *senv; /* see bespill.h */
70 pset *reloads; /**< all reload nodes placed */
75 int i; /**< used for iteration TODO remove this form the struct */
76 int len; /**< current length */
77 loc_t vals[1]; /**< inlined array of the values/distances in this working set */
80 typedef struct _block_info_t {
81 workset_t *ws_start, *ws_end;
85 * Alloc a new workset on obstack @p ob with maximum size @p max
87 static INLINE workset_t *new_workset(struct obstack *ob, belady_env_t *bel) {
89 size_t size = sizeof(*res) + (bel->n_regs-1)*sizeof(res->vals[0]);
90 res = obstack_alloc(ob, size);
97 * Alloc a new instance on obstack and make it equal to @param ws
99 static INLINE workset_t *workset_clone(struct obstack *ob, workset_t *ws) {
101 size_t size = sizeof(*res) + (ws->bel->n_regs-1)*sizeof(res->vals[0]);
102 res = obstack_alloc(ob, size);
103 memcpy(res, ws, size);
108 * Do NOT alloc anything. Make @param tgt equal to @param src.
109 * returns @param tgt for convinience
111 static INLINE workset_t *workset_copy(workset_t *tgt, workset_t *src) {
112 size_t size = sizeof(*src) + (src->bel->n_regs-1)*sizeof(src->vals[0]);
113 memcpy(tgt, src, size);
/**
 * Overwrites the current content array of @param ws with the
 * @param count locations given at memory @param locs.
 * Set the length of @param ws to count.
 * NOTE: macro; arguments are fully parenthesized, no trailing semicolon.
 */
#define workset_bulk_fill(ws, count, locs) memcpy(&(ws)->vals[0], (locs), ((ws)->len=(count))*sizeof((locs)[0]))
126 * Inserts the value @p val into the workset, iff it is not
127 * already contained. The workset must not be full.
129 static INLINE void workset_insert(workset_t *ws, ir_node *val) {
131 /* check for current regclass */
132 if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
133 DBG((dbg, DBG_DECIDE, "Dropped %+F\n", val));
137 /* check if val is already contained */
138 for(i=0; i<ws->len; ++i)
139 if (ws->vals[i].irn == val)
143 assert(ws->len < ws->bel->n_regs && "Workset already full!");
144 ws->vals[ws->len++].irn = val;
148 * Inserts all values in array @p vals of length @p cnt
149 * into the workset. There must be enough space for the
152 static INLINE void workset_bulk_insert(workset_t *ws, int cnt, ir_node **vals) {
155 for(o=0; o<cnt; ++o) {
156 ir_node *val = vals[o];
157 DBG((dbg, DBG_TRACE, "Bulk insert %+F\n", val));
158 /* check for current regclass */
159 if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
160 DBG((dbg, DBG_TRACE, "Wrong reg class\n"));
164 /* check if val is already contained */
165 for(i=0; i<ws->len; ++i)
166 if (ws->vals[i].irn == val) {
167 DBG((dbg, DBG_TRACE, "Already contained\n"));
172 assert(ws->len < ws->bel->n_regs && "Workset does not have enough room!");
173 ws->vals[ws->len++].irn = val;
174 DBG((dbg, DBG_TRACE, "Inserted\n"));
177 /*epsilon statement :)*/;
/**
 * Removes all entries from this workset.
 * NOTE: macro; the whole expansion is parenthesized so it is
 * usable as an expression (no trailing semicolon).
 */
#define workset_clear(ws) ((ws)->len = 0)
187 * Removes the value @p val from the workset if present.
189 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
191 for(i=0; i<ws->len; ++i)
192 if (ws->vals[i].irn == val) {
193 ws->vals[i] = ws->vals[--ws->len];
198 static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
200 for(i=0; i<ws->len; ++i)
201 if (ws->vals[i].irn == val)
/**
 * Iterates over all values in the workset: assigns each ws->vals[..].irn
 * to @p v in turn; v is NULL after the loop. Uses ws->i as cursor, so
 * nested iteration over the SAME workset is not possible.
 */
#define workset_foreach(ws, v)	for(ws->i=0;  \
		v=(ws->i < ws->len) ? ws->vals[ws->i].irn : NULL, ws->i < ws->len; \
		ws->i++)

#define workset_set_time(ws, i, t) (ws)->vals[i].time=t
#define workset_set_length(ws, length) (ws)->len = length
#define workset_get_length(ws) ((ws)->len)
#define workset_get_val(ws, i) ((ws)->vals[i].irn)
/* sort entries by increasing next-use distance (loc_compare) */
#define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
217 static int is_mem_phi(const ir_node *irn, void *data) {
219 ir_node *blk = get_nodes_block(irn);
221 DBG((dbg, DBG_SPILL, "Is %+F a mem-phi?\n", irn));
222 sws = ((block_info_t *) get_irn_link(blk))->ws_start;
223 DBG((dbg, DBG_SPILL, " %d\n", !workset_contains(sws, irn)));
224 return !workset_contains(sws, irn);
228 * Collects all values live-in at block @p blk and all phi results in this block.
229 * Then it adds the best values (at most n_regs) to the blocks start_workset.
230 * The phis among the remaining values get spilled: Introduce psudo-copies of
231 * their args to break interference and make it possible to spill them to the
234 static void compute_block_start_info(ir_node *blk, void *env) {
235 belady_env_t *bel = env;
236 block_info_t *blk_info;
237 ir_node *irn, *first;
239 int i, count, ws_count;
240 loc_t loc, *starters;
241 ir_graph *irg = get_irn_irg(blk);
246 /* Get all values living at the block start */
247 DBG((dbg, DBG_START, "Living at start of %+F:\n", blk));
248 first = sched_first(blk);
250 sched_foreach(blk, irn)
251 if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, -1) == bel->cls) {
253 loc.time = be_get_next_use(bel->uses, first, 0, irn, 0);
254 obstack_grow(&ob, &loc, sizeof(loc));
255 DBG((dbg, DBG_START, " %+F:\n", irn));
260 live_foreach(blk, li)
261 if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, -1) == bel->cls) {
262 loc.irn = (ir_node *)li->irn;
263 loc.time = be_get_next_use(bel->uses, first, 0, li->irn, 0);
264 obstack_grow(&ob, &loc, sizeof(loc));
265 DBG((dbg, DBG_START, " %+F:\n", irn));
268 starters = obstack_finish(&ob);
270 /* Sort all values */
271 qsort(starters, count, sizeof(starters[0]), loc_compare);
273 /* Create the start workset for this block. Copy the best ones from starters */
274 blk_info = obstack_alloc(&bel->ob, sizeof(*blk_info));
275 set_irn_link(blk, blk_info);
277 ws_count = MIN(count, bel->n_regs);
278 blk_info->ws_start = new_workset(&bel->ob, bel);
279 workset_bulk_fill(blk_info->ws_start, ws_count, starters);
281 /* Spill the phis among the remaining values */
282 for (i=ws_count; i<count; ++i) {
285 irn = starters[i].irn;
286 if (!is_Phi(irn) || get_nodes_block(irn) != blk)
289 DBG((dbg, DBG_START, "For %+F:\n", irn));
291 for (max=get_irn_arity(irn), o=0; o<max; ++o) {
292 ir_node *arg = get_irn_n(irn, o);
293 ir_node *pred_block = get_Block_cfgpred_block(get_nodes_block(irn), o);
294 ir_node *cpy = be_new_Copy(bel->cls, irg, pred_block, arg);
295 DBG((dbg, DBG_START, " place a %+F of %+F in %+F\n", cpy, arg, pred_block));
296 sched_add_before(pred_block, cpy);
297 set_irn_n(irn, o, cpy);
301 obstack_free(&ob, NULL);
305 * Performs the actions neccessary to grant the request that:
306 * - new_vals can be held in registers
307 * - as few as possible other values are disposed
308 * - the worst values get disposed
310 * @p is_usage indicates that the values in new_vals are used (not defined)
311 * In this case reloads must be performed
313 static void displace(belady_env_t *bel, workset_t *new_vals, int is_usage) {
315 int i, len, max_allowed, demand;
316 workset_t *ws = bel->ws;
317 ir_node **to_insert = alloca(bel->n_regs * sizeof(*to_insert));
320 * 1. Identify the number of needed slots and the values to reload
323 workset_foreach(new_vals, val) {
324 /* mark value as used */
326 pset_insert_ptr(bel->used, val);
328 if (!workset_contains(ws, val)) {
329 DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
330 to_insert[demand++] = val;
332 be_add_reload(bel->senv, val, bel->instr);
334 DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
336 DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
340 * 2. Make room for at least 'demand' slots
342 len = workset_get_length(ws);
343 max_allowed = bel->n_regs - demand;
345 /* Only make more free room if we do not have enough */
346 if (len > max_allowed) {
347 /* get current next-use distance */
348 for (i=0; i<ws->len; ++i)
349 workset_set_time(ws, i, be_get_next_use(bel->uses, bel->instr, bel->instr_nr, workset_get_val(ws, i), !is_usage));
351 /* sort entries by increasing nextuse-distance*/
354 /* Logic for not needed live-ins: If a value is disposed
355 before its first usage, remove it from start workset */
356 for (i=max_allowed; i<ws->len; ++i) {
357 ir_node *irn = ws->vals[i].irn;
358 if (!pset_find_ptr(bel->used, irn)) {
359 ir_node *curr_bb = get_nodes_block(bel->instr);
360 workset_t *ws_start = ((block_info_t *) get_irn_link(curr_bb))->ws_start;
361 workset_remove(ws_start, irn);
363 DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
365 DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
368 /* kill the last 'demand' entries in the array */
369 workset_set_length(ws, max_allowed);
373 * 3. Insert the new values into the workset
375 workset_bulk_insert(bel->ws, demand, to_insert);
379 * For the given block @p blk, decide for each values
380 * whether it is used from a register or is reloaded
383 static void belady(ir_node *blk, void *env) {
384 belady_env_t *bel = env;
387 #ifdef SINGLE_START_PROJS
388 ir_node *start_blk = get_irg_start_block(get_irn_irg(blk));
390 block_info_t *blk_info = get_irn_link(blk);
392 DBG((dbg, DBG_DECIDE, "\n"));
393 DBG((dbg, DBG_DECIDE, "Decide for %+F\n", blk));
395 workset_copy(bel->ws, blk_info->ws_start);
396 DBG((dbg, DBG_WSETS, "Initial start workset for %+F:\n", blk));
397 workset_foreach(bel->ws, irn)
398 DBG((dbg, DBG_WSETS, " %+F\n", irn));
400 /* process the block from start to end */
401 DBG((dbg, DBG_WSETS, "Processing...\n"));
402 bel->used = pset_new_ptr(32);
404 new_vals = new_workset(&bel->ob, bel);
405 sched_foreach(blk, irn) {
406 assert(workset_get_length(bel->ws) <= bel->n_regs && "Too much values in workset!");
409 #ifdef SINGLE_START_PROJS
411 (is_Proj(irn) && blk!=start_blk) ||
412 (get_irn_mode(irn) == mode_T && blk==start_blk)) {
413 DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
417 /* projs are handled with the tuple value.
418 * Phis are no real instr (see insert_starters)
419 * instr_nr does not increase */
420 if (is_Proj(irn) || is_Phi(irn)) {
421 DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
425 DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
427 /* set instruction in the workset */
430 /* allocate all values _used_ by this instruction */
431 workset_clear(new_vals);
432 workset_bulk_insert(new_vals, get_irn_arity(irn)+1, get_irn_in(irn));
433 displace(bel, new_vals, 1);
435 /* allocate all values _defined_ by this instruction */
436 workset_clear(new_vals);
437 if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
439 for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
440 workset_insert(new_vals, proj);
442 workset_insert(new_vals, irn);
444 displace(bel, new_vals, 0);
450 /* Remember end-workset for this block */
451 blk_info->ws_end = workset_clone(&bel->ob, bel->ws);
452 DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", blk));
453 workset_foreach(blk_info->ws_start, irn)
454 DBG((dbg, DBG_WSETS, " %+F\n", irn));
455 DBG((dbg, DBG_WSETS, "End workset for %+F:\n", blk));
456 workset_foreach(blk_info->ws_end, irn)
457 DBG((dbg, DBG_WSETS, " %+F\n", irn));
461 * 'decide' is block-local and makes assumptions
462 * about the set of live-ins. Thus we must adapt the
463 * live-outs to the live-ins at each block-border.
465 static void fix_block_borders(ir_node *blk, void *env) {
467 belady_env_t *bel = env;
470 DBG((dbg, DBG_FIX, "\n"));
471 DBG((dbg, DBG_FIX, "Fixing %+F\n", blk));
473 wsb = ((block_info_t *)get_irn_link(blk))->ws_start;
475 /* process all pred blocks */
476 for (i=0, max=get_irn_arity(blk); i<max; ++i) {
477 ir_node *irnb, *irnp, *pred = get_Block_cfgpred_block(blk, i);
478 workset_t *wsp = ((block_info_t *)get_irn_link(pred))->ws_end;
480 DBG((dbg, DBG_FIX, " Pred %+F\n", pred));
482 workset_foreach(wsb, irnb) {
483 /* if irnb is a phi of the current block we reload
484 * the corresponding argument, else irnb itself */
485 if(is_Phi(irnb) && blk == get_nodes_block(irnb))
486 irnb = get_irn_n(irnb, i);
488 /* Unknowns are available everywhere */
489 if(get_irn_opcode(irnb) == iro_Unknown)
492 /* check if irnb is in a register at end of pred */
493 workset_foreach(wsp, irnp)
497 /* irnb is in memory at the end of pred, so we have to reload it */
498 DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
499 be_add_reload_on_edge(bel->senv, irnb, blk, i);
502 /*epsilon statement :)*/;
508 * Removes all used reloads from bel->reloads.
509 * The remaining nodes in bel->reloads will be removed from the graph.
511 static void rescue_used_reloads_and_remove_copies(ir_node *irn, void *env) {
512 pset *rlds = (pset *)env;
513 if (pset_find_ptr(rlds, irn))
514 pset_remove_ptr(rlds, irn);
517 /* remove copies introduced for phi-spills */
518 if (be_is_Copy(irn)) {
519 ir_node *src, *spill;
520 assert(get_irn_n_edges(irn) == 1 && "This is not a copy introduced in 'compute_block_start_info()'. Who created it?");
522 spill = get_irn_edge(get_irn_irg(irn), irn, 0)->src;
523 assert(be_is_Spill(spill) && "This is not a copy introduced in 'compute_block_start_info()'. Who created it?");
525 src = get_irn_n(irn, 0);
526 set_irn_n(spill, 0, src);
531 * Finds all unused reloads and remove them from the schedule
532 * Also removes spills if they are not used anymore after removing reloads
534 static void remove_copies_and_unused_reloads(ir_graph *irg, belady_env_t *bel) {
537 irg_walk_graph(irg, rescue_used_reloads_and_remove_copies, NULL, bel->reloads);
538 for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads)) {
540 DBG((dbg, DBG_SPILL, "Removing %+F before %+F in %+F\n", irn, sched_next(irn), get_nodes_block(irn)));
542 spill = get_irn_n(irn, 0);
545 set_irn_n(irn, 0, new_Bad());
548 /* if spill not used anymore, remove it too
549 * test of regclass is necessary since spill may be a phi-M */
550 if (get_irn_n_edges(spill) == 0 && bel->cls == arch_get_irn_reg_class(bel->arch, spill, -1)) {
551 set_irn_n(spill, 0, new_Bad());
557 void be_spill_belady(const be_chordal_env_t *chordal_env) {
560 dbg = firm_dbg_register("ir.be.spillbelady");
561 firm_dbg_set_mask(dbg, DEBUG_LVL);
563 /* init belady env */
564 obstack_init(&bel.ob);
565 bel.arch = chordal_env->main_env->arch_env;
566 bel.cls = chordal_env->cls;
567 bel.n_regs = arch_register_class_n_regs(bel.cls);
568 bel.ws = new_workset(&bel.ob, &bel);
569 bel.uses = be_begin_uses(chordal_env->irg, chordal_env->main_env->arch_env, bel.cls);
570 bel.senv = be_new_spill_env(dbg, chordal_env, is_mem_phi, NULL);
571 bel.reloads = pset_new_ptr_default();
574 irg_block_walk_graph(chordal_env->irg, compute_block_start_info, NULL, &bel);
575 irg_block_walk_graph(chordal_env->irg, belady, NULL, &bel);
576 irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &bel);
577 be_insert_spills_reloads(bel.senv, bel.reloads);
578 remove_copies_and_unused_reloads(chordal_env->irg, &bel);
582 del_pset(bel.reloads);
583 be_delete_spill_env(bel.senv);
584 be_end_uses(bel.uses);
585 obstack_free(&bel.ob, NULL);