4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irprintf_t.h"
28 #include "iredges_t.h"
34 #include "bespillbelady.h"
36 #include "besched_t.h"
40 #include "bechordal_t.h"
49 #define DBG_WORKSET 128
50 #define DEBUG_LVL 0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
51 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
53 typedef struct _workset_t workset_t;
/* Environment carried through the whole belady spilling pass.
 * NOTE(review): this excerpt is gapped — additional members (e.g. the
 * obstack `ob` used via &env->ob below) are not visible here. */
55 typedef struct _belady_env_t {
57 const arch_env_t *arch;
58 const arch_register_class_t *cls;
59 int n_regs; /** number of regs in this reg-class */
61 workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
62 be_uses_t *uses; /**< env for the next-use magic */
63 ir_node *instr; /**< current instruction */
64 unsigned instr_nr; /**< current instruction number (relative to block start) */
65 pset *used; /**< holds the values used (so far) in the current BB */
67 spill_env_t *senv; /**< see bespill.h */
/* workset_t: bounded array of (value, next-use-time) pairs; capacity is
 * belady_env_t::n_regs (see the allocation in new_workset()). */
72 int len; /**< current length */
73 loc_t vals[0]; /**< inlined array of the values/distances in this working set */
/* Debug helper: dump every (value, next-use-time) pair of @p w. */
76 void workset_print(const workset_t *w)
80 for(i = 0; i < w->len; ++i) {
81 ir_printf("%+F %d\n", w->vals[i].irn, w->vals[i].time);
86 * Alloc a new workset on obstack @p ob with capacity @p bel->n_regs entries.
/* Memory lives on the obstack, so the workset is freed together with it —
 * no separate free is ever needed. */
88 static INLINE workset_t *new_workset(struct obstack *ob, belady_env_t *bel) {
90 size_t size = sizeof(*res) + (bel->n_regs)*sizeof(res->vals[0]);
91 res = obstack_alloc(ob, size);
98 * Alloc a new instance on obstack and make it equal to @param ws
/* Deep-copies the header plus the full capacity (n_regs entries), not just
 * the ws->len used ones — the clone can therefore grow up to capacity. */
100 static INLINE workset_t *workset_clone(struct obstack *ob, workset_t *ws) {
102 size_t size = sizeof(*res) + (ws->bel->n_regs)*sizeof(res->vals[0]);
103 res = obstack_alloc(ob, size);
104 memcpy(res, ws, size);
109 * Do NOT alloc anything. Make @param tgt equal to @param src.
110 * returns @param tgt for convenience
/* Caller must guarantee tgt was allocated with at least src's capacity
 * (both come from new_workset()/workset_clone() with bel->n_regs slots). */
112 static INLINE workset_t *workset_copy(workset_t *tgt, workset_t *src) {
113 size_t size = sizeof(*src) + (src->bel->n_regs)*sizeof(src->vals[0]);
114 memcpy(tgt, src, size);
119 * Overwrites the current content array of @param ws with the
120 * @param count locations given at memory @param locs.
121 * Set the length of @param ws to count.
/* NOTE(review): macro evaluates `count` once (inside the assignment) but
 * `locs` twice; keep arguments side-effect free. */
123 #define workset_bulk_fill(ws, count, locs) memcpy(&(ws)->vals[0], locs, ((ws)->len=count)*sizeof(locs[0]));
127 * Inserts the value @p val into the workset, iff it is not
128 * already contained. The workset must not be full.
130 static INLINE void workset_insert(workset_t *ws, ir_node *val) {
/* values of other register classes are silently ignored */
132 /* check for current regclass */
133 if (!arch_irn_consider_in_reg_alloc(ws->bel->arch, ws->bel->cls, val)) {
134 DBG((dbg, DBG_WORKSET, "Dropped %+F\n", val));
138 /* check if val is already contained */
139 for(i=0; i<ws->len; ++i)
140 if (ws->vals[i].irn == val)
/* append; the .time field of the new slot is left uninitialized here —
 * NOTE(review): presumably set later via workset_set_time(); confirm. */
144 assert(ws->len < ws->bel->n_regs && "Workset already full!");
145 ws->vals[ws->len++].irn = val;
149 * Removes all entries from this workset
/* O(1): only resets the length, entries are not overwritten. */
151 #define workset_clear(ws) (ws)->len = 0;
154 * Removes the value @p val from the workset if present.
/* Unordered removal: the last entry is moved into the freed slot, so the
 * workset's order is NOT preserved. */
156 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
158 for(i=0; i<ws->len; ++i) {
159 if (ws->vals[i].irn == val) {
160 ws->vals[i] = ws->vals[--ws->len];
/* @return non-zero iff @p val is currently in @p ws (linear search). */
166 static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
168 for(i=0; i<ws->len; ++i) {
169 if (ws->vals[i].irn == val)
177 * Iterates over all values in the working set.
178 * @p ws The workset to iterate
179 * @p v A variable to put the current value in
180 * @p i An integer for internal use
/* v is set to NULL once the iteration is past the end. */
182 #define workset_foreach(ws, v, i) for(i=0; \
183 v=(i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \
/* Simple accessors. NOTE(review): workset_set_time/_set_length do not
 * parenthesize the `t`/`length`/`i` arguments — fine for the call sites
 * visible here, but fragile with expression arguments. */
186 #define workset_set_time(ws, i, t) (ws)->vals[i].time=t
187 #define workset_set_length(ws, length) (ws)->len = length
188 #define workset_get_length(ws) ((ws)->len)
189 #define workset_get_val(ws, i) ((ws)->vals[i].irn)
/* Sorts entries by increasing next-use distance (loc_compare). */
190 #define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
/* Per-block result of the algorithm: the workset at block entry and at
 * block exit. Attached to blocks via set_block_info()/get_block_info(). */
192 typedef struct _block_info_t {
193 workset_t *ws_start, *ws_end;
/* Allocate a fresh block_info_t on @p ob; ws_start is cleared here.
 * NOTE(review): initialization of ws_end is not visible in this excerpt. */
197 static INLINE void *new_block_info(struct obstack *ob) {
198 block_info_t *res = obstack_alloc(ob, sizeof(*res));
199 res->ws_start = NULL;
/* The block_info_t is stashed in the block's generic link field. */
205 #define get_block_info(blk) ((block_info_t *)get_irn_link(blk))
206 #define set_block_info(blk, info) set_irn_link(blk, info)
209 * @return The distance to the next use or 0 if irn has dont_spill flag set
/* Distance is measured from (@p from, @p from_step) via the next-use
 * oracle; skip_from_uses skips uses at `from` itself.
 * NOTE(review): the return statements of this function are not visible
 * in this excerpt. */
211 static INLINE unsigned get_distance(belady_env_t *env, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
213 int flags = arch_irn_get_flags(env->arch, def);
214 unsigned dist = be_get_next_use(env->uses, from, from_step, def, skip_from_uses);
/* ignore-registers must never reach the spiller */
216 assert(! (flags & arch_irn_flags_ignore));
217 // we have to keep nonspillable nodes in the workingset
218 if(flags & arch_irn_flags_dont_spill)
225 * Performs the actions necessary to grant the request that:
226 * - new_vals can be held in registers
227 * - as few as possible other values are disposed
228 * - the worst values get disposed
230 * @p is_usage indicates that the values in new_vals are used (not defined)
231 * In this case reloads must be performed
233 static void displace(belady_env_t *bel, workset_t *new_vals, int is_usage) {
235 int i, len, max_allowed, demand, iter;
236 workset_t *ws = bel->ws;
/* scratch array; at most n_regs values can be demanded at once */
237 ir_node **to_insert = alloca(bel->n_regs * sizeof(*to_insert));
240 * 1. Identify the number of needed slots and the values to reload
243 workset_foreach(new_vals, val, iter) {
244 /* mark value as used */
246 pset_insert_ptr(bel->used, val);
248 if (!workset_contains(ws, val)) {
249 DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
250 to_insert[demand++] = val;
/* a used value not in registers must be reloaded before this instr */
252 be_add_reload(bel->senv, val, bel->instr);
254 assert(is_usage || "Defined value already in workset?!?");
255 DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
258 DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
261 * 2. Make room for at least 'demand' slots
263 len = workset_get_length(ws);
264 max_allowed = bel->n_regs - demand;
266 DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));
268 /* Only make more free room if we do not have enough */
269 if (len > max_allowed) {
270 /* get current next-use distance */
271 for (i=0; i<ws->len; ++i)
272 workset_set_time(ws, i, get_distance(bel, bel->instr, bel->instr_nr, workset_get_val(ws, i), !is_usage))
;
274 /* sort entries by increasing nextuse-distance*/
277 /* Logic for not needed live-ins: If a value is disposed
278 before its first usage, remove it from start workset */
279 for (i=max_allowed; i<ws->len; ++i) {
280 ir_node *irn = ws->vals[i].irn;
282 if (!pset_find_ptr(bel->used, irn)) {
283 ir_node *curr_bb = get_nodes_block(bel->instr);
284 workset_t *ws_start = get_block_info(curr_bb)->ws_start;
285 workset_remove(ws_start, irn);
287 DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
289 DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
293 /* kill the last 'demand' entries in the array */
294 workset_set_length(ws, max_allowed);
298 * 3. Insert the new values into the workset
300 for(i = 0; i < demand; ++i)
301 workset_insert(bel->ws, to_insert[i]);
/* forward declaration: compute_block_start_info() may recurse into belady() */
304 static void belady(ir_node *blk, void *env);
307 * Collects all values live-in at block @p blk and all phi results in this block.
308 * Then it adds the best values (at most n_regs) to the blocks start_workset.
309 * The phis among the remaining values get spilled: Introduce pseudo-copies of
310 * their args to break interference and make it possible to spill them to the
313 static block_info_t *compute_block_start_info(ir_node *blk, void *data) {
314 belady_env_t *env = data;
315 ir_node *irn, *first;
318 loc_t loc, *starters;
320 block_info_t *res = get_block_info(blk);
322 /* Have we seen this block before? */
326 /* Create the block info for this block. */
327 res = new_block_info(&env->ob);
328 set_block_info(blk, res);
330 /* Get all values living at the block start sorted by next use*/
333 DBG((dbg, DBG_START, "Living at start of %+F:\n", blk));
334 first = sched_first(blk);
/* collect the phi results of this block (register-class filtered) */
336 sched_foreach(blk, irn) {
337 if(!is_Phi(irn) || !arch_irn_consider_in_reg_alloc(env->arch, env->cls, irn))
341 loc.time = get_distance(env, first, 0, irn, 0);
342 obstack_grow(&ob, &loc, sizeof(loc));
343 DBG((dbg, DBG_START, " %+F:\n", irn));
/* collect the live-in values of this block */
347 live_foreach(blk, li) {
348 if (!live_is_in(li) || !arch_irn_consider_in_reg_alloc(env->arch, env->cls, li->irn))
351 loc.irn = (ir_node *)li->irn;
352 loc.time = get_distance(env, first, 0, li->irn, 0);
353 obstack_grow(&ob, &loc, sizeof(loc));
354 DBG((dbg, DBG_START, " %+F:\n", li->irn));
/* sort candidates by increasing next-use distance */
358 starters = obstack_finish(&ob);
359 qsort(starters, count, sizeof(starters[0]), loc_compare);
362 /* If we have only one predecessor, we want the start_set of blk to be the end_set of pred */
363 if (get_Block_n_cfgpreds(blk) == 1 && blk != get_irg_start_block(get_irn_irg(blk))) {
364 ir_node *pred_blk = get_Block_cfgpred_block(blk, 0);
365 block_info_t *pred_info = get_block_info(pred_blk);
367 /* if pred block has not been processed yet, do it now */
369 belady(pred_blk, env);
370 pred_info = get_block_info(pred_blk);
373 /* now we have an end_set of pred */
374 assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
375 res->ws_start = workset_clone(&env->ob, pred_info->ws_end);
379 /* Else we want the start_set to be the values used 'the closest' */
380 /* Copy the best ones from starters to start workset */
381 ws_count = MIN(count, env->n_regs);
382 res->ws_start = new_workset(&env->ob, env);
383 workset_bulk_fill(res->ws_start, ws_count, starters);
386 /* The phis of this block which are not in the start set have to be spilled later.
387 * Therefore we add temporary copies in the pred_blocks so the spills can spill
388 * into the same spill slot.
389 * After spilling these copies get deleted.
391 for (i=workset_get_length(res->ws_start); i<count; ++i) {
392 irn = starters[i].irn;
393 if (!is_Phi(irn) || get_nodes_block(irn) != blk)
396 be_spill_phi(env->senv, irn);
/* the starters array was only needed locally */
401 obstack_free(&ob, NULL);
407 * For the given block @p blk, decide for each values
408 * whether it is used from a register or is reloaded
411 static void belady(ir_node *blk, void *env) {
412 belady_env_t *bel = env;
416 block_info_t *blk_info;
418 /* Don't do a block twice */
419 if (get_block_info(blk))
422 /* get the starting workset for this block */
423 blk_info = compute_block_start_info(blk, bel);
425 DBG((dbg, DBG_DECIDE, "\n"));
426 DBG((dbg, DBG_DECIDE, "Decide for %+F\n", blk));
/* work on a copy so the recorded start workset stays intact */
428 workset_copy(bel->ws, blk_info->ws_start);
429 DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", blk));
430 workset_foreach(bel->ws, irn, iter)
431 DBG((dbg, DBG_WSETS, " %+F\n", irn));
433 /* process the block from start to end */
434 DBG((dbg, DBG_WSETS, "Processing...\n"));
435 bel->used = pset_new_ptr(32);
437 new_vals = new_workset(&bel->ob, bel);
438 sched_foreach(blk, irn) {
440 assert(workset_get_length(bel->ws) <= bel->n_regs && "Too much values in workset!");
442 /* projs are handled with the tuple value.
443 * Phis are no real instr (see insert_starters())
444 * instr_nr does not increase */
445 if (is_Proj(irn) || is_Phi(irn)) {
446 DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
449 DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
451 /* set instruction in the workset */
454 /* allocate all values _used_ by this instruction */
455 workset_clear(new_vals);
456 for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
457 workset_insert(new_vals, get_irn_n(irn, i));
/* is_usage = 1: uses must be in registers, reload where necessary */
459 displace(bel, new_vals, 1);
461 /* allocate all values _defined_ by this instruction */
462 workset_clear(new_vals);
463 if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
465 for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
466 workset_insert(new_vals, proj);
468 workset_insert(new_vals, irn);
/* is_usage = 0: definitions just need free registers, no reloads */
470 displace(bel, new_vals, 0);
476 /* Remember end-workset for this block */
477 blk_info->ws_end = workset_clone(&bel->ob, bel->ws);
478 DBG((dbg, DBG_WSETS, "End workset for %+F:\n", blk));
479 workset_foreach(blk_info->ws_end, irn, iter)
480 DBG((dbg, DBG_WSETS, " %+F\n", irn));
484 * 'decide' is block-local and makes assumptions
485 * about the set of live-ins. Thus we must adapt the
486 * live-outs to the live-ins at each block-border.
488 static void fix_block_borders(ir_node *blk, void *env) {
490 belady_env_t *bel = env;
491 int i, max, iter, iter2;
493 DBG((dbg, DBG_FIX, "\n"));
494 DBG((dbg, DBG_FIX, "Fixing %+F\n", blk));
496 wsb = get_block_info(blk)->ws_start;
498 /* process all pred blocks */
499 for (i=0, max=get_irn_arity(blk); i<max; ++i) {
500 ir_node *irnb, *irnp, *pred = get_Block_cfgpred_block(blk, i);
501 workset_t *wsp = get_block_info(pred)->ws_end;
503 DBG((dbg, DBG_FIX, " Pred %+F\n", pred));
/* every value in our start workset must be available at the end of
 * each predecessor — otherwise reload it on that edge */
505 workset_foreach(wsb, irnb, iter) {
506 /* if irnb is a phi of the current block we reload
507 * the corresponding argument, else irnb itself */
508 if(is_Phi(irnb) && blk == get_nodes_block(irnb))
509 irnb = get_irn_n(irnb, i);
511 /* Unknowns are available everywhere */
512 if(get_irn_opcode(irnb) == iro_Unknown)
515 /* check if irnb is in a register at end of pred */
516 workset_foreach(wsp, irnp, iter2) {
521 /* irnb is not in a register at the end of pred, so we have to reload it */
522 DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
523 be_add_reload_on_edge(bel->senv, irnb, blk, i);
/* label target of the double-loop continue above */
526 /*epsilon statement :)*/;
/* Convenience entry point: run belady spilling with a freshly created
 * (and internally destroyed) spill environment. */
531 void be_spill_belady(const be_chordal_env_t *chordal_env) {
532 be_spill_belady_spill_env(chordal_env, NULL);
/* Main driver: run the belady spilling algorithm on all blocks of the irg
 * belonging to @p chordal_env, for its register class.
 * @p spill_env  may be NULL; then a private spill env is created and
 *               destroyed here, otherwise the caller's env is reused
 *               (and NOT freed — ownership stays with the caller). */
535 void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t *spill_env) {
538 FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
540 /* init belady env */
541 obstack_init(&bel.ob);
542 bel.arch = chordal_env->birg->main_env->arch_env;
543 bel.cls = chordal_env->cls;
544 bel.n_regs = arch_count_non_ignore_regs(bel.arch, bel.cls);
545 bel.ws = new_workset(&bel.ob, &bel);
546 bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
547 if(spill_env == NULL) {
548 bel.senv = be_new_spill_env(chordal_env);
550 bel.senv = spill_env;
552 DEBUG_ONLY(be_set_spill_env_dbg_module(bel.senv, dbg);)
554 DBG((dbg, LEVEL_1, "running on register class: %s\n", bel.cls->name));
/* phase 1: per-block decisions; phase 2: fix the block borders;
 * then materialize the collected spills/reloads */
557 be_clear_links(chordal_env->irg);
558 irg_block_walk_graph(chordal_env->irg, NULL, belady, &bel);
559 irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &bel);
560 be_insert_spills_reloads(bel.senv);
562 be_remove_dead_nodes_from_schedule(chordal_env->irg);
/* clean up (only delete the spill env if we created it ourselves) */
565 if(spill_env == NULL)
566 be_delete_spill_env(bel.senv);
567 be_end_uses(bel.uses);
568 obstack_free(&bel.ob, NULL);