4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irprintf_t.h"
28 #include "iredges_t.h"
34 #include "bespillbelady.h"
36 #include "besched_t.h"
40 #include "bechordal_t.h"
49 #define DBG_WORKSET 128
50 #define DEBUG_LVL 0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
51 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
53 typedef struct _workset_t workset_t;
55 typedef struct _belady_env_t {
57 const be_chordal_env_t *cenv;
58 const arch_env_t *arch;
59 const arch_register_class_t *cls;
60 int n_regs; /** number of regs in this reg-class */
62 workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
63 be_uses_t *uses; /**< env for the next-use magic */
64 ir_node *instr; /**< current instruction */
65 unsigned instr_nr; /**< current instruction number (relative to block start) */
68 spill_env_t *senv; /**< see bespill.h */
72 int len; /**< current length */
73 loc_t vals[0]; /**< inlined array of the values/distances in this working set */
76 void workset_print(const workset_t *w)
80 for(i = 0; i < w->len; ++i) {
81 ir_printf("%+F %d\n", w->vals[i].irn, w->vals[i].time);
86 * Alloc a new workset on obstack @p ob with maximum size @p max
88 static INLINE workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
90 size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
91 res = obstack_alloc(ob, size);
97 * Alloc a new instance on obstack and make it equal to @param ws
99 static INLINE workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
101 size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
102 res = obstack_alloc(ob, size);
103 memcpy(res, ws, size);
108 * Do NOT alloc anything. Make @param tgt equal to @param src.
109 * returns @param tgt for convenience
111 static INLINE workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
112 size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
113 memcpy(tgt, src, size);
118 * Overwrites the current content array of @param ws with the
119 * @param count locations given at memory @param locs.
120 * Set the length of @param ws to count.
122 static INLINE void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
123 workset->len = count;
124 memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
128 * Inserts the value @p val into the workset, iff it is not
129 * already contained. The workset must not be full.
131 static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
133 /* check for current regclass */
134 if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
135 DBG((dbg, DBG_WORKSET, "Dropped %+F\n", val));
139 /* check if val is already contained */
140 for(i=0; i<ws->len; ++i)
141 if (ws->vals[i].irn == val)
145 assert(ws->len < env->n_regs && "Workset already full!");
146 ws->vals[ws->len++].irn = val;
150 * Removes all entries from this workset
152 static INLINE void workset_clear(workset_t *ws) {
157 * Removes the value @p val from the workset if present.
159 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
161 for(i=0; i<ws->len; ++i) {
162 if (ws->vals[i].irn == val) {
163 ws->vals[i] = ws->vals[--ws->len];
169 static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
171 for(i=0; i<ws->len; ++i) {
172 if (ws->vals[i].irn == val)
/**
 * Iterates over all values in the working set.
 * @p ws The workset to iterate
 * @p v  A variable to put the current value in
 * @p i  An integer for internal use
 */
#define workset_foreach(ws, v, i)	for (i = 0; \
										v = (i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \
										++i)

#define workset_set_time(ws, i, t)     (ws)->vals[i].time = t
#define workset_set_length(ws, length) (ws)->len = length
#define workset_get_length(ws)         ((ws)->len)
#define workset_get_val(ws, i)         ((ws)->vals[i].irn)
/* sort entries by increasing next-use distance (loc_compare defined elsewhere) */
#define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
195 typedef struct _block_info_t {
196 workset_t *ws_start, *ws_end;
201 static INLINE void *new_block_info(struct obstack *ob) {
202 block_info_t *res = obstack_alloc(ob, sizeof(*res));
203 res->ws_start = NULL;
210 #define get_block_info(blk) ((block_info_t *)get_irn_link(blk))
211 #define set_block_info(blk, info) set_irn_link(blk, info)
214 * @return The distance to the next use or 0 if irn has dont_spill flag set
216 static INLINE unsigned get_distance(belady_env_t *env, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
218 int flags = arch_irn_get_flags(env->arch, def);
219 unsigned dist = be_get_next_use(env->uses, from, from_step, def, skip_from_uses);
221 assert(! (flags & arch_irn_flags_ignore));
222 // we have to keep nonspillable nodes in the workingset
223 if(flags & arch_irn_flags_dont_spill)
230 * Performs the actions necessary to grant the request that:
231 * - new_vals can be held in registers
232 * - as few as possible other values are disposed
233 * - the worst values get disposed
235 * @p is_usage indicates that the values in new_vals are used (not defined)
236 * In this case reloads must be performed
238 static void displace(belady_env_t *env, workset_t *new_vals, int is_usage) {
240 int i, len, max_allowed, demand, iter;
241 workset_t *ws = env->ws;
242 ir_node **to_insert = alloca(env->n_regs * sizeof(*to_insert));
245 * 1. Identify the number of needed slots and the values to reload
248 workset_foreach(new_vals, val, iter) {
249 /* mark value as used */
251 pset_insert_ptr(env->used, val);
253 if (!workset_contains(ws, val)) {
254 DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
255 to_insert[demand++] = val;
257 be_add_reload(env->senv, val, env->instr);
259 assert(is_usage || "Defined value already in workset?!?");
260 DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
263 DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
266 * 2. Make room for at least 'demand' slots
268 len = workset_get_length(ws);
269 max_allowed = env->n_regs - demand;
271 DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));
273 /* Only make more free room if we do not have enough */
274 if (len > max_allowed) {
275 /* get current next-use distance */
276 for (i=0; i<ws->len; ++i)
277 workset_set_time(ws, i, get_distance(env, env->instr, env->instr_nr, workset_get_val(ws, i), !is_usage));
279 /* sort entries by increasing nextuse-distance*/
282 /* Logic for not needed live-ins: If a value is disposed
283 * before its first usage, remove it from start workset
284 * We don't do this for phis though
286 for (i=max_allowed; i<ws->len; ++i) {
287 ir_node *irn = ws->vals[i].irn;
292 if (!pset_find_ptr(env->used, irn)) {
293 ir_node *curr_bb = get_nodes_block(env->instr);
294 workset_t *ws_start = get_block_info(curr_bb)->ws_start;
295 workset_remove(ws_start, irn);
297 DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
299 DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
303 /* kill the last 'demand' entries in the array */
304 workset_set_length(ws, max_allowed);
308 * 3. Insert the new values into the workset
310 for(i = 0; i < demand; ++i)
311 workset_insert(env, env->ws, to_insert[i]);
314 static void belady(ir_node *blk, void *env);
317 * Computes set of live-ins for each block with multiple predecessors
318 * and notifies spill algorithm which phis need to be spilled
320 static void spill_phi_walker(ir_node *block, void *data) {
321 belady_env_t *env = data;
322 block_info_t *block_info;
323 ir_node *first, *irn;
324 loc_t loc, *starters;
325 int i, len, ws_count;
327 if(get_Block_n_cfgpreds(block) == 1 && get_irg_start_block(get_irn_irg(block)) != block)
330 block_info = new_block_info(&env->ob);
331 set_block_info(block, block_info);
333 /* Collect all values living at start of block */
334 starters = NEW_ARR_F(loc_t, 0);
336 DBG((dbg, DBG_START, "Living at start of %+F:\n", block));
337 first = sched_first(block);
338 sched_foreach(block, irn) {
341 if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, irn))
345 loc.time = get_distance(env, first, 0, irn, 0);
346 ARR_APP1(loc_t, starters, loc);
347 DBG((dbg, DBG_START, " %+F:\n", irn));
350 be_lv_foreach(env->cenv->lv, block, be_lv_state_in, i) {
351 ir_node *irn = be_lv_get_irn(env->cenv->lv, block, i);
352 if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, irn))
356 loc.time = get_distance(env, first, 0, irn, 0);
357 ARR_APP1(loc_t, starters, loc);
358 DBG((dbg, DBG_START, " %+F:\n", irn));
361 // Sort start values by first use
362 qsort(starters, ARR_LEN(starters), sizeof(starters[0]), loc_compare);
364 /* Copy the best ones from starters to start workset */
365 ws_count = MIN(ARR_LEN(starters), env->n_regs);
366 block_info->ws_start = new_workset(env, &env->ob);
367 workset_bulk_fill(block_info->ws_start, ws_count, starters);
369 /* The phis of this block which are not in the start set have to be spilled later. */
370 for (i = ws_count, len = ARR_LEN(starters); i < len; ++i) {
371 irn = starters[i].irn;
372 if (!is_Phi(irn) || get_nodes_block(irn) != block)
375 be_spill_phi(env->senv, irn);
382 * Collects all values live-in at block @p blk and all phi results in this block.
383 * Then it adds the best values (at most n_regs) to the blocks start_workset.
384 * The phis among the remaining values get spilled: Introduce psudo-copies of
385 * their args to break interference and make it possible to spill them to the
388 static block_info_t *compute_block_start_info(belady_env_t *env, ir_node *block) {
390 block_info_t *res, *pred_info;
392 /* Have we seen this block before? */
393 res = get_block_info(block);
397 /* Create the block info for this block. */
398 res = new_block_info(&env->ob);
399 set_block_info(block, res);
401 /* Use endset of predecessor block as startset */
402 assert(get_Block_n_cfgpreds(block) == 1 && block != get_irg_start_block(get_irn_irg(block)));
403 pred_block = get_Block_cfgpred_block(block, 0);
404 pred_info = get_block_info(pred_block);
406 /* if pred block has not been processed yet, do it now */
407 if (pred_info == NULL || pred_info->processed == 0) {
408 belady(pred_block, env);
409 pred_info = get_block_info(pred_block);
412 /* now we have an end_set of pred */
413 assert(pred_info->ws_end && "The recursive call (above) is supposed to compute an end_set");
414 res->ws_start = workset_clone(env, &env->ob, pred_info->ws_end);
421 * For the given block @p blk, decide for each values
422 * whether it is used from a register or is reloaded
425 static void belady(ir_node *block, void *data) {
426 belady_env_t *env = data;
430 block_info_t *block_info;
432 /* make sure we have blockinfo (with startset) */
433 block_info = get_block_info(block);
434 if (block_info == NULL)
435 block_info = compute_block_start_info(env, block);
437 /* Don't do a block twice */
438 if(block_info->processed)
441 /* get the starting workset for this block */
442 DBG((dbg, DBG_DECIDE, "\n"));
443 DBG((dbg, DBG_DECIDE, "Decide for %+F\n", block));
445 workset_copy(env, env->ws, block_info->ws_start);
446 DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", block));
447 workset_foreach(env->ws, irn, iter)
448 DBG((dbg, DBG_WSETS, " %+F\n", irn));
450 /* process the block from start to end */
451 DBG((dbg, DBG_WSETS, "Processing...\n"));
452 env->used = pset_new_ptr_default();
454 new_vals = new_workset(env, &env->ob);
455 sched_foreach(block, irn) {
457 assert(workset_get_length(env->ws) <= env->n_regs && "Too much values in workset!");
459 /* projs are handled with the tuple value.
460 * Phis are no real instr (see insert_starters())
461 * instr_nr does not increase */
462 if (is_Proj(irn) || is_Phi(irn)) {
463 DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
466 DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
468 /* set instruction in the workset */
471 /* allocate all values _used_ by this instruction */
472 workset_clear(new_vals);
473 for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
474 workset_insert(env, new_vals, get_irn_n(irn, i));
476 displace(env, new_vals, 1);
478 /* allocate all values _defined_ by this instruction */
479 workset_clear(new_vals);
480 if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
482 for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
483 workset_insert(env, new_vals, proj);
485 workset_insert(env, new_vals, irn);
487 displace(env, new_vals, 0);
493 /* Remember end-workset for this block */
494 block_info->ws_end = workset_clone(env, &env->ob, env->ws);
495 block_info->processed = 1;
496 DBG((dbg, DBG_WSETS, "End workset for %+F:\n", block));
497 workset_foreach(block_info->ws_end, irn, iter)
498 DBG((dbg, DBG_WSETS, " %+F\n", irn));
502 * 'decide' is block-local and makes assumptions
503 * about the set of live-ins. Thus we must adapt the
504 * live-outs to the live-ins at each block-border.
506 static void fix_block_borders(ir_node *blk, void *data) {
507 belady_env_t *env = data;
509 int i, max, iter, iter2;
511 DBG((dbg, DBG_FIX, "\n"));
512 DBG((dbg, DBG_FIX, "Fixing %+F\n", blk));
514 wsb = get_block_info(blk)->ws_start;
516 /* process all pred blocks */
517 for (i=0, max=get_irn_arity(blk); i<max; ++i) {
518 ir_node *irnb, *irnp, *pred = get_Block_cfgpred_block(blk, i);
519 workset_t *wsp = get_block_info(pred)->ws_end;
521 DBG((dbg, DBG_FIX, " Pred %+F\n", pred));
523 workset_foreach(wsb, irnb, iter) {
524 /* if irnb is a phi of the current block we reload
525 * the corresponding argument, else irnb itself */
526 if(is_Phi(irnb) && blk == get_nodes_block(irnb)) {
527 irnb = get_irn_n(irnb, i);
529 // we might have unknowns as argument for the phi
530 if(!arch_irn_consider_in_reg_alloc(env->arch, env->cls, irnb))
534 /* Unknowns are available everywhere */
535 if(get_irn_opcode(irnb) == iro_Unknown)
538 /* check if irnb is in a register at end of pred */
539 workset_foreach(wsp, irnp, iter2) {
544 /* irnb is not in memory at the end of pred, so we have to reload it */
545 DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
546 be_add_reload_on_edge(env->senv, irnb, blk, i);
549 /*epsilon statement :)*/;
554 void be_spill_belady(const be_chordal_env_t *chordal_env) {
555 be_spill_belady_spill_env(chordal_env, NULL);
558 void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t *spill_env) {
561 FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady");
562 //firm_dbg_set_mask(dbg, DBG_WSETS);
564 /* init belady env */
565 obstack_init(&env.ob);
566 env.cenv = chordal_env;
567 env.arch = chordal_env->birg->main_env->arch_env;
568 env.cls = chordal_env->cls;
569 env.n_regs = arch_count_non_ignore_regs(env.arch, env.cls);
570 env.ws = new_workset(&env, &env.ob);
571 env.uses = be_begin_uses(chordal_env->irg, chordal_env->lv, chordal_env->birg->main_env->arch_env, env.cls);
572 if(spill_env == NULL) {
573 env.senv = be_new_spill_env(chordal_env);
575 env.senv = spill_env;
577 DEBUG_ONLY(be_set_spill_env_dbg_module(env.senv, dbg);)
579 DBG((dbg, LEVEL_1, "running on register class: %s\n", env.cls->name));
581 be_clear_links(chordal_env->irg);
582 /* Decide which phi nodes will be spilled and place copies for them into the graph */
583 irg_block_walk_graph(chordal_env->irg, spill_phi_walker, NULL, &env);
584 /* Fix high register pressure with belady algorithm */
585 irg_block_walk_graph(chordal_env->irg, NULL, belady, &env);
586 /* belady was block-local, fix the global flow by adding reloads on the edges */
587 irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &env);
588 /* Insert spill/reload nodes into the graph and fix usages */
589 be_insert_spills_reloads(env.senv);
591 be_remove_dead_nodes_from_schedule(chordal_env->irg);
592 be_liveness_recompute(chordal_env->lv);
595 if(spill_env == NULL)
596 be_delete_spill_env(env.senv);
597 be_end_uses(env.uses);
598 obstack_free(&env.ob, NULL);