4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
17 #include "iredges_t.h"
21 #include "bespillbelady.h"
23 #include "besched_t.h"
/* Minimum of two values. Classic double-evaluation macro: both arguments
 * are evaluated twice, so never pass expressions with side effects. */
28 #define MIN(a,b) (((a)<(b))?(a):(b))
/* Default debug mask: enables all belady-related debug channels. */
35 #define DEBUG_LVL (DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
/* Debug module handle; registered in be_spill_belady(). */
36 static firm_dbg_module_t *dbg = NULL;
/* Forward typedefs for all data structures of this spiller. */
39 typedef struct _workset_t workset_t;
40 typedef struct _block_info_t block_info_t;
41 typedef struct _reloader_t reloader_t;
42 typedef struct _spill_info_t spill_info_t;
43 typedef struct _spill_ctx_t spill_ctx_t;
44 typedef struct _belady_env_t belady_env_t;
/* Fields of workset_t: a fixed-capacity working set of (value, next-use
 * distance) pairs; capacity is the register count of the current class.
 * NOTE(review): vals[1] is the pre-C99 "struct hack"; a C99 flexible
 * array member (vals[]) would be the modern equivalent. */
48 int i; /**< used for iteration */
49 int len; /**< current length */
50 loc_t vals[1]; /**< inlined array of the values/distances in this working set */
/* Per-block data attached via the irn link field: the worksets valid at
 * block entry (ws_start) and block exit (ws_end). */
53 struct _block_info_t {
54 workset_t *ws_start, *ws_end;
/* Bookkeeping for one spilled value: the node itself plus the linked
 * list of all places where it must be reloaded. */
62 struct _spill_info_t {
63 ir_node *spilled_node;
64 reloader_t *reloaders;
/* Fields of spill_ctx_t: identifies one spill by the (spilled, user)
 * pair so the same value is spilled only once per context. */
68 ir_node *spilled; /**< The spilled node. */
69 ir_node *user; /**< The node this spill is for. */
70 ir_node *spill; /**< The spill itself. */
/* Global environment for one run of the Belady spiller over one
 * register class. Allocated on the stack in be_spill_belady(). */
73 struct _belady_env_t {
75 const be_main_session_env_t *session;
76 const be_node_factory_t *factory;
77 const arch_env_t *arch;
78 const arch_register_class_t *cls;
79 int n_regs; /**< number of regs in this reg-class */
81 workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
82 be_uses_t *uses; /**< env for the next-use magic */
83 ir_node *instr; /**< current instruction */
84 unsigned instr_nr; /**< current instruction number (relative to block start) */
85 pset *used; /**< holds the values used (so far) in the current BB */
86 set *spills; /**< all spill_info_t's, which must be placed */
88 set *spill_ctxs; /**< all spill contexts (multiple spilling and stop recursion) */
89 pset *mem_phis; /**< all phis which must be converted to memory phis */
90 pset *reloads; /**< all reload nodes placed */
/**
 * set-compare callback for bel->spills: two entries are equal iff they
 * refer to the same spilled node (pointer identity). Returns 0 on
 * equality, as the firm `set` API expects; @p size is unused.
 */
93 static int set_cmp_spillinfo(const void *x, const void *y, size_t size) {
94 const spill_info_t *xx = x;
95 const spill_info_t *yy = y;
96 return ! (xx->spilled_node == yy->spilled_node);
/**
 * set-compare callback for bel->spill_ctxs: entries match iff both the
 * user and the spilled node coincide. Returns 0 on equality; @p n unused.
 */
99 static int set_cmp_spillctx(const void *a, const void *b, size_t n) {
100 const spill_ctx_t *p = a;
101 const spill_ctx_t *q = b;
102 return !(p->user == q->user && p->spilled == q->spilled);
106 * Alloc a new workset on obstack @p ob with maximum size @p max
108 static INLINE workset_t *new_workset(struct obstack *ob, belady_env_t *bel) {
/* Size accounts for the one vals[] slot already inside workset_t,
 * hence the (n_regs - 1) extra elements. Zero-filled on allocation. */
110 size_t size = sizeof(*res) + (bel->n_regs-1)*sizeof(res->vals[0]);
111 res = obstack_alloc(ob, size);
112 memset(res, 0, size);
/**
 * Deep-copy the workset @p ws onto obstack @p ob. The clone has the
 * capacity of ws->bel->n_regs, same as the original.
 */
117 static INLINE workset_t *workset_clone(struct obstack *ob, workset_t *ws) {
119 size_t size = sizeof(*res) + (ws->bel->n_regs-1)*sizeof(res->vals[0]);
120 res = obstack_alloc(ob, size);
121 memcpy(res, ws, size);
126 * Inserts the value @p val into the workset, iff it is not
127 * already contained. The workset must not be full.
129 static INLINE void workset_insert(workset_t *ws, ir_node *val) {
131 assert(ws->len < ws->bel->n_regs && "Workset already full!");
132 /* check for current regclass; values of foreign classes are ignored */
133 if (arch_get_irn_reg_class(ws->bel->arch, val, 0) != ws->bel->cls) {
134 DBG((dbg, 0, "Dropped %+F\n", val));
138 /* check if val is already contained (linear scan; workset is small) */
139 for(i=0; i<ws->len; ++i)
140 if (ws->vals[i].irn == val)
/* append at the end; next-use time is set later via workset_set_time */
144 ws->vals[ws->len++].irn = val;
148 * Inserts all values in array @p vals of length @p cnt
149 * into the workset. There must be enough space for the
152 static INLINE void workset_bulk_insert(workset_t *ws, int cnt, ir_node **vals) {
154 assert(ws->len + cnt <= ws->bel->n_regs && "Workset does not have enough room!");
156 for(o=0; o<cnt; ++o) {
157 ir_node *val = vals[o];
158 DBG((dbg, DBG_TRACE, "Bulk insert %+F\n", val));
159 /* skip values not belonging to the current regclass */
160 if (arch_get_irn_reg_class(ws->bel->arch, val, 0) != ws->bel->cls) {
161 DBG((dbg, DBG_TRACE, "Wrong reg class\n"));
165 /* skip duplicates: check if val is already contained */
166 for(i=0; i<ws->len; ++i)
167 if (ws->vals[i].irn == val) {
168 DBG((dbg, DBG_TRACE, "Already contained\n"));
173 ws->vals[ws->len++].irn = val;
174 DBG((dbg, DBG_TRACE, "Inserted\n"));
/* label target of the continue-gotos above needs a statement */
177 /*epsilon statement :)*/;
182 * Overwrites the current contents of @p ws with the
183 * locations given in @p locs
/* NOTE(review): macro evaluates `count` once but `ws` twice; fine for
 * the simple arguments used in this file. */
185 #define workset_bulk_fill(ws, count, locs) memcpy(&(ws)->vals[0], locs, ((ws)->len=count)*sizeof(locs[0]));
188 * Removes all entries from this workset
190 #define workset_clear(ws) (ws)->len = 0;
193 * Removes the value @p val from the workset if present.
195 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
197 for(i=0; i<ws->len; ++i)
198 if (ws->vals[i].irn == val) {
/* O(1) unordered removal: move last element into the freed slot */
199 ws->vals[i] = ws->vals[--ws->len];
/**
 * @return non-zero iff @p val is currently in the workset @p ws.
 */
204 static INLINE int workset_contains(const workset_t *ws, ir_node *val) {
206 for(i=0; i<ws->len; ++i)
207 if (ws->vals[i].irn == val)
/* Iterate over all values in @p ws, binding each to @p v. Uses the
 * embedded iteration counter ws->i, so nested iteration over the SAME
 * workset is not possible. */
212 #define workset_foreach(ws, v) for(ws->i=0; \
213 v=(ws->i < ws->len) ? ws->vals[ws->i].irn : NULL, ws->i < ws->len; \
/* small accessors for the (value, next-use-time) entries */
216 #define workset_set_time(ws, i, t) (ws)->vals[i].time=t
217 #define workset_set_length(ws, length) (ws)->len = length
218 #define workset_get_length(ws) ((ws)->len)
219 #define workset_get_val(ws, i) ((ws)->vals[i].irn)
/* sort entries by increasing next-use distance (loc_compare) */
220 #define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
224 * Collects all values live-in at block @p blk and all phi results in this block.
225 * Then it adds the best values (at most n_regs) to the ws.
227 static void build_start_set(belady_env_t *bel, ir_node *blk) {
228 workset_t *ws = bel->ws;
229 ir_node *irn, *first;
232 loc_t loc, *starters;
/* reference point for next-use distances: first scheduled node */
238 first = sched_first(blk);
/* collect phi results of the current register class as candidates */
240 sched_foreach(blk, irn)
241 if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, 0) == bel->cls) {
243 loc.time = be_get_next_use(bel->uses, first, 0, irn);
244 obstack_grow(&ob, &loc, sizeof(loc));
/* collect live-ins of the current register class as candidates */
249 live_foreach(blk, li)
250 if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, 0) == bel->cls) {
251 loc.irn = (ir_node *)li->irn;
252 loc.time = be_get_next_use(bel->uses, first, 0, li->irn);
253 obstack_grow(&ob, &loc, sizeof(loc));
256 starters = obstack_finish(&ob);
258 /* sort all values by increasing next-use distance */
259 qsort(starters, count, sizeof(starters[0]), loc_compare);
261 /* copy the best (nearest-use) ones to the ws, at most n_regs */
262 count = MIN(count, ws->bel->n_regs);
263 workset_bulk_fill(ws, count, starters);
265 obstack_free(&ob, NULL);
269 * Performs the actions necessary to grant the request that:
270 * - new_vals can be held in registers
271 * - as few as possible other values are disposed
272 * - the worst values get disposed
274 * @p is_usage indicates that the values in new_vals are used (not defined)
275 * In this case reloads must be performed
277 static void displace(belady_env_t *bel, workset_t *new_vals, int is_usage) {
279 int i, len, max_allowed, demand;
280 workset_t *ws = bel->ws;
281 ir_node **to_insert = alloca(bel->n_regs * sizeof(*to_insert));
284 * 1. Identify the number of needed slots and the values to reload
287 workset_foreach(new_vals, val) {
288 /* mark value as used */
290 pset_insert_ptr(bel->used, val);
/* value not in a register: it must be brought in (and, if this is a
 * usage, a reload must be recorded for the current instruction) */
292 if (!workset_contains(ws, val)) {
293 DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
294 to_insert[demand++] = val;
297 spill_info_t si, *found;
300 /* find the spill info or create it */
301 si.spilled_node = val;
303 found = set_insert(bel->spills, &si, sizeof(si), HASH_PTR(si.spilled_node));
305 /* insert the reloader into the linked list */
306 rld = obstack_alloc(&bel->ob, sizeof(*rld));
307 rld->reloader = bel->instr;
308 rld->next = found->reloaders;
309 found->reloaders = rld;
312 DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
314 DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
318 * 2. Make room for at least 'demand' slots
320 len = workset_get_length(ws);
321 max_allowed = bel->n_regs - demand;
323 /* Only make more free room if we do not have enough */
324 if (len > max_allowed) {
325 /* get current next-use distance */
326 for (i=0; i<ws->len; ++i)
327 workset_set_time(ws, i, be_get_next_use(bel->uses, bel->instr, bel->instr_nr, workset_get_val(ws, i)));
329 /* sort entries by increasing nextuse-distance*/
332 /* Logic for not needed live-ins: If a value is disposed
333 before its first usage, remove it from start workset */
334 for (i=max_allowed; i<ws->len; ++i) {
335 ir_node *irn = ws->vals[i].irn;
336 if (!pset_find_ptr(bel->used, irn)) {
337 ir_node *curr_bb = get_nodes_block(bel->instr);
338 workset_t *ws_start = ((block_info_t *) get_irn_link(curr_bb))->ws_start;
339 workset_remove(ws_start, irn);
341 DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
343 DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
346 /* kill the last 'demand' entries in the array */
347 workset_set_length(ws, max_allowed);
351 * 3. Insert the new values into the workset
353 workset_bulk_insert(bel->ws, demand, to_insert);
357 * For the given block @p blk, decide for each values
358 * whether it is used from a register or is reloaded
/* Block-walker callback: computes ws_start/ws_end for @p blk and
 * records all reloads needed inside the block via displace(). */
361 static void decide(ir_node *blk, void *env) {
362 belady_env_t *bel = env;
/* attach per-block info to the block via its link field */
365 block_info_t *blk_info = obstack_alloc(&bel->ob, sizeof(*blk_info));
366 set_irn_link(blk, blk_info);
368 DBG((dbg, DBG_DECIDE, "\n"));
369 DBG((dbg, DBG_DECIDE, "Decide for %+F\n", blk));
371 /* build starting-workset for this block */
372 build_start_set(bel, blk);
373 blk_info->ws_start = workset_clone(&bel->ob, bel->ws);
374 DBG((dbg, DBG_WSETS, "Initial start workset for %+F:\n", blk));
375 workset_foreach(blk_info->ws_start, irn)
376 DBG((dbg, DBG_WSETS, " %+F\n", irn));
378 /* process the block from start to end */
379 DBG((dbg, DBG_WSETS, "Processing...\n"));
380 bel->used = pset_new_ptr(32);
382 new_vals = new_workset(&bel->ob, bel);
383 sched_foreach(blk, irn) {
384 DBG((dbg, DBG_DECIDE, " %+F\n", irn));
386 /* projs are handled with the tuple value.
387 * Phis are no real instr (see insert_starters)
388 * instr_nr does not increase */
389 if (is_Proj(irn) || is_Phi(irn))
392 /* set instruction in the workset */
395 /* allocate all values _used_ by this instruction */
396 workset_clear(new_vals);
/* +1 / get_irn_in: the raw in-array includes the block at index 0 */
397 workset_bulk_insert(new_vals, get_irn_arity(irn)+1, get_irn_in(irn));
398 displace(bel, new_vals, 1);
400 /* allocate all values _defined_ by this instruction */
401 workset_clear(new_vals);
402 if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
404 for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
405 workset_insert(new_vals, proj);
407 workset_insert(new_vals, irn);
409 displace(bel, new_vals, 0);
415 /* Remember end-workset for this block */
416 blk_info->ws_end = workset_clone(&bel->ob, bel->ws);
417 DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", blk));
418 workset_foreach(blk_info->ws_start, irn)
419 DBG((dbg, DBG_WSETS, " %+F\n", irn));
420 DBG((dbg, DBG_WSETS, "End workset for %+F:\n", blk));
421 workset_foreach(blk_info->ws_end, irn)
422 DBG((dbg, DBG_WSETS, " %+F\n", irn));
426 * 'decide' is block-local and makes assumptions
427 * about the set of live-ins. Thus we must adapt the
428 * live-outs to the live-ins at each block-border.
/* Block-walker callback: for every value in this block's start workset
 * that is NOT in a predecessor's end workset, record a reload. */
430 static void fix_block_borders(ir_node *blk, void *env) {
431 belady_env_t *bel = env;
434 DBG((dbg, DBG_FIX, "\n"));
435 DBG((dbg, DBG_FIX, "Fixing %+F\n", blk));
437 workset_t *wsb = ((block_info_t *)get_irn_link(blk))->ws_start;
439 /* process all pred blocks */
440 for (i=0, max=get_irn_arity(blk); i<max; ++i) {
441 ir_node *irnb, *irnp, *pred = get_Block_cfgpred_block(blk, i);
442 workset_t *wsp = ((block_info_t *)get_irn_link(pred))->ws_end;
444 DBG((dbg, DBG_FIX, " Pred %+F\n", pred));
446 workset_foreach(wsb, irnb) {
447 spill_info_t si, *found;
450 /* if irnb is a phi of the current block we reload
451 * the corresponding argument, else irnb itself */
452 if(is_Phi(irnb) && blk == get_nodes_block(irnb))
453 irnb = get_irn_n(irnb, i);
455 /* check if irnb is in a register at end of pred */
456 workset_foreach(wsp, irnp)
460 /* irnb is in memory at the end of pred, so we have to reload it */
462 /* find the spill info or create it */
463 si.spilled_node = irnb;
465 found = set_insert(bel->spills, &si, sizeof(si), HASH_PTR(si.spilled_node));
467 /* insert the reloader into the linked list.
468 * the schedule position depends on the cf-situation of the block:
469 * single predecessor -> reload after the phis at the start of blk,
470 * multiple predecessors -> reload at the end of the pred block */
469 rld = obstack_alloc(&bel->ob, sizeof(*rld));
470 rld->reloader = (max==1) ? sched_skip(sched_first(blk), 1, sched_skip_phi_predicator, NULL) : pred;
471 rld->next = found->reloaders;
472 found->reloaders = rld;
474 DBG((dbg, DBG_FIX, " reload %+F before %+F\n", irnb, rld->reloader));
/* label target of the continue-goto above needs a statement */
477 /*epsilon statement :)*/;
/**
 * Look up (or lazily create) the spill context for the pair
 * (@p to_spill, @p ctx_irn) in the set @p sc.
 */
482 static INLINE spill_ctx_t *get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
485 templ.spilled = to_spill;
486 templ.user = ctx_irn;
/* set_insert returns the existing entry if one matches (see set_cmp_spillctx) */
489 return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
/**
 * Create (or reuse, via the spill context) a spill node for @p irn
 * in the context of @p ctx_irn.
 */
492 static INLINE ir_node *spill_irn(belady_env_t *bel, ir_node *irn, ir_node *ctx_irn) {
494 DBG((dbg, DBG_SPILL, "spill_irn %+F\n", irn));
496 ctx = get_spill_ctx(bel->spill_ctxs, irn, ctx_irn);
498 ctx->spill = be_spill(bel->factory, bel->arch, irn);
504 * If the first usage of a phi result would be out of memory
505 * there is no sense in allocating a register for it.
506 * Thus we spill it and all its operands to the same spill slot.
507 * Therefore the phi/dataB becomes a phi/Memory
509 static ir_node *spill_phi(belady_env_t *bel, ir_node *phi, ir_node *ctx_irn) {
510 int i, n = get_irn_arity(phi);
511 ir_node **ins, *bl = get_nodes_block(phi);
512 ir_graph *irg = get_irn_irg(bl);
516 DBG((dbg, DBG_SPILL, "spill_phi %+F\n", phi));
518 /* search an existing spill for this context */
519 ctx = get_spill_ctx(bel->spill_ctxs, phi, ctx_irn);
521 /* if not found spill the phi */
523 /* build a new PhiM with dummy in-array */
/* NOTE(review): ins is malloc'd and not visibly freed here — confirm
 * ownership/free elsewhere, otherwise this leaks per spilled phi. */
524 ins = malloc(n * sizeof(ins[0]));
526 ins[i] = new_r_Unknown(irg, mode_M);
527 ctx->spill = new_r_Phi(bel->session->irg, bl, n, ins, mode_M);
530 /* re-wire the phiM: replace each dummy in with the spill of the
 * corresponding phi argument (recursing into memory-phi arguments) */
532 ir_node *arg = get_irn_n(phi, i);
535 if(is_Phi(arg) && pset_find_ptr(bel->mem_phis, arg))
536 sub_res = spill_phi(bel, arg, ctx_irn);
538 sub_res = spill_irn(bel, arg, ctx_irn);
540 set_irn_n(ctx->spill, i, sub_res);
/**
 * Spill @p to_spill: dispatch to spill_phi() for phis marked as
 * memory-phis, otherwise to the plain spill_irn().
 */
546 static ir_node *spill_node(belady_env_t *bel, ir_node *to_spill) {
548 if (pset_find_ptr(bel->mem_phis, to_spill))
549 res = spill_phi(bel, to_spill, to_spill);
551 res = spill_irn(bel, to_spill, to_spill);
/**
 * Materialize all recorded spills and reloads in the graph:
 * 1. classify spilled phis whose result is not in their block's start
 *    workset as memory-phis,
 * 2. create spill/reload nodes and schedule the reloads,
 * 3. wire reloads via SSA reconstruction and kill the memory-phis.
 */
556 static void insert_spills_reloads(ir_graph *irg, belady_env_t *bel) {
562 /* get all special spilled phis */
563 for(si = set_first(bel->spills); si; si = set_next(bel->spills)) {
564 irn = si->spilled_node;
566 ir_node *blk = get_nodes_block(irn);
567 workset_t *sws = ((block_info_t *)get_irn_link(blk))->ws_start;
568 if (!workset_contains(sws, irn))
569 pset_insert_ptr(bel->mem_phis, irn);
573 /* process each spilled node */
574 for(si = set_first(bel->spills); si; si = set_next(bel->spills)) {
578 ir_mode *mode = get_irn_mode(si->spilled_node);
580 /* go through all reloads for this spill */
581 for(rld = si->reloaders; rld; rld = rld->next) {
582 /* the spill for this reloader */
583 ir_node *spill = spill_node(bel, si->spilled_node);
/* reloader may be a block (reload at block end) or an instruction */
586 ir_node *bl = is_Block(rld->reloader) ? rld->reloader : get_nodes_block(rld->reloader);
587 ir_node *reload = new_Reload(bel->factory, bel->cls, irg, bl, mode, spill);
588 pset_insert_ptr(bel->reloads, reload);
590 /* remember the reload */
591 obstack_ptr_grow(&ob, reload);
592 sched_add_before(rld->reloader, reload);
596 assert(n_reloads > 0);
597 reloads = obstack_finish(&ob);
/* SSA repair: make each use see the nearest dominating spill/reload */
598 be_introduce_copies_ignore(bel->session->dom_front, si->spilled_node, n_reloads, reloads, bel->mem_phis);
599 obstack_free(&ob, reloads);
602 obstack_free(&ob, NULL);
604 /* remove all special phis from the irg and the schedule */
605 for(irn = pset_first(bel->mem_phis); irn; irn = pset_next(bel->mem_phis)) {
607 for(i = 0, n = get_irn_arity(irn); i < n; ++i)
608 set_irn_n(irn, i, new_r_Bad(irg));
614 * Removes all used reloads from bel->reloads.
615 * The remaining nodes in bel->reloads will be removed from the graph.
/* irg-walker callback: any reload reached by the walk is "used", so
 * drop it from the kill set. */
617 static void rescue_used_reloads(ir_node *irn, void *env) {
618 pset *rlds = ((belady_env_t *)env)->reloads;
619 if (pset_find_ptr(rlds, irn))
620 pset_remove_ptr(rlds, irn);
/**
 * Entry point: run the Belady spilling heuristic on all blocks of the
 * session's graph for the register class @p cls, then place all spill
 * and reload nodes and clean up unused reloads.
 */
623 void be_spill_belady(const be_main_session_env_t *session, const arch_register_class_t *cls) {
626 dbg = firm_dbg_register("ir.be.spillbelady");
627 firm_dbg_set_mask(dbg, DEBUG_LVL);
629 /* init belady env */
630 belady_env_t *bel = alloca(sizeof(*bel));
631 obstack_init(&bel->ob);
632 bel->session = session;
633 bel->factory = session->main_env->node_factory;
634 bel->arch = session->main_env->arch_env;
636 bel->n_regs = arch_register_class_n_regs(cls);
637 bel->ws = new_workset(&bel->ob, bel);
638 bel->uses = be_begin_uses(session->irg, session->main_env->arch_env, cls);
639 bel->spills = new_set(set_cmp_spillinfo, 32);
640 bel->spill_ctxs = new_set(set_cmp_spillctx, 32);
641 bel->mem_phis = pset_new_ptr_default();
642 bel->reloads = pset_new_ptr_default();
/* phase 1+2: per-block decisions, then patch the block borders,
 * then materialize spills/reloads in the graph */
645 irg_block_walk_graph(session->irg, decide, NULL, bel);
646 irg_block_walk_graph(session->irg, fix_block_borders, NULL, bel);
647 insert_spills_reloads(session->irg, bel);
649 /* find all unused reloads and remove them from the schedule */
650 irg_walk_graph(session->irg, rescue_used_reloads, NULL, bel);
651 for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads))
/* cleanup: release all sets and the obstack */
655 del_pset(bel->reloads);
656 del_pset(bel->mem_phis);
657 del_set(bel->spill_ctxs);
658 del_set(bel->spills);
659 be_end_uses(bel->uses);
660 obstack_free(&bel->ob, NULL);