4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
17 #include "iredges_t.h"
21 #include "bespillbelady.h"
23 #include "besched_t.h"
/* Minimum of two values.
 * NOTE(review): classic double-evaluation macro -- both arguments are
 * evaluated twice, so avoid operands with side effects. */
28 #define MIN(a,b) (((a)<(b))?(a):(b))
/* Debug mask enabling every message category of this module. */
36 #define DEBUG_LVL (DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
/* Module-local debug handle, registered in be_spill_belady(). */
37 static firm_dbg_module_t *dbg = NULL;
/* Forward typedefs for the structs defined below. */
39 typedef struct _workset_t workset_t;
40 typedef struct _block_info_t block_info_t;
41 typedef struct _belady_env_t belady_env_t;
/* Tail fields of struct _workset_t (the struct header and any leading
 * members are in lines not visible in this chunk).
 * A workset is a fixed-capacity set of (value, next-use-distance) pairs;
 * capacity is bel->n_regs (see new_workset/workset_clone). */
45 int i; /**< used for iteration */
46 int len; /**< current length */
/* vals[1] is the old-style trailing-array idiom: the real capacity is
 * allocated past the end of the struct (n_regs entries in total). */
47 loc_t vals[1]; /**< inlined array of the values/distances in this working set */
/* Per-block data attached via set_irn_link() in decide():
 * the register workset at block entry and at block exit.
 * (Closing brace of the struct is in a line not visible in this chunk.) */
50 struct _block_info_t {
51 workset_t *ws_start, *ws_end;
/* Global environment threaded through all phases of the belady spiller.
 * NOTE(review): an obstack member (used as bel->ob in decide() and
 * be_spill_belady()) is declared in a line not visible in this chunk,
 * as is the closing brace. */
54 struct _belady_env_t {
56 const be_node_factory_t *factory;
57 const arch_env_t *arch;
58 const arch_register_class_t *cls;
/* NOTE(review): doc-comment below opens with a single '*', unlike the
 * '**<' style used elsewhere -- presumably a typo in the original. */
59 int n_regs; /** number of regs in this reg-class */
61 workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
62 be_uses_t *uses; /**< env for the next-use magic */
63 ir_node *instr; /**< current instruction */
64 unsigned instr_nr; /**< current instruction number (relative to block start) */
65 pset *used; /**< holds the values used (so far) in the current BB */
67 spill_env_t *senv; /* see bespill.h */
68 pset *reloads; /**< all reload nodes placed */
/* Allocates a fresh, empty workset on obstack @p ob with capacity
 * bel->n_regs.  The size computation adds n_regs-1 extra loc_t slots
 * to the one already inlined as vals[1].
 * NOTE(review): the declaration of `res`, the zeroing of len, and the
 * return statement are in lines not visible in this chunk. */
72 * Alloc a new workset on obstack @p ob with maximum size @p max
74 static INLINE workset_t *new_workset(struct obstack *ob, belady_env_t *bel) {
76 size_t size = sizeof(*res) + (bel->n_regs-1)*sizeof(res->vals[0]);
77 res = obstack_alloc(ob, size);
/* Deep-copies workset @p ws onto obstack @p ob (header plus the full
 * inlined vals array, sized by ws->bel->n_regs).
 * NOTE(review): declaration of `res` and the return are in lines not
 * visible in this chunk. */
83 static INLINE workset_t *workset_clone(struct obstack *ob, workset_t *ws) {
85 size_t size = sizeof(*res) + (ws->bel->n_regs-1)*sizeof(res->vals[0]);
86 res = obstack_alloc(ob, size);
87 memcpy(res, ws, size);
/* Inserts @p val into @p ws unless it is already present.
 * Values of the wrong register class are silently dropped.
 * Asserts that the workset is not already full. */
92 * Inserts the value @p val into the workset, iff it is not
93 * already contained. The workset must not be full.
95 static INLINE void workset_insert(workset_t *ws, ir_node *val) {
97 assert(ws->len < ws->bel->n_regs && "Workset already full!");
98 /* check for current regclass */
99 if (arch_get_irn_reg_class(ws->bel->arch, val, 0) != ws->bel->cls) {
100 DBG((dbg, 0, "Dropped %+F\n", val));
/* NOTE(review): the early return closing this branch is in a line not
 * visible in this chunk. */
104 /* check if val is already contained */
105 for(i=0; i<ws->len; ++i)
106 if (ws->vals[i].irn == val)
/* duplicate found -> the (hidden) body of this if returns early */
/* append at the end; next-use time is set later via workset_set_time() */
110 ws->vals[ws->len++].irn = val;
/* Inserts up to @p cnt values from @p vals into @p ws, skipping values of
 * the wrong register class and values already contained.  Caller must
 * guarantee enough free slots (asserted). */
114 * Inserts all values in array @p vals of length @p cnt
115 * into the workset. There must be enough space for the
118 static INLINE void workset_bulk_insert(workset_t *ws, int cnt, ir_node **vals) {
120 assert(ws->len + cnt <= ws->bel->n_regs && "Workset does not have enough room!");
122 for(o=0; o<cnt; ++o) {
123 ir_node *val = vals[o];
124 DBG((dbg, DBG_TRACE, "Bulk insert %+F\n", val));
125 /* check for current regclass */
126 if (arch_get_irn_reg_class(ws->bel->arch, val, 0) != ws->bel->cls) {
127 DBG((dbg, DBG_TRACE, "Wrong reg class\n"));
/* NOTE(review): the continue/goto skipping this value is in a line not
 * visible in this chunk. */
131 /* check if val is already contained */
132 for(i=0; i<ws->len; ++i)
133 if (ws->vals[i].irn == val) {
134 DBG((dbg, DBG_TRACE, "Already contained\n"));
/* duplicate -> skip (control transfer in hidden lines) */
139 ws->vals[ws->len++].irn = val;
140 DBG((dbg, DBG_TRACE, "Inserted\n"));
/* label target for the skip jumps above needs a statement after it */
143 /*epsilon statement :)*/;
/* Overwrites ws->vals with `count` entries from `locs` and sets ws->len.
 * NOTE(review): macro arguments `count` and `locs` are not fully
 * parenthesized in the expansion -- safe for the simple actual arguments
 * used in this file, but fragile in general. */
148 * Overwrites the current contents of @p ws with the
149 * locations given in @p locs
151 #define workset_bulk_fill(ws, count, locs) memcpy(&(ws)->vals[0], locs, ((ws)->len=count)*sizeof(locs[0]));
/* Empties the workset (capacity/storage untouched). */
154 * Removes all entries from this workset
156 #define workset_clear(ws) (ws)->len = 0;
/* Removes @p val from @p ws if present, by swapping the last element into
 * its slot (order is not preserved).  A hidden line presumably breaks or
 * returns after the swap, so at most one occurrence is removed. */
159 * Removes the value @p val from the workset if present.
161 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
163 for(i=0; i<ws->len; ++i)
164 if (ws->vals[i].irn == val) {
165 ws->vals[i] = ws->vals[--ws->len];
/* Linear membership test: nonzero iff @p val is in @p ws.
 * NOTE(review): the return statements are in lines not visible in this
 * chunk. */
170 static INLINE int workset_contains(const workset_t *ws, const ir_node *val) {
172 for(i=0; i<ws->len; ++i)
173 if (ws->vals[i].irn == val)
/* Iterates `v` over all values in `ws`, using the workset's own embedded
 * cursor ws->i -- hence NOT reentrant: never nest two workset_foreach
 * loops over the same workset. */
178 #define workset_foreach(ws, v) for(ws->i=0; \
179 v=(ws->i < ws->len) ? ws->vals[ws->i].irn : NULL, ws->i < ws->len; \
/* Accessors for the (value, time) pairs and sorting by loc_compare. */
182 #define workset_set_time(ws, i, t) (ws)->vals[i].time=t
183 #define workset_set_length(ws, length) (ws)->len = length
184 #define workset_get_length(ws) ((ws)->len)
185 #define workset_get_val(ws, i) ((ws)->vals[i].irn)
186 #define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
/* Builds the start workset for block @p blk: collects all Phi results of
 * the current register class and all live-in values, computes their
 * next-use distance from the first scheduled node, sorts by distance and
 * keeps the best n_regs of them.
 * NOTE(review): declarations of `ob`, `count`, `li` and the obstack init
 * plus the per-entry count updates are in lines not visible in this
 * chunk. */
190 * Collects all values live-in at block @p blk and all phi results in this block.
191 * Then it adds the best values (at most n_regs) to the ws.
193 static void build_start_set(belady_env_t *bel, ir_node *blk) {
194 workset_t *ws = bel->ws;
195 ir_node *irn, *first;
198 loc_t loc, *starters;
204 first = sched_first(blk);
/* Phis of the right class: candidates for the start workset */
206 sched_foreach(blk, irn)
207 if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, 0) == bel->cls) {
209 loc.time = be_get_next_use(bel->uses, first, 0, irn, 0);
210 DBG((dbg, DBG_START, " %+F next-use %d\n", loc.irn, loc.time));
211 obstack_grow(&ob, &loc, sizeof(loc));
/* live-in values of the right class: further candidates */
216 live_foreach(blk, li)
217 if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, 0) == bel->cls) {
218 loc.irn = (ir_node *)li->irn;
219 loc.time = be_get_next_use(bel->uses, first, 0, li->irn, 0);
220 DBG((dbg, DBG_START, " %+F next-use %d\n", loc.irn, loc.time));
221 obstack_grow(&ob, &loc, sizeof(loc));
224 starters = obstack_finish(&ob);
226 /* sort all values */
227 qsort(starters, count, sizeof(starters[0]), loc_compare);
229 /* copy the best ones to the ws */
230 count = MIN(count, ws->bel->n_regs);
231 workset_bulk_fill(ws, count, starters);
/* candidates were only needed temporarily */
233 obstack_free(&ob, NULL);
/* Core belady step: makes room in the current workset for the values in
 * @p new_vals, evicting the values with the largest next-use distance.
 * With @p is_usage set, missing values are used operands and a spill/
 * reload is requested via be_add_spill().
 * NOTE(review): `demand` is used as a counter but its zero-initialization
 * is in a line not visible in this chunk, as are the `val` declaration
 * and the else-branches of some if statements. */
237 * Performs the actions neccessary to grant the request that:
238 * - new_vals can be held in registers
239 * - as few as possible other values are disposed
240 * - the worst values get disposed
242 * @p is_usage indicates that the values in new_vals are used (not defined)
243 * In this case reloads must be performed
245 static void displace(belady_env_t *bel, workset_t *new_vals, int is_usage) {
247 int i, len, max_allowed, demand;
248 workset_t *ws = bel->ws;
249 ir_node **to_insert = alloca(bel->n_regs * sizeof(*to_insert));
252 * 1. Identify the number of needed slots and the values to reload
255 workset_foreach(new_vals, val) {
256 /* mark value as used */
258 pset_insert_ptr(bel->used, val);
260 if (!workset_contains(ws, val)) {
261 DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
262 to_insert[demand++] = val;
/* a used value not in registers must be reloaded -> request spill */
264 be_add_spill(bel->senv, val, bel->instr);
266 DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
268 DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
272 * 2. Make room for at least 'demand' slots
274 len = workset_get_length(ws);
275 max_allowed = bel->n_regs - demand;
277 /* Only make more free room if we do not have enough */
278 if (len > max_allowed) {
279 /* get current next-use distance */
280 for (i=0; i<ws->len; ++i)
281 workset_set_time(ws, i, be_get_next_use(bel->uses, bel->instr, bel->instr_nr, workset_get_val(ws, i), is_usage));
283 /* sort entries by increasing nextuse-distance*/
/* (the workset_sort(ws) call is in a line not visible in this chunk) */
286 /* Logic for not needed live-ins: If a value is disposed
287 before its first usage, remove it from start workset */
288 for (i=max_allowed; i<ws->len; ++i) {
289 ir_node *irn = ws->vals[i].irn;
290 if (!pset_find_ptr(bel->used, irn)) {
291 ir_node *curr_bb = get_nodes_block(bel->instr);
292 workset_t *ws_start = ((block_info_t *) get_irn_link(curr_bb))->ws_start;
293 workset_remove(ws_start, irn);
295 DBG((dbg, DBG_DECIDE, " dispose %+F dumb\n", irn));
297 DBG((dbg, DBG_DECIDE, " dispose %+F\n", irn));
300 /* kill the last 'demand' entries in the array */
301 workset_set_length(ws, max_allowed);
305 * 3. Insert the new values into the workset
307 workset_bulk_insert(bel->ws, demand, to_insert);
/* Block walker callback: simulates register contents through block
 * @p blk from start to end, calling displace() once for the operands and
 * once for the results of every real instruction.  Stores the start and
 * end worksets in a freshly allocated block_info_t linked to the block.
 * NOTE(review): declarations of `irn`, `iii`, `new_vals`, `proj`, the
 * instr/instr_nr updates and the del_pset(bel->used) teardown are in
 * lines not visible in this chunk. */
311 * For the given block @p blk, decide for each values
312 * whether it is used from a register or is reloaded
315 static void decide(ir_node *blk, void *env) {
316 belady_env_t *bel = env;
319 block_info_t *blk_info = obstack_alloc(&bel->ob, sizeof(*blk_info));
320 set_irn_link(blk, blk_info);
322 DBG((dbg, DBG_DECIDE, "\n"));
323 DBG((dbg, DBG_DECIDE, "Decide for %+F\n", blk));
325 /* build starting-workset for this block */
326 build_start_set(bel, blk);
/* snapshot: displace() may later shrink ws_start for unused live-ins */
327 blk_info->ws_start = workset_clone(&bel->ob, bel->ws);
328 DBG((dbg, DBG_WSETS, "Initial start workset for %+F:\n", blk));
329 workset_foreach(blk_info->ws_start, irn)
330 DBG((dbg, DBG_WSETS, " %+F\n", irn));
332 /* process the block from start to end */
333 DBG((dbg, DBG_WSETS, "Processing...\n"));
334 bel->used = pset_new_ptr(32);
336 new_vals = new_workset(&bel->ob, bel);
337 sched_foreach(blk, irn) {
339 DBG((dbg, DBG_WSETS, "Current workset for %+F:\n", blk));
340 workset_foreach(bel->ws, iii)
341 DBG((dbg, DBG_WSETS, " %+F\n", iii));
342 assert(workset_get_length(bel->ws) <= bel->n_regs && "Too much values in workset!");
344 DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
346 /* projs are handled with the tuple value.
347 * Phis are no real instr (see insert_starters)
348 * instr_nr does not increase */
349 if (is_Proj(irn) || is_Phi(irn))
/* (the `continue` is in a line not visible in this chunk) */
352 /* set instruction in the workset */
355 /* allocate all values _used_ by this instruction */
356 workset_clear(new_vals);
/* get_irn_in() includes the block at index 0; the bulk insert drops it
 * via the register-class check */
357 workset_bulk_insert(new_vals, get_irn_arity(irn)+1, get_irn_in(irn));
358 displace(bel, new_vals, 1);
360 /* allocate all values _defined_ by this instruction */
361 workset_clear(new_vals);
362 if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
364 for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
365 workset_insert(new_vals, proj);
367 workset_insert(new_vals, irn);
369 displace(bel, new_vals, 0);
375 /* Remember end-workset for this block */
376 blk_info->ws_end = workset_clone(&bel->ob, bel->ws);
377 DBG((dbg, DBG_WSETS, "Start workset for %+F:\n", blk));
378 workset_foreach(blk_info->ws_start, irn)
379 DBG((dbg, DBG_WSETS, " %+F\n", irn));
380 DBG((dbg, DBG_WSETS, "End workset for %+F:\n", blk));
381 workset_foreach(blk_info->ws_end, irn)
382 DBG((dbg, DBG_WSETS, " %+F\n", irn));
/* Block walker callback (second pass): for every predecessor edge,
 * compares the predecessor's end workset with this block's start
 * workset; any start value not in a register at the end of the
 * predecessor gets a reload placed on that control-flow edge.
 * NOTE(review): declarations of `i` and `max`, and the match/break logic
 * between the inner workset_foreach and the spill-on-edge call, are in
 * lines not visible in this chunk. */
386 * 'decide' is block-local and makes assumtions
387 * about the set of live-ins. Thus we must adapt the
388 * live-outs to the live-ins at each block-border.
390 static void fix_block_borders(ir_node *blk, void *env) {
391 belady_env_t *bel = env;
394 DBG((dbg, DBG_FIX, "\n"));
395 DBG((dbg, DBG_FIX, "Fixing %+F\n", blk));
397 workset_t *wsb = ((block_info_t *)get_irn_link(blk))->ws_start;
399 /* process all pred blocks */
400 for (i=0, max=get_irn_arity(blk); i<max; ++i) {
401 ir_node *irnb, *irnp, *pred = get_Block_cfgpred_block(blk, i);
402 workset_t *wsp = ((block_info_t *)get_irn_link(pred))->ws_end;
404 DBG((dbg, DBG_FIX, " Pred %+F\n", pred));
406 workset_foreach(wsb, irnb) {
407 /* if irnb is a phi of the current block we reload
408 * the corresponding argument, else irnb itself */
409 if(is_Phi(irnb) && blk == get_nodes_block(irnb))
410 irnb = get_irn_n(irnb, i);
412 /* check if irnb is in a register at end of pred */
413 workset_foreach(wsp, irnp)
417 /* irnb is in memory at the end of pred, so we have to reload it */
418 be_add_spill_on_edge(bel->senv, irnb, blk, i);
/* label target for the 'found in wsp' jump needs a statement after it */
421 /*epsilon statement :)*/;
/* Graph walker callback: drops @p irn from bel->reloads if present, so
 * that after the walk only the *unused* reload nodes remain in the set
 * (those are deleted by the caller, be_spill_belady). */
427 * Removes all used reloads from bel->reloads.
428 * The remaining nodes in bel->reloads will be removed from the graph.
430 static void rescue_used_reloads(ir_node *irn, void *env) {
431 pset *rlds = ((belady_env_t *)env)->reloads;
432 if (pset_find_ptr(rlds, irn)) {
433 DBG((dbg, DBG_SPILL, "Removing %+F in %+F\n", irn, get_nodes_block(irn)));
434 pset_remove_ptr(rlds, irn);
/* Predicate for be_insert_spills_reloads(): a Phi is a "memory phi" iff
 * its value is NOT in the start workset of its block, i.e. it does not
 * live in a register at block entry. */
438 static int is_mem_phi(const ir_node *irn, void *data) {
439 ir_node *blk = get_nodes_block(irn);
440 workset_t *sws = ((block_info_t *)get_irn_link(blk))->ws_start;
441 return !workset_contains(sws, irn);
/* Entry point: runs belady spilling for register class @p cls.
 * Phases: (1) per-block decide() simulation, (2) fix_block_borders() to
 * reconcile worksets across edges, (3) materialize spills/reloads, then
 * (4) delete reloads that turned out to be unused.
 * NOTE(review): the declaration of `irn`, the bel->cls assignment, and
 * the loop body removing each unused reload from the schedule are in
 * lines not visible in this chunk. */
444 void be_spill_belady(const be_main_session_env_t *session, const arch_register_class_t *cls) {
447 dbg = firm_dbg_register("ir.be.spillbelady");
448 firm_dbg_set_mask(dbg, DEBUG_LVL);
450 /* init belady env */
451 belady_env_t *bel = alloca(sizeof(*bel));
452 obstack_init(&bel->ob);
453 bel->factory = session->main_env->node_factory;
454 bel->arch = session->main_env->arch_env;
456 bel->n_regs = arch_register_class_n_regs(cls);
457 bel->ws = new_workset(&bel->ob, bel);
458 bel->uses = be_begin_uses(session->irg, session->main_env->arch_env, cls);
459 bel->senv = be_new_spill_env(dbg, session, cls);
460 bel->reloads = pset_new_ptr_default();
/* phase 1+2: decide per block, then fix the block borders */
463 irg_block_walk_graph(session->irg, decide, NULL, bel);
464 irg_block_walk_graph(session->irg, fix_block_borders, NULL, bel);
465 be_insert_spills_reloads(bel->senv, bel->reloads, is_mem_phi, NULL);
467 /* find all unused reloads and remove them from the schedule */
468 irg_walk_graph(session->irg, rescue_used_reloads, NULL, bel);
469 for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads))
/* cleanup: frees all obstack-allocated worksets and block infos */
473 del_pset(bel->reloads);
474 be_delete_spill_env(bel->senv);
475 be_end_uses(bel->uses);
476 obstack_free(&bel->ob, NULL);